// riplog-view/backend/database.rs
extern crate rusqlite;
extern crate walkdir;
use chrono::prelude::*;
use rusqlite::{Connection, OpenFlags, Result as SQLResult, NO_PARAMS};
use std::collections::HashMap;
use walkdir::{DirEntry, WalkDir};
/// Used for deduplication
/// Used for deduplication
///
/// Identity key for a message: two rows with the same timestamp, channel
/// name and author are treated as the same message when merged into a
/// `HashMap<MessageInfo, DBMessage>`.
#[derive(Hash, Eq, PartialEq, Debug)]
struct MessageInfo {
    // Message timestamp (seconds resolution, see `to_unixtime`).
    time: DateTime<Utc>,
    // Display name of the channel ("#name" for normal channels).
    channel_name: String,
    // Author's short username.
    username: String,
}
/// Intermediate per-workspace accumulator used while consolidating several
/// database files into one `DBLog` (see `consolidate_dbs`).
#[derive(Debug)]
struct HashDB {
    // Workspace metadata; its `messages` vector stays empty here — the real
    // messages live in `messagemap` until consolidation finishes.
    info: DBLog,
    // Messages keyed by (time, channel, user) so duplicates collapse.
    messagemap: HashMap<MessageInfo, DBMessage>,
}
/// A single chat message pulled from a ripcord database.
#[derive(Debug, Clone)]
pub struct DBMessage {
    /// Message timestamp (UTC, seconds resolution).
    pub time: DateTime<Utc>,
    /// Message body text.
    pub content: String,
    /// Author's short username ("-" when unknown).
    pub username: String,
    /// Author's real/display name ("-" when unknown).
    pub user_realname: String,
    /// "#name" for normal channels, the peer's username for DMs.
    pub channel_name: String,
}
/// All messages loaded for one workspace (team).
#[derive(Debug, Clone)]
pub struct DBLog {
    /// Workspace name (from the `team` table).
    pub name: String,
    /// Workspace icon URL (from `team.icon_url`).
    pub icon: String,
    /// Messages; sorted by time once consolidated via `consolidate_dbs`.
    pub messages: Vec<DBMessage>,
}
/// Converts a ripcord timestamp into a UTC `DateTime`.
///
/// The seconds live in the upper 32 bits; the lower 32 bits (presumably a
/// sub-second fraction — TODO confirm against ripcord's schema) are dropped.
fn to_unixtime(ts: i64) -> DateTime<Utc> {
    let seconds = ts >> 32;
    Utc.timestamp(seconds, 0)
}
/// Pulls all the data needed from the database into memory
/// Pulls all the data needed from the database into memory
///
/// Each message row is joined against its author and its channel. A message
/// belongs either to a normal channel (rendered as "#name") or to a direct
/// conversation (rendered as the peer user's name); "<unknown>" is used when
/// neither join matched.
fn load_db(conn: &Connection) -> SQLResult<DBLog> {
    let mut statement = conn.prepare(
        "SELECT
    message.ts,
    message.content,
    coalesce(user.name, \"-\"),
    coalesce(user.real_name, \"-\"),
    channel_normal.name as normal_ch_name,
    coalesce(user2.name, \"-\") as user_ch_name
    FROM message
    LEFT JOIN user ON message.user_id == user.id
    LEFT JOIN channel_normal ON message.channel_id == channel_normal.id
    LEFT JOIN channel_direct ON message.channel_id == channel_direct.id
    LEFT JOIN user AS user2 ON channel_direct.user_id == user2.id",
    )?;
    let results = statement.query_map(NO_PARAMS, |row| {
        // Column 4 is the normal channel's name, column 5 the DM peer's name.
        // (The previous code bound these to swapped variable names.)
        let normal_ch_name: Option<String> = row.get(4)?;
        let direct_peer_name: Option<String> = row.get(5)?;
        Ok(DBMessage {
            time: to_unixtime(row.get(0)?),
            content: row.get(1)?,
            username: row.get(2)?,
            user_realname: row.get(3)?,
            channel_name: match normal_ch_name {
                Some(name) => format!("#{}", name),
                None => direct_peer_name.unwrap_or_else(|| "<unknown>".to_string()),
            },
        })
    })?;
    let mut messages = vec![];
    for message in results {
        messages.push(message?);
    }
    // Fetch both workspace fields with one row query instead of preparing the
    // statement once and executing it twice.
    let (name, icon) = conn.query_row(
        "SELECT name, icon_url FROM team LIMIT 1",
        NO_PARAMS,
        |row| Ok((row.get(0)?, row.get(1)?)),
    )?;
    Ok(DBLog {
        name,
        icon,
        messages,
    })
}
/// Returns true if file is a ripcord db (sqlite)
/// Returns true if file is a ripcord db (sqlite)
fn is_ripcord_db(entry: &DirEntry) -> bool {
    match entry.file_name().to_str() {
        // Only the ".ripdb" extension marks a ripcord database.
        Some(name) => name.ends_with(".ripdb"),
        // Non-UTF-8 file names can never match.
        None => false,
    }
}
/// Add messages to message map
/// Add messages to message map
///
/// Each message is keyed by (time, channel, user); a later message with the
/// same key overwrites the earlier one, which is what de-duplicates.
fn append_msgs(map: &mut HashMap<MessageInfo, DBMessage>, new: Vec<DBMessage>) {
    map.extend(new.into_iter().map(|msg| {
        let key = MessageInfo {
            time: msg.time,
            channel_name: msg.channel_name.clone(),
            username: msg.username.clone(),
        };
        (key, msg)
    }));
}
/// Convert message map to vector (sorted)
fn msgmap_vec(mut map: HashMap<MessageInfo, DBMessage>) -> Vec<DBMessage> {
let mut messages = vec![];
for (_, msg) in map.drain() {
messages.push(msg);
}
messages.sort_by_key(|k| k.time);
messages
}
/// Consolidate all databases from the same workspace and de-duplicate messages
fn consolidate_dbs(dbs: Vec<DBLog>) -> Vec<DBLog> {
let mut dbmap = HashMap::new();
for db in dbs {
let key = db.name.clone();
let messages = dbmap.get_mut(&key);
match messages {
None => {
let mut map = HashMap::new();
append_msgs(&mut map, db.messages);
dbmap.insert(
key,
HashDB {
info: DBLog {
name: db.name.clone(),
icon: db.icon.clone(),
messages: vec![],
},
messagemap: map,
},
);
}
Some(dbentry) => {
append_msgs(&mut dbentry.messagemap, db.messages);
}
}
}
let mut databases = vec![];
for (_, db) in dbmap.drain() {
println!("[WORKSPACE] {}", db.info.name);
databases.push(DBLog {
name: db.info.name,
icon: db.info.icon,
messages: msgmap_vec(db.messagemap),
});
}
databases
}
/// Scan a directory for ripcord database files, load them and consolidate them
/// Scan a directory for ripcord database files, load them and consolidate them
///
/// Walks `basedir` recursively (following symlinks), loads every `.ripdb`
/// file read-only, and merges workspaces via `consolidate_dbs`. Panics if a
/// directory entry cannot be read or a database fails to open/load.
pub fn scan_dbs(basedir: &str) -> Vec<DBLog> {
    let mut logs = vec![];
    for entry in WalkDir::new(basedir).follow_links(true) {
        let entry = entry.unwrap();
        // Guard clause: skip anything that is not a ripcord database file.
        if !is_ripcord_db(&entry) {
            continue;
        }
        let conn = Connection::open_with_flags(entry.path(), OpenFlags::SQLITE_OPEN_READ_ONLY)
            .unwrap();
        let db = load_db(&conn).unwrap();
        println!(
            "[LOADED] {} ({})",
            entry.file_name().to_str().unwrap(),
            db.name
        );
        logs.push(db);
        conn.close().unwrap();
    }
    consolidate_dbs(logs)
}