Rewrite in rust

Hamcha 2020-01-24 16:16:42 +01:00
commit a28a848d1d
Signed by: hamcha
GPG Key ID: 44AD3571EB09A39E
9 changed files with 2307 additions and 0 deletions

4 .gitignore vendored Normal file

@@ -0,0 +1,4 @@
/target
**/*.rs.bk
logs
*.ripdb*

17 .vscode/launch.json vendored Normal file

@@ -0,0 +1,17 @@
{
    // Use IntelliSense to learn about possible attributes.
    // Hover to view descriptions of existing attributes.
    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
    "version": "0.2.0",
    "configurations": [{
        "name": "(Windows) Launch",
        "type": "cppvsdbg",
        "request": "launch",
        "program": "${workspaceFolder}/target/debug/riplog-be.exe",
        "args": [],
        "stopAtEntry": false,
        "cwd": "${workspaceFolder}",
        "environment": [],
        "externalConsole": false
    }]
}

3 .vscode/settings.json vendored Normal file

@@ -0,0 +1,3 @@
{
    "debug.allowBreakpointsEverywhere": true
}

1773 Cargo.lock generated Normal file

File diff suppressed because it is too large

21 Cargo.toml Normal file

@@ -0,0 +1,21 @@
[package]
name = "riplog-view"
version = "0.1.0"
authors = ["Hamcha <hamcha@crunchy.rocks>"]
edition = "2018"

[[bin]]
name = "riplog-be"
path = "backend/main.rs"

[dependencies]
chrono = "0.4"
clap = "2.33"
walkdir = "2"
juniper = "0.14"
juniper_warp = "0.5.2"
warp = "0.1.8"

[dependencies.rusqlite]
version = "0.21.0"
features = ["bundled"]

8 README.md Normal file

@@ -0,0 +1,8 @@
# riplog-ng

Web UI for viewing Slack Ripcord log files

## Requirements

- rust (for building the backend)
- node (for building the frontend)
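
A minimal way to build and run the backend (not in the original README; a sketch assuming the `riplog-be` binary target declared in `Cargo.toml` and the positional defaults in `backend/main.rs` below):

```sh
# basedir (where the *.ripdb files live) and the bind address are positional,
# defaulting to "." and 127.0.0.1:9743
cargo run --bin riplog-be -- /path/to/logs 127.0.0.1:9743
```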

182 backend/database.rs Normal file

@@ -0,0 +1,182 @@
extern crate rusqlite;
extern crate walkdir;

use chrono::prelude::*;
use rusqlite::{Connection, OpenFlags, Result as SQLResult, NO_PARAMS};
use std::collections::HashMap;
use walkdir::{DirEntry, WalkDir};

/// Key used for deduplicating messages across database files
#[derive(Hash, Eq, PartialEq, Debug)]
struct MessageInfo {
    time: DateTime<Utc>,
    channel_name: String,
    username: String,
}

#[derive(Debug)]
struct HashDB {
    info: DBLog,
    messagemap: HashMap<MessageInfo, DBMessage>,
}

#[derive(Debug, Clone)]
pub struct DBMessage {
    pub time: DateTime<Utc>,
    pub content: String,
    pub username: String,
    pub user_realname: String,
    pub channel_name: String,
}

#[derive(Debug, Clone)]
pub struct DBLog {
    pub name: String,
    pub icon: String,
    pub messages: Vec<DBMessage>,
}

/// Ripcord keeps the unix seconds in the upper 32 bits of its 64-bit
/// timestamps, so shift them down before building a UTC datetime
fn to_unixtime(ts: i64) -> DateTime<Utc> {
    Utc.timestamp(ts >> 32, 0)
}

/// Pulls all the data needed from the database into memory
fn load_db(conn: &Connection) -> SQLResult<DBLog> {
    let mut statement = conn.prepare(
        "SELECT
            message.ts,
            message.content,
            coalesce(user.name, \"-\"),
            coalesce(user.real_name, \"-\"),
            channel_normal.name as normal_ch_name,
            coalesce(user2.name, \"-\") as user_ch_name
        FROM message
        LEFT JOIN user ON message.user_id == user.id
        LEFT JOIN channel_normal ON message.channel_id == channel_normal.id
        LEFT JOIN channel_direct ON message.channel_id == channel_direct.id
        LEFT JOIN user AS user2 ON channel_direct.user_id == user2.id",
    )?;
    let results = statement.query_map(NO_PARAMS, |row| {
        // Column 4 is the channel name (for channels), column 5 is the other
        // user's name (for direct chats)
        let channelname: Option<String> = row.get(4)?;
        let directname: Option<String> = row.get(5)?;
        Ok(DBMessage {
            time: to_unixtime(row.get(0)?),
            content: row.get(1)?,
            username: row.get(2)?,
            user_realname: row.get(3)?,
            // Channels are prefixed with #, direct chats with @
            channel_name: match channelname {
                Some(name) => format!("#{}", name),
                None => format!("@{}", directname.unwrap_or_else(|| "<unknown>".to_string())),
            },
        })
    })?;
    let mut messages = vec![];
    for message in results {
        messages.push(message?);
    }
    let mut namestmt = conn.prepare("SELECT name, icon_url FROM team LIMIT 1")?;
    let (name, icon) = namestmt.query_row(NO_PARAMS, |row| Ok((row.get(0)?, row.get(1)?)))?;
    Ok(DBLog {
        name,
        icon,
        messages,
    })
}

/// Returns true if file is a ripcord db (sqlite)
fn is_ripcord_db(entry: &DirEntry) -> bool {
    entry
        .file_name()
        .to_str()
        .map(|s| s.ends_with(".ripdb"))
        .unwrap_or(false)
}

/// Add messages to message map
fn append_msgs(map: &mut HashMap<MessageInfo, DBMessage>, new: Vec<DBMessage>) {
    for msg in new {
        map.insert(
            MessageInfo {
                time: msg.time,
                channel_name: msg.channel_name.clone(),
                username: msg.username.clone(),
            },
            msg,
        );
    }
}

/// Convert message map to vector (sorted by time)
fn msgmap_vec(mut map: HashMap<MessageInfo, DBMessage>) -> Vec<DBMessage> {
    let mut messages: Vec<DBMessage> = map.drain().map(|(_, msg)| msg).collect();
    messages.sort_by_key(|k| k.time);
    messages
}

/// Consolidate all databases from the same workspace and de-duplicate messages
fn consolidate_dbs(dbs: Vec<DBLog>) -> Vec<DBLog> {
    let mut dbmap = HashMap::new();
    for db in dbs {
        let key = db.name.clone();
        match dbmap.get_mut(&key) {
            None => {
                let mut map = HashMap::new();
                append_msgs(&mut map, db.messages);
                dbmap.insert(
                    key,
                    HashDB {
                        info: DBLog {
                            name: db.name.clone(),
                            icon: db.icon.clone(),
                            messages: vec![],
                        },
                        messagemap: map,
                    },
                );
            }
            Some(dbentry) => {
                append_msgs(&mut dbentry.messagemap, db.messages);
            }
        }
    }
    let mut databases = vec![];
    for (_, db) in dbmap.drain() {
        println!("[WORKSPACE] {}", db.info.name);
        databases.push(DBLog {
            name: db.info.name,
            icon: db.info.icon,
            messages: msgmap_vec(db.messagemap),
        });
    }
    databases
}

/// Scan a directory for ripcord database files, load them and consolidate them
pub fn scan_dbs(basedir: &str) -> Vec<DBLog> {
    let mut logs = vec![];
    for entry in WalkDir::new(basedir).follow_links(true) {
        let entry = entry.unwrap();
        if is_ripcord_db(&entry) {
            let conn = Connection::open_with_flags(entry.path(), OpenFlags::SQLITE_OPEN_READ_ONLY)
                .unwrap();
            let db = load_db(&conn).unwrap();
            println!(
                "[LOADED] {} ({})",
                entry.file_name().to_str().unwrap(),
                db.name
            );
            logs.push(db);
            conn.close().unwrap();
        }
    }
    consolidate_dbs(logs)
}
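
Not part of the commit: a small test sketch of the dedup behaviour above, assuming the private helpers `append_msgs` and `msgmap_vec` as defined in this file. Two messages sharing the same (time, channel, username) key collapse into one entry, and the survivors come back sorted by time.

```rust
#[cfg(test)]
mod tests {
    use super::*;

    // Helper building a message with a fixed channel/user (hypothetical data)
    fn msg(ts: i64, content: &str) -> DBMessage {
        DBMessage {
            time: Utc.timestamp(ts, 0),
            content: content.to_string(),
            username: "user".to_string(),
            user_realname: "User".to_string(),
            channel_name: "#general".to_string(),
        }
    }

    #[test]
    fn duplicate_messages_collapse() {
        let mut map = HashMap::new();
        append_msgs(
            &mut map,
            vec![msg(2, "later"), msg(1, "early"), msg(1, "early-dup")],
        );
        let sorted = msgmap_vec(map);
        assert_eq!(sorted.len(), 2); // the two ts=1 messages share a key
        assert_eq!(sorted[0].time, Utc.timestamp(1, 0)); // sorted by time
    }
}
```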

265 backend/graphql.rs Normal file

@@ -0,0 +1,265 @@
extern crate juniper;

use crate::database::{DBLog, DBMessage};
use chrono::prelude::*;
use juniper::Value::Null;
use juniper::{FieldError, FieldResult};
use std::collections::HashSet;
use std::convert::TryInto;
use warp::Filter;

#[derive(Debug, juniper::GraphQLObject)]
#[graphql(description = "Paginated list of messages")]
struct MessageList {
    #[graphql(description = "List of messages")]
    messages: Vec<Message>,

    #[graphql(description = "Next message, if any (when using pagination)")]
    next: Option<juniper::ID>,
}

#[derive(Debug, Clone, juniper::GraphQLObject)]
#[graphql(description = "A single message in a Slack workspace")]
struct Message {
    #[graphql(description = "Message timestamp")]
    time: DateTime<Utc>,

    #[graphql(description = "Message content")]
    content: String,

    #[graphql(description = "Slack username, if applicable")]
    username: String,

    #[graphql(description = "Slack real name, if applicable")]
    user_realname: String,

    #[graphql(
        description = "Channel/Private chat name. Channels are prefixed with #, Private chats with @"
    )]
    channel_name: String,

    #[graphql(description = "Unique message ID (hopefully)")]
    message_id: juniper::ID,
}

#[derive(Debug, juniper::GraphQLObject)]
#[graphql(description = "A slack workspace info")]
struct Workspace {
    #[graphql(description = "Workspace name / ID")]
    name: String,

    #[graphql(description = "URL to workspace icon")]
    icon: String,
}

#[derive(Debug, juniper::GraphQLObject)]
#[graphql(description = "A slack channel or private chat")]
struct Channel {
    #[graphql(description = "Channel/Chat name")]
    name: String,

    #[graphql(description = "True if a private chat (or group chat), False if channel")]
    is_private: bool,
}

struct WorkspaceData {
    name: String,
    icon: String,
    messages: Vec<Message>,
}

#[derive(Debug, juniper::GraphQLInputObject)]
struct Pagination {
    #[graphql(description = "Skip messages before this one")]
    after: Option<juniper::ID>,

    #[graphql(description = "Show at most the first X messages")]
    first: Option<i32>,
}

#[derive(Debug, juniper::GraphQLInputObject)]
struct MessageFilter {
    #[graphql(description = "Only show messages from this channel/chat")]
    channel: Option<String>,
}

#[derive(juniper::GraphQLEnum)]
enum SortOrder {
    #[graphql(description = "Sort from oldest")]
    DateAsc,

    #[graphql(description = "Sort from newest")]
    DateDesc,
}

struct Context {
    databases: Vec<WorkspaceData>,
}

impl juniper::Context for Context {}

/// Get message id for a slack message, eg. "#general/1579878982"
fn message_id(msg: &DBMessage) -> juniper::ID {
    juniper::ID::new(format!("{}/{}", msg.channel_name, msg.time.timestamp()))
}

/// Convert from DB struct to GQL
fn from_db(log: DBLog) -> WorkspaceData {
    WorkspaceData {
        name: log.name,
        icon: log.icon,
        messages: log
            .messages
            .iter()
            .map(|m| Message {
                message_id: message_id(&m),
                time: m.time,
                content: m.content.clone(),
                username: m.username.clone(),
                user_realname: m.user_realname.clone(),
                channel_name: m.channel_name.clone(),
            })
            .collect(),
    }
}

struct Query;

#[juniper::object(
    Context = Context,
)]
impl Query {
    fn api_version() -> &str {
        "1.0"
    }

    fn workspace(context: &Context) -> FieldResult<Vec<Workspace>> {
        let mut results = vec![];
        for ws in context.databases.as_slice() {
            results.push(Workspace {
                name: ws.name.clone(),
                icon: ws.icon.clone(),
            })
        }
        Ok(results)
    }

    fn channels(context: &Context, workspace: String) -> FieldResult<Vec<Channel>> {
        let db = context.databases.iter().find(|db| db.name == workspace);
        match db {
            None => Err(FieldError::new("workspace not found", Null)),
            Some(db) => {
                // Collect the distinct channel names seen in the workspace
                let mut channels = HashSet::new();
                for msg in &db.messages {
                    channels.insert(msg.channel_name.clone());
                }
                Ok(channels
                    .iter()
                    .map(|name| Channel {
                        name: name.clone(),
                        is_private: !name.starts_with('#'),
                    })
                    .collect())
            }
        }
    }

    fn messages(
        context: &Context,
        workspace: String,
        filter: Option<MessageFilter>,
        order: Option<SortOrder>,
        pagination: Option<Pagination>,
    ) -> FieldResult<MessageList> {
        let db = context.databases.iter().find(|db| db.name == workspace);
        match db {
            None => Err(FieldError::new("workspace not found", Null)),
            Some(db) => {
                let mut messages = db.messages.clone();
                // Apply filters
                if let Some(filters) = filter {
                    if let Some(channel) = filters.channel {
                        messages.retain(|m| m.channel_name == channel);
                    }
                }
                // Apply order
                match order.unwrap_or(SortOrder::DateAsc) {
                    SortOrder::DateAsc => messages.sort_by(|a, b| a.time.cmp(&b.time)),
                    SortOrder::DateDesc => messages.sort_by(|a, b| b.time.cmp(&a.time)),
                }
                // Apply pagination
                let (messages, next) = match pagination {
                    None => (messages, None),
                    Some(pdata) => {
                        // Apply after, if specified (the cursor message itself is kept)
                        let skipped = match pdata.after {
                            None => messages,
                            Some(after) => messages
                                .iter()
                                .skip_while(|m| m.message_id != after)
                                .cloned()
                                .collect(),
                        };
                        // Apply limit, if specified
                        let limit: usize = pdata.first.unwrap_or(1000).try_into().unwrap_or(0);
                        if limit >= skipped.len() {
                            (skipped, None)
                        } else {
                            (
                                skipped.iter().take(limit).cloned().collect(),
                                Some(skipped[limit].message_id.clone()),
                            )
                        }
                    }
                };
                Ok(MessageList { messages, next })
            }
        }
    }
}

struct Mutation;

#[juniper::object(
    Context = Context,
)]
impl Mutation {}

type Schema = juniper::RootNode<'static, Query, Mutation>;

/// Start the warp server: GraphiQL on /graphiql, the GraphQL endpoint on /graphql
pub fn server(databases: Vec<DBLog>, addr: std::net::SocketAddr) {
    let schema = Schema::new(Query, Mutation);
    let state = warp::any().map(move || Context {
        databases: databases.clone().into_iter().map(from_db).collect(),
    });
    let graphql_filter = juniper_warp::make_graphql_filter(schema, state.boxed());
    warp::serve(
        warp::get2()
            .and(warp::path("graphiql"))
            .and(juniper_warp::graphiql_filter("/graphql"))
            .or(warp::path("graphql").and(graphql_filter)),
    )
    .run(addr);
}
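
Not part of the commit: a minimal in-process smoke test of the schema above, assuming juniper 0.14's synchronous `juniper::execute`; the query and assertions are illustrative only.

```rust
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn api_version_resolves() {
        let schema = Schema::new(Query, Mutation);
        let ctx = Context { databases: vec![] };
        // Run a query straight against the schema, no HTTP involved
        let (value, errors) = juniper::execute(
            "{ apiVersion }",
            None,
            &schema,
            &juniper::Variables::new(),
            &ctx,
        )
        .expect("query should execute");
        assert!(errors.is_empty());
        assert!(value.as_object_value().is_some());
    }
}
```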

34 backend/main.rs Normal file

@@ -0,0 +1,34 @@
mod database;
mod graphql;

use clap::{App, Arg};
use database::scan_dbs;
use graphql::server;

fn main() -> std::io::Result<()> {
    let cmd = App::new("Riplog")
        .version("1.0")
        .arg(
            // Positional argument; the default value makes it optional
            // (required + default_value and short + index contradict each other)
            Arg::with_name("basedir")
                .help("Base directory containing ripcord db files")
                .default_value(".")
                .index(1),
        )
        .arg(
            Arg::with_name("bind")
                .help("Address to bind to")
                .default_value("127.0.0.1:9743")
                .index(2),
        )
        .get_matches();

    let basedir = cmd.value_of("basedir").unwrap();
    let bind = cmd
        .value_of("bind")
        .unwrap()
        .parse()
        .expect("bind must be a valid address, eg. 127.0.0.1:9743");

    let logs = scan_dbs(basedir);
    println!("Loaded data for {} workspaces", logs.len());
    server(logs, bind);
    Ok(())
}