Dominic Grimm 2023-06-02 16:27:39 +02:00
commit ce49c3df81
Signed by: dergrimm
GPG key ID: B6FFE500AAD54A3A
29 changed files with 3466 additions and 0 deletions

3
.cargo/config Normal file

@@ -0,0 +1,3 @@
[target.x86_64-unknown-linux-gnu]
linker = "clang"
rustflags = ["-C", "link-arg=-fuse-ld=/usr/bin/mold"]

9
.dockerignore Normal file

@@ -0,0 +1,9 @@
/target
Dockerfile
.gitignore
.dockerignore
vendor/
examples/
# static/
LICENSE

2
.env Normal file

@@ -0,0 +1,2 @@
POSTGRES_USER="fiddle"
POSTGRES_PASSWORD="fiddle"

1
.gitignore vendored Normal file

@@ -0,0 +1 @@
/target

6
.sqlfluff Normal file

@@ -0,0 +1,6 @@
[sqlfluff]
dialect = postgres
exclude_rules = LT05
[sqlfluff:indentation]
tab_space_size = 4

2536
Cargo.lock generated Normal file

File diff suppressed because it is too large

30
Cargo.toml Normal file

@@ -0,0 +1,30 @@
[package]
name = "fiddle"
version = "0.1.0"
edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
actix-cors = "0.6.4"
actix-web = "4.3.1"
anyhow = { version = "1.0.71", features = ["backtrace"] }
async-trait = "0.1.68"
chrono = "0.4.26"
dataloader = "0.16.0"
debug-ignore = "1.0.5"
diesel = { version = "2.1.0", features = [
"postgres",
"chrono",
"r2d2",
"uuid",
] }
env_logger = "0.10.0"
envconfig = "0.10.0"
juniper = { version = "0.15.11", features = ["uuid"] }
juniper_actix = "0.4.0"
lazy_static = "1.4.0"
log = "0.4.18"
tokio = { version = "1.28.2", features = ["full"] }
uuid-simd = "0.8.0"
uuidv7 = { version = "1.3.2", package = "uuid", features = ["serde"] }

44
Dockerfile Normal file

@@ -0,0 +1,44 @@
FROM docker.io/lukemathwalker/cargo-chef:latest-rust-1.69.0 as chef
SHELL ["/bin/bash", "-o", "pipefail", "-c"]
# hadolint ignore=DL3008,DL3009
RUN apt-get update && \
apt-get install -y --no-install-recommends \
wget \
software-properties-common \
clang
WORKDIR /tmp
ARG MOLD_VERSION="1.11.0"
RUN wget -qO- https://github.com/rui314/mold/releases/download/v${MOLD_VERSION}/mold-${MOLD_VERSION}-x86_64-linux.tar.gz | tar xzf - \
&& cp -RT ./mold-${MOLD_VERSION}-x86_64-linux /usr \
&& rm -rf ./mold-${MOLD_VERSION}-x86_64-linux
WORKDIR /
FROM chef as diesel
RUN cargo install diesel_cli --no-default-features --features postgres
FROM chef as planner
WORKDIR /usr/src/fiddle
RUN mkdir src && touch src/main.rs
COPY ./Cargo.toml ./Cargo.lock ./
RUN cargo chef prepare --recipe-path recipe.json
FROM chef as builder
WORKDIR /usr/src/fiddle
COPY ./.cargo ./.cargo
COPY --from=planner /usr/src/fiddle/recipe.json .
RUN cargo chef cook --release --recipe-path recipe.json
COPY ./assets ./assets
COPY ./src ./src
RUN cargo build --release
FROM docker.io/bitnami/minideb:bullseye as runner
RUN install_packages libpq5
WORKDIR /usr/local/bin
COPY --from=diesel /usr/local/cargo/bin/diesel .
WORKDIR /usr/src/fiddle
COPY ./run.sh .
RUN chmod +x ./run.sh
COPY ./migrations ./migrations
COPY --from=builder /usr/src/fiddle/target/release/fiddle ./bin/fiddle
EXPOSE 8080
CMD [ "./run.sh" ]

6
assets/logo.txt Normal file

@@ -0,0 +1,6 @@
  __ _     _     _ _
 / _(_)   | |   | | |
| |_ _  __| | __| | | ___
|  _| |/ _` |/ _` | |/ _ \
| | | | (_| | (_| | |  __/
|_| |_|\__,_|\__,_|_|\___|

9
diesel.toml Normal file

@@ -0,0 +1,9 @@
# For documentation on how to configure this file,
# see https://diesel.rs/guides/configuring-diesel-cli
[print_schema]
file = "src/schema.rs"
custom_type_derives = ["diesel::query_builder::QueryId"]
[migrations_directory]
dir = "migrations"

31
docker-compose.yml Normal file

@@ -0,0 +1,31 @@
version: "3"
services:
db:
image: docker.io/postgres:15.2-alpine
restart: always
environment:
POSTGRES_USER: ${POSTGRES_USER}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
volumes:
- db:/var/lib/postgresql/data
fiddle:
image: git.dergrimm.net/dergrimm/fiddle:latest
build: .
restart: always
environment:
FIDDLE_DB_URL: postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@db:5432/${POSTGRES_USER}
FIDDLE_DATA_DIR: /data
FIDDLE_AUTO_PRUNE_SLEEP: 1800
FIDDLE_TAMPER_SLEEP: 10
volumes:
- fiddle:/data
ports:
- 8080:8080
depends_on:
- db
volumes:
db:
fiddle:


@@ -0,0 +1 @@
DROP TABLE directories;


@@ -0,0 +1,8 @@
CREATE EXTENSION IF NOT EXISTS pgcrypto;
CREATE TABLE directories (
id uuid PRIMARY KEY DEFAULT GEN_RANDOM_UUID(),
active boolean NOT NULL,
created_at timestamptz NOT NULL DEFAULT CURRENT_TIMESTAMP,
updated_at timestamptz
);

10
run.sh Normal file

@@ -0,0 +1,10 @@
#!/usr/bin/env bash
# -*- coding: utf-8 -*-
set -e
DATABASE_URL="$FIDDLE_DB_URL" diesel setup \
--migration-dir ./migrations \
--locked-schema
RUST_LOG=info ./bin/fiddle

37
src/api/context.rs Normal file

@@ -0,0 +1,37 @@
use dataloader::non_cached::Loader;
use juniper::FieldResult;
use juniper::IntoFieldError;
use crate::{
api::{loaders, Error},
db,
};
#[derive(Clone)]
pub struct Loaders {
pub directory: loaders::directory::DirectoryLoader,
}
impl Default for Loaders {
fn default() -> Self {
Self {
directory: Loader::new(loaders::directory::DirectoryBatcher)
.with_yield_count(loaders::directory::YIELD_COUNT),
}
}
}
pub struct Context {
pub db_pool: db::Pool,
pub loaders: Loaders,
}
impl Context {
pub fn get_db_conn(&self) -> FieldResult<db::PoolConnection> {
self.db_pool
.get()
.map_or(Err(Error::Internal.into_field_error()), Ok)
}
}
impl juniper::Context for Context {}

58
src/api/error.rs Normal file

@@ -0,0 +1,58 @@
use juniper::{graphql_value, FieldError, FieldResult, IntoFieldError, ScalarValue};
pub enum Error {
Internal,
DoesNotExist,
CountNegative,
}
impl<S: ScalarValue> IntoFieldError<S> for Error {
fn into_field_error(self) -> FieldError<S> {
match self {
Self::Internal => FieldError::new(
"Internal server error",
graphql_value!({
"type": "INTERNAL"
}),
),
Self::DoesNotExist => FieldError::new(
"Record does not exist",
graphql_value!({
"type": "DOES_NOT_EXIST"
}),
),
Self::CountNegative => FieldError::new(
"Count can not be negative",
graphql_value!({
"type": "COUNT_NEGATIVE",
}),
),
}
}
}
pub trait QueryResultIntoFieldResult<T> {
fn into_field_result(self) -> FieldResult<T>;
}
impl<T> QueryResultIntoFieldResult<T> for diesel::QueryResult<T> {
fn into_field_result(self) -> FieldResult<T> {
self.map_err(|_| Error::Internal.into_field_error())
}
}
// pub trait AsyncResultIntoFieldResult<T> {
// fn into_field_result(self) -> FieldResult<T>;
// }
// impl AsyncResultIntoFieldResult<celery::task::AsyncResult>
// for Result<celery::task::AsyncResult, celery::error::CeleryError>
// {
// fn into_field_result(self) -> FieldResult<celery::task::AsyncResult> {
// // match self {
// // Ok(x) => Ok(x),
// // Err(_) => Err(Error::Internal.into_field_error()),
// // }
// self.map_err(|_| Error::Internal.into_field_error())
// }
// }

43
src/api/loaders/directory.rs Normal file

@@ -0,0 +1,43 @@
use async_trait::async_trait;
use chrono::prelude::*;
use dataloader::non_cached::Loader;
use dataloader::BatchFn;
use diesel::prelude::*;
use std::collections::HashMap;
use uuidv7::Uuid;
use crate::{api::models, db};
pub struct DirectoryBatcher;
#[async_trait]
impl BatchFn<Uuid, models::directory::Directory> for DirectoryBatcher {
async fn load(&mut self, keys: &[Uuid]) -> HashMap<Uuid, models::directory::Directory> {
let db_conn = &mut db::POOL.get().unwrap();
let mut map = HashMap::new();
for row in db::schema::directories::table
.select((
db::schema::directories::id,
db::schema::directories::created_at,
db::schema::directories::updated_at,
))
.filter(db::schema::directories::id.eq_any(keys))
.load::<(Uuid, DateTime<Utc>, Option<DateTime<Utc>>)>(db_conn)
.unwrap()
{
let row: (Uuid, DateTime<Utc>, Option<DateTime<Utc>>) = row;
let data = models::directory::Directory {
id: row.0,
created_at: row.1,
updated_at: row.2,
};
map.insert(data.id, data);
}
map
}
}
pub type DirectoryLoader = Loader<Uuid, models::directory::Directory, DirectoryBatcher>;
pub const YIELD_COUNT: usize = 100;

76
src/api/loaders/mod.rs Normal file

@@ -0,0 +1,76 @@
use async_trait::async_trait;
use std::clone::Clone;
use std::fmt::Debug;
use std::hash::Hash;
use std::io::{Error, ErrorKind};
pub mod directory;
#[async_trait]
pub trait TryOptionLoad<K, V>: Clone
where
K: Eq + Hash + Clone + Debug + Send + Sync,
V: Clone + Debug + Send,
{
async fn try_option_load(&self, key: K) -> Result<Option<V>, Error>;
}
#[async_trait]
impl<K, V, F> TryOptionLoad<K, V> for dataloader::non_cached::Loader<K, V, F>
where
K: Eq + Hash + Clone + Debug + Send + Sync,
V: Clone + Debug + Send,
F: dataloader::BatchFn<K, V> + Send + Sync,
{
async fn try_option_load(&self, key: K) -> Result<Option<V>, Error> {
async fn internal_try_option_load<K, V, F>(
loader: &dataloader::non_cached::Loader<K, V, F>,
key: K,
) -> Result<Option<V>, Error>
where
K: Eq + Hash + Clone + Debug + Send + Sync,
V: Clone + Debug + Send,
F: dataloader::BatchFn<K, V> + Send + Sync,
{
match loader.try_load(key).await {
Ok(x) => Ok(Some(x)),
Err(e) => match e.kind() {
ErrorKind::NotFound => Ok(None),
_ => Err(e),
},
}
}
internal_try_option_load(self, key).await
}
}
#[async_trait]
impl<K, V, F> TryOptionLoad<K, V> for dataloader::cached::Loader<K, V, F>
where
K: Eq + Hash + Clone + Debug + Send + Sync,
V: Clone + Debug + Send,
F: dataloader::BatchFn<K, V> + Send + Sync,
{
async fn try_option_load(&self, key: K) -> Result<Option<V>, Error> {
async fn internal_try_option_load<K, V, F>(
loader: &dataloader::cached::Loader<K, V, F>,
key: K,
) -> Result<Option<V>, Error>
where
K: Eq + Hash + Clone + Debug + Send + Sync,
V: Clone + Debug + Send,
F: dataloader::BatchFn<K, V> + Send + Sync,
{
match loader.try_load(key).await {
Ok(x) => Ok(Some(x)),
Err(e) => match e.kind() {
ErrorKind::NotFound => Ok(None),
_ => Err(e),
},
}
}
internal_try_option_load(self, key).await
}
}
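Illustrative sketch (not part of this commit): try_option_load only converts the loader's NotFound error into Ok(None); any other I/O error still surfaces as Err. A minimal way to call it outside a resolver, assuming the FIDDLE_* environment variables are set and the database is reachable, since the default Loaders batch against db::POOL; the UUID literal is a placeholder:

use fiddle::api::{context::Loaders, loaders::TryOptionLoad};

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let loaders = Loaders::default();
    // Placeholder id; uuidv7 is the crate alias from Cargo.toml.
    let id: uuidv7::Uuid = "018969eb-5f62-7a81-8000-2b3c4d5e6f70".parse().expect("valid UUID");
    match loaders.directory.try_option_load(id).await? {
        Some(dir) => println!("found: {:?}", dir),
        None => println!("no directory with that id"),
    }
    Ok(())
}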

209
src/api/mod.rs Normal file

@@ -0,0 +1,209 @@
use diesel::{prelude::*, row::Field};
use juniper::{graphql_object, EmptySubscription, FieldResult, IntoFieldError, RootNode};
use std::fs;
use uuidv7::Uuid;
use crate::{db, prune_many, prune_single, CONFIG};
pub mod context;
pub mod error;
pub mod loaders;
pub mod models;
pub mod scalars;
pub use context::Context;
pub use error::{Error, QueryResultIntoFieldResult};
use loaders::TryOptionLoad;
pub struct Query;
#[graphql_object(context = Context)]
impl Query {
fn ping() -> &'static str {
"pong"
}
async fn directories(context: &Context) -> FieldResult<Vec<models::directory::Directory>> {
let db_conn = &mut context.get_db_conn()?;
let ids: Vec<Uuid> = db::schema::directories::table
.select(db::schema::directories::id)
.filter(db::schema::directories::active)
.load(db_conn)
.into_field_result()?;
context
.loaders
.directory
.try_load_many(ids)
.await
.map_or_else(
|_| Err(Error::Internal.into_field_error()),
|x| Ok(x.into_values().collect()),
)
}
async fn directory(
context: &Context,
id: scalars::Uuid,
) -> FieldResult<Option<models::directory::Directory>> {
context
.loaders
.directory
.try_option_load(*id)
.await
.map_err(|_| Error::Internal.into_field_error())
}
}
pub struct Mutation;
#[graphql_object(context = Context)]
impl Mutation {
async fn create_directory(context: &Context) -> FieldResult<models::directory::Directory> {
let db_conn = &mut context.get_db_conn()?;
let id = diesel::insert_into(db::schema::directories::table)
.values(db::models::NewDirectory { active: true })
.returning(db::schema::directories::id)
.get_result::<Uuid>(db_conn)
.into_field_result()?;
fs::create_dir(format!("{}/{}", CONFIG.data_dir, id))
.map_err(|_| Error::Internal.into_field_error())?;
context
.loaders
.directory
.try_load(id)
.await
.map_err(|_| Error::Internal.into_field_error())
}
async fn create_directories(
context: &Context,
count: i32,
) -> FieldResult<Vec<models::directory::Directory>> {
match count {
_ if count < 0 => return Err(Error::CountNegative.into_field_error()),
0 => return Ok(vec![]),
_ => {}
}
let db_conn = &mut context.get_db_conn()?;
let input = vec![db::models::NewDirectory { active: true }];
let ids = diesel::insert_into(db::schema::directories::table)
.values(
input
.iter()
.cycle()
.take(count as usize)
.collect::<Vec<_>>(),
)
.returning(db::schema::directories::id)
.load::<Uuid>(db_conn)
.into_field_result()?;
for id in ids.iter() {
fs::create_dir(format!("{}/{}", CONFIG.data_dir, id))
.map_err(|_| Error::Internal.into_field_error())?;
}
context
.loaders
.directory
.try_load_many(ids)
.await
.map_or_else(
|_| Err(Error::Internal.into_field_error()),
|x| Ok(x.into_values().collect()),
)
}
async fn delete_directory(
context: &Context,
id: scalars::Uuid,
immediate: Option<bool>,
) -> FieldResult<bool> {
let db_conn = &mut context.get_db_conn()?;
if diesel::select(diesel::dsl::not(diesel::dsl::exists(
db::schema::directories::table.filter(db::schema::directories::id.eq(*id)),
)))
.get_result::<bool>(db_conn)
.into_field_result()?
{
return Err(Error::DoesNotExist.into_field_error());
}
if immediate.unwrap_or(false) {
prune_single(&id.to_string()).map_err(|_| Error::Internal.into_field_error())?;
diesel::delete(
db::schema::directories::table.filter(db::schema::directories::id.eq(*id)),
)
.execute(db_conn)
.into_field_result()?;
} else {
diesel::update(
db::schema::directories::table.filter(db::schema::directories::id.eq(*id)),
)
.set(db::schema::directories::active.eq(false))
.execute(db_conn)
.into_field_result()?;
}
Ok(true)
}
async fn delete_directories(
context: &Context,
ids: Vec<scalars::Uuid>,
immediate: Option<bool>,
) -> FieldResult<bool> {
let db_conn = &mut context.get_db_conn()?;
let ids: Vec<Uuid> = ids.into_iter().map(|id| *id).collect();
let count: i64 = db::schema::directories::table
.filter(db::schema::directories::id.eq_any(&ids))
.count()
.get_result::<i64>(db_conn)
.into_field_result()?;
dbg!(&count);
if count == ids.len() as i64 {
if immediate.unwrap_or(false) {
prune_many(&ids.iter().map(|id| id.to_string()).collect::<Vec<_>>())?;
diesel::delete(
db::schema::directories::table.filter(db::schema::directories::id.eq_any(ids)),
)
.execute(db_conn)
.into_field_result()?;
} else {
diesel::update(
db::schema::directories::table.filter(db::schema::directories::id.eq_any(ids)),
)
.set(db::schema::directories::active.eq(false))
.execute(db_conn)
.into_field_result()?;
}
} else {
return Err(Error::DoesNotExist.into_field_error());
}
Ok(true)
}
async fn prune(context: &Context) -> FieldResult<bool> {
let db_conn = &mut context.get_db_conn()?;
crate::prune(db_conn).map_err(|_| Error::Internal.into_field_error())?;
Ok(true)
}
}
pub type Schema = RootNode<'static, Query, Mutation, EmptySubscription<Context>>;
pub fn schema() -> Schema {
Schema::new(Query, Mutation, EmptySubscription::new())
}
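Illustrative sketch (not part of this commit): the schema can also be exercised directly with juniper's executor instead of going through the HTTP route; this assumes the FIDDLE_* environment variables are set and the Postgres service from docker-compose.yml is reachable, because Context carries the live connection pool:

use fiddle::{api, db};

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    let schema = api::schema();
    let context = api::Context {
        db_pool: db::POOL.clone(),
        loaders: api::context::Loaders::default(),
    };
    // Run the trivial ping query; no database rows are touched.
    let (value, errors) = juniper::execute("{ ping }", None, &schema, &juniper::Variables::new(), &context)
        .await
        .map_err(|e| anyhow::anyhow!("GraphQL error: {:?}", e))?;
    assert!(errors.is_empty());
    println!("{:?}", value); // expected: {"ping": "pong"}
    Ok(())
}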

31
src/api/models/directory.rs Normal file

@@ -0,0 +1,31 @@
use chrono::prelude::*;
use juniper::graphql_object;
use uuidv7::Uuid;
use crate::api::{scalars, Context};
#[derive(Clone, Debug)]
pub struct Directory {
pub id: Uuid,
pub created_at: DateTime<Utc>,
pub updated_at: Option<DateTime<Utc>>,
}
#[graphql_object(context = Context)]
impl Directory {
fn id(&self) -> scalars::Uuid {
scalars::Uuid(self.id)
}
fn path(&self) -> String {
format!("{}", self.id)
}
fn created_at(&self) -> DateTime<Utc> {
self.created_at
}
fn updated_at(&self) -> Option<DateTime<Utc>> {
self.updated_at
}
}

1
src/api/models/mod.rs Normal file

@@ -0,0 +1 @@
pub mod directory;

3
src/api/scalars/mod.rs Normal file

@@ -0,0 +1,3 @@
pub mod uuid;
pub use uuid::Uuid;

39
src/api/scalars/uuid.rs Normal file

@@ -0,0 +1,39 @@
use std::ops::Deref;
type Value = uuidv7::Uuid;
pub struct Uuid(pub Value);
#[juniper::graphql_scalar(name = "UUID", description = "UUID encoded as a string")]
impl<S> GraphQLScalar for Uuid
where
S: juniper::ScalarValue,
{
fn resolve(&self) -> juniper::Value {
juniper::Value::scalar(self.0.to_string())
}
fn from_input_value(value: &juniper::InputValue) -> Option<Uuid> {
value
.as_string_value()
.and_then(|s| {
use uuid_simd::UuidExt;
use uuidv7::Uuid;
Uuid::parse(s.as_bytes()).ok()
})
.map(Uuid)
}
fn from_str<'a>(value: juniper::ScalarToken<'a>) -> juniper::ParseScalarResult<'a, S> {
<String as juniper::ParseScalarValue<S>>::from_str(value)
}
}
impl Deref for Uuid {
type Target = Value;
fn deref(&self) -> &Self::Target {
&self.0
}
}
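Illustrative note (not part of this commit): the Deref impl is what lets the resolvers in src/api/mod.rs write *id to reach the wrapped uuidv7::Uuid; a one-line sketch:

// Sketch only: the scalar derefs to the inner uuidv7::Uuid, which is Copy.
fn inner(id: fiddle::api::scalars::Uuid) -> uuidv7::Uuid {
    *id
}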

22
src/config.rs Normal file

@@ -0,0 +1,22 @@
use debug_ignore::DebugIgnore;
use envconfig::Envconfig;
use lazy_static::lazy_static;
#[derive(Envconfig, Debug)]
pub struct Config {
#[envconfig(from = "FIDDLE_DB_URL")]
pub db_url: DebugIgnore<String>,
#[envconfig(from = "FIDDLE_DATA_DIR")]
pub data_dir: String,
#[envconfig(from = "FIDDLE_AUTO_PRUNE_SLEEP")]
pub auto_prune_sleep: u64,
#[envconfig(from = "FIDDLE_TAMPER_SLEEP")]
pub tamper_sleep: u64,
}
lazy_static! {
pub static ref CONFIG: Config = Config::init_from_env().unwrap();
}
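Illustrative sketch (not part of this commit): Config::init_from_env reads the four FIDDLE_* variables declared above and fails if any is missing; the values below are placeholders, not defaults from the code:

use envconfig::Envconfig;

fn main() {
    // Placeholder values for illustration only.
    std::env::set_var("FIDDLE_DB_URL", "postgres://fiddle:fiddle@localhost:5432/fiddle");
    std::env::set_var("FIDDLE_DATA_DIR", "/tmp/fiddle-data");
    std::env::set_var("FIDDLE_AUTO_PRUNE_SLEEP", "1800");
    std::env::set_var("FIDDLE_TAMPER_SLEEP", "10");

    let config = fiddle::config::Config::init_from_env().expect("all FIDDLE_* variables set");
    assert_eq!(config.auto_prune_sleep, 1800);
    assert_eq!(config.tamper_sleep, 10);
}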

29
src/db/mod.rs Normal file

@@ -0,0 +1,29 @@
use anyhow::Result;
use diesel::pg::PgConnection;
use diesel::prelude::*;
use diesel::r2d2::{ConnectionManager, PooledConnection};
use lazy_static::lazy_static;
use crate::CONFIG;
pub mod models;
pub mod schema;
pub type Pool = diesel::r2d2::Pool<ConnectionManager<PgConnection>>;
pub type Connection = PgConnection;
pub type PoolConnection = PooledConnection<ConnectionManager<PgConnection>>;
pub fn establish_connection() -> ConnectionResult<PgConnection> {
use diesel::Connection;
PgConnection::establish(&CONFIG.db_url)
}
pub fn pool() -> Result<Pool> {
Ok(diesel::r2d2::Pool::builder()
.build(ConnectionManager::<PgConnection>::new(&*CONFIG.db_url))?)
}
lazy_static! {
pub static ref POOL: Pool = pool().unwrap();
}

20
src/db/models.rs Normal file

@@ -0,0 +1,20 @@
use chrono::prelude::*;
use diesel::prelude::*;
use uuidv7::Uuid;
use crate::db::schema;
#[derive(Identifiable, Queryable, Debug)]
#[diesel(table_name = schema::directories)]
pub struct Directory {
pub id: Uuid,
pub active: bool,
pub created_at: DateTime<Utc>,
pub updated_at: Option<DateTime<Utc>>,
}
#[derive(Insertable, Debug)]
#[diesel(table_name = schema::directories)]
pub struct NewDirectory {
pub active: bool,
}

8
src/db/schema.rs Normal file

@@ -0,0 +1,8 @@
diesel::table! {
directories {
id -> Uuid,
active -> Bool,
created_at -> Timestamptz,
updated_at -> Nullable<Timestamptz>,
}
}

74
src/lib.rs Normal file

@@ -0,0 +1,74 @@
use anyhow::Result;
use diesel::prelude::*;
use std::fs;
use uuidv7::Uuid;
pub mod api;
pub mod config;
pub mod db;
pub use config::CONFIG;
pub const ASCII_LOGO: &str = include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/assets/logo.txt"));
pub fn init() {
println!("{}\n", ASCII_LOGO);
log::info!("Initializing config from environment variables");
let _ = *CONFIG;
log::info!("Config: {:?}", *CONFIG);
}
pub fn prune_single(entry: &str) -> Result<()> {
let path = format!("{}/{}", CONFIG.data_dir, entry);
log::info!("Pruning: {}", path);
if fs::metadata(&path).is_ok() {
fs::remove_file(&path).or_else(|_| fs::remove_dir_all(&path))?;
log::info!("File or directory deleted successfully: {}", path);
} else {
log::warn!("File or directory does not exist: {}", path);
}
Ok(())
}
pub fn prune_many(ids: &[String]) -> Result<()> {
ids.iter().try_for_each(|s| prune_single(s))?;
Ok(())
}
pub fn prune(db_conn: &mut db::Connection) -> Result<()> {
log::info!("Pruning deactivated directories");
let ids: Vec<Uuid> = db::schema::directories::table
.select(db::schema::directories::id)
.filter(db::schema::directories::active.eq(false))
.load(db_conn)?;
prune_many(&ids.iter().map(|id| id.to_string()).collect::<Vec<_>>())?;
diesel::delete(db::schema::directories::table.filter(db::schema::directories::id.eq_any(ids)))
.execute(db_conn)?;
log::info!("Pruning done");
Ok(())
}
pub fn prune_job() {
const NAME: &str = "prune";
log::info!("Starting cron job: {}", NAME);
let db_conn = &mut match db::POOL.get() {
Ok(x) => x,
Err(e) => {
log::error!("{}: {:?}", NAME, e);
return;
}
};
if let Err(e) = prune(db_conn) {
log::error!("{}: {:?}", NAME, e);
}
}
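Illustrative sketch (not part of this commit): the same prune path the cron thread runs every FIDDLE_AUTO_PRUNE_SLEEP seconds, invoked once by hand; assumes the FIDDLE_* environment and database are available:

fn main() -> anyhow::Result<()> {
    env_logger::init();
    fiddle::init();
    // Same steps as prune_job, but propagating errors instead of logging them.
    let db_conn = &mut fiddle::db::POOL.get()?;
    fiddle::prune(db_conn)?;
    Ok(())
}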

120
src/main.rs Normal file

@@ -0,0 +1,120 @@
use actix_cors::Cors;
use actix_web::{http::header, middleware, web, App, HttpRequest, HttpResponse, HttpServer};
use anyhow::Result;
use diesel::prelude::*;
use std::fs;
use std::thread;
use std::time::Duration;
use uuidv7::Uuid;
use juniper_actix::graphql_handler;
use fiddle::{api, db, init, prune_job, prune_many, CONFIG};
fn tamper_prune() -> Result<()> {
let db_conn = &mut db::POOL.get()?;
let entries = fs::read_dir(&CONFIG.data_dir)?;
let allowed: Vec<String> = db::schema::directories::table
.select(db::schema::directories::id)
.load::<Uuid>(db_conn)?
.iter()
.map(|id| id.to_string())
.collect();
let mut prunes: Vec<String> = vec![];
for p in entries {
let path = match p {
Ok(x) => x.path(),
Err(e) => {
log::error!("{:?}", e);
continue;
}
};
let relative = match path.strip_prefix(&CONFIG.data_dir) {
Ok(x) => x.to_string_lossy().to_string(),
Err(e) => {
log::error!("{:?}", e);
continue;
}
};
if path.is_file() || (path.is_dir() && !allowed.contains(&relative)) {
log::warn!("Invalid entry found: {}", relative);
prunes.push(relative);
}
}
prune_many(&prunes)?;
Ok(())
}
async fn not_found() -> &'static str {
"Not found!"
}
async fn graphql_route(
req: HttpRequest,
payload: web::Payload,
schema: web::Data<api::Schema>,
) -> Result<HttpResponse, actix_web::Error> {
let context = api::Context {
db_pool: db::POOL.clone(),
loaders: api::context::Loaders::default(),
};
graphql_handler(&schema, &context, req, payload).await
}
#[tokio::main]
async fn main() -> Result<()> {
env_logger::init();
init();
thread::spawn(move || {
let sleep_dur = Duration::from_secs(CONFIG.auto_prune_sleep);
loop {
prune_job();
thread::sleep(sleep_dur);
}
});
thread::spawn(move || {
let sleep_dur = Duration::from_secs(CONFIG.tamper_sleep);
loop {
if let Err(e) = tamper_prune() {
log::error!("{:?}", e);
}
thread::sleep(sleep_dur);
}
});
HttpServer::new(move || {
App::new()
.app_data(web::Data::new(api::schema()))
.wrap(middleware::Logger::default())
.wrap(middleware::Compress::default())
.wrap(
Cors::default()
.allow_any_origin()
.allowed_methods(["POST", "GET"])
.allowed_headers([header::ACCEPT])
.allowed_header(header::CONTENT_TYPE)
.supports_credentials()
.max_age(3600),
)
.service(
web::resource("/graphql")
.route(web::post().to(graphql_route))
.route(web::get().to(graphql_route)),
)
.default_service(web::to(not_found))
})
.bind(("0.0.0.0", 8080))?
.run()
.await?;
Ok(())
}