Merge branch 'refactor' into 'next'

Some refactorings

See merge request famedly/conduit!279
Timo Kösters 2022-02-03 12:50:55 +00:00
commit 79345dc2a6
26 changed files with 148 additions and 170 deletions

View file

@@ -1,12 +1,7 @@
use crate::{utils, Error, Result};
use bytes::BytesMut;
use ruma::api::{IncomingResponse, OutgoingRequest, SendAccessToken};
use std::{
convert::{TryFrom, TryInto},
fmt::Debug,
mem,
time::Duration,
};
use std::{fmt::Debug, mem, time::Duration};
use tracing::warn;
pub(crate) async fn send_request<T: OutgoingRequest>(
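Most hunks in this merge drop `use std::convert::{TryFrom, TryInto};` (or the matching entries inside larger `use std::{...}` blocks). Those traits are part of the Rust 2021 edition prelude, so a crate on that edition keeps compiling without the explicit import. A standalone sketch, not taken from the repository, of the conversions that still work once the import is gone:

    // Assumes edition = "2021" in Cargo.toml: TryFrom/TryInto come from the prelude,
    // so no `use std::convert::{TryFrom, TryInto};` is needed.
    fn main() {
        let small: u8 = u8::try_from(42u64).expect("42 fits in a u8"); // TryFrom
        let too_big: Result<u8, _> = 300u64.try_into();                // TryInto, Err: 300 > u8::MAX
        println!("{} {:?}", small, too_big);
    }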

View file

@@ -1,4 +1,4 @@
use std::{collections::BTreeMap, convert::TryInto, sync::Arc};
use std::{collections::BTreeMap, sync::Arc};
use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH};
use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma};

View file

@@ -3,7 +3,7 @@ use ruma::{
api::client::{error::ErrorKind, r0::context::get_context},
events::EventType,
};
use std::{collections::HashSet, convert::TryFrom};
use std::collections::HashSet;
#[cfg(feature = "conduit_bin")]
use rocket::get;

View file

@@ -1,5 +1,3 @@
use std::convert::TryInto;
use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma};
use ruma::{
api::{

View file

@@ -9,7 +9,6 @@ use ruma::api::client::{
get_media_config,
},
};
use std::convert::TryInto;
#[cfg(feature = "conduit_bin")]
use rocket::{get, post};

View file

@@ -30,7 +30,6 @@ use ruma::{
use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
use std::{
collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
convert::{TryFrom, TryInto},
iter,
sync::{Arc, RwLock},
time::{Duration, Instant},

View file

@@ -8,7 +8,6 @@ use ruma::{
};
use std::{
collections::{BTreeMap, HashSet},
convert::TryInto,
sync::Arc,
};

View file

@@ -1,6 +1,6 @@
use crate::{database::DatabaseGuard, utils, ConduitResult, Ruma};
use ruma::api::client::r0::presence::{get_presence, set_presence};
use std::{convert::TryInto, time::Duration};
use std::time::Duration;
#[cfg(feature = "conduit_bin")]
use rocket::{get, put};

View file

@@ -12,7 +12,7 @@ use ruma::{
events::{room::member::RoomMemberEventContent, EventType},
};
use serde_json::value::to_raw_value;
use std::{convert::TryInto, sync::Arc};
use std::sync::Arc;
#[cfg(feature = "conduit_bin")]
use rocket::{get, put};

View file

@@ -27,7 +27,7 @@ use ruma::{
RoomAliasId, RoomId, RoomVersionId,
};
use serde_json::{json, value::to_raw_value};
use std::{cmp::max, collections::BTreeMap, convert::TryInto, sync::Arc};
use std::{cmp::max, collections::BTreeMap, sync::Arc};
use tracing::{info, warn};
#[cfg(feature = "conduit_bin")]

View file

@@ -14,7 +14,6 @@ use ruma::{
};
use std::{
collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
convert::TryInto,
sync::Arc,
time::Duration,
};

src/config.rs Normal file (131 lines added)
View file

@@ -0,0 +1,131 @@
use std::collections::BTreeMap;
use ruma::ServerName;
use serde::{de::IgnoredAny, Deserialize};
use tracing::warn;
mod proxy;
use self::proxy::ProxyConfig;
#[derive(Clone, Debug, Deserialize)]
pub struct Config {
pub server_name: Box<ServerName>,
#[serde(default = "default_database_backend")]
pub database_backend: String,
pub database_path: String,
#[serde(default = "default_db_cache_capacity_mb")]
pub db_cache_capacity_mb: f64,
#[serde(default = "default_conduit_cache_capacity_modifier")]
pub conduit_cache_capacity_modifier: f64,
#[serde(default = "default_rocksdb_max_open_files")]
pub rocksdb_max_open_files: i32,
#[serde(default = "default_pdu_cache_capacity")]
pub pdu_cache_capacity: u32,
#[serde(default = "default_cleanup_second_interval")]
pub cleanup_second_interval: u32,
#[serde(default = "default_max_request_size")]
pub max_request_size: u32,
#[serde(default = "default_max_concurrent_requests")]
pub max_concurrent_requests: u16,
#[serde(default = "false_fn")]
pub allow_registration: bool,
#[serde(default = "true_fn")]
pub allow_encryption: bool,
#[serde(default = "false_fn")]
pub allow_federation: bool,
#[serde(default = "true_fn")]
pub allow_room_creation: bool,
#[serde(default = "false_fn")]
pub allow_jaeger: bool,
#[serde(default = "false_fn")]
pub tracing_flame: bool,
#[serde(default)]
pub proxy: ProxyConfig,
pub jwt_secret: Option<String>,
#[serde(default = "Vec::new")]
pub trusted_servers: Vec<Box<ServerName>>,
#[serde(default = "default_log")]
pub log: String,
#[serde(default)]
pub turn_username: String,
#[serde(default)]
pub turn_password: String,
#[serde(default = "Vec::new")]
pub turn_uris: Vec<String>,
#[serde(default)]
pub turn_secret: String,
#[serde(default = "default_turn_ttl")]
pub turn_ttl: u64,
#[serde(flatten)]
pub catchall: BTreeMap<String, IgnoredAny>,
}
const DEPRECATED_KEYS: &[&str] = &["cache_capacity"];
impl Config {
pub fn warn_deprecated(&self) {
let mut was_deprecated = false;
for key in self
.catchall
.keys()
.filter(|key| DEPRECATED_KEYS.iter().any(|s| s == key))
{
warn!("Config parameter {} is deprecated", key);
was_deprecated = true;
}
if was_deprecated {
warn!("Read conduit documentation and check your configuration if any new configuration parameters should be adjusted");
}
}
}
fn false_fn() -> bool {
false
}
fn true_fn() -> bool {
true
}
fn default_database_backend() -> String {
"sqlite".to_owned()
}
fn default_db_cache_capacity_mb() -> f64 {
10.0
}
fn default_conduit_cache_capacity_modifier() -> f64 {
1.0
}
fn default_rocksdb_max_open_files() -> i32 {
20
}
fn default_pdu_cache_capacity() -> u32 {
150_000
}
fn default_cleanup_second_interval() -> u32 {
1 * 60 // every minute
}
fn default_max_request_size() -> u32 {
20 * 1024 * 1024 // Default to 20 MB
}
fn default_max_concurrent_requests() -> u16 {
100
}
fn default_log() -> String {
"info,state_res=warn,rocket=off,_=off,sled=off".to_owned()
}
fn default_turn_ttl() -> u64 {
60 * 60 * 24
}
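Apart from every field becoming pub, the struct and its serde defaults are copied verbatim from database/mod.rs (the matching removal appears further down). As a rough usage sketch, not part of this diff, loading it through Figment the way the main.rs hunks below already do could look like this; the "conduit.toml" path and the load_config helper are placeholders:

    use figment::{
        providers::{Env, Format, Toml},
        Figment,
    };

    // Hypothetical loader mirroring the Figment chain in main.rs; the config file
    // path is an illustrative placeholder, not something this merge introduces.
    fn load_config() -> Result<conduit::Config, figment::Error> {
        let config: conduit::Config = Figment::new()
            .merge(Toml::file("conduit.toml"))
            .merge(Env::prefixed("CONDUIT_").global())
            .extract()?;
        // Deprecated keys (e.g. cache_capacity) land in the flattened catchall map
        // and produce a warning here.
        config.warn_deprecated();
        Ok(config)
    }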

View file

@@ -6,7 +6,6 @@ pub mod appservice;
pub mod globals;
pub mod key_backups;
pub mod media;
pub mod proxy;
pub mod pusher;
pub mod rooms;
pub mod sending;
@@ -14,7 +13,7 @@ pub mod transaction_ids;
pub mod uiaa;
pub mod users;
use crate::{utils, Error, Result};
use crate::{utils, Config, Error, Result};
use abstraction::DatabaseEngine;
use directories::ProjectDirs;
use lru_cache::LruCache;
@@ -24,11 +23,9 @@ use rocket::{
request::{FromRequest, Request},
Shutdown, State,
};
use ruma::{DeviceId, EventId, RoomId, ServerName, UserId};
use serde::{de::IgnoredAny, Deserialize};
use ruma::{DeviceId, EventId, RoomId, UserId};
use std::{
collections::{BTreeMap, HashMap, HashSet},
convert::{TryFrom, TryInto},
fs::{self, remove_dir_all},
io::Write,
mem::size_of,
@@ -39,130 +36,6 @@ use std::{
use tokio::sync::{OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore};
use tracing::{debug, error, warn};
use self::proxy::ProxyConfig;
#[derive(Clone, Debug, Deserialize)]
pub struct Config {
server_name: Box<ServerName>,
#[serde(default = "default_database_backend")]
database_backend: String,
database_path: String,
#[serde(default = "default_db_cache_capacity_mb")]
db_cache_capacity_mb: f64,
#[serde(default = "default_conduit_cache_capacity_modifier")]
conduit_cache_capacity_modifier: f64,
#[serde(default = "default_rocksdb_max_open_files")]
rocksdb_max_open_files: i32,
#[serde(default = "default_pdu_cache_capacity")]
pdu_cache_capacity: u32,
#[serde(default = "default_cleanup_second_interval")]
cleanup_second_interval: u32,
#[serde(default = "default_max_request_size")]
max_request_size: u32,
#[serde(default = "default_max_concurrent_requests")]
max_concurrent_requests: u16,
#[serde(default = "false_fn")]
allow_registration: bool,
#[serde(default = "true_fn")]
allow_encryption: bool,
#[serde(default = "false_fn")]
allow_federation: bool,
#[serde(default = "true_fn")]
allow_room_creation: bool,
#[serde(default = "false_fn")]
pub allow_jaeger: bool,
#[serde(default = "false_fn")]
pub tracing_flame: bool,
#[serde(default)]
proxy: ProxyConfig,
jwt_secret: Option<String>,
#[serde(default = "Vec::new")]
trusted_servers: Vec<Box<ServerName>>,
#[serde(default = "default_log")]
pub log: String,
#[serde(default)]
turn_username: String,
#[serde(default)]
turn_password: String,
#[serde(default = "Vec::new")]
turn_uris: Vec<String>,
#[serde(default)]
turn_secret: String,
#[serde(default = "default_turn_ttl")]
turn_ttl: u64,
#[serde(flatten)]
catchall: BTreeMap<String, IgnoredAny>,
}
const DEPRECATED_KEYS: &[&str] = &["cache_capacity"];
impl Config {
pub fn warn_deprecated(&self) {
let mut was_deprecated = false;
for key in self
.catchall
.keys()
.filter(|key| DEPRECATED_KEYS.iter().any(|s| s == key))
{
warn!("Config parameter {} is deprecated", key);
was_deprecated = true;
}
if was_deprecated {
warn!("Read conduit documentation and check your configuration if any new configuration parameters should be adjusted");
}
}
}
fn false_fn() -> bool {
false
}
fn true_fn() -> bool {
true
}
fn default_database_backend() -> String {
"sqlite".to_owned()
}
fn default_db_cache_capacity_mb() -> f64 {
10.0
}
fn default_conduit_cache_capacity_modifier() -> f64 {
1.0
}
fn default_rocksdb_max_open_files() -> i32 {
20
}
fn default_pdu_cache_capacity() -> u32 {
150_000
}
fn default_cleanup_second_interval() -> u32 {
1 * 60 // every minute
}
fn default_max_request_size() -> u32 {
20 * 1024 * 1024 // Default to 20 MB
}
fn default_max_concurrent_requests() -> u16 {
100
}
fn default_log() -> String {
"info,state_res=warn,rocket=off,_=off,sled=off".to_owned()
}
fn default_turn_ttl() -> u64 {
60 * 60 * 24
}
pub struct Database {
_db: Arc<dyn DatabaseEngine>,
pub globals: globals::Globals,

View file

@@ -6,7 +6,7 @@ use ruma::{
RoomId, UserId,
};
use serde::{de::DeserializeOwned, Serialize};
use std::{collections::HashMap, convert::TryFrom, sync::Arc};
use std::{collections::HashMap, sync::Arc};
use super::abstraction::Tree;

View file

@@ -1,4 +1,4 @@
use std::{convert::TryInto, sync::Arc};
use std::sync::Arc;
use crate::{pdu::PduBuilder, Database};
use rocket::futures::{channel::mpsc, stream::StreamExt};

View file

@@ -19,7 +19,7 @@ use ruma::{
};
use tracing::{error, info, warn};
use std::{convert::TryFrom, fmt::Debug, mem, sync::Arc};
use std::{fmt::Debug, mem, sync::Arc};
use super::abstraction::Tree;

View file

@@ -35,7 +35,6 @@ use serde_json::value::to_raw_value;
use std::{
borrow::Cow,
collections::{BTreeMap, HashMap, HashSet},
convert::{TryFrom, TryInto},
fmt::Debug,
iter,
mem::size_of,

View file

@@ -11,7 +11,6 @@ use ruma::{
};
use std::{
collections::{HashMap, HashSet},
convert::TryInto,
mem,
sync::Arc,
};

View file

@@ -1,6 +1,5 @@
use std::{
collections::{BTreeMap, HashMap, HashSet},
convert::TryInto,
fmt::Debug,
sync::Arc,
time::{Duration, Instant},

View file

@@ -11,12 +11,7 @@ use ruma::{
DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, RoomAliasId, UInt,
UserId,
};
use std::{
collections::BTreeMap,
convert::{TryFrom, TryInto},
mem,
sync::Arc,
};
use std::{collections::BTreeMap, mem, sync::Arc};
use tracing::warn;
use super::abstraction::Tree;

View file

@@ -9,6 +9,7 @@
use std::ops::Deref;
mod config;
mod database;
mod error;
mod pdu;
@@ -19,7 +20,8 @@ pub mod appservice_server;
pub mod client_server;
pub mod server_server;
pub use database::{Config, Database};
pub use config::Config;
pub use database::Database;
pub use error::{Error, Result};
pub use pdu::PduEvent;
pub use rocket::Config as RocketConfig;
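Because the crate root re-exports the relocated type, the public path is unchanged and downstream imports keep compiling, e.g. (illustration only, not from the diff):

    // Same import as before the refactor; only the internal module moved.
    use conduit::{Config, Database};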

View file

@@ -184,9 +184,6 @@ fn setup_rocket(config: Figment, data: Arc<RwLock<Database>>) -> rocket::Rocket<
#[rocket::main]
async fn main() {
// Force log level off, so we can use our own logger
std::env::set_var("CONDUIT_LOG_LEVEL", "off");
let raw_config =
Figment::from(default_config())
.merge(
@@ -197,8 +194,6 @@ async fn main() {
)
.merge(Env::prefixed("CONDUIT_").global());
std::env::set_var("RUST_LOG", "warn");
let config = match raw_config.extract::<Config>() {
Ok(s) => s,
Err(e) => {
@@ -244,8 +239,6 @@ async fn main() {
println!("exporting");
opentelemetry::global::shutdown_tracer_provider();
} else {
std::env::set_var("RUST_LOG", &config.log);
let registry = tracing_subscriber::Registry::default();
if config.tracing_flame {
let (flame_layer, _guard) =
@@ -259,7 +252,7 @@ async fn main() {
start.await;
} else {
let fmt_layer = tracing_subscriber::fmt::Layer::new();
let filter_layer = EnvFilter::try_from_default_env()
let filter_layer = EnvFilter::try_new(&config.log)
.or_else(|_| EnvFilter::try_new("info"))
.unwrap();
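With the CONDUIT_LOG_LEVEL/RUST_LOG set_var calls removed, the filter directives now come straight from config.log (default_log() above supplies the default). A condensed sketch of the non-flame branch, assuming the registry and fmt-layer setup from this hunk is otherwise unchanged:

    use tracing_subscriber::{prelude::*, EnvFilter};

    // Hypothetical helper: build the subscriber from config.log instead of RUST_LOG.
    fn init_tracing(config: &conduit::Config) {
        let filter_layer =
            EnvFilter::try_new(&config.log).unwrap_or_else(|_| EnvFilter::new("info"));
        let fmt_layer = tracing_subscriber::fmt::Layer::new();
        tracing_subscriber::registry()
            .with(filter_layer)
            .with(fmt_layer)
            .init();
    }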

View file

@@ -12,7 +12,7 @@ use serde_json::{
json,
value::{to_raw_value, RawValue as RawJsonValue},
};
use std::{cmp::Ordering, collections::BTreeMap, convert::TryInto, sync::Arc};
use std::{cmp::Ordering, collections::BTreeMap, sync::Arc};
use tracing::warn;
/// Content hashes of a PDU.

View file

@@ -60,7 +60,6 @@ use ruma::{
use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
use std::{
collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap, HashSet},
convert::{TryFrom, TryInto},
fmt::Debug,
future::Future,
mem,

View file

@@ -4,7 +4,6 @@ use rand::prelude::*;
use ruma::serde::{try_from_json_map, CanonicalJsonError, CanonicalJsonObject};
use std::{
cmp,
convert::TryInto,
str::FromStr,
time::{SystemTime, UNIX_EPOCH},
};