Mirror of https://github.com/girlbossceo/conduwuit.git (synced 2024-11-28 05:45:48 +00:00)

Commit afd8112e25: Merge branch 'spaces' into 'next'

Automatic update checker

See merge request famedly/conduit!522
@@ -172,6 +172,7 @@ max_request_size = 20_000_000 # in bytes
 allow_registration = true

 allow_federation = true
+allow_check_for_updates = true

 # Server to get public keys from. You probably shouldn't change this
 trusted_servers = ["matrix.org"]
@@ -30,6 +30,7 @@ ENV CONDUIT_CONFIG=/workdir/conduit.toml

 RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml
 RUN echo "allow_federation = true" >> conduit.toml
+RUN echo "allow_check_for_updates = true" >> conduit.toml
 RUN echo "allow_encryption = true" >> conduit.toml
 RUN echo "allow_registration = true" >> conduit.toml
 RUN echo "log = \"warn,_=off,sled=off\"" >> conduit.toml
@@ -39,6 +39,7 @@ max_request_size = 20_000_000 # in bytes
 allow_registration = true

 allow_federation = true
+allow_check_for_updates = true

 # Enable the display name lightning bolt on registration.
 enable_lightning_bolt = true
debian/postinst (vendored)

@@ -73,6 +73,7 @@ max_request_size = 20_000_000 # in bytes
 allow_registration = true

 allow_federation = true
+allow_check_for_updates = true

 trusted_servers = ["matrix.org"]

@@ -29,6 +29,7 @@ services:
 CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
 CONDUIT_ALLOW_REGISTRATION: 'true'
 CONDUIT_ALLOW_FEDERATION: 'true'
+CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
 CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
 #CONDUIT_MAX_CONCURRENT_REQUESTS: 100
 #CONDUIT_LOG: warn,rocket=off,_=off,sled=off

@@ -29,6 +29,7 @@ services:
 CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
 CONDUIT_ALLOW_REGISTRATION: 'true'
 CONDUIT_ALLOW_FEDERATION: 'true'
+CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
 CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
 #CONDUIT_MAX_CONCURRENT_REQUESTS: 100
 #CONDUIT_LOG: warn,rocket=off,_=off,sled=off
@@ -35,8 +35,9 @@ services:
 # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
 # CONDUIT_LOG: info # default is: "warn,_=off,sled=off"
 # CONDUIT_ALLOW_JAEGER: 'false'
-# CONDUIT_ALLOW_ENCRYPTION: 'false'
-# CONDUIT_ALLOW_FEDERATION: 'false'
+# CONDUIT_ALLOW_ENCRYPTION: 'true'
+# CONDUIT_ALLOW_FEDERATION: 'true'
+# CONDUIT_ALLOW_CHECK_FOR_UPDATES: 'true'
 # CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit
 # CONDUIT_WORKERS: 10
 # CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
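The compose files above all surface the new setting the same way: as the CONDUIT_ALLOW_CHECK_FOR_UPDATES environment variable rather than a line in conduit.toml. Conduit's real loader layers the CONDUIT_*-prefixed environment over the config file; the helper below is only an illustrative Rust sketch of parsing such an override, not the project's actual code.

use std::env;

// Hypothetical helper: read CONDUIT_ALLOW_CHECK_FOR_UPDATES if it is set,
// parse it as a boolean, and otherwise fall back to the given default.
fn allow_check_for_updates_from_env(default: bool) -> bool {
    env::var("CONDUIT_ALLOW_CHECK_FOR_UPDATES")
        .ok()
        .and_then(|raw| raw.trim().parse::<bool>().ok())
        .unwrap_or(default)
}

fn main() {
    // With the compose entry above, the variable is "true" inside the
    // container, so this would print "true"; without it, the default is used.
    println!("{}", allow_check_for_updates_from_env(true));
}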
@@ -28,6 +28,8 @@ pub struct Config {
     pub db_cache_capacity_mb: f64,
     #[serde(default = "true_fn")]
     pub enable_lightning_bolt: bool,
+    #[serde(default = "true_fn")]
+    pub allow_check_for_updates: bool,
     #[serde(default = "default_conduit_cache_capacity_modifier")]
     pub conduit_cache_capacity_modifier: f64,
     #[serde(default = "default_rocksdb_max_open_files")]
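Because of the #[serde(default = "true_fn")] attribute, allow_check_for_updates is optional in existing configs and defaults to enabled. A minimal, self-contained sketch of that default mechanism, assuming the serde and toml crates (true_fn mirrors the helper Conduit already uses for its other boolean defaults):

use serde::Deserialize;

// Mirrors Conduit's helper for boolean defaults.
fn true_fn() -> bool {
    true
}

#[derive(Deserialize, Debug)]
struct Config {
    #[serde(default = "true_fn")]
    allow_check_for_updates: bool,
}

fn main() {
    // Field omitted: serde calls true_fn() and the checker stays enabled.
    let cfg: Config = toml::from_str("").unwrap();
    assert!(cfg.allow_check_for_updates);

    // Explicit opt-out still works.
    let cfg: Config = toml::from_str("allow_check_for_updates = false").unwrap();
    assert!(!cfg.allow_check_for_updates);
}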
@@ -12,6 +12,7 @@ use ruma::{
 use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};

 pub const COUNTER: &[u8] = b"c";
+pub const LAST_CHECK_FOR_UPDATES_COUNT: &[u8] = b"u";

 #[async_trait]
 impl service::globals::Data for KeyValueDatabase {
@@ -27,6 +28,23 @@ impl service::globals::Data for KeyValueDatabase {
             })
     }

+    fn last_check_for_updates_id(&self) -> Result<u64> {
+        self.global
+            .get(LAST_CHECK_FOR_UPDATES_COUNT)?
+            .map_or(Ok(0_u64), |bytes| {
+                utils::u64_from_bytes(&bytes).map_err(|_| {
+                    Error::bad_database("last check for updates count has invalid bytes.")
+                })
+            })
+    }
+
+    fn update_check_for_updates_id(&self, id: u64) -> Result<()> {
+        self.global
+            .insert(LAST_CHECK_FOR_UPDATES_COUNT, &id.to_be_bytes())?;
+
+        Ok(())
+    }
+
     async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> {
         let userid_bytes = user_id.as_bytes().to_vec();
         let mut userid_prefix = userid_bytes.clone();
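The new LAST_CHECK_FOR_UPDATES_COUNT key stores the id of the newest announcement that has been handled as eight big-endian bytes; a missing value reads back as 0. A standalone sketch of that encode/decode round trip using only the standard library (the real code goes through utils::u64_from_bytes and Error::bad_database):

// Encode the id the same way the diff does: eight big-endian bytes.
fn encode(id: u64) -> [u8; 8] {
    id.to_be_bytes()
}

// Decode what was read back from the key-value store; a missing value means
// "never checked before" and maps to 0, like map_or(Ok(0_u64), ...) above.
fn decode(bytes: Option<&[u8]>) -> Result<u64, &'static str> {
    match bytes {
        None => Ok(0),
        Some(b) => b
            .try_into()
            .map(u64::from_be_bytes)
            .map_err(|_| "last check for updates count has invalid bytes."),
    }
}

fn main() {
    let stored = encode(42);
    assert_eq!(decode(Some(&stored)), Ok(42));
    assert_eq!(decode(None), Ok(0));
}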
@@ -18,6 +18,7 @@ use ruma::{
     CanonicalJsonValue, EventId, OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId,
     UserId,
 };
+use serde::Deserialize;
 use std::{
     collections::{BTreeMap, HashMap, HashSet},
     fs::{self, remove_dir_all},
@@ -25,7 +26,9 @@ use std::{
     mem::size_of,
     path::Path,
     sync::{Arc, Mutex, RwLock},
+    time::Duration,
 };
+use tokio::time::interval;

 use tracing::{debug, error, info, warn};

@@ -982,6 +985,9 @@ impl KeyValueDatabase {
         services().sending.start_handler();

         Self::start_cleanup_task().await;
+        if services().globals.allow_check_for_updates() {
+            Self::start_check_for_updates_task();
+        }

         Ok(())
     }
@@ -998,9 +1004,61 @@ impl KeyValueDatabase {
     }

     #[tracing::instrument]
-    pub async fn start_cleanup_task() {
-        use tokio::time::interval;
+    pub fn start_check_for_updates_task() {
+        tokio::spawn(async move {
+            let timer_interval = Duration::from_secs(60 * 60);
+            let mut i = interval(timer_interval);
+            loop {
+                i.tick().await;
+                let _ = Self::try_handle_updates().await;
+            }
+        });
+    }

+    async fn try_handle_updates() -> Result<()> {
+        let response = services()
+            .globals
+            .default_client()
+            .get("https://conduit.rs/check-for-updates/stable")
+            .send()
+            .await?;
+
+        #[derive(Deserialize)]
+        struct CheckForUpdatesResponseEntry {
+            id: u64,
+            date: String,
+            message: String,
+        }
+        #[derive(Deserialize)]
+        struct CheckForUpdatesResponse {
+            updates: Vec<CheckForUpdatesResponseEntry>,
+        }
+
+        let response = serde_json::from_str::<CheckForUpdatesResponse>(&response.text().await?)
+            .map_err(|_| Error::BadServerResponse("Bad version check response"))?;
+
+        let mut last_update_id = services().globals.last_check_for_updates_id()?;
+        for update in response.updates {
+            last_update_id = last_update_id.max(update.id);
+            if update.id > services().globals.last_check_for_updates_id()? {
+                println!("{}", update.message);
+                services()
+                    .admin
+                    .send_message(RoomMessageEventContent::text_plain(format!(
+                        "@room: The following is a message from the Conduit developers. It was sent on '{}':\n\n{}",
+                        update.date, update.message
+                    )))
+            }
+        }
+        services()
+            .globals
+            .update_check_for_updates_id(last_update_id)?;
+
+        Ok(())
+    }
+
+    #[tracing::instrument]
+    pub async fn start_cleanup_task() {
         #[cfg(unix)]
         use tokio::signal::unix::{signal, SignalKind};

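start_check_for_updates_task spawns an hourly loop that fetches https://conduit.rs/check-for-updates/stable, deserializes the announcement list, and reports anything newer than the stored id. A stripped-down, standalone sketch of the same polling pattern, assuming the tokio, reqwest, serde and serde_json crates (the URL and JSON shape are taken from the diff; the service wiring and admin-room message are simplified to a println!):

use std::time::Duration;

use serde::Deserialize;
use tokio::time::interval;

#[derive(Deserialize)]
struct CheckForUpdatesResponseEntry {
    id: u64,
    date: String,
    message: String,
}

#[derive(Deserialize)]
struct CheckForUpdatesResponse {
    updates: Vec<CheckForUpdatesResponseEntry>,
}

async fn try_handle_updates(last_seen: &mut u64) -> Result<(), Box<dyn std::error::Error>> {
    // Same endpoint and payload shape as the diff above.
    let body = reqwest::get("https://conduit.rs/check-for-updates/stable")
        .await?
        .text()
        .await?;
    let response: CheckForUpdatesResponse = serde_json::from_str(&body)?;

    for update in response.updates {
        if update.id > *last_seen {
            // Conduit forwards this to the admin room; a println! stands in here.
            println!("[{}] {}", update.date, update.message);
        }
        *last_seen = (*last_seen).max(update.id);
    }
    Ok(())
}

#[tokio::main]
async fn main() {
    let mut last_seen = 0u64;
    // Fires immediately, then once per hour, exactly like the spawned task.
    let mut timer = interval(Duration::from_secs(60 * 60));
    loop {
        timer.tick().await;
        let _ = try_handle_updates(&mut last_seen).await;
    }
}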
@@ -1,7 +1,7 @@
 use std::{
     collections::BTreeMap,
     convert::{TryFrom, TryInto},
-    sync::Arc,
+    sync::{Arc, RwLock},
     time::Instant,
 };

@@ -163,6 +163,20 @@ enum AdminCommand {
     DisableRoom { room_id: Box<RoomId> },
     /// Enables incoming federation handling for a room again.
     EnableRoom { room_id: Box<RoomId> },
+
+    /// Verify json signatures
+    /// [commandbody]
+    /// # ```
+    /// # json here
+    /// # ```
+    SignJson,
+
+    /// Verify json signatures
+    /// [commandbody]
+    /// # ```
+    /// # json here
+    /// # ```
+    VerifyJson,
 }

 #[derive(Debug)]
@@ -754,6 +768,60 @@ impl Service {
                     )
                 }
             }
+            AdminCommand::SignJson => {
+                if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```"
+                {
+                    let string = body[1..body.len() - 1].join("\n");
+                    match serde_json::from_str(&string) {
+                        Ok(mut value) => {
+                            ruma::signatures::sign_json(
+                                services().globals.server_name().as_str(),
+                                services().globals.keypair(),
+                                &mut value,
+                            )
+                            .expect("our request json is what ruma expects");
+                            let json_text = serde_json::to_string_pretty(&value)
+                                .expect("canonical json is valid json");
+                            RoomMessageEventContent::text_plain(json_text)
+                        }
+                        Err(e) => RoomMessageEventContent::text_plain(format!("Invalid json: {e}")),
+                    }
+                } else {
+                    RoomMessageEventContent::text_plain(
+                        "Expected code block in command body. Add --help for details.",
+                    )
+                }
+            }
+            AdminCommand::VerifyJson => {
+                if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```"
+                {
+                    let string = body[1..body.len() - 1].join("\n");
+                    match serde_json::from_str(&string) {
+                        Ok(value) => {
+                            let pub_key_map = RwLock::new(BTreeMap::new());
+
+                            services()
+                                .rooms
+                                .event_handler
+                                .fetch_required_signing_keys(&value, &pub_key_map)
+                                .await?;
+
+                            let pub_key_map = pub_key_map.read().unwrap();
+                            match ruma::signatures::verify_json(&pub_key_map, &value) {
+                                Ok(_) => RoomMessageEventContent::text_plain("Signature correct"),
+                                Err(e) => RoomMessageEventContent::text_plain(format!(
+                                    "Signature verification failed: {e}"
+                                )),
+                            }
+                        }
+                        Err(e) => RoomMessageEventContent::text_plain(format!("Invalid json: {e}")),
+                    }
+                } else {
+                    RoomMessageEventContent::text_plain(
+                        "Expected code block in command body. Add --help for details.",
+                    )
+                }
+            }
         };

         Ok(reply_message_content)
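Both new admin commands use the same body convention: the JSON payload has to be wrapped in a Markdown code block, and only the lines between the fences are parsed. A small standalone sketch of that extraction step, assuming serde_json (extract_json is an illustrative helper, not a function in the codebase):

// Illustrative helper (not in the codebase): pull the JSON out of a
// code-fenced admin command body and parse it.
fn extract_json(body: &[&str]) -> Result<serde_json::Value, String> {
    if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" {
        let string = body[1..body.len() - 1].join("\n");
        serde_json::from_str(&string).map_err(|e| format!("Invalid json: {e}"))
    } else {
        Err("Expected code block in command body. Add --help for details.".to_owned())
    }
}

fn main() {
    let body = ["```", r#"{"hello": "world"}"#, "```"];
    println!("{:?}", extract_json(&body));
}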
@@ -13,6 +13,8 @@ use crate::Result;
 pub trait Data: Send + Sync {
     fn next_count(&self) -> Result<u64>;
     fn current_count(&self) -> Result<u64>;
+    fn last_check_for_updates_id(&self) -> Result<u64>;
+    fn update_check_for_updates_id(&self, id: u64) -> Result<()>;
     async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()>;
     fn cleanup(&self) -> Result<()>;
     fn memory_usage(&self) -> String;
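The Data trait keeps the update-checker state behind the same storage abstraction as the other counters. A toy in-memory implementation of just the two new methods, to show the intended contract (Conduit's real backend is the KeyValueDatabase shown earlier, not this sketch):

use std::sync::atomic::{AtomicU64, Ordering};

// Toy result alias so the signatures match the trait above.
type Result<T> = std::result::Result<T, ()>;

trait Data: Send + Sync {
    fn last_check_for_updates_id(&self) -> Result<u64>;
    fn update_check_for_updates_id(&self, id: u64) -> Result<()>;
}

// In-memory stand-in for the persistent key-value backend.
struct MemoryDb {
    last_update_id: AtomicU64,
}

impl Data for MemoryDb {
    fn last_check_for_updates_id(&self) -> Result<u64> {
        Ok(self.last_update_id.load(Ordering::SeqCst))
    }

    fn update_check_for_updates_id(&self, id: u64) -> Result<()> {
        self.last_update_id.store(id, Ordering::SeqCst);
        Ok(())
    }
}

fn main() {
    let db = MemoryDb { last_update_id: AtomicU64::new(0) };
    db.update_check_for_updates_id(7).unwrap();
    assert_eq!(db.last_check_for_updates_id(), Ok(7));
}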
@@ -209,6 +209,16 @@ impl Service {
         self.db.current_count()
     }

+    #[tracing::instrument(skip(self))]
+    pub fn last_check_for_updates_id(&self) -> Result<u64> {
+        self.db.last_check_for_updates_id()
+    }
+
+    #[tracing::instrument(skip(self))]
+    pub fn update_check_for_updates_id(&self, id: u64) -> Result<()> {
+        self.db.update_check_for_updates_id(id)
+    }
+
     pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> {
         self.db.watch(user_id, device_id).await
     }
@@ -257,6 +267,10 @@ impl Service {
         self.config.enable_lightning_bolt
     }

+    pub fn allow_check_for_updates(&self) -> bool {
+        self.config.allow_check_for_updates
+    }
+
     pub fn trusted_servers(&self) -> &[OwnedServerName] {
         &self.config.trusted_servers
     }
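Taken together, the commit threads one boolean from the config file to startup: Config::allow_check_for_updates feeds a Globals getter, and the startup code shown earlier only spawns the checker task when it returns true. A minimal sketch of that wiring with toy types (not Conduit's actual Services/Globals structs):

struct Config {
    allow_check_for_updates: bool,
}

struct Globals {
    config: Config,
}

impl Globals {
    fn allow_check_for_updates(&self) -> bool {
        self.config.allow_check_for_updates
    }
}

fn start_check_for_updates_task() {
    // In Conduit this spawns the hourly polling loop shown earlier.
    println!("update checker started");
}

fn main() {
    let globals = Globals {
        config: Config { allow_check_for_updates: true },
    };
    if globals.allow_check_for_updates() {
        start_check_for_updates_task();
    }
}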