resolve some pedantic lints, reduce some allocations
Signed-off-by: strawberry <strawberry@puppygock.gay>
parent 507baf20fa
commit 496a9c7af8
@@ -307,6 +307,7 @@ verbose_file_reads = "warn"
 cast_possible_wrap = "warn"
 # cast_possible_truncation = "warn"
 redundant_closure_for_method_calls = "warn"
+large_futures = "warn"

 # not in rust 1.75.0 (breaks CI)
 # infinite_loop = "warn"
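Note: `clippy::large_futures` warns when a future with an unusually large state machine is moved around by value. A minimal sketch of the kind of code it flags (the buffer size and names here are illustrative, not from this repo):

    // Illustrative only: a future whose state machine holds a large buffer.
    async fn big_future() -> [u8; 16384] {
        [0u8; 16384]
    }

    async fn caller() {
        // Awaiting big_future() by value keeps the whole 16 KiB state inline;
        // the lint suggests boxing it so only a pointer moves around:
        let data = Box::pin(big_future()).await;
        assert_eq!(data.len(), 16384);
    }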
@@ -806,10 +806,7 @@ async fn join_room_by_id_helper(
 
     let restriction_rooms = match join_rules_event_content {
         Some(RoomJoinRulesEventContent {
-            join_rule: JoinRule::Restricted(restricted),
-        })
-        | Some(RoomJoinRulesEventContent {
-            join_rule: JoinRule::KnockRestricted(restricted),
+            join_rule: JoinRule::Restricted(restricted) | JoinRule::KnockRestricted(restricted),
         }) => restricted
             .allow
             .into_iter()
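Note: the two match arms collapse into one because Rust or-patterns may appear inside a larger pattern, as long as every alternative binds the same names with the same types. The same shape in a standalone sketch (the enum is a simplified stand-in for the ruma types):

    enum JoinRule {
        Restricted(Vec<String>),
        KnockRestricted(Vec<String>),
        Public,
    }

    fn allow_list(rule: JoinRule) -> Vec<String> {
        match rule {
            // One arm, two constructors: both alternatives bind `allow`.
            JoinRule::Restricted(allow) | JoinRule::KnockRestricted(allow) => allow,
            JoinRule::Public => Vec::new(),
        }
    }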
@@ -28,8 +28,8 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route(
         sender_user,
         &body.room_id,
         &body.event_id,
-        Some(body.event_type.clone()),
-        Some(body.rel_type.clone()),
+        &Some(body.event_type.clone()),
+        &Some(body.rel_type.clone()),
         from,
         to,
         limit,
@@ -66,8 +66,8 @@ pub async fn get_relating_events_with_rel_type_route(
         sender_user,
         &body.room_id,
         &body.event_id,
-        None,
-        Some(body.rel_type.clone()),
+        &None,
+        &Some(body.rel_type.clone()),
         from,
         to,
         limit,
@@ -104,8 +104,8 @@ pub async fn get_relating_events_route(
         sender_user,
         &body.room_id,
         &body.event_id,
-        None,
-        None,
+        &None,
+        &None,
         from,
         to,
         limit,
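Note: all three call sites now pass `&Option<...>` because `paginate_relations_with_filter` (changed later in this commit) borrows its filters instead of taking them by value. A minimal sketch of that calling convention, with hypothetical names:

    // Borrowed filter: the callee only reads it, so the caller keeps ownership.
    fn filtered_len(items: &[String], filter: &Option<String>) -> usize {
        items
            .iter()
            .filter(|item| filter.as_ref().map_or(true, |f| *item == f))
            .count()
    }

    fn demo() {
        let items = vec!["a".to_owned(), "b".to_owned()];
        assert_eq!(filtered_len(&items, &Some("a".to_owned())), 1);
        assert_eq!(filtered_len(&items, &None), 2);
    }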
@@ -470,7 +470,7 @@ async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDest)
 }
 
 async fn query_srv_record(hostname: &'_ str) -> Option<FedDest> {
-    fn handle_successful_srv(srv: SrvLookup) -> Option<FedDest> {
+    fn handle_successful_srv(srv: &SrvLookup) -> Option<FedDest> {
         srv.iter().next().map(|result| {
             FedDest::Named(
                 result.target().to_string().trim_end_matches('.').to_owned(),
@@ -493,7 +493,7 @@ async fn query_srv_record(hostname: &'_ str) -> Option<FedDest> {
             info!("Querying deprecated _matrix SRV record for host {:?}", hostname);
             lookup_srv(&second_hostname)
         })
-        .and_then(|srv_lookup| async { Ok(handle_successful_srv(srv_lookup)) })
+        .and_then(|srv_lookup| async move { Ok(handle_successful_srv(&srv_lookup)) })
         .await
         .ok()
         .flatten()
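Note: the closure becomes `async move` so the returned future owns `srv_lookup` and can then lend it to `handle_successful_srv` by reference, matching the helper's new `&SrvLookup` parameter. Assuming the chain uses `futures_util::TryFutureExt::and_then` (an assumption; the import is not visible in this hunk), the shape is:

    use futures_util::TryFutureExt;

    async fn lookup() -> Result<String, ()> {
        Ok("_matrix._tcp.example.org.".to_owned())
    }

    fn first_target(raw: &str) -> Option<String> {
        raw.split_whitespace().next().map(|s| s.trim_end_matches('.').to_owned())
    }

    async fn query() -> Option<String> {
        lookup()
            // `async move` owns `raw`; the helper only borrows it.
            .and_then(|raw| async move { Ok(first_target(&raw)) })
            .await
            .ok()
            .flatten()
    }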
@@ -219,7 +219,7 @@ impl Config {
 
     /// Checks the presence of the `address` and `unix_socket_path` keys in the
     /// raw_config, exiting the process if both keys were detected.
-    pub fn is_dual_listening(&self, raw_config: Figment) -> bool {
+    pub fn is_dual_listening(&self, raw_config: &Figment) -> bool {
         let check_address = raw_config.find_value("address");
         let check_unix_socket = raw_config.find_value("unix_socket_path");
 
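Note: taking `&Figment` means `main` keeps ownership of the raw config and can still use it after this check (the matching call-site fix appears below). A minimal sketch of the borrowed form, assuming only `Figment::find_value` and omitting the logging and process exit the real method performs:

    use figment::Figment;

    // Sketch: the caller retains the Figment; the check only needs read access.
    fn is_dual_listening(raw_config: &Figment) -> bool {
        let check_address = raw_config.find_value("address");
        let check_unix_socket = raw_config.find_value("unix_socket_path");
        check_address.is_ok() && check_unix_socket.is_ok()
    }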
@@ -27,7 +27,10 @@ use ruma::{
     CanonicalJsonValue, EventId, OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UserId,
 };
 use serde::Deserialize;
-use tokio::{sync::mpsc, time::interval};
+use tokio::{
+    sync::mpsc,
+    time::{interval, Instant},
+};
 use tracing::{debug, error, info, warn};
 
 use crate::{
@@ -186,6 +189,17 @@ pub struct KeyValueDatabase {
     pub(super) presence_timer_sender: Arc<mpsc::UnboundedSender<(OwnedUserId, Duration)>>,
 }
 
+#[derive(Deserialize)]
+struct CheckForUpdatesResponseEntry {
+    id: u64,
+    date: String,
+    message: String,
+}
+#[derive(Deserialize)]
+struct CheckForUpdatesResponse {
+    updates: Vec<CheckForUpdatesResponseEntry>,
+}
+
 impl KeyValueDatabase {
     fn check_db_setup(config: &Config) -> Result<()> {
         let path = Path::new(&config.database_path);
@@ -1035,17 +1049,6 @@ impl KeyValueDatabase {
         let response =
             services().globals.default_client().get("https://pupbrain.dev/check-for-updates/stable").send().await?;
 
-        #[derive(Deserialize)]
-        struct CheckForUpdatesResponseEntry {
-            id: u64,
-            date: String,
-            message: String,
-        }
-        #[derive(Deserialize)]
-        struct CheckForUpdatesResponse {
-            updates: Vec<CheckForUpdatesResponseEntry>,
-        }
-
         let response = serde_json::from_str::<CheckForUpdatesResponse>(&response.text().await?).map_err(|e| {
             error!("Bad check for updates response: {e}");
             Error::BadServerResponse("Bad version check response")
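Note: moving the `CheckForUpdates*` structs from the method body to module scope changes nothing about deserialization; it just makes the types nameable elsewhere in the file. For reference, a self-contained sketch of the parse they perform (the payload shape is assumed from the struct fields):

    use serde::Deserialize;

    #[derive(Deserialize)]
    struct CheckForUpdatesResponseEntry {
        id: u64,
        date: String,
        message: String,
    }

    #[derive(Deserialize)]
    struct CheckForUpdatesResponse {
        updates: Vec<CheckForUpdatesResponseEntry>,
    }

    fn demo() {
        // Hypothetical body matching the struct layout above.
        let body = r#"{"updates":[{"id":1,"date":"2024-01-01","message":"update available"}]}"#;
        let parsed: CheckForUpdatesResponse = serde_json::from_str(body).unwrap();
        assert_eq!(parsed.updates[0].id, 1);
    }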
@@ -1067,14 +1070,6 @@ impl KeyValueDatabase {
         Ok(())
     }
 
-    #[tracing::instrument]
-    async fn start_cleanup_task() {
-        #[cfg(unix)]
-        use tokio::signal::unix::{signal, SignalKind};
-        use tokio::time::Instant;
-
-        let timer_interval = Duration::from_secs(u64::from(services().globals.config.cleanup_second_interval));
-
     fn perform_cleanup() {
         let start = Instant::now();
         if let Err(e) = services().globals.cleanup() {
@@ -1084,6 +1079,13 @@ impl KeyValueDatabase {
         }
     }
 
+    #[tracing::instrument]
+    async fn start_cleanup_task() {
+        #[cfg(unix)]
+        use tokio::signal::unix::{signal, SignalKind};
+
+        let timer_interval = Duration::from_secs(u64::from(services().globals.config.cleanup_second_interval));
+
         tokio::spawn(async move {
             let mut i = interval(timer_interval);
             #[cfg(unix)]
@@ -1114,7 +1116,7 @@ impl KeyValueDatabase {
                     debug!(target: "database-cleanup", "Timer ticked")
                 }
 
-                perform_cleanup();
+                Self::perform_cleanup();
             }
         });
     }
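Note: `perform_cleanup` moved from a function nested inside `start_cleanup_task` out to the `impl` block (with `Instant` now imported at module level), so the call becomes `Self::perform_cleanup()`. The pattern in miniature:

    struct Database;

    impl Database {
        fn perform_cleanup() {
            // ... flush caches, compact storage, etc.
        }

        fn start_cleanup_task() {
            // Inside an impl block, associated functions are reached via Self::
            Self::perform_cleanup();
        }
    }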
@@ -141,7 +141,7 @@ async fn main() {
     config.warn_unknown_key();
 
     // don't start if we're listening on both UNIX sockets and TCP at same time
-    if config.is_dual_listening(raw_config) {
+    if config.is_dual_listening(&raw_config) {
         return;
     };
 
@@ -1030,7 +1030,7 @@ impl Service {
 
         for room_id in services().rooms.state_cache.rooms_joined(&user_id) {
             let room_id = room_id?;
-            rooms.push(Self::get_room_info(room_id));
+            rooms.push(Self::get_room_info(&room_id));
         }
 
         if rooms.is_empty() {
@@ -1510,7 +1510,7 @@ impl Service {
             .metadata
             .iter_ids()
             .filter_map(std::result::Result::ok)
-            .map(Self::get_room_info)
+            .map(|id: OwnedRoomId| Self::get_room_info(&id))
             .collect::<Vec<_>>();
         rooms.sort_by_key(|r| r.1);
         rooms.reverse();
@@ -1713,7 +1713,7 @@ impl Service {
             .directory
             .public_rooms()
             .filter_map(std::result::Result::ok)
-            .map(Self::get_room_info)
+            .map(|id: OwnedRoomId| Self::get_room_info(&id))
             .collect::<Vec<_>>();
         rooms.sort_by_key(|r| r.1);
         rooms.reverse();
@@ -1955,11 +1955,11 @@ impl Service {
         Ok(reply_message_content)
     }
 
-    fn get_room_info(id: OwnedRoomId) -> (OwnedRoomId, u64, String) {
+    fn get_room_info(id: &OwnedRoomId) -> (OwnedRoomId, u64, String) {
         (
             id.clone(),
-            services().rooms.state_cache.room_joined_count(&id).ok().flatten().unwrap_or(0),
-            services().rooms.state_accessor.get_name(&id).ok().flatten().unwrap_or_else(|| id.to_string()),
+            services().rooms.state_cache.room_joined_count(id).ok().flatten().unwrap_or(0),
+            services().rooms.state_accessor.get_name(id).ok().flatten().unwrap_or_else(|| id.to_string()),
        )
    }
 
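Note: once `get_room_info` borrows its argument while the iterators yield owned `OwnedRoomId`s, the bare method reference `.map(Self::get_room_info)` no longer type-checks, so a closure adapts the owned item to a borrow. The same situation with plain `String`:

    fn label(id: &String) -> String {
        format!("room {id}")
    }

    fn demo() {
        let ids = vec!["!a".to_owned(), "!b".to_owned()];
        // `.map(label)` would fail: the iterator yields String, `label` wants &String.
        let labels: Vec<String> = ids.into_iter().map(|id| label(&id)).collect();
        assert_eq!(labels, vec!["room !a", "room !b"]);
    }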
@@ -121,7 +121,7 @@ pub async fn presence_handler(
                 }
 
                 Some(user_id) = presence_timers.next() => {
-                    process_presence_timer(user_id)?;
+                    process_presence_timer(&user_id)?;
                 }
             }
         }
@@ -133,7 +133,7 @@ async fn presence_timer(user_id: OwnedUserId, timeout: Duration) -> OwnedUserId {
     user_id
 }
 
-fn process_presence_timer(user_id: OwnedUserId) -> Result<()> {
+fn process_presence_timer(user_id: &OwnedUserId) -> Result<()> {
     let idle_timeout = services().globals.config.presence_idle_timeout_s * 1_000;
     let offline_timeout = services().globals.config.presence_offline_timeout_s * 1_000;
 
@@ -141,8 +141,8 @@ fn process_presence_timer(user_id: OwnedUserId) -> Result<()> {
     let mut last_active_ago = None;
     let mut status_msg = None;
 
-    for room_id in services().rooms.state_cache.rooms_joined(&user_id) {
-        let presence_event = services().rooms.edus.presence.get_presence(&room_id?, &user_id)?;
+    for room_id in services().rooms.state_cache.rooms_joined(user_id) {
+        let presence_event = services().rooms.edus.presence.get_presence(&room_id?, user_id)?;
 
         if let Some(presence_event) = presence_event {
             presence_state = presence_event.content.presence;
@@ -162,10 +162,10 @@ fn process_presence_timer(user_id: OwnedUserId) -> Result<()> {
     debug!("Processed presence timer for user '{user_id}': Old state = {presence_state}, New state = {new_state:?}");
 
     if let Some(new_state) = new_state {
-        for room_id in services().rooms.state_cache.rooms_joined(&user_id) {
+        for room_id in services().rooms.state_cache.rooms_joined(user_id) {
             services().rooms.edus.presence.set_presence(
                 &room_id?,
-                &user_id,
+                user_id,
                 new_state.clone(),
                 Some(false),
                 last_active_ago,
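Note: with `user_id: &OwnedUserId`, the inner call sites can pass `user_id` directly: `&OwnedUserId` deref-coerces to `&UserId`, so the id is never cloned and no fresh `&` borrow is needed. Illustrated with the analogous `String`/`str` pair:

    fn rooms_joined(user: &str) -> usize {
        user.len() // stand-in for the real lookup
    }

    // Mirrors &OwnedUserId -> &UserId: &String coerces to &str at each call,
    // so one borrowed id serves several lookups without cloning.
    fn process(user_id: &String) -> (usize, usize) {
        (rooms_joined(user_id), rooms_joined(user_id))
    }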
@@ -41,8 +41,8 @@ impl Service {
 
     #[allow(clippy::too_many_arguments)]
     pub fn paginate_relations_with_filter(
-        &self, sender_user: &UserId, room_id: &RoomId, target: &EventId, filter_event_type: Option<TimelineEventType>,
-        filter_rel_type: Option<RelationType>, from: PduCount, to: Option<PduCount>, limit: usize,
+        &self, sender_user: &UserId, room_id: &RoomId, target: &EventId, filter_event_type: &Option<TimelineEventType>,
+        filter_rel_type: &Option<RelationType>, from: PduCount, to: Option<PduCount>, limit: usize,
     ) -> Result<get_relating_events::v1::Response> {
         let next_token;
 
@@ -85,6 +85,23 @@ impl Ord for PduCount {
     }
 }
 
+// Update Relationships
+#[derive(Deserialize)]
+struct ExtractRelatesTo {
+    #[serde(rename = "m.relates_to")]
+    relates_to: Relation,
+}
+
+#[derive(Clone, Debug, Deserialize)]
+struct ExtractEventId {
+    event_id: OwnedEventId,
+}
+#[derive(Clone, Debug, Deserialize)]
+struct ExtractRelatesToEventId {
+    #[serde(rename = "m.relates_to")]
+    relates_to: ExtractEventId,
+}
+
 pub struct Service {
     pub db: &'static dyn Data,
 
@@ -467,23 +484,6 @@ impl Service {
             _ => {},
         }
 
-        // Update Relationships
-        #[derive(Deserialize)]
-        struct ExtractRelatesTo {
-            #[serde(rename = "m.relates_to")]
-            relates_to: Relation,
-        }
-
-        #[derive(Clone, Debug, Deserialize)]
-        struct ExtractEventId {
-            event_id: OwnedEventId,
-        }
-        #[derive(Clone, Debug, Deserialize)]
-        struct ExtractRelatesToEventId {
-            #[serde(rename = "m.relates_to")]
-            relates_to: ExtractEventId,
-        }
-
         if let Ok(content) = serde_json::from_str::<ExtractRelatesToEventId>(pdu.content.get()) {
             if let Some(related_pducount) = services().rooms.timeline.get_pdu_count(&content.relates_to.event_id)? {
                 services().rooms.pdu_metadata.add_relation(PduCount::Normal(count2), related_pducount)?;
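Note: the `Extract*` helper structs pull a single field out of arbitrary event content, and `#[serde(rename = "m.relates_to")]` maps the dotted JSON key onto a Rust identifier. A standalone sketch of the extraction (using `String` for the event id instead of ruma's `OwnedEventId`):

    use serde::Deserialize;

    #[derive(Deserialize)]
    struct ExtractEventId {
        event_id: String,
    }

    #[derive(Deserialize)]
    struct ExtractRelatesToEventId {
        // Maps the "m.relates_to" JSON key; unknown keys like "body" are ignored.
        #[serde(rename = "m.relates_to")]
        relates_to: ExtractEventId,
    }

    fn demo() {
        let content = r#"{"m.relates_to":{"event_id":"$abc"},"body":"hi"}"#;
        let parsed: ExtractRelatesToEventId = serde_json::from_str(content).unwrap();
        assert_eq!(parsed.relates_to.event_id, "$abc");
    }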
@@ -10,6 +10,10 @@ use ruma::{
 };
 use thiserror::Error;
 use tracing::{error, info};
+use ErrorKind::{
+    Forbidden, GuestAccessForbidden, LimitExceeded, MissingToken, NotFound, ThreepidAuthFailed, ThreepidDenied,
+    TooLarge, Unauthorized, Unknown, UnknownToken, Unrecognized, UserDeactivated, WrongRoomKeysVersion,
+};
 
 use crate::RumaResponse;
 
@@ -105,10 +109,6 @@ impl Error {
 
         let message = format!("{self}");
 
-        use ErrorKind::{
-            Forbidden, GuestAccessForbidden, LimitExceeded, MissingToken, NotFound, ThreepidAuthFailed, ThreepidDenied,
-            TooLarge, Unauthorized, Unknown, UnknownToken, Unrecognized, UserDeactivated, WrongRoomKeysVersion,
-        };
         let (kind, status_code) = match self {
             Self::BadRequest(kind, _) => (
                 kind.clone(),
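Note: hoisting the `use ErrorKind::{...}` from the method body to the top of the module is purely organizational; the variants resolve the same way, but the short names become visible to the whole file rather than one function. In miniature, with hypothetical names:

    mod kinds {
        pub enum ErrorKind { Forbidden, NotFound, Unknown }
    }

    // Module-level import: the short variant names are usable file-wide,
    // not just inside one method body.
    use kinds::ErrorKind::{Forbidden, NotFound, Unknown};

    fn status(kind: &kinds::ErrorKind) -> u16 {
        match kind {
            Forbidden => 403,
            NotFound => 404,
            Unknown => 500,
        }
    }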