fix as conversions

Signed-off-by: Jason Volk <jason@zemos.net>
Jason Volk 2024-07-07 06:17:58 +00:00
parent 7397064edd
commit dcd7422c45
11 changed files with 107 additions and 51 deletions


@@ -770,7 +770,7 @@ perf = "warn"
#restriction = "warn"
arithmetic_side_effects = "warn"
-#as_conversions = "warn" # TODO
+as_conversions = "warn"
assertions_on_result_states = "warn"
dbg_macro = "warn"
default_union_representation = "warn"
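Note: clippy's as_conversions is a restriction lint that flags every use of the `as` cast operator, which is what drives the conversions in the rest of this commit. A minimal sketch of the kind of cast it rejects and the checked alternative (values are made up):

fn main() {
    let len: u64 = 5_000_000_000;
    // Flagged by as_conversions: `as` silently truncates on 32-bit targets.
    let _bad = len as usize;
    // Preferred: an explicit, fallible conversion.
    let _ok = usize::try_from(len).expect("len fits in usize");
}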


@@ -47,7 +47,7 @@ pub(crate) async fn get_hierarchy_route(body: Ruma<get_hierarchy::v1::Request>)
&body.room_id,
limit.try_into().unwrap_or(10),
key.map_or(vec![], |token| token.short_room_ids),
-max_depth.try_into().unwrap_or(3),
+max_depth.into(),
body.suggested_only,
)
.await
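Note: the unwrap_or(3) fallback can go because the service's max_depth parameter is widened to u64 later in this commit, and ruma's UInt converts into u64 infallibly (it only holds values up to 2^53 - 1). A quick illustration with a made-up value:

use ruma::UInt;

fn main() {
    let max_depth = UInt::from(3u32);
    let depth: u64 = max_depth.into(); // infallible: every UInt fits in a u64
    assert_eq!(depth, 3);
}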


@@ -4,7 +4,11 @@ use std::{
time::Duration,
};
-use conduit::PduCount;
+use conduit::{
+error,
+utils::math::{ruma_from_u64, ruma_from_usize, usize_from_ruma, usize_from_u64_truncated},
+PduCount,
+};
use ruma::{
api::client::{
filter::{FilterDefinition, LazyLoadOptions},
@@ -27,7 +31,7 @@ use ruma::{
serde::Raw,
uint, DeviceId, EventId, OwnedUserId, RoomId, UInt, UserId,
};
-use tracing::{error, Instrument as _, Span};
+use tracing::{Instrument as _, Span};
use crate::{service::pdu::EventHash, services, utils, Error, PduEvent, Result, Ruma, RumaResponse};
@@ -975,8 +979,8 @@ async fn load_joined_room(
},
summary: RoomSummary {
heroes,
-joined_member_count: joined_member_count.map(|n| (n as u32).into()),
-invited_member_count: invited_member_count.map(|n| (n as u32).into()),
+joined_member_count: joined_member_count.map(ruma_from_u64),
+invited_member_count: invited_member_count.map(ruma_from_u64),
},
unread_notifications: UnreadNotificationsCount {
highlight_count,
@@ -1026,7 +1030,7 @@ fn load_timeline(
// Take the last events for the timeline
timeline_pdus = non_timeline_pdus
.by_ref()
-.take(limit as usize)
+.take(usize_from_u64_truncated(limit))
.collect::<Vec<_>>()
.into_iter()
.rev()
@@ -1300,7 +1304,7 @@ pub(crate) async fn sync_events_v4_route(
r.0,
UInt::try_from(all_joined_rooms.len().saturating_sub(1)).unwrap_or(UInt::MAX),
);
-let room_ids = all_joined_rooms[(u64::from(r.0) as usize)..=(u64::from(r.1) as usize)].to_vec();
+let room_ids = all_joined_rooms[usize_from_ruma(r.0)..=usize_from_ruma(r.1)].to_vec();
new_known_rooms.extend(room_ids.iter().cloned());
for room_id in &room_ids {
let todo_room = todo_rooms
@@ -1333,7 +1337,7 @@ pub(crate) async fn sync_events_v4_route(
}
})
.collect(),
-count: UInt::from(all_joined_rooms.len() as u32),
+count: ruma_from_usize(all_joined_rooms.len()),
},
);
@@ -1529,20 +1533,22 @@ pub(crate) async fn sync_events_v4_route(
prev_batch,
limited,
joined_count: Some(
-(services()
+services()
.rooms
.state_cache
.room_joined_count(room_id)?
-.unwrap_or(0) as u32)
-.into(),
+.unwrap_or(0)
+.try_into()
+.unwrap_or_else(|_| uint!(0)),
),
invited_count: Some(
-(services()
+services()
.rooms
.state_cache
.room_invited_count(room_id)?
-.unwrap_or(0) as u32)
-.into(),
+.unwrap_or(0)
+.try_into()
+.unwrap_or_else(|_| uint!(0)),
),
num_live: None, // Count events in timeline greater than global sync counter
timestamp: None,
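Note: the (n as u32).into() pattern becomes a fallible try_into() that falls back to zero via ruma's uint! macro instead of silently truncating. The same pattern in isolation, with a made-up count:

use ruma::{uint, UInt};

fn main() {
    let joined: u64 = 12;
    // Out-of-range values become 0 rather than being truncated.
    let joined: UInt = joined.try_into().unwrap_or_else(|_| uint!(0));
    assert_eq!(u64::from(joined), 12);
}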


@@ -12,15 +12,24 @@ static JEMALLOC: jemalloc::Jemalloc = jemalloc::Jemalloc;
#[must_use]
pub fn memory_usage() -> String {
use mallctl::stats;
-let allocated = stats::allocated::read().unwrap_or_default() as f64 / 1024.0 / 1024.0;
-let active = stats::active::read().unwrap_or_default() as f64 / 1024.0 / 1024.0;
-let mapped = stats::mapped::read().unwrap_or_default() as f64 / 1024.0 / 1024.0;
-let metadata = stats::metadata::read().unwrap_or_default() as f64 / 1024.0 / 1024.0;
-let resident = stats::resident::read().unwrap_or_default() as f64 / 1024.0 / 1024.0;
-let retained = stats::retained::read().unwrap_or_default() as f64 / 1024.0 / 1024.0;
+let mibs = |input: Result<usize, mallctl::Error>| {
+let input = input.unwrap_or_default();
+let kibs = input / 1024;
+let kibs = u32::try_from(kibs).unwrap_or_default();
+let kibs = f64::from(kibs);
+kibs / 1024.0
+};
+let allocated = mibs(stats::allocated::read());
+let active = mibs(stats::active::read());
+let mapped = mibs(stats::mapped::read());
+let metadata = mibs(stats::metadata::read());
+let resident = mibs(stats::resident::read());
+let retained = mibs(stats::retained::read());
format!(
-"allocated: {allocated:.2} MiB\n active: {active:.2} MiB\n mapped: {mapped:.2} MiB\n metadata: {metadata:.2} \
-MiB\n resident: {resident:.2} MiB\n retained: {retained:.2} MiB\n "
+"allocated: {allocated:.2} MiB\nactive: {active:.2} MiB\nmapped: {mapped:.2} MiB\nmetadata: {metadata:.2} \
+MiB\nresident: {resident:.2} MiB\nretained: {retained:.2} MiB\n"
)
}
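Note: the new mibs closure converts a byte count to MiB without an `as` cast: integer-divide down to KiB, narrow to u32 fallibly (defaulting to 0 on overflow, as the closure does), then use the lossless u32 -> f64 conversion. The same idea as a free function, for illustration only:

fn bytes_to_mib(bytes: usize) -> f64 {
    let kib = bytes / 1024;                           // whole KiB
    let kib = u32::try_from(kib).unwrap_or_default(); // 0 if it cannot fit, like the closure
    f64::from(kib) / 1024.0                           // exact u32 -> f64, then MiB
}

fn main() {
    assert_eq!(bytes_to_mib(3 * 1024 * 1024), 3.0);
}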


@@ -2,6 +2,8 @@ use std::{cmp, time::Duration};
pub use checked_ops::checked_ops;
+use crate::{Error, Result};
/// Checked arithmetic expression. Returns a Result<R, Error::Arithmetic>
#[macro_export]
macro_rules! checked {
@@ -50,3 +52,36 @@ pub fn continue_exponential_backoff(min: Duration, max: Duration, elapsed: Durat
let min = cmp::min(min, max);
elapsed < min
}
+#[inline]
+#[allow(clippy::as_conversions)]
+pub fn usize_from_f64(val: f64) -> Result<usize, Error> {
+if val < 0.0 {
+return Err(Error::Arithmetic("Converting negative float to unsigned integer"));
+}
+Ok(val as usize)
+}
+#[inline]
+#[must_use]
+pub fn usize_from_ruma(val: ruma::UInt) -> usize {
+usize::try_from(val).expect("failed conversion from ruma::UInt to usize")
+}
+#[inline]
+#[must_use]
+pub fn ruma_from_u64(val: u64) -> ruma::UInt {
+ruma::UInt::try_from(val).expect("failed conversion from u64 to ruma::UInt")
+}
+#[inline]
+#[must_use]
+pub fn ruma_from_usize(val: usize) -> ruma::UInt {
+ruma::UInt::try_from(val).expect("failed conversion from usize to ruma::UInt")
+}
+#[inline]
+#[must_use]
+#[allow(clippy::as_conversions)]
+pub fn usize_from_u64_truncated(val: u64) -> usize { val as usize }
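Note: a short usage sketch of these helpers with made-up values; the expect-based ones only panic when the value cannot be represented (for ruma::UInt that means anything above 2^53 - 1, and for usize_from_ruma a value larger than usize on the target):

use conduit::utils::math::{ruma_from_usize, usize_from_f64, usize_from_u64_truncated};

fn main() -> Result<(), conduit::Error> {
    let cache_size = usize_from_f64(1024.0 * 1.5)?;  // Err on negative input
    let offset = usize_from_u64_truncated(u64::MAX); // truncates on 32-bit targets
    let count = ruma_from_usize(42);                 // panics only above 2^53 - 1
    println!("{cache_size} {offset} {count}");
    Ok(())
}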


@@ -28,11 +28,12 @@ pub fn format(ts: SystemTime, str: &str) -> String {
}
#[must_use]
+#[allow(clippy::as_conversions)]
pub fn pretty(d: Duration) -> String {
use Unit::*;
let fmt = |w, f, u| format!("{w}.{f} {u}");
-let gen64 = |w, f, u| fmt(w, (f * 100.0) as u64, u);
+let gen64 = |w, f, u| fmt(w, (f * 100.0) as u32, u);
let gen128 = |w, f, u| gen64(u64::try_from(w).expect("u128 to u64"), f, u);
match whole_and_frac(d) {
(Days(whole), frac) => gen64(whole, frac, "days"),
@@ -49,6 +50,7 @@ pub fn pretty(d: Duration) -> String {
/// part is the largest Unit containing a non-zero value, the frac part is a
/// rational remainder left over.
#[must_use]
+#[allow(clippy::as_conversions)]
pub fn whole_and_frac(d: Duration) -> (Unit, f64) {
use Unit::*;
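Note: the allow stays here because std has no TryFrom between floats and integers, so turning a fraction into whole centi-units still needs `as`; since Rust 1.45 a float -> int `as` cast saturates rather than being undefined. A minimal sketch of the same scoped allow:

#[allow(clippy::as_conversions)]
fn centis(frac: f64) -> u32 {
    // No TryFrom<f64> for u32 in std; `as` saturates (and maps NaN to 0).
    (frac * 100.0) as u32
}

fn main() {
    assert_eq!(centis(0.25), 25);
}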


@@ -3,7 +3,7 @@ use std::{
sync::{Arc, Mutex},
};
-use conduit::{utils, Result, Server};
+use conduit::{utils, utils::math::usize_from_f64, Result, Server};
use database::{Database, Map};
use lru_cache::LruCache;
@@ -16,7 +16,7 @@ impl Data {
pub(super) fn new(server: &Arc<Server>, db: &Arc<Database>) -> Self {
let config = &server.config;
let cache_size = f64::from(config.auth_chain_cache_capacity);
-let cache_size = (cache_size * config.conduit_cache_capacity_modifier) as usize;
+let cache_size = usize_from_f64(cache_size * config.conduit_cache_capacity_modifier).expect("valid cache size");
Self {
shorteventid_authchain: db["shorteventid_authchain"].clone(),
auth_chain_cache: Mutex::new(LruCache::new(cache_size)),


@@ -43,11 +43,11 @@ impl Service {
#[tracing::instrument(skip_all, name = "auth_chain")]
pub async fn get_auth_chain(&self, room_id: &RoomId, starting_events: &[&EventId]) -> Result<Vec<u64>> {
-const NUM_BUCKETS: u64 = 50; //TODO: change possible w/o disrupting db?
+const NUM_BUCKETS: usize = 50; //TODO: change possible w/o disrupting db?
const BUCKET: BTreeSet<(u64, &EventId)> = BTreeSet::new();
let started = std::time::Instant::now();
-let mut buckets = [BUCKET; NUM_BUCKETS as usize];
+let mut buckets = [BUCKET; NUM_BUCKETS];
for (i, &short) in services()
.rooms
.short
@@ -55,8 +55,9 @@ impl Service {
.iter()
.enumerate()
{
-let bucket = validated!(short % NUM_BUCKETS)?;
-buckets[bucket as usize].insert((short, starting_events[i]));
+let bucket: usize = short.try_into()?;
+let bucket: usize = validated!(bucket % NUM_BUCKETS)?;
+buckets[bucket].insert((short, starting_events[i]));
}
debug!(
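Note: validated! is conduwuit's checked-arithmetic macro; a rough std-only sketch of the same bucketing step, under that assumption, looks like this:

const NUM_BUCKETS: usize = 50;

fn bucket_for(short: u64) -> Result<usize, std::num::TryFromIntError> {
    let idx = usize::try_from(short)?; // fallible instead of `short as usize`
    Ok(idx % NUM_BUCKETS)              // NUM_BUCKETS is non-zero, so `%` cannot panic
}

fn main() {
    assert_eq!(bucket_for(123).unwrap(), 23);
}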


@@ -7,7 +7,7 @@ use std::{
sync::Arc,
};
-use conduit::{checked, debug_info};
+use conduit::{checked, debug_info, utils::math::usize_from_f64};
use lru_cache::LruCache;
use ruma::{
api::{
@@ -161,11 +161,10 @@ impl From<CachedSpaceHierarchySummary> for SpaceHierarchyRoomsChunk {
impl crate::Service for Service {
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
let config = &args.server.config;
+let cache_size = f64::from(config.roomid_spacehierarchy_cache_capacity);
+let cache_size = cache_size * config.conduit_cache_capacity_modifier;
Ok(Arc::new(Self {
-roomid_spacehierarchy_cache: Mutex::new(LruCache::new(
-(f64::from(config.roomid_spacehierarchy_cache_capacity) * config.conduit_cache_capacity_modifier)
-as usize,
-)),
+roomid_spacehierarchy_cache: Mutex::new(LruCache::new(usize_from_f64(cache_size)?)),
}))
}
@@ -447,7 +446,7 @@ impl Service {
}
pub async fn get_client_hierarchy(
-&self, sender_user: &UserId, room_id: &RoomId, limit: usize, short_room_ids: Vec<u64>, max_depth: usize,
+&self, sender_user: &UserId, room_id: &RoomId, limit: usize, short_room_ids: Vec<u64>, max_depth: u64,
suggested_only: bool,
) -> Result<client::space::get_hierarchy::v1::Response> {
let mut parents = VecDeque::new();
@@ -514,7 +513,8 @@ impl Service {
}
}
-if !children.is_empty() && parents.len() < max_depth {
+let parents_len: u64 = parents.len().try_into()?;
+if !children.is_empty() && parents_len < max_depth {
parents.push_back(current_room.clone());
stack.push(children);
}
@@ -549,9 +549,8 @@ impl Service {
Some(
PaginationToken {
short_room_ids,
-limit: UInt::new(max_depth as u64).expect("When sent in request it must have been valid UInt"),
-max_depth: UInt::new(max_depth as u64)
-.expect("When sent in request it must have been valid UInt"),
+limit: UInt::new(max_depth).expect("When sent in request it must have been valid UInt"),
+max_depth: UInt::new(max_depth).expect("When sent in request it must have been valid UInt"),
suggested_only,
}
.to_string(),


@@ -6,7 +6,11 @@ use std::{
sync::{Arc, Mutex as StdMutex, Mutex},
};
-use conduit::{error, utils::mutex_map, warn, Error, Result};
+use conduit::{
+error,
+utils::{math::usize_from_f64, mutex_map},
+warn, Error, Result,
+};
use data::Data;
use lru_cache::LruCache;
use ruma::{
@@ -44,14 +48,15 @@ pub struct Service {
impl crate::Service for Service {
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
let config = &args.server.config;
+let server_visibility_cache_capacity =
+f64::from(config.server_visibility_cache_capacity) * config.conduit_cache_capacity_modifier;
+let user_visibility_cache_capacity =
+f64::from(config.user_visibility_cache_capacity) * config.conduit_cache_capacity_modifier;
Ok(Arc::new(Self {
db: Data::new(args.db),
-server_visibility_cache: StdMutex::new(LruCache::new(
-(f64::from(config.server_visibility_cache_capacity) * config.conduit_cache_capacity_modifier) as usize,
-)),
-user_visibility_cache: StdMutex::new(LruCache::new(
-(f64::from(config.user_visibility_cache_capacity) * config.conduit_cache_capacity_modifier) as usize,
-)),
+server_visibility_cache: StdMutex::new(LruCache::new(usize_from_f64(server_visibility_cache_capacity)?)),
+user_visibility_cache: StdMutex::new(LruCache::new(usize_from_f64(user_visibility_cache_capacity)?)),
}))
}


@@ -7,7 +7,7 @@ use std::{
sync::{Arc, Mutex as StdMutex, Mutex},
};
-use conduit::{checked, utils, Result};
+use conduit::{checked, utils, utils::math::usize_from_f64, Result};
use data::Data;
use lru_cache::LruCache;
use ruma::{EventId, RoomId};
@@ -55,11 +55,10 @@ pub struct Service {
impl crate::Service for Service {
fn build(args: crate::Args<'_>) -> Result<Arc<Self>> {
let config = &args.server.config;
+let cache_capacity = f64::from(config.stateinfo_cache_capacity) * config.conduit_cache_capacity_modifier;
Ok(Arc::new(Self {
db: Data::new(args.db),
-stateinfo_cache: StdMutex::new(LruCache::new(
-(f64::from(config.stateinfo_cache_capacity) * config.conduit_cache_capacity_modifier) as usize,
-)),
+stateinfo_cache: StdMutex::new(LruCache::new(usize_from_f64(cache_capacity)?)),
}))
}