feat: ask for backfill

Commit 7bdd9660aa (parent 23b18d71ee), from the conduwuit repository (mirror of https://github.com/girlbossceo/conduwuit.git).
@@ -27,25 +27,24 @@ pub async fn get_context_route(
     let mut lazy_loaded = HashSet::new();
 
-    let base_pdu_id = services()
+    let base_token = services()
         .rooms
         .timeline
-        .get_pdu_id(&body.event_id)?
+        .get_pdu_count(&body.event_id)?
         .ok_or(Error::BadRequest(
             ErrorKind::NotFound,
             "Base event id not found.",
         ))?;
 
-    let base_token = services().rooms.timeline.pdu_count(&base_pdu_id)?;
-
     let base_event = services()
         .rooms
         .timeline
-        .get_pdu_from_id(&base_pdu_id)?
+        .get_pdu(&body.event_id)?
         .ok_or(Error::BadRequest(
             ErrorKind::NotFound,
             "Base event not found.",
         ))?;
 
     let room_id = base_event.room_id.clone();

@@ -97,10 +96,7 @@ pub async fn get_context_route(
         }
     }
 
-    let start_token = events_before
-        .last()
-        .and_then(|(pdu_id, _)| services().rooms.timeline.pdu_count(pdu_id).ok())
-        .map(|count| count.to_string());
+    let start_token = events_before.last().map(|(count, _)| count.stringify());
 
     let events_before: Vec<_> = events_before
         .into_iter()

@@ -151,10 +147,7 @@ pub async fn get_context_route(
         .state_full_ids(shortstatehash)
         .await?;
 
-    let end_token = events_after
-        .last()
-        .and_then(|(pdu_id, _)| services().rooms.timeline.pdu_count(pdu_id).ok())
-        .map(|count| count.to_string());
+    let end_token = events_after.last().map(|(count, _)| count.stringify());
 
     let events_after: Vec<_> = events_after
         .into_iter()
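The /context handler now takes its pagination tokens straight from the `(PduCount, PduEvent)` pairs returned by the timeline iterators, via `PduCount::stringify()`, instead of looking up a separate `u64` count per pdu id. A minimal standalone sketch of the token format this implies (negative strings for backfilled positions, plain numbers for normal ones); the `PduCount` type here is re-declared purely for illustration:

```rust
// Illustration only: mirrors the stringify()/try_from_string() mapping
// introduced by this commit, not the real conduwuit type.
#[derive(Debug, PartialEq)]
enum PduCount {
    Backfilled(u64),
    Normal(u64),
}

fn stringify(c: &PduCount) -> String {
    match c {
        PduCount::Backfilled(x) => format!("-{x}"),
        PduCount::Normal(x) => x.to_string(),
    }
}

fn parse(token: &str) -> Option<PduCount> {
    if let Some(rest) = token.strip_prefix('-') {
        rest.parse::<u64>().ok().map(PduCount::Backfilled)
    } else {
        token.parse::<u64>().ok().map(PduCount::Normal)
    }
}

fn main() {
    assert_eq!(stringify(&PduCount::Normal(42)), "42");
    assert_eq!(stringify(&PduCount::Backfilled(3)), "-3");
    assert_eq!(parse("-3"), Some(PduCount::Backfilled(3)));
}
```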
@@ -714,8 +714,10 @@ async fn join_room_by_id_helper(
                 .ok()?
             },
         )
-        .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed"))?
-        {
+        .map_err(|e| {
+            warn!("Auth check failed: {e}");
+            Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")
+        })? {
             return Err(Error::BadRequest(
                 ErrorKind::InvalidParam,
                 "Auth check failed",
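The join-path auth check now logs the underlying failure before returning the generic client error. A hedged, standalone sketch of that shape, with a plain `Result<bool, String>` and `eprintln!` standing in for the real state-resolution call and `tracing::warn!`:

```rust
// Illustration only: log the original error, then replace it with a
// client-facing message, as the change above does.
fn require_auth(auth_check: Result<bool, String>) -> Result<(), String> {
    if !auth_check.map_err(|e| {
        eprintln!("Auth check failed: {e}"); // stand-in for warn!(...)
        "Auth check failed".to_owned()
    })? {
        return Err("Auth check failed".to_owned());
    }
    Ok(())
}

fn main() {
    assert!(require_auth(Ok(true)).is_ok());
    assert!(require_auth(Err("missing power levels".into())).is_err());
}
```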
@@ -1,4 +1,7 @@
-use crate::{service::pdu::PduBuilder, services, utils, Error, Result, Ruma};
+use crate::{
+    service::{pdu::PduBuilder, rooms::timeline::PduCount},
+    services, utils, Error, Result, Ruma,
+};
 use ruma::{
     api::client::{
         error::ErrorKind,

@@ -122,17 +125,17 @@ pub async fn get_message_events_route(
     }
 
     let from = match body.from.clone() {
-        Some(from) => from
-            .parse()
-            .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from` value."))?,
+        Some(from) => PduCount::try_from_string(&from)?,
         None => match body.dir {
-            ruma::api::client::Direction::Forward => 0,
-            ruma::api::client::Direction::Backward => u64::MAX,
+            ruma::api::client::Direction::Forward => PduCount::min(),
+            ruma::api::client::Direction::Backward => PduCount::max(),
         },
     };
 
-    let to = body.to.as_ref().map(|t| t.parse());
+    let to = body
+        .to
+        .as_ref()
+        .and_then(|t| PduCount::try_from_string(&t).ok());
 
     services().rooms.lazy_loading.lazy_load_confirm_delivery(
         sender_user,

@@ -158,15 +161,7 @@ pub async fn get_message_events_route(
                 .pdus_after(sender_user, &body.room_id, from)?
                 .take(limit)
                 .filter_map(|r| r.ok()) // Filter out buggy events
-                .filter_map(|(pdu_id, pdu)| {
-                    services()
-                        .rooms
-                        .timeline
-                        .pdu_count(&pdu_id)
-                        .map(|pdu_count| (pdu_count, pdu))
-                        .ok()
-                })
-                .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to`
+                .take_while(|&(k, _)| Some(k) != to) // Stop at `to`
                 .collect();
 
             for (_, event) in &events_after {

@@ -192,26 +187,23 @@ pub async fn get_message_events_route(
                 .map(|(_, pdu)| pdu.to_room_event())
                 .collect();
 
-            resp.start = from.to_string();
-            resp.end = next_token.map(|count| count.to_string());
+            resp.start = from.stringify();
+            resp.end = next_token.map(|count| count.stringify());
             resp.chunk = events_after;
         }
         ruma::api::client::Direction::Backward => {
+            services()
+                .rooms
+                .timeline
+                .backfill_if_required(&body.room_id, from)
+                .await?;
             let events_before: Vec<_> = services()
                 .rooms
                 .timeline
                 .pdus_until(sender_user, &body.room_id, from)?
                 .take(limit)
                 .filter_map(|r| r.ok()) // Filter out buggy events
-                .filter_map(|(pdu_id, pdu)| {
-                    services()
-                        .rooms
-                        .timeline
-                        .pdu_count(&pdu_id)
-                        .map(|pdu_count| (pdu_count, pdu))
-                        .ok()
-                })
-                .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to`
+                .take_while(|&(k, _)| Some(k) != to) // Stop at `to`
                 .collect();
 
             for (_, event) in &events_before {

@@ -237,8 +229,8 @@ pub async fn get_message_events_route(
                 .map(|(_, pdu)| pdu.to_room_event())
                 .collect();
 
-            resp.start = from.to_string();
-            resp.end = next_token.map(|count| count.to_string());
+            resp.start = from.stringify();
+            resp.end = next_token.map(|count| count.stringify());
             resp.chunk = events_before;
         }
     }
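In /messages, `from` and `to` are now `PduCount` tokens: a missing `from` starts at `PduCount::min()` (the far end of the backfilled section) when paginating forward and `PduCount::max()` when paginating backward, and backward pagination first calls `backfill_if_required`. The "stop at `to`" logic then works directly on the counts attached to each event. A standalone sketch of that stop condition, using plain `u64` tokens in place of `PduCount` (the comparison works the same way because `PduCount` is `Copy + PartialEq` in this commit):

```rust
// Collect events until the `to` token is reached, as in the handler above.
fn page(events: &[(u64, &str)], limit: usize, to: Option<u64>) -> Vec<(u64, String)> {
    events
        .iter()
        .take(limit)
        .take_while(|&&(k, _)| Some(k) != to) // Stop at `to`
        .map(|&(k, body)| (k, body.to_owned()))
        .collect()
}

fn main() {
    // Events already in backward (reverse-chronological) order.
    let events: [(u64, &str); 4] = [(4, "d"), (3, "c"), (2, "b"), (1, "a")];
    // Paginating backward from after 4 toward token 2 stops before event 2.
    assert_eq!(page(&events, 10, Some(2)).len(), 2);
}
```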
@@ -1,4 +1,4 @@
-use crate::{services, Error, Result, Ruma};
+use crate::{service::rooms::timeline::PduCount, services, Error, Result, Ruma};
 use ruma::{
     api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt},
     events::{

@@ -42,18 +42,28 @@ pub async fn set_read_marker_route(
     }
 
     if let Some(event) = &body.private_read_receipt {
-        services().rooms.edus.read_receipt.private_read_set(
-            &body.room_id,
-            sender_user,
-            services()
-                .rooms
-                .timeline
-                .get_pdu_count(event)?
-                .ok_or(Error::BadRequest(
-                    ErrorKind::InvalidParam,
-                    "Event does not exist.",
-                ))?,
-        )?;
+        let count = services()
+            .rooms
+            .timeline
+            .get_pdu_count(event)?
+            .ok_or(Error::BadRequest(
+                ErrorKind::InvalidParam,
+                "Event does not exist.",
+            ))?;
+        let count = match count {
+            PduCount::Backfilled(_) => {
+                return Err(Error::BadRequest(
+                    ErrorKind::InvalidParam,
+                    "Read receipt is in backfilled timeline",
+                ))
+            }
+            PduCount::Normal(c) => c,
+        };
+        services()
+            .rooms
+            .edus
+            .read_receipt
+            .private_read_set(&body.room_id, sender_user, count)?;
     }
 
     if let Some(event) = &body.read_receipt {

@@ -142,17 +152,27 @@ pub async fn create_receipt_route(
             )?;
         }
         create_receipt::v3::ReceiptType::ReadPrivate => {
+            let count = services()
+                .rooms
+                .timeline
+                .get_pdu_count(&body.event_id)?
+                .ok_or(Error::BadRequest(
+                    ErrorKind::InvalidParam,
+                    "Event does not exist.",
+                ))?;
+            let count = match count {
+                PduCount::Backfilled(_) => {
+                    return Err(Error::BadRequest(
+                        ErrorKind::InvalidParam,
+                        "Read receipt is in backfilled timeline",
+                    ))
+                }
+                PduCount::Normal(c) => c,
+            };
             services().rooms.edus.read_receipt.private_read_set(
                 &body.room_id,
                 sender_user,
-                services()
-                    .rooms
-                    .timeline
-                    .get_pdu_count(&body.event_id)?
-                    .ok_or(Error::BadRequest(
-                        ErrorKind::InvalidParam,
-                        "Event does not exist.",
-                    ))?,
+                count,
             )?;
         }
         _ => return Err(Error::bad_database("Unsupported receipt type")),
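Private read receipts keep storing a plain `u64` position, so both handlers now narrow the `PduCount` first and reject events that only exist in the backfilled timeline. A hedged sketch of that narrowing step:

```rust
// Illustration only: a backfilled position cannot become a private read
// receipt; only Normal counts map to the stored u64.
#[derive(Debug)]
enum PduCount {
    Backfilled(u64),
    Normal(u64),
}

fn receipt_position(count: PduCount) -> Result<u64, &'static str> {
    match count {
        PduCount::Backfilled(_) => Err("Read receipt is in backfilled timeline"),
        PduCount::Normal(c) => Ok(c),
    }
}

fn main() {
    assert_eq!(receipt_position(PduCount::Normal(10)), Ok(10));
    assert!(receipt_position(PduCount::Backfilled(3)).is_err());
}
```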
@@ -1,4 +1,4 @@
-use crate::{services, Error, Result, Ruma, RumaResponse};
+use crate::{service::rooms::timeline::PduCount, services, Error, Result, Ruma, RumaResponse};
 use ruma::{
     api::client::{
         filter::{FilterDefinition, LazyLoadOptions},

@@ -172,6 +172,7 @@ async fn sync_helper(
     let watcher = services().globals.watch(&sender_user, &sender_device);
 
     let next_batch = services().globals.current_count()?;
+    let next_batchcount = PduCount::Normal(next_batch);
     let next_batch_string = next_batch.to_string();
 
     // Load filter

@@ -197,6 +198,7 @@ async fn sync_helper(
         .clone()
         .and_then(|string| string.parse().ok())
         .unwrap_or(0);
+    let sincecount = PduCount::Normal(since);
 
     let mut presence_updates = HashMap::new();
     let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in

@@ -241,12 +243,12 @@ async fn sync_helper(
             .rooms
             .timeline
             .last_timeline_count(&sender_user, &room_id)?
-            > since
+            > sincecount
         {
             let mut non_timeline_pdus = services()
                 .rooms
                 .timeline
-                .pdus_until(&sender_user, &room_id, u64::MAX)?
+                .pdus_until(&sender_user, &room_id, PduCount::max())?
                 .filter_map(|r| {
                     // Filter out buggy events
                     if r.is_err() {

@@ -254,13 +256,7 @@ async fn sync_helper(
                     }
                     r.ok()
                 })
-                .take_while(|(pduid, _)| {
-                    services()
-                        .rooms
-                        .timeline
-                        .pdu_count(pduid)
-                        .map_or(false, |count| count > since)
-                });
+                .take_while(|(pducount, _)| pducount > &sincecount);
 
             // Take the last 10 events for the timeline
             timeline_pdus = non_timeline_pdus

@@ -295,7 +291,7 @@ async fn sync_helper(
             &sender_user,
             &sender_device,
             &room_id,
-            since,
+            sincecount,
         )?;
 
         // Database queries:

@@ -492,7 +488,7 @@ async fn sync_helper(
             &sender_device,
             &room_id,
             lazy_loaded,
-            next_batch,
+            next_batchcount,
         );
 
         (

@@ -582,7 +578,7 @@ async fn sync_helper(
             &sender_device,
             &room_id,
             lazy_loaded,
-            next_batch,
+            next_batchcount,
         );
 
         let encrypted_room = services()

@@ -711,10 +707,14 @@ async fn sync_helper(
     let prev_batch = timeline_pdus
         .first()
-        .map_or(Ok::<_, Error>(None), |(pdu_id, _)| {
-            Ok(Some(
-                services().rooms.timeline.pdu_count(pdu_id)?.to_string(),
-            ))
+        .map_or(Ok::<_, Error>(None), |(pdu_count, _)| {
+            Ok(Some(match pdu_count {
+                PduCount::Backfilled(_) => {
+                    error!("timeline in backfill state?!");
+                    "0".to_owned()
+                }
+                PduCount::Normal(c) => c.to_string(),
+            }))
         })?;
 
     let room_events: Vec<_> = timeline_pdus
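Sync keeps plain `u64` counters on the wire (`since`, `next_batch`) and wraps them in `PduCount::Normal` internally so they compare correctly against timeline positions; `prev_batch` falls back to "0" if the first timeline entry unexpectedly turns out to be a backfilled PDU. A small standalone sketch of that token computation:

```rust
// Illustration only: a Normal position becomes its numeric token, a
// Backfilled one (not expected in the sync timeline) falls back to "0".
#[derive(Clone, Copy)]
enum PduCount {
    Backfilled(u64),
    Normal(u64),
}

fn prev_batch(first: Option<PduCount>) -> Option<String> {
    first.map(|count| match count {
        PduCount::Backfilled(_) => {
            eprintln!("timeline in backfill state?!"); // stand-in for error!(...)
            "0".to_owned()
        }
        PduCount::Normal(c) => c.to_string(),
    })
}

fn main() {
    assert_eq!(prev_batch(Some(PduCount::Normal(77))), Some("77".to_owned()));
    assert_eq!(prev_batch(None), None);
}
```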
@@ -629,6 +629,37 @@ pub async fn get_public_rooms_route(
     })
 }
 
+pub fn parse_incoming_pdu(
+    pdu: &RawJsonValue,
+) -> Result<(OwnedEventId, CanonicalJsonObject, OwnedRoomId)> {
+    let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
+        warn!("Error parsing incoming event {:?}: {:?}", pdu, e);
+        Error::BadServerResponse("Invalid PDU in server response")
+    })?;
+
+    let room_id: OwnedRoomId = value
+        .get("room_id")
+        .and_then(|id| RoomId::parse(id.as_str()?).ok())
+        .ok_or(Error::BadRequest(
+            ErrorKind::InvalidParam,
+            "Invalid room id in pdu",
+        ))?;
+
+    let room_version_id = services().rooms.state.get_room_version(&room_id)?;
+
+    let (event_id, value) = match gen_event_id_canonical_json(&pdu, &room_version_id) {
+        Ok(t) => t,
+        Err(_) => {
+            // Event could not be converted to canonical json
+            return Err(Error::BadRequest(
+                ErrorKind::InvalidParam,
+                "Could not convert event to canonical json.",
+            ));
+        }
+    };
+
+    Ok((event_id, value, room_id))
+}
+
 /// # `PUT /_matrix/federation/v1/send/{txnId}`
 ///
 /// Push EDUs and PDUs to this server.

@@ -657,36 +688,7 @@ pub async fn send_transaction_message_route(
     // let mut auth_cache = EventMap::new();
 
     for pdu in &body.pdus {
-        let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| {
-            warn!("Error parsing incoming event {:?}: {:?}", pdu, e);
-            Error::BadServerResponse("Invalid PDU in server response")
-        })?;
-
-        let room_id: OwnedRoomId = match value
-            .get("room_id")
-            .and_then(|id| RoomId::parse(id.as_str()?).ok())
-        {
-            Some(id) => id,
-            None => {
-                // Event is invalid
-                continue;
-            }
-        };
-
-        let room_version_id = match services().rooms.state.get_room_version(&room_id) {
-            Ok(v) => v,
-            Err(_) => {
-                continue;
-            }
-        };
-
-        let (event_id, value) = match gen_event_id_canonical_json(pdu, &room_version_id) {
-            Ok(t) => t,
-            Err(_) => {
-                // Event could not be converted to canonical json
-                continue;
-            }
-        };
+        let (event_id, value, room_id) = parse_incoming_pdu(&pdu)?;
         // We do not add the event_id field to the pdu here because of signature and hashes checks
 
         services()

@@ -1017,7 +1019,7 @@ pub async fn get_backfill_route(
                 Ok(true),
             )
         })
-        .map(|(pdu_id, _)| services().rooms.timeline.get_pdu_json_from_id(&pdu_id))
+        .map(|(_, pdu)| services().rooms.timeline.get_pdu_json(&pdu.event_id))
         .filter_map(|r| r.ok().flatten())
        .map(|pdu| PduEvent::convert_to_outgoing_federation_event(pdu))
        .collect();
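The PDU-parsing logic is extracted into `parse_incoming_pdu` so the new backfill path can reuse it. Note the control-flow shape in the `/send` loop: the old code skipped unparsable PDUs with `continue`, while the refactored loop propagates the parse error for the whole transaction via `?`. A standalone illustration of the two shapes, with string parsing standing in for PDU parsing:

```rust
// Illustration only: strict (propagate first error) vs lenient (skip bad
// items) handling of a batch, mirroring the loop structure above.
fn parse(pdu: &str) -> Result<u64, String> {
    pdu.parse().map_err(|_| format!("invalid pdu: {pdu}"))
}

fn handle_strict(pdus: &[&str]) -> Result<Vec<u64>, String> {
    let mut out = Vec::new();
    for pdu in pdus {
        out.push(parse(pdu)?); // first bad PDU fails the whole call
    }
    Ok(out)
}

fn handle_lenient(pdus: &[&str]) -> Vec<u64> {
    pdus.iter().filter_map(|pdu| parse(pdu).ok()).collect() // skip bad PDUs
}

fn main() {
    assert!(handle_strict(&["1", "x", "3"]).is_err());
    assert_eq!(handle_lenient(&["1", "x", "3"]), vec![1, 3]);
}
```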
@@ -7,6 +7,8 @@ use tracing::error;
 use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result};
 
+use service::rooms::timeline::PduCount;
+
 impl service::rooms::timeline::Data for KeyValueDatabase {
     fn first_pdu_in_room(&self, room_id: &RoomId) -> Result<Option<Arc<PduEvent>>> {
         let prefix = services()

@@ -30,7 +32,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
             .transpose()
     }
 
-    fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result<u64> {
+    fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result<PduCount> {
         match self
             .lasttimelinecount_cache
             .lock()

@@ -39,20 +41,18 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
         {
             hash_map::Entry::Vacant(v) => {
                 if let Some(last_count) = self
-                    .pdus_until(sender_user, room_id, u64::MAX)?
-                    .filter_map(|r| {
+                    .pdus_until(sender_user, room_id, PduCount::max())?
+                    .find_map(|r| {
                         // Filter out buggy events
                         if r.is_err() {
                             error!("Bad pdu in pdus_since: {:?}", r);
                         }
                         r.ok()
                     })
-                    .map(|(pduid, _)| self.pdu_count(&pduid))
-                    .next()
                 {
-                    Ok(*v.insert(last_count?))
+                    Ok(*v.insert(last_count.0))
                 } else {
-                    Ok(0)
+                    Ok(PduCount::Normal(0))
                 }
             }
             hash_map::Entry::Occupied(o) => Ok(*o.get()),

@@ -60,11 +60,23 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
     }
 
     /// Returns the `count` of this pdu's id.
-    fn get_pdu_count(&self, event_id: &EventId) -> Result<Option<u64>> {
-        self.eventid_pduid
+    fn get_pdu_count(&self, event_id: &EventId) -> Result<Option<PduCount>> {
+        Ok(self
+            .eventid_pduid
             .get(event_id.as_bytes())?
-            .map(|pdu_id| self.pdu_count(&pdu_id))
-            .transpose()
+            .map(|pdu_id| Ok::<_, Error>(PduCount::Normal(pdu_count(&pdu_id)?)))
+            .transpose()?
+            .map_or_else(
+                || {
+                    Ok::<_, Error>(
+                        self.eventid_backfillpduid
+                            .get(event_id.as_bytes())?
+                            .map(|pdu_id| Ok::<_, Error>(PduCount::Backfilled(pdu_count(&pdu_id)?)))
+                            .transpose()?,
+                    )
+                },
+                |x| Ok(Some(x)),
+            )?)
     }
 
     /// Returns the json of a pdu.

@@ -182,12 +194,6 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
         })
     }
 
-    /// Returns the `count` of this pdu's id.
-    fn pdu_count(&self, pdu_id: &[u8]) -> Result<u64> {
-        utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::<u64>()..])
-            .map_err(|_| Error::bad_database("PDU has invalid count bytes."))
-    }
-
     fn append_pdu(
         &self,
         pdu_id: &[u8],

@@ -203,7 +209,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
         self.lasttimelinecount_cache
             .lock()
             .unwrap()
-            .insert(pdu.room_id.clone(), count);
+            .insert(pdu.room_id.clone(), PduCount::Normal(count));
 
         self.eventid_pduid.insert(pdu.event_id.as_bytes(), pdu_id)?;
         self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?;

@@ -211,6 +217,24 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
         Ok(())
     }
 
+    fn prepend_backfill_pdu(
+        &self,
+        pdu_id: &[u8],
+        event_id: &EventId,
+        json: &CanonicalJsonObject,
+    ) -> Result<()> {
+        self.pduid_backfillpdu.insert(
+            pdu_id,
+            &serde_json::to_vec(json).expect("CanonicalJsonObject is always a valid"),
+        )?;
+
+        self.eventid_backfillpduid
+            .insert(event_id.as_bytes(), pdu_id)?;
+        self.eventid_outlierpdu.remove(event_id.as_bytes())?;
+
+        Ok(())
+    }
+
     /// Removes a pdu and creates a new one with the same id.
     fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> {
         if self.pduid_pdu.get(pdu_id)?.is_some() {
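Backfilled events get their own pair of key-value trees (`pduid_backfillpdu` and `eventid_backfillpduid`) alongside the normal timeline trees, and `get_pdu_count` now checks the normal mapping first and falls back to the backfilled one. A simplified in-memory sketch of that lookup order; `HashMap`s stand in for the `KvTree` handles, and the stored value is a bare count rather than a pdu id whose tail encodes the count:

```rust
use std::collections::HashMap;

#[derive(Debug, PartialEq, Clone, Copy)]
enum PduCount {
    Backfilled(u64),
    Normal(u64),
}

// Illustration of the lookup order in get_pdu_count(): normal timeline
// first, then the backfilled one.
fn get_pdu_count(
    eventid_pduid: &HashMap<&str, u64>,
    eventid_backfillpduid: &HashMap<&str, u64>,
    event_id: &str,
) -> Option<PduCount> {
    eventid_pduid
        .get(event_id)
        .map(|&c| PduCount::Normal(c))
        .or_else(|| eventid_backfillpduid.get(event_id).map(|&c| PduCount::Backfilled(c)))
}

fn main() {
    let normal = HashMap::from([("$live", 7u64)]);
    let backfill = HashMap::from([("$old", 2u64)]);
    assert_eq!(get_pdu_count(&normal, &backfill, "$live"), Some(PduCount::Normal(7)));
    assert_eq!(get_pdu_count(&normal, &backfill, "$old"), Some(PduCount::Backfilled(2)));
    assert_eq!(get_pdu_count(&normal, &backfill, "$missing"), None);
}
```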
@@ -227,51 +251,14 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
         }
     }
 
-    /// Returns an iterator over all events in a room that happened after the event with id `since`
-    /// in chronological order.
-    fn pdus_since<'a>(
-        &'a self,
-        user_id: &UserId,
-        room_id: &RoomId,
-        since: u64,
-    ) -> Result<Box<dyn Iterator<Item = Result<(Vec<u8>, PduEvent)>> + 'a>> {
-        let prefix = services()
-            .rooms
-            .short
-            .get_shortroomid(room_id)?
-            .expect("room exists")
-            .to_be_bytes()
-            .to_vec();
-
-        // Skip the first pdu if it's exactly at since, because we sent that last time
-        let mut first_pdu_id = prefix.clone();
-        first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes());
-
-        let user_id = user_id.to_owned();
-
-        Ok(Box::new(
-            self.pduid_pdu
-                .iter_from(&first_pdu_id, false)
-                .take_while(move |(k, _)| k.starts_with(&prefix))
-                .map(move |(pdu_id, v)| {
-                    let mut pdu = serde_json::from_slice::<PduEvent>(&v)
-                        .map_err(|_| Error::bad_database("PDU in db is invalid."))?;
-                    if pdu.sender != user_id {
-                        pdu.remove_transaction_id()?;
-                    }
-                    Ok((pdu_id, pdu))
-                }),
-        ))
-    }
-
     /// Returns an iterator over all events and their tokens in a room that happened before the
     /// event with id `until` in reverse-chronological order.
     fn pdus_until<'a>(
         &'a self,
         user_id: &UserId,
         room_id: &RoomId,
-        until: u64,
-    ) -> Result<Box<dyn Iterator<Item = Result<(Vec<u8>, PduEvent)>> + 'a>> {
+        until: PduCount,
+    ) -> Result<Box<dyn Iterator<Item = Result<(PduCount, PduEvent)>> + 'a>> {
         // Create the first part of the full pdu id
         let prefix = services()
             .rooms

@@ -281,34 +268,63 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
             .to_be_bytes()
             .to_vec();
 
-        let mut current = prefix.clone();
-        current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until`
-
-        let current: &[u8] = &current;
+        let mut current_backfill = prefix.clone();
+        // +1 so we don't send the base event
+        let backfill_count = match until {
+            PduCount::Backfilled(x) => x + 1,
+            PduCount::Normal(_) => 0,
+        };
+        current_backfill.extend_from_slice(&backfill_count.to_be_bytes());
 
         let user_id = user_id.to_owned();
+        let user_id2 = user_id.to_owned();
+        let prefix2 = prefix.clone();
 
-        Ok(Box::new(
-            self.pduid_pdu
-                .iter_from(current, true)
-                .take_while(move |(k, _)| k.starts_with(&prefix))
-                .map(move |(pdu_id, v)| {
-                    let mut pdu = serde_json::from_slice::<PduEvent>(&v)
-                        .map_err(|_| Error::bad_database("PDU in db is invalid."))?;
-                    if pdu.sender != user_id {
-                        pdu.remove_transaction_id()?;
-                    }
-                    Ok((pdu_id, pdu))
-                }),
-        ))
+        let backfill_iter = self
+            .pduid_backfillpdu
+            .iter_from(&current_backfill, false)
+            .take_while(move |(k, _)| k.starts_with(&prefix))
+            .map(move |(pdu_id, v)| {
+                let mut pdu = serde_json::from_slice::<PduEvent>(&v)
+                    .map_err(|_| Error::bad_database("PDU in db is invalid."))?;
+                if pdu.sender != user_id {
+                    pdu.remove_transaction_id()?;
+                }
+                let count = PduCount::Backfilled(pdu_count(&pdu_id)?);
+                Ok((count, pdu))
+            });
+
+        match until {
+            PduCount::Backfilled(_) => Ok(Box::new(backfill_iter)),
+            PduCount::Normal(x) => {
+                let mut current_normal = prefix2.clone();
+                // -1 so we don't send the base event
+                current_normal.extend_from_slice(&x.saturating_sub(1).to_be_bytes());
+                let normal_iter = self
+                    .pduid_pdu
+                    .iter_from(&current_normal, true)
+                    .take_while(move |(k, _)| k.starts_with(&prefix2))
+                    .map(move |(pdu_id, v)| {
+                        let mut pdu = serde_json::from_slice::<PduEvent>(&v)
+                            .map_err(|_| Error::bad_database("PDU in db is invalid."))?;
+                        if pdu.sender != user_id2 {
+                            pdu.remove_transaction_id()?;
+                        }
+                        let count = PduCount::Normal(pdu_count(&pdu_id)?);
+                        Ok((count, pdu))
+                    });
+
+                Ok(Box::new(normal_iter.chain(backfill_iter)))
+            }
+        }
     }
 
     fn pdus_after<'a>(
         &'a self,
         user_id: &UserId,
         room_id: &RoomId,
-        from: u64,
-    ) -> Result<Box<dyn Iterator<Item = Result<(Vec<u8>, PduEvent)>> + 'a>> {
+        from: PduCount,
+    ) -> Result<Box<dyn Iterator<Item = Result<(PduCount, PduEvent)>> + 'a>> {
         // Create the first part of the full pdu id
         let prefix = services()
             .rooms

@@ -318,26 +334,55 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
             .to_be_bytes()
             .to_vec();
 
-        let mut current = prefix.clone();
-        current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event
-
-        let current: &[u8] = &current;
+        let mut current_normal = prefix.clone();
+        // +1 so we don't send the base event
+        let normal_count = match from {
+            PduCount::Normal(x) => x + 1,
+            PduCount::Backfilled(_) => 0,
+        };
+        current_normal.extend_from_slice(&normal_count.to_be_bytes());
 
         let user_id = user_id.to_owned();
+        let user_id2 = user_id.to_owned();
+        let prefix2 = prefix.clone();
 
-        Ok(Box::new(
-            self.pduid_pdu
-                .iter_from(current, false)
-                .take_while(move |(k, _)| k.starts_with(&prefix))
-                .map(move |(pdu_id, v)| {
-                    let mut pdu = serde_json::from_slice::<PduEvent>(&v)
-                        .map_err(|_| Error::bad_database("PDU in db is invalid."))?;
-                    if pdu.sender != user_id {
-                        pdu.remove_transaction_id()?;
-                    }
-                    Ok((pdu_id, pdu))
-                }),
-        ))
+        let normal_iter = self
+            .pduid_pdu
+            .iter_from(&current_normal, false)
+            .take_while(move |(k, _)| k.starts_with(&prefix))
+            .map(move |(pdu_id, v)| {
+                let mut pdu = serde_json::from_slice::<PduEvent>(&v)
+                    .map_err(|_| Error::bad_database("PDU in db is invalid."))?;
+                if pdu.sender != user_id {
+                    pdu.remove_transaction_id()?;
+                }
+                let count = PduCount::Normal(pdu_count(&pdu_id)?);
+                Ok((count, pdu))
+            });
+
+        match from {
+            PduCount::Normal(_) => Ok(Box::new(normal_iter)),
+            PduCount::Backfilled(x) => {
+                let mut current_backfill = prefix2.clone();
+                // -1 so we don't send the base event
+                current_backfill.extend_from_slice(&x.saturating_sub(1).to_be_bytes());
+                let backfill_iter = self
+                    .pduid_backfillpdu
+                    .iter_from(&current_backfill, true)
+                    .take_while(move |(k, _)| k.starts_with(&prefix2))
+                    .map(move |(pdu_id, v)| {
+                        let mut pdu = serde_json::from_slice::<PduEvent>(&v)
+                            .map_err(|_| Error::bad_database("PDU in db is invalid."))?;
+                        if pdu.sender != user_id2 {
+                            pdu.remove_transaction_id()?;
+                        }
+                        let count = PduCount::Backfilled(pdu_count(&pdu_id)?);
+                        Ok((count, pdu))
+                    });
+
+                Ok(Box::new(backfill_iter.chain(normal_iter)))
+            }
+        }
     }
 
     fn increment_notification_counts(
         &self,

@@ -368,3 +413,9 @@ impl service::rooms::timeline::Data for KeyValueDatabase {
         Ok(())
     }
 }
+
+/// Returns the `count` of this pdu's id.
+fn pdu_count(pdu_id: &[u8]) -> Result<u64> {
+    utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::<u64>()..])
+        .map_err(|_| Error::bad_database("PDU has invalid count bytes."))
+}
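Both trees key PDUs by the same layout: eight big-endian bytes of `shortroomid` followed by eight big-endian bytes of count, which is why the module-level `pdu_count` helper can recover the count from the last eight bytes of any pdu id. A standalone sketch of building and splitting such a key:

```rust
use std::mem::size_of;

// Sketch of the PduId key layout (ShortRoomId + Count, both big-endian),
// matching the extraction done by pdu_count() above.
fn make_pdu_id(shortroomid: u64, count: u64) -> Vec<u8> {
    let mut pdu_id = shortroomid.to_be_bytes().to_vec();
    pdu_id.extend_from_slice(&count.to_be_bytes());
    pdu_id
}

fn pdu_count(pdu_id: &[u8]) -> Option<u64> {
    let tail: [u8; size_of::<u64>()] = pdu_id[pdu_id.len() - size_of::<u64>()..]
        .try_into()
        .ok()?;
    Some(u64::from_be_bytes(tail))
}

fn main() {
    let pdu_id = make_pdu_id(42, 1337);
    assert_eq!(pdu_id.len(), 16);
    assert!(pdu_id.starts_with(&42u64.to_be_bytes()));
    assert_eq!(pdu_count(&pdu_id), Some(1337));
}
```

Because normal and backfilled events live in separate trees, `pdus_until` can walk the normal tree backwards and then chain into the backfilled tree (and `pdus_after` does the mirror image), so callers see one continuous iterator of `(PduCount, PduEvent)` pairs.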
@@ -1,7 +1,10 @@
 pub mod abstraction;
 pub mod key_value;
 
-use crate::{services, utils, Config, Error, PduEvent, Result, Services, SERVICES};
+use crate::{
+    service::rooms::timeline::PduCount, services, utils, Config, Error, PduEvent, Result, Services,
+    SERVICES,
+};
 use abstraction::{KeyValueDatabaseEngine, KvTree};
 use directories::ProjectDirs;
 use lru_cache::LruCache;

@@ -71,7 +74,9 @@ pub struct KeyValueDatabase {
     //pub rooms: rooms::Rooms,
     pub(super) pduid_pdu: Arc<dyn KvTree>, // PduId = ShortRoomId + Count
+    pub(super) pduid_backfillpdu: Arc<dyn KvTree>, // PduId = ShortRoomId + Count
     pub(super) eventid_pduid: Arc<dyn KvTree>,
+    pub(super) eventid_backfillpduid: Arc<dyn KvTree>,
     pub(super) roomid_pduleaves: Arc<dyn KvTree>,
     pub(super) alias_roomid: Arc<dyn KvTree>,
     pub(super) aliasid_alias: Arc<dyn KvTree>, // AliasId = RoomId + Count

@@ -161,7 +166,7 @@ pub struct KeyValueDatabase {
     pub(super) shortstatekey_cache: Mutex<LruCache<u64, (StateEventType, String)>>,
     pub(super) our_real_users_cache: RwLock<HashMap<OwnedRoomId, Arc<HashSet<OwnedUserId>>>>,
     pub(super) appservice_in_room_cache: RwLock<HashMap<OwnedRoomId, HashMap<String, bool>>>,
-    pub(super) lasttimelinecount_cache: Mutex<HashMap<OwnedRoomId, u64>>,
+    pub(super) lasttimelinecount_cache: Mutex<HashMap<OwnedRoomId, PduCount>>,
 }
 
 impl KeyValueDatabase {

@@ -292,7 +297,9 @@ impl KeyValueDatabase {
             presenceid_presence: builder.open_tree("presenceid_presence")?,
             userid_lastpresenceupdate: builder.open_tree("userid_lastpresenceupdate")?,
             pduid_pdu: builder.open_tree("pduid_pdu")?,
+            pduid_backfillpdu: builder.open_tree("pduid_backfillpdu")?,
             eventid_pduid: builder.open_tree("eventid_pduid")?,
+            eventid_backfillpduid: builder.open_tree("eventid_backfillpduid")?,
             roomid_pduleaves: builder.open_tree("roomid_pduleaves")?,
 
             alias_roomid: builder.open_tree("alias_roomid")?,
@@ -9,11 +9,13 @@ use ruma::{DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, RoomId, UserId};
 use crate::Result;
 
+use super::timeline::PduCount;
+
 pub struct Service {
     pub db: &'static dyn Data,
 
     pub lazy_load_waiting:
-        Mutex<HashMap<(OwnedUserId, OwnedDeviceId, OwnedRoomId, u64), HashSet<OwnedUserId>>>,
+        Mutex<HashMap<(OwnedUserId, OwnedDeviceId, OwnedRoomId, PduCount), HashSet<OwnedUserId>>>,
 }
 
 impl Service {

@@ -36,7 +38,7 @@ impl Service {
         device_id: &DeviceId,
         room_id: &RoomId,
         lazy_load: HashSet<OwnedUserId>,
-        count: u64,
+        count: PduCount,
     ) {
         self.lazy_load_waiting.lock().unwrap().insert(
             (

@@ -55,7 +57,7 @@ impl Service {
         user_id: &UserId,
         device_id: &DeviceId,
         room_id: &RoomId,
-        since: u64,
+        since: PduCount,
     ) -> Result<()> {
         if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&(
             user_id.to_owned(),
@@ -14,7 +14,7 @@ use ruma::{
         },
         StateEventType,
     },
-    EventId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
+    EventId, OwnedServerName, RoomId, ServerName, UserId,
 };
 use tracing::error;
@@ -4,12 +4,14 @@ use ruma::{CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId};
 use crate::{PduEvent, Result};
 
+use super::PduCount;
+
 pub trait Data: Send + Sync {
     fn first_pdu_in_room(&self, room_id: &RoomId) -> Result<Option<Arc<PduEvent>>>;
-    fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result<u64>;
+    fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result<PduCount>;
 
     /// Returns the `count` of this pdu's id.
-    fn get_pdu_count(&self, event_id: &EventId) -> Result<Option<u64>>;
+    fn get_pdu_count(&self, event_id: &EventId) -> Result<Option<PduCount>>;
 
     /// Returns the json of a pdu.
     fn get_pdu_json(&self, event_id: &EventId) -> Result<Option<CanonicalJsonObject>>;

@@ -38,9 +40,6 @@ pub trait Data: Send + Sync {
     /// Returns the pdu as a `BTreeMap<String, CanonicalJsonValue>`.
     fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result<Option<CanonicalJsonObject>>;
 
-    /// Returns the `count` of this pdu's id.
-    fn pdu_count(&self, pdu_id: &[u8]) -> Result<u64>;
-
     /// Adds a new pdu to the timeline
     fn append_pdu(
         &self,

@@ -50,33 +49,34 @@ pub trait Data: Send + Sync {
         count: u64,
     ) -> Result<()>;
 
+    // Adds a new pdu to the backfilled timeline
+    fn prepend_backfill_pdu(
+        &self,
+        pdu_id: &[u8],
+        event_id: &EventId,
+        json: &CanonicalJsonObject,
+    ) -> Result<()>;
+
     /// Removes a pdu and creates a new one with the same id.
     fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()>;
 
-    /// Returns an iterator over all events in a room that happened after the event with id `since`
-    /// in chronological order.
-    fn pdus_since<'a>(
-        &'a self,
-        user_id: &UserId,
-        room_id: &RoomId,
-        since: u64,
-    ) -> Result<Box<dyn Iterator<Item = Result<(Vec<u8>, PduEvent)>> + 'a>>;
-
     /// Returns an iterator over all events and their tokens in a room that happened before the
     /// event with id `until` in reverse-chronological order.
     fn pdus_until<'a>(
         &'a self,
         user_id: &UserId,
         room_id: &RoomId,
-        until: u64,
-    ) -> Result<Box<dyn Iterator<Item = Result<(Vec<u8>, PduEvent)>> + 'a>>;
+        until: PduCount,
+    ) -> Result<Box<dyn Iterator<Item = Result<(PduCount, PduEvent)>> + 'a>>;
 
+    /// Returns an iterator over all events in a room that happened after the event with id `from`
+    /// in chronological order.
     fn pdus_after<'a>(
         &'a self,
         user_id: &UserId,
         room_id: &RoomId,
-        from: u64,
-    ) -> Result<Box<dyn Iterator<Item = Result<(Vec<u8>, PduEvent)>> + 'a>>;
+        from: PduCount,
+    ) -> Result<Box<dyn Iterator<Item = Result<(PduCount, PduEvent)>> + 'a>>;
 
     fn increment_notification_counts(
         &self,
@@ -1,7 +1,9 @@
 mod data;
 
-use std::collections::HashMap;
+use std::cmp::Ordering;
+use std::collections::{BTreeMap, HashMap};
+use std::sync::RwLock;
 use std::{
     collections::HashSet,
     sync::{Arc, Mutex},

@@ -9,6 +11,8 @@ use std::{
 pub use data::Data;
 use regex::Regex;
+use ruma::api::federation;
+use ruma::serde::Base64;
 use ruma::{
     api::client::error::ErrorKind,
     canonical_json::to_canonical_value,

@@ -27,11 +31,13 @@ use ruma::{
     uint, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId,
     OwnedServerName, RoomAliasId, RoomId, UserId,
 };
+use ruma::{user_id, ServerName};
 use serde::Deserialize;
-use serde_json::value::to_raw_value;
+use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
 use tokio::sync::MutexGuard;
-use tracing::{error, warn};
+use tracing::{error, info, warn};
 
+use crate::api::server_server;
 use crate::{
     service::pdu::{EventHash, PduBuilder},
     services, utils, Error, PduEvent, Result,

@@ -39,10 +45,70 @@ use crate::{
 use super::state_compressor::CompressedStateEvent;
 
+#[derive(Hash, PartialEq, Eq, Clone, Copy, Debug)]
+pub enum PduCount {
+    Backfilled(u64),
+    Normal(u64),
+}
+
+impl PduCount {
+    pub fn min() -> Self {
+        Self::Backfilled(u64::MAX)
+    }
+    pub fn max() -> Self {
+        Self::Normal(u64::MAX)
+    }
+
+    pub fn try_from_string(token: &str) -> Result<Self> {
+        if token.starts_with('-') {
+            token[1..].parse().map(PduCount::Backfilled)
+        } else {
+            token.parse().map(PduCount::Normal)
+        }
+        .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid pagination token."))
+    }
+
+    pub fn stringify(&self) -> String {
+        match self {
+            PduCount::Backfilled(x) => format!("-{x}"),
+            PduCount::Normal(x) => x.to_string(),
+        }
+    }
+}
+
+impl PartialOrd for PduCount {
+    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
+impl Ord for PduCount {
+    fn cmp(&self, other: &Self) -> Ordering {
+        match (self, other) {
+            (PduCount::Normal(s), PduCount::Normal(o)) => s.cmp(o),
+            (PduCount::Backfilled(s), PduCount::Backfilled(o)) => o.cmp(s),
+            (PduCount::Normal(_), PduCount::Backfilled(_)) => Ordering::Greater,
+            (PduCount::Backfilled(_), PduCount::Normal(_)) => Ordering::Less,
+        }
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+
+    #[test]
+    fn comparisons() {
+        assert!(PduCount::Normal(1) < PduCount::Normal(2));
+        assert!(PduCount::Backfilled(2) < PduCount::Backfilled(1));
+        assert!(PduCount::Normal(1) > PduCount::Backfilled(1));
+        assert!(PduCount::Backfilled(1) < PduCount::Normal(1));
+    }
+}
+
 pub struct Service {
     pub db: &'static dyn Data,
 
-    pub lasttimelinecount_cache: Mutex<HashMap<OwnedRoomId, u64>>,
+    pub lasttimelinecount_cache: Mutex<HashMap<OwnedRoomId, PduCount>>,
 }
 
 impl Service {

@@ -52,10 +118,15 @@ impl Service {
     }
 
     #[tracing::instrument(skip(self))]
-    pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result<u64> {
+    pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result<PduCount> {
         self.db.last_timeline_count(sender_user, room_id)
     }
 
+    /// Returns the `count` of this pdu's id.
+    pub fn get_pdu_count(&self, event_id: &EventId) -> Result<Option<PduCount>> {
+        self.db.get_pdu_count(event_id)
+    }
+
     // TODO Is this the same as the function above?
     /*
     #[tracing::instrument(skip(self))]

@@ -79,11 +150,6 @@ impl Service {
     }
     */
 
-    /// Returns the `count` of this pdu's id.
-    pub fn get_pdu_count(&self, event_id: &EventId) -> Result<Option<u64>> {
-        self.db.get_pdu_count(event_id)
-    }
-
     /// Returns the json of a pdu.
     pub fn get_pdu_json(&self, event_id: &EventId) -> Result<Option<CanonicalJsonObject>> {
         self.db.get_pdu_json(event_id)

@@ -128,11 +194,6 @@ impl Service {
         self.db.get_pdu_json_from_id(pdu_id)
     }
 
-    /// Returns the `count` of this pdu's id.
-    pub fn pdu_count(&self, pdu_id: &[u8]) -> Result<u64> {
-        self.db.pdu_count(pdu_id)
-    }
-
     /// Removes a pdu and creates a new one with the same id.
     #[tracing::instrument(skip(self))]
     fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> {

@@ -863,19 +924,8 @@ impl Service {
         &'a self,
         user_id: &UserId,
         room_id: &RoomId,
-    ) -> Result<impl Iterator<Item = Result<(Vec<u8>, PduEvent)>> + 'a> {
-        self.pdus_since(user_id, room_id, 0)
-    }
-
-    /// Returns an iterator over all events in a room that happened after the event with id `since`
-    /// in chronological order.
-    pub fn pdus_since<'a>(
-        &'a self,
-        user_id: &UserId,
-        room_id: &RoomId,
-        since: u64,
-    ) -> Result<impl Iterator<Item = Result<(Vec<u8>, PduEvent)>> + 'a> {
-        self.db.pdus_since(user_id, room_id, since)
+    ) -> Result<impl Iterator<Item = Result<(PduCount, PduEvent)>> + 'a> {
+        self.pdus_after(user_id, room_id, PduCount::min())
     }
 
     /// Returns an iterator over all events and their tokens in a room that happened before the

@@ -885,8 +935,8 @@ impl Service {
         &'a self,
         user_id: &UserId,
         room_id: &RoomId,
-        until: u64,
-    ) -> Result<impl Iterator<Item = Result<(Vec<u8>, PduEvent)>> + 'a> {
+        until: PduCount,
+    ) -> Result<impl Iterator<Item = Result<(PduCount, PduEvent)>> + 'a> {
         self.db.pdus_until(user_id, room_id, until)
     }

@@ -897,8 +947,8 @@ impl Service {
         &'a self,
         user_id: &UserId,
         room_id: &RoomId,
-        from: u64,
-    ) -> Result<impl Iterator<Item = Result<(Vec<u8>, PduEvent)>> + 'a> {
+        from: PduCount,
+    ) -> Result<impl Iterator<Item = Result<(PduCount, PduEvent)>> + 'a> {
         self.db.pdus_after(user_id, room_id, from)
     }

@@ -915,4 +965,118 @@ impl Service {
         // If event does not exist, just noop
         Ok(())
     }
+
+    #[tracing::instrument(skip(self, room_id))]
+    pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> {
+        let first_pdu = self
+            .all_pdus(&user_id!("@doesntmatter:conduit.rs"), &room_id)?
+            .next()
+            .expect("Room is not empty")?;
+
+        if first_pdu.0 < from {
+            // No backfill required, there are still events between them
+            return Ok(());
+        }
+
+        let power_levels: RoomPowerLevelsEventContent = services()
+            .rooms
+            .state_accessor
+            .room_state_get(&room_id, &StateEventType::RoomPowerLevels, "")?
+            .map(|ev| {
+                serde_json::from_str(ev.content.get())
+                    .map_err(|_| Error::bad_database("invalid m.room.power_levels event"))
+            })
+            .transpose()?
+            .unwrap_or_default();
+        let mut admin_servers = power_levels
+            .users
+            .iter()
+            .filter(|(_, level)| **level > power_levels.users_default)
+            .map(|(user_id, _)| user_id.server_name())
+            .collect::<HashSet<_>>();
+        admin_servers.remove(services().globals.server_name());
+
+        // Request backfill
+        for backfill_server in admin_servers {
+            info!("Asking {backfill_server} for backfill");
+            let response = services()
+                .sending
+                .send_federation_request(
+                    backfill_server,
+                    federation::backfill::get_backfill::v1::Request {
+                        room_id: room_id.to_owned(),
+                        v: vec![first_pdu.1.event_id.as_ref().to_owned()],
+                        limit: uint!(100),
+                    },
+                )
+                .await;
+            match response {
+                Ok(response) => {
+                    let mut pub_key_map = RwLock::new(BTreeMap::new());
+                    for pdu in response.pdus {
+                        if let Err(e) = self
+                            .backfill_pdu(backfill_server, pdu, &mut pub_key_map)
+                            .await
+                        {
+                            warn!("Failed to add backfilled pdu: {e}");
+                        }
+                    }
+                    return Ok(());
+                }
+                Err(e) => {
+                    warn!("{backfill_server} could not provide backfill: {e}");
+                }
+            }
+        }
+
+        info!("No servers could backfill");
+        Ok(())
+    }
+
+    #[tracing::instrument(skip(self, pdu))]
+    pub async fn backfill_pdu(
+        &self,
+        origin: &ServerName,
+        pdu: Box<RawJsonValue>,
+        pub_key_map: &RwLock<BTreeMap<String, BTreeMap<String, Base64>>>,
+    ) -> Result<()> {
+        let (event_id, value, room_id) = server_server::parse_incoming_pdu(&pdu)?;
+
+        services()
+            .rooms
+            .event_handler
+            .handle_incoming_pdu(origin, &event_id, &room_id, value, false, &pub_key_map)
+            .await?;
+
+        let value = self.get_pdu_json(&event_id)?.expect("We just created it");
+
+        let shortroomid = services()
+            .rooms
+            .short
+            .get_shortroomid(&room_id)?
+            .expect("room exists");
+
+        let mutex_insert = Arc::clone(
+            services()
+                .globals
+                .roomid_mutex_insert
+                .write()
+                .unwrap()
+                .entry(room_id.clone())
+                .or_default(),
+        );
+        let insert_lock = mutex_insert.lock().unwrap();
+
+        let count = services().globals.next_count()?;
+        let mut pdu_id = shortroomid.to_be_bytes().to_vec();
+        pdu_id.extend_from_slice(&count.to_be_bytes());
+
+        // Insert pdu
+        self.db.prepend_backfill_pdu(&pdu_id, &event_id, &value)?;
+
+        drop(insert_lock);
+
+        info!("Appended incoming pdu");
+        Ok(())
+    }
 }
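`backfill_if_required` picks whom to ask from the room's power levels: every user above `users_default` contributes their server name, and the local server is removed from the set before the `/backfill` requests are sent. A standalone sketch of that selection, with plain strings standing in for the ruma user-id and power-level types:

```rust
use std::collections::{BTreeMap, HashSet};

// Illustration of the admin-server selection: servers of users with power
// above users_default, minus our own server.
fn backfill_targets<'a>(
    users: &'a BTreeMap<&'a str, i64>, // "@user:server" -> power level
    users_default: i64,
    own_server: &str,
) -> HashSet<&'a str> {
    let mut servers: HashSet<&str> = users
        .iter()
        .filter(|(_, level)| **level > users_default)
        .filter_map(|(user_id, _)| user_id.split_once(':').map(|(_, s)| s))
        .collect();
    servers.remove(own_server);
    servers
}

fn main() {
    let users = BTreeMap::from([("@admin:remote.example", 100), ("@user:local.example", 0)]);
    let targets = backfill_targets(&users, 0, "local.example");
    assert!(targets.contains("remote.example"));
    assert!(!targets.contains("local.example"));
}
```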