Merge remote-tracking branch 'origin/next'

commit 53f14a2c4c
@@ -25,4 +25,4 @@ docker-compose*
 rustfmt.toml
 
 # Documentation
-*.md
+#*.md
@@ -31,7 +31,6 @@ modules.xml
 
 ### vscode ###
 .vscode/*
-!.vscode/settings.json
 !.vscode/tasks.json
 !.vscode/launch.json
 !.vscode/extensions.json
@@ -62,3 +61,9 @@ conduit.db
 
 # Etc.
 **/*.rs.bk
+
+# Nix artifacts
+/result*
+
+# Direnv cache
+/.direnv
.gitlab-ci.yml | 357
@@ -5,140 +5,10 @@ stages:
   - upload artifacts
 
 variables:
+  # Make GitLab CI go fast:
   GIT_SUBMODULE_STRATEGY: recursive
   FF_USE_FASTZIP: 1
   CACHE_COMPRESSION_LEVEL: fastest
-  # Docker in Docker
-  DOCKER_HOST: tcp://docker:2375/
-  DOCKER_TLS_CERTDIR: ""
-  DOCKER_DRIVER: overlay2
-
-# --------------------------------------------------------------------- #
-#         Cargo: Compiling for different architectures                   #
-# --------------------------------------------------------------------- #
-
-.build-cargo-shared-settings:
-  stage: "build"
-  needs: []
-  rules:
-    - if: '$CI_COMMIT_BRANCH == "master"'
-    - if: '$CI_COMMIT_BRANCH == "next"'
-    - if: "$CI_COMMIT_TAG"
-    - if: '($CI_MERGE_REQUEST_APPROVED == "true") || $BUILD_EVERYTHING' # Once MR is approved, test all builds. Or if BUILD_EVERYTHING is set.
-  interruptible: true
-  image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools@sha256:69ab327974aef4cc0daf4273579253bf7ae5e379a6c52729b83137e4caa9d093"
-  tags: ["docker"]
-  services: ["docker:dind"]
-  variables:
-    SHARED_PATH: $CI_PROJECT_DIR/shared
-    CARGO_PROFILE_RELEASE_LTO: "true"
-    CARGO_PROFILE_RELEASE_CODEGEN_UNITS: "1"
-    CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow
-  before_script:
-    - 'echo "Building for target $TARGET"'
-    - "rustup show && rustc --version && cargo --version" # Print version info for debugging
-    # fix cargo and rustup mounts from this container (https://gitlab.com/gitlab-org/gitlab-foss/-/issues/41227)
-    - "mkdir -p $SHARED_PATH/cargo"
-    - "cp -r $CARGO_HOME/bin $SHARED_PATH/cargo"
-    - "cp -r $RUSTUP_HOME $SHARED_PATH"
-    - "export CARGO_HOME=$SHARED_PATH/cargo RUSTUP_HOME=$SHARED_PATH/rustup"
-    # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results.
-    - if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/sccache; fi
-  script:
-    # cross-compile conduit for target
-    - 'time cross build --target="$TARGET" --locked --release'
-    - 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"'
-    # print information about linking for debugging
-    - "file conduit-$TARGET" # print file information
-    - 'readelf --dynamic conduit-$TARGET | sed -e "/NEEDED/q1"' # ensure statically linked
-  cache:
-    # https://doc.rust-lang.org/cargo/guide/cargo-home.html#caching-the-cargo-home-in-ci
-    key: "cargo-cache-$TARGET"
-    paths:
-      - $SHARED_PATH/cargo/registry/index
-      - $SHARED_PATH/cargo/registry/cache
-      - $SHARED_PATH/cargo/git/db
-  artifacts:
-    expire_in: never
-
-build:release:cargo:x86_64-unknown-linux-musl-with-debug:
-  extends: .build-cargo-shared-settings
-  variables:
-    CARGO_PROFILE_RELEASE_DEBUG: 2 # Enable debug info for flamegraph profiling
-    TARGET: "x86_64-unknown-linux-musl"
-  after_script:
-    - "mv ./conduit-x86_64-unknown-linux-musl ./conduit-x86_64-unknown-linux-musl-with-debug"
-  artifacts:
-    name: "conduit-x86_64-unknown-linux-musl-with-debug"
-    paths:
-      - "conduit-x86_64-unknown-linux-musl-with-debug"
-    expose_as: "Conduit for x86_64-unknown-linux-musl-with-debug"
-
-build:release:cargo:x86_64-unknown-linux-musl:
-  extends: .build-cargo-shared-settings
-  variables:
-    TARGET: "x86_64-unknown-linux-musl"
-  artifacts:
-    name: "conduit-x86_64-unknown-linux-musl"
-    paths:
-      - "conduit-x86_64-unknown-linux-musl"
-    expose_as: "Conduit for x86_64-unknown-linux-musl"
-
-build:release:cargo:arm-unknown-linux-musleabihf:
-  extends: .build-cargo-shared-settings
-  variables:
-    TARGET: "arm-unknown-linux-musleabihf"
-  artifacts:
-    name: "conduit-arm-unknown-linux-musleabihf"
-    paths:
-      - "conduit-arm-unknown-linux-musleabihf"
-    expose_as: "Conduit for arm-unknown-linux-musleabihf"
-
-build:release:cargo:armv7-unknown-linux-musleabihf:
-  extends: .build-cargo-shared-settings
-  variables:
-    TARGET: "armv7-unknown-linux-musleabihf"
-  artifacts:
-    name: "conduit-armv7-unknown-linux-musleabihf"
-    paths:
-      - "conduit-armv7-unknown-linux-musleabihf"
-    expose_as: "Conduit for armv7-unknown-linux-musleabihf"
-
-build:release:cargo:aarch64-unknown-linux-musl:
-  extends: .build-cargo-shared-settings
-  variables:
-    TARGET: "aarch64-unknown-linux-musl"
-  artifacts:
-    name: "conduit-aarch64-unknown-linux-musl"
-    paths:
-      - "conduit-aarch64-unknown-linux-musl"
-    expose_as: "Conduit for aarch64-unknown-linux-musl"
-
-.cargo-debug-shared-settings:
-  extends: ".build-cargo-shared-settings"
-  rules:
-    - when: "always"
-  cache:
-    key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--debug"
-  script:
-    # cross-compile conduit for target
-    - 'time time cross build --target="$TARGET" --locked'
-    - 'mv "target/$TARGET/debug/conduit" "conduit-debug-$TARGET"'
-    # print information about linking for debugging
-    - "file conduit-debug-$TARGET" # print file information
-    - 'readelf --dynamic conduit-debug-$TARGET | sed -e "/NEEDED/q1"' # ensure statically linked
-  artifacts:
-    expire_in: 4 weeks
-
-build:debug:cargo:x86_64-unknown-linux-musl:
-  extends: ".cargo-debug-shared-settings"
-  variables:
-    TARGET: "x86_64-unknown-linux-musl"
-  artifacts:
-    name: "conduit-debug-x86_64-unknown-linux-musl"
-    paths:
-      - "conduit-debug-x86_64-unknown-linux-musl"
-    expose_as: "Conduit DEBUG for x86_64-unknown-linux-musl"
 
 # --------------------------------------------------------------------- #
 #         Create and publish docker image                                #
@@ -146,98 +16,106 @@ build:debug:cargo:x86_64-unknown-linux-musl:
 
 .docker-shared-settings:
   stage: "build docker image"
-  image: jdrouet/docker-with-buildx:stable
+  image: jdrouet/docker-with-buildx:20.10.21-0.9.1
+  needs: []
   tags: ["docker"]
+  variables:
+    # Docker in Docker:
+    DOCKER_HOST: tcp://docker:2375/
+    DOCKER_TLS_CERTDIR: ""
+    DOCKER_DRIVER: overlay2
   services:
     - docker:dind
-  needs:
-    - "build:release:cargo:x86_64-unknown-linux-musl"
-    - "build:release:cargo:arm-unknown-linux-musleabihf"
-    - "build:release:cargo:armv7-unknown-linux-musleabihf"
-    - "build:release:cargo:aarch64-unknown-linux-musl"
-  variables:
-    PLATFORMS: "linux/arm/v6,linux/arm/v7,linux/arm64,linux/amd64"
-    DOCKER_FILE: "docker/ci-binaries-packaging.Dockerfile"
-  cache:
-    paths:
-      - docker_cache
-    key: "$CI_JOB_NAME"
-  before_script:
-    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
-    # Only log in to Dockerhub if the credentials are given:
-    - if [ -n "${DOCKER_HUB}" ]; then docker login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD" "$DOCKER_HUB"; fi
   script:
-    # Prepare buildx to build multiarch stuff:
-    - docker context create 'ci-context'
-    - docker buildx create --name 'multiarch-builder' --use 'ci-context'
-    # Copy binaries to their docker arch path
-    - mkdir -p linux/ && mv ./conduit-x86_64-unknown-linux-musl linux/amd64
-    - mkdir -p linux/arm/ && mv ./conduit-arm-unknown-linux-musleabihf linux/arm/v6
-    - mkdir -p linux/arm/ && mv ./conduit-armv7-unknown-linux-musleabihf linux/arm/v7
-    - mv ./conduit-aarch64-unknown-linux-musl linux/arm64
-    - 'export CREATED=$(date -u +''%Y-%m-%dT%H:%M:%SZ'') && echo "Docker image creation date: $CREATED"'
-    # Build and push image:
+    - apk add openssh-client
+    - eval $(ssh-agent -s)
+    - mkdir -p ~/.ssh && chmod 700 ~/.ssh
+    - printf "Host *\n\tStrictHostKeyChecking no\n\n" >> ~/.ssh/config
+    - sh .gitlab/setup-buildx-remote-builders.sh
+    # Authorize against this project's own image registry:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+    # Build multiplatform image and push to temporary tag:
     - >
       docker buildx build
+      --platform "linux/arm/v7,linux/arm64,linux/amd64"
       --pull
+      --tag "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID"
       --push
-      --cache-from=type=local,src=$CI_PROJECT_DIR/docker_cache
-      --cache-to=type=local,dest=$CI_PROJECT_DIR/docker_cache
-      --build-arg CREATED=$CREATED
-      --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)
-      --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA"
-      --platform "$PLATFORMS"
-      --tag "$TAG"
-      --tag "$TAG-alpine"
-      --tag "$TAG-commit-$CI_COMMIT_SHORT_SHA"
-      --file "$DOCKER_FILE" .
+      --file "Dockerfile" .
+    # Build multiplatform image to deb stage and extract their .deb files:
+    - >
+      docker buildx build
+      --platform "linux/arm/v7,linux/arm64,linux/amd64"
+      --target "packager-result"
+      --output="type=local,dest=/tmp/build-output"
+      --file "Dockerfile" .
+    # Build multiplatform image to binary stage and extract their binaries:
+    - >
+      docker buildx build
+      --platform "linux/arm/v7,linux/arm64,linux/amd64"
+      --target "builder-result"
+      --output="type=local,dest=/tmp/build-output"
+      --file "Dockerfile" .
+    # Copy to GitLab container registry:
+    - >
+      docker buildx imagetools create
+      --tag "$CI_REGISTRY_IMAGE/$TAG"
+      --tag "$CI_REGISTRY_IMAGE/$TAG-bullseye"
+      --tag "$CI_REGISTRY_IMAGE/$TAG-commit-$CI_COMMIT_SHORT_SHA"
+      "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID"
+    # if DockerHub credentials exist, also copy to dockerhub:
+    - if [ -n "${DOCKER_HUB}" ]; then docker login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD" "$DOCKER_HUB"; fi
+    - >
+      if [ -n "${DOCKER_HUB}" ]; then
+      docker buildx imagetools create
+      --tag "$DOCKER_HUB_IMAGE/$TAG"
+      --tag "$DOCKER_HUB_IMAGE/$TAG-bullseye"
+      --tag "$DOCKER_HUB_IMAGE/$TAG-commit-$CI_COMMIT_SHORT_SHA"
+      "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID"
+      ; fi
+    - mv /tmp/build-output ./
+  artifacts:
+    paths:
+      - "./build-output/"
 
-docker:next:gitlab:
+docker:next:
   extends: .docker-shared-settings
   rules:
-    - if: '$CI_COMMIT_BRANCH == "next"'
+    - if: '$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_BRANCH == "next"'
   variables:
-    TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:next"
+    TAG: "matrix-conduit:next"
 
-docker:next:dockerhub:
+docker:master:
   extends: .docker-shared-settings
   rules:
-    - if: '$CI_COMMIT_BRANCH == "next" && $DOCKER_HUB'
+    - if: '$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_BRANCH == "master"'
   variables:
-    TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next"
+    TAG: "matrix-conduit:latest"
 
-docker:master:gitlab:
+docker:tags:
   extends: .docker-shared-settings
   rules:
-    - if: '$CI_COMMIT_BRANCH == "master"'
+    - if: "$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_TAG"
   variables:
-    TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:latest"
-
-docker:master:dockerhub:
-  extends: .docker-shared-settings
-  rules:
-    - if: '$CI_COMMIT_BRANCH == "master" && $DOCKER_HUB'
-  variables:
-    TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest"
-
-docker:tags:gitlab:
-  extends: .docker-shared-settings
-  rules:
-    - if: "$CI_COMMIT_TAG"
-  variables:
-    TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:$CI_COMMIT_TAG"
-
-docker:tags:dockerhub:
-  extends: .docker-shared-settings
-  rules:
-    - if: "$CI_COMMIT_TAG && $DOCKER_HUB"
-  variables:
-    TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:$CI_COMMIT_TAG"
+    TAG: "matrix-conduit:$CI_COMMIT_TAG"
 
 # --------------------------------------------------------------------- #
 #         Run tests                                                      #
 # --------------------------------------------------------------------- #
 
+cargo check:
+  stage: test
+  image: docker.io/rust:1.64.0-bullseye
+  needs: []
+  interruptible: true
+  before_script:
+    - "rustup show && rustc --version && cargo --version" # Print version info for debugging
+    - apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb
+  script:
+    - cargo check
+
 
 .test-shared-settings:
   stage: "test"
   needs: []
@@ -250,8 +128,7 @@ docker:tags:dockerhub:
 test:cargo:
   extends: .test-shared-settings
   before_script:
-    # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results:
-    - if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/usr/local/cargo/bin/sccache; fi
+    - apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb
   script:
     - rustc --version && cargo --version # Print version info for debugging
     - "cargo test --color always --workspace --verbose --locked --no-fail-fast -- -Z unstable-options --format json | gitlab-report -p test > $CI_PROJECT_DIR/report.xml"
|
||||||
reports:
|
reports:
|
||||||
junit: report.xml
|
junit: report.xml
|
||||||
|
|
||||||
|
|
||||||
test:clippy:
|
test:clippy:
|
||||||
extends: .test-shared-settings
|
extends: .test-shared-settings
|
||||||
allow_failure: true
|
allow_failure: true
|
||||||
before_script:
|
before_script:
|
||||||
- rustup component add clippy
|
- rustup component add clippy
|
||||||
# If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results:
|
- apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb
|
||||||
- if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/usr/local/cargo/bin/sccache; fi
|
|
||||||
script:
|
script:
|
||||||
- rustc --version && cargo --version # Print version info for debugging
|
- rustc --version && cargo --version # Print version info for debugging
|
||||||
- "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json"
|
- "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json"
|
||||||
|
@@ -294,38 +169,6 @@ test:audit:
     reports:
       sast: gl-sast-report.json
 
-test:sytest:
-  stage: "test"
-  allow_failure: true
-  needs:
-    - "build:debug:cargo:x86_64-unknown-linux-musl"
-  image:
-    name: "valkum/sytest-conduit:latest"
-    entrypoint: [""]
-  tags: ["docker"]
-  variables:
-    PLUGINS: "https://github.com/valkum/sytest_conduit/archive/master.tar.gz"
-  interruptible: true
-  before_script:
-    - "mkdir -p /app"
-    - "cp ./conduit-debug-x86_64-unknown-linux-musl /app/conduit"
-    - "chmod +x /app/conduit"
-    - "rm -rf /src && ln -s $CI_PROJECT_DIR/ /src"
-    - "mkdir -p /work/server-0/database/ && mkdir -p /work/server-1/database/ && mkdir -p /work/server-2/database/"
-    - "cd /"
-  script:
-    - "SYTEST_EXIT_CODE=0"
-    - "/bootstrap.sh conduit || SYTEST_EXIT_CODE=1"
-    - 'perl /sytest/tap-to-junit-xml.pl --puretap --input /logs/results.tap --output $CI_PROJECT_DIR/sytest.xml "Sytest" && cp /logs/results.tap $CI_PROJECT_DIR/results.tap'
-    - "exit $SYTEST_EXIT_CODE"
-  artifacts:
-    when: always
-    paths:
-      - "$CI_PROJECT_DIR/sytest.xml"
-      - "$CI_PROJECT_DIR/results.tap"
-    reports:
-      junit: "$CI_PROJECT_DIR/sytest.xml"
-
 test:dockerlint:
   stage: "test"
   needs: []
@@ -338,14 +181,12 @@ test:dockerlint:
       hadolint
       --no-fail --verbose
       ./Dockerfile
-      ./docker/ci-binaries-packaging.Dockerfile
     # Then output the results into a json for GitLab to pretty-print this in the MR:
     - >
       hadolint
       --format gitlab_codeclimate
       --failure-threshold error
-      ./Dockerfile
-      ./docker/ci-binaries-packaging.Dockerfile > dockerlint.json
+      ./Dockerfile > dockerlint.json
   artifacts:
     when: always
     reports:
@@ -365,28 +206,26 @@ test:dockerlint:
 #         Store binaries as package so they have download urls           #
 # --------------------------------------------------------------------- #
 
-publish:package:
-  stage: "upload artifacts"
-  needs:
-    - "build:release:cargo:x86_64-unknown-linux-musl"
-    - "build:release:cargo:arm-unknown-linux-musleabihf"
-    - "build:release:cargo:armv7-unknown-linux-musleabihf"
-    - "build:release:cargo:aarch64-unknown-linux-musl"
-    # - "build:cargo-deb:x86_64-unknown-linux-gnu"
-  rules:
-    - if: '$CI_COMMIT_BRANCH == "master"'
-    - if: '$CI_COMMIT_BRANCH == "next"'
-    - if: "$CI_COMMIT_TAG"
-  image: curlimages/curl:latest
-  tags: ["docker"]
-  variables:
-    GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts
-  script:
-    - 'BASE_URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}"'
-    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-musl "${BASE_URL}/conduit-x86_64-unknown-linux-musl"'
-    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-arm-unknown-linux-musleabihf "${BASE_URL}/conduit-arm-unknown-linux-musleabihf"'
-    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-armv7-unknown-linux-musleabihf "${BASE_URL}/conduit-armv7-unknown-linux-musleabihf"'
-    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-aarch64-unknown-linux-musl "${BASE_URL}/conduit-aarch64-unknown-linux-musl"'
+# DISABLED FOR NOW, NEEDS TO BE FIXED AT A LATER TIME:
+
+#publish:package:
+#  stage: "upload artifacts"
+#  needs:
+#    - "docker:tags"
+#  rules:
+#    - if: "$CI_COMMIT_TAG"
+#  image: curlimages/curl:latest
+#  tags: ["docker"]
+#  variables:
+#    GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts
+#  script:
+#    - 'BASE_URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}"'
+#    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_amd64/conduit "${BASE_URL}/conduit-x86_64-unknown-linux-gnu"'
+#    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm_v7/conduit "${BASE_URL}/conduit-armv7-unknown-linux-gnu"'
+#    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm64/conduit "${BASE_URL}/conduit-aarch64-unknown-linux-gnu"'
+#    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_amd64/conduit.deb "${BASE_URL}/conduit-x86_64-unknown-linux-gnu.deb"'
+#    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm_v7/conduit.deb "${BASE_URL}/conduit-armv7-unknown-linux-gnu.deb"'
+#    - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm64/conduit.deb "${BASE_URL}/conduit-aarch64-unknown-linux-gnu.deb"'
 
 # Avoid duplicate pipelines
 # See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines
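The rewritten `.docker-shared-settings` job above builds everything through the multi-stage `Dockerfile` and then pulls the binaries and `.deb` packages back out of the image with BuildKit's local output. A rough single-platform equivalent for a workstation (the `./out` directory and the single-platform restriction are illustrative assumptions, not part of the pipeline) could look like this:

```sh
# Sketch: mirror the CI's artifact-extraction steps locally for one platform.
# The pipeline runs the same builds with --platform "linux/arm/v7,linux/arm64,linux/amd64"
# against remote builders; depending on your Docker version you may first need
# a docker-container builder (docker buildx create --use).
docker buildx build --target "builder-result" \
  --output "type=local,dest=./out" \
  --file "Dockerfile" .

docker buildx build --target "packager-result" \
  --output "type=local,dest=./out" \
  --file "Dockerfile" .

ls ./out  # expected: the conduit binary and conduit.deb copied out of the scratch stages
```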
@@ -0,0 +1,5 @@
+# Nix things
+.envrc @CobaltCause
+flake.lock @CobaltCause
+flake.nix @CobaltCause
+nix/ @CobaltCause
@@ -0,0 +1,37 @@
+#!/bin/sh
+set -eux
+
+# --------------------------------------------------------------------- #
+#                                                                        #
+#  Configures docker buildx to use a remote server for arm building.    #
+#  Expects $SSH_PRIVATE_KEY to be a valid ssh ed25519 private key with  #
+#  access to the server $ARM_SERVER_USER@$ARM_SERVER_IP                 #
+#                                                                        #
+#  This is expected to only be used in the official CI/CD pipeline!     #
+#                                                                        #
+#  Requirements: openssh-client, docker buildx                           #
+#  Inspired by: https://depot.dev/blog/building-arm-containers          #
+#                                                                        #
+# --------------------------------------------------------------------- #
+
+cat "$BUILD_SERVER_SSH_PRIVATE_KEY" | ssh-add -
+
+# Test server connections:
+ssh "$ARM_SERVER_USER@$ARM_SERVER_IP" "uname -a"
+ssh "$AMD_SERVER_USER@$AMD_SERVER_IP" "uname -a"
+
+# Connect remote arm64 server for all arm builds:
+docker buildx create \
+  --name "multi" \
+  --driver "docker-container" \
+  --platform "linux/arm64,linux/arm/v7" \
+  "ssh://$ARM_SERVER_USER@$ARM_SERVER_IP"
+
+# Connect remote amd64 server for adm64 builds:
+docker buildx create --append \
+  --name "multi" \
+  --driver "docker-container" \
+  --platform "linux/amd64" \
+  "ssh://$AMD_SERVER_USER@$AMD_SERVER_IP"
+
+docker buildx use multi
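In the CI job earlier in this diff, this script is run as `sh .gitlab/setup-buildx-remote-builders.sh` after an ssh-agent has been started. A hedged sketch of the environment it expects (all values below are placeholders for illustration; the real ones live in protected CI variables):

```sh
# Placeholder values only - this is not real infrastructure.
export ARM_SERVER_USER="builder" ARM_SERVER_IP="203.0.113.10"
export AMD_SERVER_USER="builder" AMD_SERVER_IP="203.0.113.11"
# Presumably a file-type CI variable (a path to the key file), since the script
# reads it with `cat "$BUILD_SERVER_SSH_PRIVATE_KEY"`.
export BUILD_SERVER_SSH_PRIVATE_KEY="/path/to/ed25519_key"

eval "$(ssh-agent -s)"                      # the CI job starts the agent before calling the script
sh .gitlab/setup-buildx-remote-builders.sh  # registers the "multi" builder against both servers
docker buildx ls                            # the new builder should now be listed
```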
@@ -1,3 +0,0 @@
-{
-    "rust-analyzer.procMacro.enable": true,
-}
File diff suppressed because it is too large
Cargo.toml | 52
@@ -6,29 +6,29 @@ authors = ["timokoesters <timo@koesters.xyz>"]
 homepage = "https://conduit.rs"
 repository = "https://gitlab.com/famedly/conduit"
 readme = "README.md"
-version = "0.4.0"
-rust-version = "1.56"
+version = "0.5.0"
+rust-version = "1.64"
 edition = "2021"
 
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
 # Web framework
-axum = { version = "0.5.8", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true }
+axum = { version = "0.5.17", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true }
 axum-server = { version = "0.4.0", features = ["tls-rustls"] }
 tower = { version = "0.4.8", features = ["util"] }
 tower-http = { version = "0.3.4", features = ["add-extension", "cors", "compression-full", "sensitive-headers", "trace", "util"] }
 
 # Used for matrix spec type definitions and helpers
 #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
-ruma = { git = "https://github.com/ruma/ruma", rev = "d614ad1422d6c4b3437ebc318ca8514ae338fd6d", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] }
+ruma = { git = "https://github.com/ruma/ruma", rev = "67d0f3cc04a8d1dc4a8a1ec947519967ce11ce26", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] }
 #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
 #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
 
 # Async runtime and utilities
 tokio = { version = "1.11.0", features = ["fs", "macros", "signal", "sync"] }
 # Used for storing data permanently
-sled = { version = "0.34.7", features = ["compression", "no_metrics"], optional = true }
+#sled = { version = "0.34.7", features = ["compression", "no_metrics"], optional = true }
 #sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] }
 persy = { version = "1.0.0", optional = true, features = ["background_ops"] }
@@ -40,38 +40,39 @@ directories = "4.0.0"
 # Used for ruma wrapper
 serde_json = { version = "1.0.68", features = ["raw_value"] }
 # Used for appservice registration files
-serde_yaml = "0.8.21"
+serde_yaml = "0.9.13"
 # Used for pdu definition
 serde = { version = "1.0.130", features = ["rc"] }
 # Used for secure identifiers
 rand = "0.8.4"
 # Used to hash passwords
-rust-argon2 = "0.8.3"
+rust-argon2 = "1.0.0"
 # Used to send requests
 reqwest = { default-features = false, features = ["rustls-tls-native-roots", "socks"], git = "https://github.com/timokoesters/reqwest", rev = "57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" }
 # Used for conduit::Error type
 thiserror = "1.0.29"
 # Used to generate thumbnails for images
-image = { version = "0.23.14", default-features = false, features = ["jpeg", "png", "gif"] }
+image = { version = "0.24.4", default-features = false, features = ["jpeg", "png", "gif"] }
 # Used to encode server public key
 base64 = "0.13.0"
 # Used when hashing the state
 ring = "0.16.20"
 # Used when querying the SRV record of other servers
-trust-dns-resolver = "0.20.3"
+trust-dns-resolver = "0.22.0"
 # Used to find matching events for appservices
 regex = "1.5.4"
 # jwt jsonwebtokens
-jsonwebtoken = "7.2.0"
+jsonwebtoken = "8.1.1"
 # Performance measurements
 tracing = { version = "0.1.27", features = [] }
-tracing-subscriber = "0.2.22"
-tracing-flame = "0.1.0"
-opentelemetry = { version = "0.16.0", features = ["rt-tokio"] }
-opentelemetry-jaeger = { version = "0.15.0", features = ["rt-tokio"] }
+tracing-subscriber = { version = "0.3.16", features = ["env-filter"] }
+tracing-flame = "0.2.0"
+opentelemetry = { version = "0.18.0", features = ["rt-tokio"] }
+opentelemetry-jaeger = { version = "0.17.0", features = ["rt-tokio"] }
+tracing-opentelemetry = "0.18.0"
 lru-cache = "0.1.2"
-rusqlite = { version = "0.25.3", optional = true, features = ["bundled"] }
-parking_lot = { version = "0.11.2", optional = true }
+rusqlite = { version = "0.28.0", optional = true, features = ["bundled"] }
+parking_lot = { version = "0.12.1", optional = true }
 crossbeam = { version = "0.8.1", optional = true }
 num_cpus = "1.13.0"
 threadpool = "1.8.1"
@@ -80,20 +81,24 @@ rocksdb = { version = "0.17.0", default-features = true, features = ["multi-thre
 
 thread_local = "1.1.3"
 # used for TURN server authentication
-hmac = "0.11.0"
-sha-1 = "0.9.8"
+hmac = "0.12.1"
+sha-1 = "0.10.0"
 # used for conduit's CLI and admin room command parsing
-clap = { version = "3.2.5", default-features = false, features = ["std", "derive"] }
+clap = { version = "4.0.11", default-features = false, features = ["std", "derive", "help", "usage", "error-context"] }
 futures-util = { version = "0.3.17", default-features = false }
 # Used for reading the configuration from conduit.toml & environment variables
 figment = { version = "0.10.6", features = ["env", "toml"] }
 
-tikv-jemalloc-ctl = { version = "0.4.2", features = ["use_std"], optional = true }
-tikv-jemallocator = { version = "0.4.1", features = ["unprefixed_malloc_on_supported_platforms"], optional = true }
+tikv-jemalloc-ctl = { version = "0.5.0", features = ["use_std"], optional = true }
+tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_supported_platforms"], optional = true }
+lazy_static = "1.4.0"
+async-trait = "0.1.57"
+
+sd-notify = { version = "0.4.1", optional = true }
 
 [features]
-default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "jemalloc"]
-backend_sled = ["sled"]
+default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "jemalloc", "systemd"]
+#backend_sled = ["sled"]
 backend_persy = ["persy", "parking_lot"]
 backend_sqlite = ["sqlite"]
 backend_heed = ["heed", "crossbeam"]
@@ -101,6 +106,7 @@ backend_rocksdb = ["rocksdb"]
 jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"]
 sqlite = ["rusqlite", "parking_lot", "tokio/signal"]
 conduit_bin = ["axum"]
+systemd = ["sd-notify"]
 
 [[bin]]
 name = "conduit"
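The default feature list now includes the new `systemd` feature, which enables the optional `sd-notify` dependency. For anyone building from this Cargo.toml, a hedged sketch of building with and without it — using only feature names that appear in the `[features]` table above:

```sh
# Default build now also compiles the systemd/sd-notify integration:
cargo build --release

# Sketch: opt back out of it while keeping the usual backends (adjust features to taste):
cargo build --release --no-default-features \
    --features conduit_bin,backend_sqlite,backend_rocksdb,jemalloc
```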
DEPLOY.md | 63
@@ -2,7 +2,7 @@
 
 > ## Getting help
 >
-> If you run into any problems while setting up Conduit, write an email to `timo@koesters.xyz`, ask us
+> If you run into any problems while setting up Conduit, write an email to `conduit@koesters.xyz`, ask us
 > in `#conduit:fachschaften.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new).
 
 ## Installing Conduit
@@ -13,20 +13,26 @@ only offer Linux binaries.
 You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the right url:
 
 | CPU Architecture | Download stable version | Download development version |
-| ------------------------------------------- | ------------------------------ | ---------------------------- |
-| x84_64 / amd64 (Most servers and computers) | [Download][x84_64-musl-master] | [Download][x84_64-musl-next] |
-| armv6 | [Download][armv6-musl-master] | [Download][armv6-musl-next] |
-| armv7 (e.g. Raspberry Pi by default) | [Download][armv7-musl-master] | [Download][armv7-musl-next] |
-| armv8 / aarch64 | [Download][armv8-musl-master] | [Download][armv8-musl-next] |
+| ------------------------------------------- | --------------------------------------------------------------- | ----------------------------------------------------------- |
+| x84_64 / amd64 (Most servers and computers) | [Binary][x84_64-glibc-master] / [.deb][x84_64-glibc-master-deb] | [Binary][x84_64-glibc-next] / [.deb][x84_64-glibc-next-deb] |
+| armv7 (e.g. Raspberry Pi by default) | [Binary][armv7-glibc-master] / [.deb][armv7-glibc-master-deb] | [Binary][armv7-glibc-next] / [.deb][armv7-glibc-next-deb] |
+| armv8 / aarch64 | [Binary][armv8-glibc-master] / [.deb][armv8-glibc-master-deb] | [Binary][armv8-glibc-next] / [.deb][armv8-glibc-next-deb] |
 
-[x84_64-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl
-[armv6-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf
-[armv7-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf
-[armv8-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl
-[x84_64-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl
-[armv6-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf
-[armv7-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf
-[armv8-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl
+These builds were created on and linked against the glibc version shipped with Debian bullseye.
+If you use a system with an older glibc version, you might need to compile Conduit yourself.
+
+[x84_64-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit?job=docker:master
+[armv7-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit?job=docker:master
+[armv8-glibc-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit?job=docker:master
+[x84_64-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_amd64/conduit?job=docker:next
+[armv7-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm_v7/conduit?job=docker:next
+[armv8-glibc-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm64/conduit?job=docker:next
+[x84_64-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_amd64/conduit.deb?job=docker:master
+[armv7-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm_v7/conduit.deb?job=docker:master
+[armv8-glibc-master-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/build-output/linux_arm64/conduit.deb?job=docker:master
+[x84_64-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_amd64/conduit.deb?job=docker:next
+[armv7-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm_v7/conduit.deb?job=docker:next
+[armv8-glibc-next-deb]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/build-output/linux_arm64/conduit.deb?job=docker:next
 
 ```bash
 $ sudo wget -O /usr/local/bin/matrix-conduit <url>
@@ -43,8 +49,25 @@ $ sudo apt install libclang-dev build-essential
 $ cargo build --release
 ```
 
-If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](cross/README.md).
+If you want to cross compile Conduit to another architecture, read the guide below.
+
+<details>
+<summary>Cross compilation</summary>
+
+As easiest way to compile conduit for another platform [cross-rs](https://github.com/cross-rs/cross) is recommended, so install it first.
+
+In order to use RockDB as storage backend append `-latomic` to linker flags.
+
+For example, to build a binary for Raspberry Pi Zero W (ARMv6) you need `arm-unknown-linux-gnueabihf` as compilation
+target.
+
+```bash
+git clone https://gitlab.com/famedly/conduit.git
+cd conduit
+export RUSTFLAGS='-C link-arg=-lgcc -Clink-arg=-latomic -Clink-arg=-static-libgcc'
+cross build --release --no-default-features --features conduit_bin,backend_rocksdb,jemalloc --target=arm-unknown-linux-gnueabihf
+```
+</details>
 
 ## Adding a Conduit user
 
|
||||||
trusted_servers = ["matrix.org"]
|
trusted_servers = ["matrix.org"]
|
||||||
|
|
||||||
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
|
#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
|
||||||
#log = "info,state_res=warn,rocket=off,_=off,sled=off"
|
#log = "warn,state_res=warn,rocket=off,_=off,sled=off"
|
||||||
|
|
||||||
address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
|
address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
|
||||||
#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it.
|
#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it.
|
||||||
|
@@ -189,18 +212,21 @@ $ sudo systemctl reload apache2
 ```
 
 ### Caddy
 
 Create `/etc/caddy/conf.d/conduit_caddyfile` and enter this (substitute for your server name).
 
 ```caddy
 your.server.name, your.server.name:8448 {
     reverse_proxy /_matrix/* 127.0.0.1:6167
 }
 ```
 
 That's it! Just start or enable the service and you're set.
 
 ```bash
 $ sudo systemctl enable caddy
 ```
 
 
 ### Nginx
 
 If you use Nginx and not Apache, add the following server section inside the http section of `/etc/nginx/nginx.conf`
@@ -214,6 +240,9 @@ server {
     server_name your.server.name; # EDIT THIS
     merge_slashes off;
 
+    # Nginx defaults to only allow 1MB uploads
+    client_max_body_size 20M;
+
     location /_matrix/ {
         proxy_pass http://127.0.0.1:6167$request_uri;
         proxy_set_header Host $http_host;
Dockerfile | 67
@@ -1,5 +1,5 @@
 # syntax=docker/dockerfile:1
-FROM docker.io/rust:1.58-bullseye AS builder
+FROM docker.io/rust:1.64-bullseye AS builder
 WORKDIR /usr/src/conduit
 
 # Install required packages to build Conduit and it's dependencies
@@ -27,6 +27,49 @@ COPY src src
 # Builds conduit and places the binary at /usr/src/conduit/target/release/conduit
 RUN touch src/main.rs && touch src/lib.rs && cargo build --release
 
+
+# ONLY USEFUL FOR CI: target stage to extract build artifacts
+FROM scratch AS builder-result
+COPY --from=builder /usr/src/conduit/target/release/conduit /conduit
+
+
+
+# ---------------------------------------------------------------------------------------------------------------
+# Build cargo-deb, a tool to package up rust binaries into .deb packages for Debian/Ubuntu based systems:
+# ---------------------------------------------------------------------------------------------------------------
+FROM docker.io/rust:1.64-bullseye AS build-cargo-deb
+
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    dpkg \
+    dpkg-dev \
+    liblzma-dev
+
+RUN cargo install cargo-deb
+# => binary is in /usr/local/cargo/bin/cargo-deb
+
+
+# ---------------------------------------------------------------------------------------------------------------
+# Package conduit build-result into a .deb package:
+# ---------------------------------------------------------------------------------------------------------------
+FROM builder AS packager
+WORKDIR /usr/src/conduit
+
+COPY ./LICENSE ./LICENSE
+COPY ./README.md ./README.md
+COPY debian/README.Debian ./debian/
+COPY --from=build-cargo-deb /usr/local/cargo/bin/cargo-deb /usr/local/cargo/bin/cargo-deb
+
+# --no-build makes cargo-deb reuse already compiled project
+RUN cargo deb --no-build
+# => Package is in /usr/src/conduit/target/debian/<project_name>_<version>_<arch>.deb
+
+
+# ONLY USEFUL FOR CI: target stage to extract build artifacts
+FROM scratch AS packager-result
+COPY --from=packager /usr/src/conduit/target/debian/*.deb /conduit.deb
+
+
 # ---------------------------------------------------------------------------------------------------------------
 # Stuff below this line actually ends up in the resulting docker image
 # ---------------------------------------------------------------------------------------------------------------
@@ -36,30 +79,32 @@ FROM docker.io/debian:bullseye-slim AS runner
 # You still need to map the port when using the docker command or docker-compose.
 EXPOSE 6167
 
+ARG DEFAULT_DB_PATH=/var/lib/matrix-conduit
+
 ENV CONDUIT_PORT=6167 \
     CONDUIT_ADDRESS="0.0.0.0" \
-    CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit \
+    CONDUIT_DATABASE_PATH=${DEFAULT_DB_PATH} \
     CONDUIT_CONFIG=''
 #    └─> Set no config file to do all configuration with env vars
 
 # Conduit needs:
+#   dpkg: to install conduit.deb
 #   ca-certificates: for https
 #   iproute2 & wget: for the healthcheck script
 RUN apt-get update && apt-get -y --no-install-recommends install \
+    dpkg \
     ca-certificates \
     iproute2 \
     wget \
     && rm -rf /var/lib/apt/lists/*
 
-# Created directory for the database and media files
-RUN mkdir -p /srv/conduit/.local/share/conduit
-
 # Test if Conduit is still alive, uses the same endpoint as Element
 COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh
 HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh
 
-# Copy over the actual Conduit binary from the builder stage
-COPY --from=builder /usr/src/conduit/target/release/conduit /srv/conduit/conduit
+# Install conduit.deb:
+COPY --from=packager /usr/src/conduit/target/debian/*.deb /srv/conduit/
+RUN dpkg -i /srv/conduit/*.deb
 
 # Improve security: Don't run stuff as root, that does not need to run as root
 # Most distros also use 1000:1000 for the first real user, so this should resolve volume mounting problems.
@@ -69,9 +114,11 @@ RUN set -x ; \
     groupadd -r -g ${GROUP_ID} conduit ; \
     useradd -l -r -M -d /srv/conduit -o -u ${USER_ID} -g conduit conduit && exit 0 ; exit 1
 
-# Change ownership of Conduit files to conduit user and group and make the healthcheck executable:
+# Create database directory, change ownership of Conduit files to conduit user and group and make the healthcheck executable:
 RUN chown -cR conduit:conduit /srv/conduit && \
-    chmod +x /srv/conduit/healthcheck.sh
+    chmod +x /srv/conduit/healthcheck.sh && \
+    mkdir -p ${DEFAULT_DB_PATH} && \
+    chown -cR conduit:conduit ${DEFAULT_DB_PATH}
 
 # Change user to conduit, no root permissions afterwards:
 USER conduit
@@ -80,4 +127,4 @@ WORKDIR /srv/conduit
 
 # Run Conduit and print backtraces on panics
 ENV RUST_BACKTRACE=1
-ENTRYPOINT [ "/srv/conduit/conduit" ]
+ENTRYPOINT [ "/usr/sbin/matrix-conduit" ]
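With these changes the runtime image installs the `.deb` produced by the `packager` stage and starts `/usr/sbin/matrix-conduit` instead of a copied binary. A quick, illustrative way to sanity-check the new entrypoint and default database path locally — the image tag, server name, and volume name below are arbitrary examples, not part of the project:

```sh
# Sketch only: build the final (runner) stage and run it with a persistent database volume.
docker build -t conduit:local .
docker run --rm -p 6167:6167 \
    -e CONDUIT_SERVER_NAME="example.com" \
    -v conduit-db:/var/lib/matrix-conduit \
    conduit:local
```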
README.md | 29
@@ -1,7 +1,12 @@
 # Conduit
 
 ### A Matrix homeserver written in Rust
 
+#### What is Matrix?
+[Matrix](https://matrix.org) is an open network for secure and decentralized
+communication. Users from every Matrix homeserver can chat with users from all
+other Matrix servers. You can even use bridges (also called Matrix appservices)
+to communicate with users outside of Matrix, like a community on Discord.
+
 #### What is the goal?
 
 An efficient Matrix homeserver that's easy to set up and just works. You can install
@@ -13,9 +18,10 @@ friends or company.
 Yes! You can test our Conduit instance by opening a Matrix client (<https://app.element.io> or Element Android for
 example) and registering on the `conduit.rs` homeserver.
 
-It is hosted on a ODROID HC 2 with 2GB RAM and a SAMSUNG Exynos 5422 CPU, which
-was used in the Samsung Galaxy S5. It joined many big rooms including Matrix
-HQ.
+*Registration is currently disabled because of scammers. For an account please
+message us (see contact section below).*
 
+Server hosting for conduit.rs is donated by the Matrix.org Foundation.
 
 #### What is the current status?
 
|
||||||
|
|
||||||
There are still a few important features missing:
|
There are still a few important features missing:
|
||||||
|
|
||||||
- E2EE verification over federation
|
- E2EE emoji comparison over federation (E2EE chat works)
|
||||||
- Outgoing read receipts, typing, presence over federation
|
- Outgoing read receipts, typing, presence over federation (incoming works)
|
||||||
|
|
||||||
Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit/-/milestones/3).
|
Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit/-/milestones/3).
|
||||||
|
|
||||||
|
@ -34,6 +40,7 @@ Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit
|
||||||
|
|
||||||
- Simple install (this was tested the most): [DEPLOY.md](DEPLOY.md)
|
- Simple install (this was tested the most): [DEPLOY.md](DEPLOY.md)
|
||||||
- Debian package: [debian/README.Debian](debian/README.Debian)
|
- Debian package: [debian/README.Debian](debian/README.Debian)
|
||||||
|
- Nix/NixOS: [nix/README.md](nix/README.md)
|
||||||
- Docker: [docker/README.md](docker/README.md)
|
- Docker: [docker/README.md](docker/README.md)
|
||||||
|
|
||||||
If you want to connect an Appservice to Conduit, take a look at [APPSERVICES.md](APPSERVICES.md).
|
If you want to connect an Appservice to Conduit, take a look at [APPSERVICES.md](APPSERVICES.md).
|
||||||
|
@ -49,13 +56,21 @@ If you want to connect an Appservice to Conduit, take a look at [APPSERVICES.md]
|
||||||
|
|
||||||
#### Thanks to
|
#### Thanks to
|
||||||
|
|
||||||
Thanks to Famedly, Prototype Fund (DLR and German BMBF) and all other individuals for financially supporting this project.
|
Thanks to FUTO, Famedly, Prototype Fund (DLR and German BMBF) and all individuals for financially supporting this project.
|
||||||
|
|
||||||
Thanks to the contributors to Conduit and all libraries we use, for example:
|
Thanks to the contributors to Conduit and all libraries we use, for example:
|
||||||
|
|
||||||
- Ruma: A clean library for the Matrix Spec in Rust
|
- Ruma: A clean library for the Matrix Spec in Rust
|
||||||
- axum: A modular web framework
|
- axum: A modular web framework
|
||||||
|
|
||||||
|
#### Contact
|
||||||
|
|
||||||
|
If you run into any question, feel free to
|
||||||
|
- Ask us in `#conduit:fachschaften.org` on Matrix
|
||||||
|
- Write an E-Mail to `conduit@koesters.xyz`
|
||||||
|
- Send an direct message to `timo@fachschaften.org` on Matrix
|
||||||
|
- [Open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new)
|
||||||
|
|
||||||
#### Donate
|
#### Donate
|
||||||
|
|
||||||
Liberapay: <https://liberapay.com/timokoesters/>\
|
Liberapay: <https://liberapay.com/timokoesters/>\
|
||||||
|
|
|
@@ -46,7 +46,7 @@ enable_lightning_bolt = true
 trusted_servers = ["matrix.org"]

 #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
-#log = "info,state_res=warn,rocket=off,_=off,sled=off"
+#log = "warn,state_res=warn,rocket=off,_=off,sled=off"

 address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
 #address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it.

@@ -77,7 +77,7 @@ allow_federation = true
 trusted_servers = ["matrix.org"]

 #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
-#log = "info,state_res=warn,rocket=off,_=off,sled=off"
+#log = "warn,state_res=warn,rocket=off,_=off,sled=off"
 EOF
 fi
 ;;

@@ -31,7 +31,7 @@ services:
       CONDUIT_ALLOW_FEDERATION: 'true'
       CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
       #CONDUIT_MAX_CONCURRENT_REQUESTS: 100
-      #CONDUIT_LOG: info,rocket=off,_=off,sled=off
+      #CONDUIT_LOG: warn,rocket=off,_=off,sled=off
       CONDUIT_ADDRESS: 0.0.0.0
       CONDUIT_CONFIG: '' # Ignore this
       #

@@ -33,7 +33,7 @@ docker run -d -p 8448:6167 \
   -e CONDUIT_MAX_REQUEST_SIZE="20_000_000" \
   -e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \
   -e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \
-  -e CONDUIT_LOG="info,rocket=off,_=off,sled=off" \
+  -e CONDUIT_LOG="warn,rocket=off,_=off,sled=off" \
   --name conduit matrixconduit/matrix-conduit:latest
 ```

@@ -121,12 +121,12 @@ So...step by step:

 location /.well-known/matrix/server {
    return 200 '{"m.server": "<SUBDOMAIN>.<DOMAIN>:443"}';
-   add_header Content-Type application/json;
+   types { } default_type "application/json; charset=utf-8";
 }

 location /.well-known/matrix/client {
    return 200 '{"m.homeserver": {"base_url": "https://<SUBDOMAIN>.<DOMAIN>"}}';
-   add_header Content-Type application/json;
+   types { } default_type "application/json; charset=utf-8";
    add_header "Access-Control-Allow-Origin" *;
 }

@@ -31,7 +31,7 @@ services:
      CONDUIT_ALLOW_FEDERATION: 'true'
      CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
      #CONDUIT_MAX_CONCURRENT_REQUESTS: 100
-     #CONDUIT_LOG: info,rocket=off,_=off,sled=off
+     #CONDUIT_LOG: warn,rocket=off,_=off,sled=off
      CONDUIT_ADDRESS: 0.0.0.0
      CONDUIT_CONFIG: '' # Ignore this

@@ -33,7 +33,7 @@ services:
      # CONDUIT_PORT: 6167
      # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string ''
      # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging
-     # CONDUIT_LOG: info # default is: "info,_=off,sled=off"
+     # CONDUIT_LOG: info # default is: "warn,_=off,sled=off"
      # CONDUIT_ALLOW_JAEGER: 'false'
      # CONDUIT_ALLOW_ENCRYPTION: 'false'
      # CONDUIT_ALLOW_FEDERATION: 'false'

|
||||||
|
{
|
||||||
|
"nodes": {
|
||||||
|
"fenix": {
|
||||||
|
"inputs": {
|
||||||
|
"nixpkgs": [
|
||||||
|
"nixpkgs"
|
||||||
|
],
|
||||||
|
"rust-analyzer-src": "rust-analyzer-src"
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1665815894,
|
||||||
|
"narHash": "sha256-Vboo1L4NMGLKZKVLnOPi9OHlae7uoNyfgvyIUm+SVXE=",
|
||||||
|
"owner": "nix-community",
|
||||||
|
"repo": "fenix",
|
||||||
|
"rev": "2348450241a5f945f0ba07e44ecbfac2f541d7f4",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "nix-community",
|
||||||
|
"repo": "fenix",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"flake-utils": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1659877975,
|
||||||
|
"narHash": "sha256-zllb8aq3YO3h8B/U0/J1WBgAL8EX5yWf5pMj3G0NAmc=",
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "flake-utils",
|
||||||
|
"rev": "c0e246b9b83f637f4681389ecabcb2681b4f3af0",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "numtide",
|
||||||
|
"repo": "flake-utils",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"naersk": {
|
||||||
|
"inputs": {
|
||||||
|
"nixpkgs": [
|
||||||
|
"nixpkgs"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1662220400,
|
||||||
|
"narHash": "sha256-9o2OGQqu4xyLZP9K6kNe1pTHnyPz0Wr3raGYnr9AIgY=",
|
||||||
|
"owner": "nix-community",
|
||||||
|
"repo": "naersk",
|
||||||
|
"rev": "6944160c19cb591eb85bbf9b2f2768a935623ed3",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "nix-community",
|
||||||
|
"repo": "naersk",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"nixpkgs": {
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1665856037,
|
||||||
|
"narHash": "sha256-/RvIWnGKdTSoIq5Xc2HwPIL0TzRslzU6Rqk4Img6UNg=",
|
||||||
|
"owner": "NixOS",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"rev": "c95ebc5125ffffcd431df0ad8620f0926b8125b8",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "NixOS",
|
||||||
|
"repo": "nixpkgs",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"root": {
|
||||||
|
"inputs": {
|
||||||
|
"fenix": "fenix",
|
||||||
|
"flake-utils": "flake-utils",
|
||||||
|
"naersk": "naersk",
|
||||||
|
"nixpkgs": "nixpkgs"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"rust-analyzer-src": {
|
||||||
|
"flake": false,
|
||||||
|
"locked": {
|
||||||
|
"lastModified": 1665765556,
|
||||||
|
"narHash": "sha256-w9L5j0TIB5ay4aRwzGCp8mgvGsu5dVJQvbEFutwr6xE=",
|
||||||
|
"owner": "rust-lang",
|
||||||
|
"repo": "rust-analyzer",
|
||||||
|
"rev": "018b8429cf3fa9d8aed916704e41dfedeb0f4f78",
|
||||||
|
"type": "github"
|
||||||
|
},
|
||||||
|
"original": {
|
||||||
|
"owner": "rust-lang",
|
||||||
|
"ref": "nightly",
|
||||||
|
"repo": "rust-analyzer",
|
||||||
|
"type": "github"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"root": "root",
|
||||||
|
"version": 7
|
||||||
|
}
|
|
@@ -0,0 +1,75 @@
{
  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs";
    flake-utils.url = "github:numtide/flake-utils";

    fenix = {
      url = "github:nix-community/fenix";
      inputs.nixpkgs.follows = "nixpkgs";
    };
    naersk = {
      url = "github:nix-community/naersk";
      inputs.nixpkgs.follows = "nixpkgs";
    };
  };

  outputs =
    { self
    , nixpkgs
    , flake-utils

    , fenix
    , naersk
    }: flake-utils.lib.eachDefaultSystem (system:
    let
      pkgs = nixpkgs.legacyPackages.${system};

      # Nix-accessible `Cargo.toml`
      cargoToml = builtins.fromTOML (builtins.readFile ./Cargo.toml);

      # The Rust toolchain to use
      toolchain = fenix.packages.${system}.toolchainOf {
        # Use the Rust version defined in `Cargo.toml`
        channel = cargoToml.package.rust-version;

        # This will need to be updated when `package.rust-version` is changed in
        # `Cargo.toml`
        sha256 = "sha256-KXx+ID0y4mg2B3LHp7IyaiMrdexF6octADnAtFIOjrY=";
      };

      builder = (pkgs.callPackage naersk {
        inherit (toolchain) rustc cargo;
      }).buildPackage;
    in
    {
      packages.default = builder {
        src = ./.;

        nativeBuildInputs = (with pkgs.rustPlatform; [
          bindgenHook
        ]);
      };

      devShells.default = pkgs.mkShell {
        # Rust Analyzer needs to be able to find the path to default crate
        # sources, and it can read this environment variable to do so
        RUST_SRC_PATH = "${toolchain.rust-src}/lib/rustlib/src/rust/library";

        # Development tools
        nativeBuildInputs = (with pkgs.rustPlatform; [
          bindgenHook
        ]) ++ (with toolchain; [
          cargo
          clippy
          rust-src
          rustc
          rustfmt
        ]);
      };

      checks = {
        packagesDefault = self.packages.${system}.default;
        devShellsDefault = self.devShells.${system}.default;
      };
    });
}

@@ -0,0 +1,188 @@
# Conduit for Nix/NixOS

This guide assumes you have a recent version of Nix (^2.4) installed.

Since Conduit ships as a Nix flake, you'll first need to [enable
flakes][enable_flakes].

You can now use the usual Nix commands to interact with Conduit's flake. For
example, `nix run gitlab:famedly/conduit` will run Conduit (though you'll need
to provide configuration and such manually as usual).

If your NixOS configuration is defined as a flake, you can depend on this flake
to provide a more up-to-date version than provided by `nixpkgs`. In your flake,
add the following to your `inputs`:

```nix
conduit = {
  url = "gitlab:famedly/conduit";

  # Assuming you have an input for nixpkgs called `nixpkgs`. If you experience
  # build failures while using this, try commenting/deleting this line. This
  # will probably also require you to always build from source.
  inputs.nixpkgs.follows = "nixpkgs";
};
```

Next, make sure you're passing your flake inputs to the `specialArgs` argument
of `nixpkgs.lib.nixosSystem` [as explained here][specialargs]. This guide will
assume you've named the group `flake-inputs`.

Now you can configure Conduit and a reverse proxy for it. Add the following to
a new Nix file and include it in your configuration:

```nix
{ config
, pkgs
, flake-inputs
, ...
}:

let
  # You'll need to edit these values

  # The hostname that will appear in your user and room IDs
  server_name = "example.com";

  # The hostname that Conduit actually runs on
  #
  # This can be the same as `server_name` if you want. This is only necessary
  # when Conduit is running on a different machine than the one hosting your
  # root domain. This configuration also assumes this is all running on a single
  # machine, some tweaks will need to be made if this is not the case.
  matrix_hostname = "matrix.${server_name}";

  # An admin email for TLS certificate notifications
  admin_email = "admin@${server_name}";

  # These ones you can leave alone

  # Build a dervation that stores the content of `${server_name}/.well-known/matrix/server`
  well_known_server = pkgs.writeText "well-known-matrix-server" ''
    {
      "m.server": "${matrix_hostname}"
    }
  '';

  # Build a dervation that stores the content of `${server_name}/.well-known/matrix/client`
  well_known_client = pkgs.writeText "well-known-matrix-client" ''
    {
      "m.homeserver": {
        "base_url": "https://${matrix_hostname}"
      }
    }
  '';
in

{
  # Configure Conduit itself
  services.matrix-conduit = {
    enable = true;

    # This causes NixOS to use the flake defined in this repository instead of
    # the build of Conduit built into nixpkgs.
    package = flake-inputs.conduit.packages.${pkgs.system}.default;

    settings.global = {
      inherit server_name;
    };
  };

  # Configure automated TLS acquisition/renewal
  security.acme = {
    acceptTerms = true;
    defaults = {
      email = admin_email;
    };
  };

  # ACME data must be readable by the NGINX user
  users.users.nginx.extraGroups = [
    "acme"
  ];

  # Configure NGINX as a reverse proxy
  services.nginx = {
    enable = true;
    recommendedProxySettings = true;

    virtualHosts = {
      "${server_name}" = {
        forceSSL = true;
        enableACME = true;

        listen = [
          {
            addr = "0.0.0.0";
            port = 443;
            ssl = true;
          }
          {
            addr = "0.0.0.0";
            port = 8448;
            ssl = true;
          }
        ];

        extraConfig = ''
          merge_slashes off;
        '';
      };

      "${matrix_hostname}" = {
        forceSSL = true;
        enableACME = true;

        locations."/_matrix/" = {
          proxyPass = "http://backend_conduit$request_uri";
          proxyWebsockets = true;
          extraConfig = ''
            proxy_set_header Host $host;
            proxy_buffering off;
          '';
        };

        locations."=/.well-known/matrix/server" = {
          # Use the contents of the derivation built previously
          alias = "${well_known_server}";

          extraConfig = ''
            # Set the header since by default NGINX thinks it's just bytes
            default_type application/json;
          '';
        };

        locations."=/.well-known/matrix/client" = {
          # Use the contents of the derivation built previously
          alias = "${well_known_client}";

          extraConfig = ''
            # Set the header since by default NGINX thinks it's just bytes
            default_type application/json;

            # https://matrix.org/docs/spec/client_server/r0.4.0#web-browser-clients
            add_header Access-Control-Allow-Origin "*";
          '';
        };
      };
    };

    upstreams = {
      "backend_conduit" = {
        servers = {
          "localhost:${toString config.services.matrix-conduit.settings.global.port}" = { };
        };
      };
    };
  };

  # Open firewall ports for HTTP, HTTPS, and Matrix federation
  networking.firewall.allowedTCPPorts = [ 80 443 8448 ];
  networking.firewall.allowedUDPPorts = [ 80 443 8448 ];
}
```

Now you can rebuild your system configuration and you should be good to go!

[enable_flakes]: https://nixos.wiki/wiki/Flakes#Enable_flakes

[specialargs]: https://nixos.wiki/wiki/Flakes#Using_nix_flakes_with_NixOS

@@ -1,12 +1,11 @@
-use crate::{utils, Error, Result};
+use crate::{services, utils, Error, Result};
 use bytes::BytesMut;
 use ruma::api::{IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken};
 use std::{fmt::Debug, mem, time::Duration};
 use tracing::warn;

-#[tracing::instrument(skip(globals, request))]
+#[tracing::instrument(skip(request))]
 pub(crate) async fn send_request<T: OutgoingRequest>(
-    globals: &crate::database::globals::Globals,
     registration: serde_yaml::Value,
     request: T,
 ) -> Result<T::IncomingResponse>

@@ -46,7 +45,23 @@ where
     *reqwest_request.timeout_mut() = Some(Duration::from_secs(30));

     let url = reqwest_request.url().clone();
-    let mut response = globals.default_client().execute(reqwest_request).await?;
+    let mut response = match services()
+        .globals
+        .default_client()
+        .execute(reqwest_request)
+        .await
+    {
+        Ok(r) => r,
+        Err(e) => {
+            warn!(
+                "Could not send request to appservice {:?} at {}: {}",
+                registration.get("id"),
+                destination,
+                e
+            );
+            return Err(e.into());
+        }
+    };

     // reqwest::Response -> http::Response conversion
     let status = response.status();
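
The recurring change in this and the following source files is mechanical: handlers no longer receive a `db: DatabaseGuard` argument, and every `db.<service>` field access becomes a call through a global `services()` accessor. The commit itself does not show how that accessor is defined, so the sketch below is only an illustration of the general pattern; the names `Services`, `SERVICES`, and `init` are invented for this example and are not taken from the Conduit source.

```rust
use std::sync::OnceLock;

// Stand-in for the bundle of shared service handles a handler needs
// (globals, users, uiaa, rooms, sending, key_backups, admin, ...).
pub struct Services {
    pub server_name: String,
}

// Written once at startup, read from every request handler afterwards.
static SERVICES: OnceLock<Services> = OnceLock::new();

/// Install the service registry; called once before the first request.
pub fn init(services: Services) {
    // A second call simply leaves the first value in place.
    let _ = SERVICES.set(services);
}

/// What handlers call instead of taking a `DatabaseGuard` parameter.
pub fn services() -> &'static Services {
    SERVICES.get().expect("services() called before init()")
}

fn main() {
    init(Services {
        server_name: "example.com".to_owned(),
    });
    assert_eq!(services().server_name, "example.com");
}
```

With something along these lines in place, `send_request` above can drop its `globals` parameter and still reach the shared HTTP client via `services().globals`.
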
@@ -1,13 +1,11 @@
 use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH};
-use crate::{
-    database::{admin::make_user_admin, DatabaseGuard},
-    utils, Error, Result, Ruma,
-};
+use crate::{api::client_server, services, utils, Error, Result, Ruma};
 use ruma::{
     api::client::{
         account::{
-            change_password, deactivate, get_3pids, get_username_availability, register, whoami,
-            ThirdPartyIdRemovalStatus,
+            change_password, deactivate, get_3pids, get_username_availability, register,
+            request_3pid_management_token_via_email, request_3pid_management_token_via_msisdn,
+            whoami, ThirdPartyIdRemovalStatus,
         },
         error::ErrorKind,
         uiaa::{AuthFlow, AuthType, UiaaInfo},

@@ -32,15 +30,16 @@ const RANDOM_USER_ID_LENGTH: usize = 10;
 ///
 /// Note: This will not reserve the username, so the username might become invalid when trying to register
 pub async fn get_register_available_route(
-    db: DatabaseGuard,
-    body: Ruma<get_username_availability::v3::IncomingRequest>,
+    body: Ruma<get_username_availability::v3::Request>,
 ) -> Result<get_username_availability::v3::Response> {
     // Validate user id
-    let user_id =
-        UserId::parse_with_server_name(body.username.to_lowercase(), db.globals.server_name())
+    let user_id = UserId::parse_with_server_name(
+        body.username.to_lowercase(),
+        services().globals.server_name(),
+    )
     .ok()
     .filter(|user_id| {
-        !user_id.is_historical() && user_id.server_name() == db.globals.server_name()
+        !user_id.is_historical() && user_id.server_name() == services().globals.server_name()
     })
     .ok_or(Error::BadRequest(
         ErrorKind::InvalidUsername,

@@ -48,7 +47,7 @@ pub async fn get_register_available_route(
     ))?;

     // Check if username is creative enough
-    if db.users.exists(&user_id)? {
+    if services().users.exists(&user_id)? {
         return Err(Error::BadRequest(
             ErrorKind::UserInUse,
             "Desired user ID is already taken.",

@@ -74,11 +73,8 @@ pub async fn get_register_available_route(
 /// - If type is not guest and no username is given: Always fails after UIAA check
 /// - Creates a new account and populates it with default account data
 /// - If `inhibit_login` is false: Creates a device and returns device id and access_token
-pub async fn register_route(
-    db: DatabaseGuard,
-    body: Ruma<register::v3::IncomingRequest>,
-) -> Result<register::v3::Response> {
-    if !db.globals.allow_registration() && !body.from_appservice {
+pub async fn register_route(body: Ruma<register::v3::Request>) -> Result<register::v3::Response> {
+    if !services().globals.allow_registration() && !body.from_appservice {
         return Err(Error::BadRequest(
             ErrorKind::Forbidden,
             "Registration has been disabled.",

@@ -89,18 +85,20 @@ pub async fn register_route(

     let user_id = match (&body.username, is_guest) {
         (Some(username), false) => {
-            let proposed_user_id =
-                UserId::parse_with_server_name(username.to_lowercase(), db.globals.server_name())
+            let proposed_user_id = UserId::parse_with_server_name(
+                username.to_lowercase(),
+                services().globals.server_name(),
+            )
             .ok()
             .filter(|user_id| {
                 !user_id.is_historical()
-                    && user_id.server_name() == db.globals.server_name()
+                    && user_id.server_name() == services().globals.server_name()
             })
             .ok_or(Error::BadRequest(
                 ErrorKind::InvalidUsername,
                 "Username is invalid.",
             ))?;
-            if db.users.exists(&proposed_user_id)? {
+            if services().users.exists(&proposed_user_id)? {
                 return Err(Error::BadRequest(
                     ErrorKind::UserInUse,
                     "Desired user ID is already taken.",

@@ -111,10 +109,10 @@ pub async fn register_route(
         _ => loop {
             let proposed_user_id = UserId::parse_with_server_name(
                 utils::random_string(RANDOM_USER_ID_LENGTH).to_lowercase(),
-                db.globals.server_name(),
+                services().globals.server_name(),
             )
             .unwrap();
-            if !db.users.exists(&proposed_user_id)? {
+            if !services().users.exists(&proposed_user_id)? {
                 break proposed_user_id;
             }
         },

@@ -133,14 +131,12 @@ pub async fn register_route(

     if !body.from_appservice {
         if let Some(auth) = &body.auth {
-            let (worked, uiaainfo) = db.uiaa.try_auth(
-                &UserId::parse_with_server_name("", db.globals.server_name())
+            let (worked, uiaainfo) = services().uiaa.try_auth(
+                &UserId::parse_with_server_name("", services().globals.server_name())
                     .expect("we know this is valid"),
                 "".into(),
                 auth,
                 &uiaainfo,
-                &db.users,
-                &db.globals,
             )?;
             if !worked {
                 return Err(Error::Uiaa(uiaainfo));

@@ -148,8 +144,8 @@ pub async fn register_route(
             // Success!
         } else if let Some(json) = body.json_body {
             uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
-            db.uiaa.create(
-                &UserId::parse_with_server_name("", db.globals.server_name())
+            services().uiaa.create(
+                &UserId::parse_with_server_name("", services().globals.server_name())
                     .expect("we know this is valid"),
                 "".into(),
                 &uiaainfo,

@@ -168,30 +164,31 @@ pub async fn register_route(
     };

     // Create user
-    db.users.create(&user_id, password)?;
+    services().users.create(&user_id, password)?;

     // Default to pretty displayname
     let mut displayname = user_id.localpart().to_owned();

     // If enabled append lightning bolt to display name (default true)
-    if db.globals.enable_lightning_bolt() {
+    if services().globals.enable_lightning_bolt() {
         displayname.push_str(" ⚡️");
     }

-    db.users
+    services()
+        .users
         .set_displayname(&user_id, Some(displayname.clone()))?;

     // Initial account data
-    db.account_data.update(
+    services().account_data.update(
         None,
         &user_id,
         GlobalAccountDataEventType::PushRules.to_string().into(),
-        &ruma::events::push_rules::PushRulesEvent {
+        &serde_json::to_value(ruma::events::push_rules::PushRulesEvent {
             content: ruma::events::push_rules::PushRulesEventContent {
                 global: push::Ruleset::server_default(&user_id),
             },
-        },
-        &db.globals,
+        })
+        .expect("to json always works"),
     )?;

     // Inhibit login does not work for guests

@@ -200,6 +197,8 @@ pub async fn register_route(
             access_token: None,
             user_id,
             device_id: None,
+            refresh_token: None,
+            expires_in: None,
         });
     }

@@ -215,7 +214,7 @@ pub async fn register_route(
     let token = utils::random_string(TOKEN_LENGTH);

     // Create device for this account
-    db.users.create_device(
+    services().users.create_device(
         &user_id,
         &device_id,
         &token,

@@ -223,26 +222,29 @@ pub async fn register_route(
     )?;

     info!("New user {} registered on this server.", user_id);
-    db.admin
+    services()
+        .admin
         .send_message(RoomMessageEventContent::notice_plain(format!(
-            "New user {} registered on this server.",
-            user_id
+            "New user {user_id} registered on this server."
         )));

     // If this is the first real user, grant them admin privileges
     // Note: the server user, @conduit:servername, is generated first
-    if db.users.count()? == 2 {
-        make_user_admin(&db, &user_id, displayname).await?;
+    if services().users.count()? == 2 {
+        services()
+            .admin
+            .make_user_admin(&user_id, displayname)
+            .await?;
+
         warn!("Granting {} admin privileges as the first user", user_id);
     }

-    db.flush()?;

     Ok(register::v3::Response {
         access_token: Some(token),
         user_id,
         device_id: Some(device_id),
+        refresh_token: None,
+        expires_in: None,
     })
 }

@@ -261,8 +263,7 @@ pub async fn register_route(
 /// - Forgets to-device events
 /// - Triggers device list updates
 pub async fn change_password_route(
-    db: DatabaseGuard,
-    body: Ruma<change_password::v3::IncomingRequest>,
+    body: Ruma<change_password::v3::Request>,
 ) -> Result<change_password::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
     let sender_device = body.sender_device.as_ref().expect("user is authenticated");

@@ -278,49 +279,45 @@ pub async fn change_password_route(
     };

     if let Some(auth) = &body.auth {
-        let (worked, uiaainfo) = db.uiaa.try_auth(
-            sender_user,
-            sender_device,
-            auth,
-            &uiaainfo,
-            &db.users,
-            &db.globals,
-        )?;
+        let (worked, uiaainfo) =
+            services()
+                .uiaa
+                .try_auth(sender_user, sender_device, auth, &uiaainfo)?;
         if !worked {
             return Err(Error::Uiaa(uiaainfo));
         }
         // Success!
     } else if let Some(json) = body.json_body {
         uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
-        db.uiaa
+        services()
+            .uiaa
             .create(sender_user, sender_device, &uiaainfo, &json)?;
         return Err(Error::Uiaa(uiaainfo));
     } else {
         return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
     }

-    db.users
+    services()
+        .users
         .set_password(sender_user, Some(&body.new_password))?;

     if body.logout_devices {
         // Logout all devices except the current one
-        for id in db
+        for id in services()
             .users
             .all_device_ids(sender_user)
             .filter_map(|id| id.ok())
             .filter(|id| id != sender_device)
         {
-            db.users.remove_device(sender_user, &id)?;
+            services().users.remove_device(sender_user, &id)?;
         }
     }

-    db.flush()?;

     info!("User {} changed their password.", sender_user);
-    db.admin
+    services()
+        .admin
         .send_message(RoomMessageEventContent::notice_plain(format!(
-            "User {} changed their password.",
-            sender_user
+            "User {sender_user} changed their password."
         )));

     Ok(change_password::v3::Response {})

@@ -331,17 +328,14 @@ pub async fn change_password_route(
 /// Get user_id of the sender user.
 ///
 /// Note: Also works for Application Services
-pub async fn whoami_route(
-    db: DatabaseGuard,
-    body: Ruma<whoami::v3::Request>,
-) -> Result<whoami::v3::Response> {
+pub async fn whoami_route(body: Ruma<whoami::v3::Request>) -> Result<whoami::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
     let device_id = body.sender_device.as_ref().cloned();

     Ok(whoami::v3::Response {
         user_id: sender_user.clone(),
         device_id,
-        is_guest: db.users.is_deactivated(&sender_user)?,
+        is_guest: services().users.is_deactivated(sender_user)? && !body.from_appservice,
     })
 }

@@ -356,8 +350,7 @@ pub async fn whoami_route(
 /// - Triggers device list updates
 /// - Removes ability to log in again
 pub async fn deactivate_route(
-    db: DatabaseGuard,
-    body: Ruma<deactivate::v3::IncomingRequest>,
+    body: Ruma<deactivate::v3::Request>,
 ) -> Result<deactivate::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
     let sender_device = body.sender_device.as_ref().expect("user is authenticated");

@@ -373,21 +366,18 @@ pub async fn deactivate_route(
     };

     if let Some(auth) = &body.auth {
-        let (worked, uiaainfo) = db.uiaa.try_auth(
-            sender_user,
-            sender_device,
-            auth,
-            &uiaainfo,
-            &db.users,
-            &db.globals,
-        )?;
+        let (worked, uiaainfo) =
+            services()
+                .uiaa
+                .try_auth(sender_user, sender_device, auth, &uiaainfo)?;
         if !worked {
             return Err(Error::Uiaa(uiaainfo));
         }
         // Success!
     } else if let Some(json) = body.json_body {
         uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
-        db.uiaa
+        services()
+            .uiaa
             .create(sender_user, sender_device, &uiaainfo, &json)?;
         return Err(Error::Uiaa(uiaainfo));
     } else {

@@ -395,26 +385,24 @@ pub async fn deactivate_route(
     }

     // Make the user leave all rooms before deactivation
-    db.rooms.leave_all_rooms(&sender_user, &db).await?;
+    client_server::leave_all_rooms(sender_user).await?;

     // Remove devices and mark account as deactivated
-    db.users.deactivate_account(sender_user)?;
+    services().users.deactivate_account(sender_user)?;

     info!("User {} deactivated their account.", sender_user);
-    db.admin
+    services()
+        .admin
         .send_message(RoomMessageEventContent::notice_plain(format!(
-            "User {} deactivated their account.",
-            sender_user
+            "User {sender_user} deactivated their account."
         )));

-    db.flush()?;

     Ok(deactivate::v3::Response {
         id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport,
     })
 }

-/// # `GET _matrix/client/r0/account/3pid`
+/// # `GET _matrix/client/v3/account/3pid`
 ///
 /// Get a list of third party identifiers associated with this account.
 ///

@@ -426,3 +414,31 @@ pub async fn third_party_route(

     Ok(get_3pids::v3::Response::new(Vec::new()))
 }
+
+/// # `POST /_matrix/client/v3/account/3pid/email/requestToken`
+///
+/// "This API should be used to request validation tokens when adding an email address to an account"
+///
+/// - 403 signals that The homeserver does not allow the third party identifier as a contact option.
+pub async fn request_3pid_management_token_via_email_route(
+    _body: Ruma<request_3pid_management_token_via_email::v3::Request>,
+) -> Result<request_3pid_management_token_via_email::v3::Response> {
+    Err(Error::BadRequest(
+        ErrorKind::ThreepidDenied,
+        "Third party identifier is not allowed",
+    ))
+}
+
+/// # `POST /_matrix/client/v3/account/3pid/msisdn/requestToken`
+///
+/// "This API should be used to request validation tokens when adding an phone number to an account"
+///
+/// - 403 signals that The homeserver does not allow the third party identifier as a contact option.
+pub async fn request_3pid_management_token_via_msisdn_route(
+    _body: Ruma<request_3pid_management_token_via_msisdn::v3::Request>,
+) -> Result<request_3pid_management_token_via_msisdn::v3::Response> {
+    Err(Error::BadRequest(
+        ErrorKind::ThreepidDenied,
+        "Third party identifier is not allowed",
+    ))
+}
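
One subtle change in `register_route` above: the push-rules event handed to `account_data.update` is now serialized by the caller into a `serde_json::Value`, hence the `.expect("to json always works")`. The snippet below is only a stand-alone illustration of that conversion; `PushRulesEvent`/`PushRulesContent` here are simplified stand-ins, not the ruma types.

```rust
use serde::Serialize;

// Simplified stand-ins for ruma's PushRulesEvent / PushRulesEventContent.
#[derive(Serialize)]
struct PushRulesContent {
    global: Vec<String>,
}

#[derive(Serialize)]
struct PushRulesEvent {
    content: PushRulesContent,
}

fn main() {
    let event = PushRulesEvent {
        content: PushRulesContent { global: Vec::new() },
    };

    // Mirrors the diff's `.expect("to json always works")`: serializing a plain
    // struct whose fields are all serializable cannot fail here.
    let value = serde_json::to_value(&event).expect("to json always works");

    assert!(value.get("content").is_some());
    println!("{value}");
}
```

Handing over a `Value` presumably lets the new account-data service store arbitrary event types without being generic over them, which fits the `services()`-based API introduced in this commit.
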
@@ -1,4 +1,4 @@
-use crate::{database::DatabaseGuard, Database, Error, Result, Ruma};
+use crate::{services, Error, Result, Ruma};
 use regex::Regex;
 use ruma::{
     api::{

@@ -9,31 +9,35 @@ use ruma::{
         },
         federation,
     },
-    RoomAliasId,
+    OwnedRoomAliasId,
 };

 /// # `PUT /_matrix/client/r0/directory/room/{roomAlias}`
 ///
 /// Creates a new room alias on this server.
 pub async fn create_alias_route(
-    db: DatabaseGuard,
-    body: Ruma<create_alias::v3::IncomingRequest>,
+    body: Ruma<create_alias::v3::Request>,
 ) -> Result<create_alias::v3::Response> {
-    if body.room_alias.server_name() != db.globals.server_name() {
+    if body.room_alias.server_name() != services().globals.server_name() {
         return Err(Error::BadRequest(
             ErrorKind::InvalidParam,
             "Alias is from another server.",
         ));
     }

-    if db.rooms.id_from_alias(&body.room_alias)?.is_some() {
+    if services()
+        .rooms
+        .alias
+        .resolve_local_alias(&body.room_alias)?
+        .is_some()
+    {
         return Err(Error::Conflict("Alias already exists."));
     }

-    db.rooms
-        .set_alias(&body.room_alias, Some(&body.room_id), &db.globals)?;
-
-    db.flush()?;
+    services()
+        .rooms
+        .alias
+        .set_alias(&body.room_alias, &body.room_id)?;

     Ok(create_alias::v3::Response::new())
 }

@@ -45,22 +49,19 @@ pub async fn create_alias_route(
 /// - TODO: additional access control checks
 /// - TODO: Update canonical alias event
 pub async fn delete_alias_route(
-    db: DatabaseGuard,
-    body: Ruma<delete_alias::v3::IncomingRequest>,
+    body: Ruma<delete_alias::v3::Request>,
 ) -> Result<delete_alias::v3::Response> {
-    if body.room_alias.server_name() != db.globals.server_name() {
+    if body.room_alias.server_name() != services().globals.server_name() {
         return Err(Error::BadRequest(
             ErrorKind::InvalidParam,
             "Alias is from another server.",
         ));
     }

-    db.rooms.set_alias(&body.room_alias, None, &db.globals)?;
+    services().rooms.alias.remove_alias(&body.room_alias)?;

     // TODO: update alt_aliases?

-    db.flush()?;

     Ok(delete_alias::v3::Response::new())
 }

@@ -70,23 +71,22 @@ pub async fn delete_alias_route(
 ///
 /// - TODO: Suggest more servers to join via
 pub async fn get_alias_route(
-    db: DatabaseGuard,
-    body: Ruma<get_alias::v3::IncomingRequest>,
+    body: Ruma<get_alias::v3::Request>,
 ) -> Result<get_alias::v3::Response> {
-    get_alias_helper(&db, &body.room_alias).await
+    get_alias_helper(body.body.room_alias).await
 }

 pub(crate) async fn get_alias_helper(
-    db: &Database,
-    room_alias: &RoomAliasId,
+    room_alias: OwnedRoomAliasId,
 ) -> Result<get_alias::v3::Response> {
-    if room_alias.server_name() != db.globals.server_name() {
-        let response = db
+    if room_alias.server_name() != services().globals.server_name() {
+        let response = services()
             .sending
             .send_federation_request(
-                &db.globals,
                 room_alias.server_name(),
-                federation::query::get_room_information::v1::Request { room_alias },
+                federation::query::get_room_information::v1::Request {
+                    room_alias: room_alias.to_owned(),
+                },
             )
             .await?;

@@ -97,10 +97,10 @@ pub(crate) async fn get_alias_helper(
     }

     let mut room_id = None;
-    match db.rooms.id_from_alias(room_alias)? {
+    match services().rooms.alias.resolve_local_alias(&room_alias)? {
         Some(r) => room_id = Some(r),
         None => {
-            for (_id, registration) in db.appservice.all()? {
+            for (_id, registration) in services().appservice.all()? {
                 let aliases = registration
                     .get("namespaces")
                     .and_then(|ns| ns.get("aliases"))

@@ -115,19 +115,26 @@ pub(crate) async fn get_alias_helper(
                 if aliases
                     .iter()
                     .any(|aliases| aliases.is_match(room_alias.as_str()))
-                    && db
+                    && services()
                         .sending
                         .send_appservice_request(
-                            &db.globals,
                             registration,
-                            appservice::query::query_room_alias::v1::Request { room_alias },
+                            appservice::query::query_room_alias::v1::Request {
+                                room_alias: room_alias.clone(),
+                            },
                         )
                         .await
                         .is_ok()
                 {
-                    room_id = Some(db.rooms.id_from_alias(room_alias)?.ok_or_else(|| {
-                        Error::bad_config("Appservice lied to us. Room does not exist.")
-                    })?);
+                    room_id = Some(
+                        services()
+                            .rooms
+                            .alias
+                            .resolve_local_alias(&room_alias)?
+                            .ok_or_else(|| {
+                                Error::bad_config("Appservice lied to us. Room does not exist.")
+                            })?,
+                    );
                     break;
                 }
             }

@@ -146,6 +153,6 @@ pub(crate) async fn get_alias_helper(

     Ok(get_alias::v3::Response::new(
         room_id,
-        vec![db.globals.server_name().to_owned()],
+        vec![services().globals.server_name().to_owned()],
     ))
 }
|
||||||
use crate::{database::DatabaseGuard, Error, Result, Ruma};
|
use crate::{services, Error, Result, Ruma};
|
||||||
use ruma::api::client::{
|
use ruma::api::client::{
|
||||||
backup::{
|
backup::{
|
||||||
add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session,
|
add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session,
|
||||||
|
@ -14,15 +14,12 @@ use ruma::api::client::{
|
||||||
///
|
///
|
||||||
/// Creates a new backup.
|
/// Creates a new backup.
|
||||||
pub async fn create_backup_version_route(
|
pub async fn create_backup_version_route(
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<create_backup_version::v3::Request>,
|
body: Ruma<create_backup_version::v3::Request>,
|
||||||
) -> Result<create_backup_version::v3::Response> {
|
) -> Result<create_backup_version::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let version = db
|
let version = services()
|
||||||
.key_backups
|
.key_backups
|
||||||
.create_backup(sender_user, &body.algorithm, &db.globals)?;
|
.create_backup(sender_user, &body.algorithm)?;
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(create_backup_version::v3::Response { version })
|
Ok(create_backup_version::v3::Response { version })
|
||||||
}
|
}
|
||||||
|
@ -31,14 +28,12 @@ pub async fn create_backup_version_route(
|
||||||
///
|
///
|
||||||
/// Update information about an existing backup. Only `auth_data` can be modified.
|
/// Update information about an existing backup. Only `auth_data` can be modified.
|
||||||
pub async fn update_backup_version_route(
|
pub async fn update_backup_version_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<update_backup_version::v3::Request>,
|
||||||
body: Ruma<update_backup_version::v3::IncomingRequest>,
|
|
||||||
) -> Result<update_backup_version::v3::Response> {
|
) -> Result<update_backup_version::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
db.key_backups
|
services()
|
||||||
.update_backup(sender_user, &body.version, &body.algorithm, &db.globals)?;
|
.key_backups
|
||||||
|
.update_backup(sender_user, &body.version, &body.algorithm)?;
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(update_backup_version::v3::Response {})
|
Ok(update_backup_version::v3::Response {})
|
||||||
}
|
}
|
||||||
|
@ -47,13 +42,12 @@ pub async fn update_backup_version_route(
|
||||||
///
|
///
|
||||||
/// Get information about the latest backup version.
|
/// Get information about the latest backup version.
|
||||||
pub async fn get_latest_backup_info_route(
|
pub async fn get_latest_backup_info_route(
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<get_latest_backup_info::v3::Request>,
|
body: Ruma<get_latest_backup_info::v3::Request>,
|
||||||
) -> Result<get_latest_backup_info::v3::Response> {
|
) -> Result<get_latest_backup_info::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let (version, algorithm) =
|
let (version, algorithm) = services()
|
||||||
db.key_backups
|
.key_backups
|
||||||
.get_latest_backup(sender_user)?
|
.get_latest_backup(sender_user)?
|
||||||
.ok_or(Error::BadRequest(
|
.ok_or(Error::BadRequest(
|
||||||
ErrorKind::NotFound,
|
ErrorKind::NotFound,
|
||||||
|
@ -62,8 +56,8 @@ pub async fn get_latest_backup_info_route(
|
||||||
|
|
||||||
Ok(get_latest_backup_info::v3::Response {
|
Ok(get_latest_backup_info::v3::Response {
|
||||||
algorithm,
|
algorithm,
|
||||||
count: (db.key_backups.count_keys(sender_user, &version)? as u32).into(),
|
count: (services().key_backups.count_keys(sender_user, &version)? as u32).into(),
|
||||||
etag: db.key_backups.get_etag(sender_user, &version)?,
|
etag: services().key_backups.get_etag(sender_user, &version)?,
|
||||||
version,
|
version,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -72,11 +66,10 @@ pub async fn get_latest_backup_info_route(
|
||||||
///
|
///
|
||||||
/// Get information about an existing backup.
|
/// Get information about an existing backup.
|
||||||
pub async fn get_backup_info_route(
|
pub async fn get_backup_info_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_backup_info::v3::Request>,
|
||||||
body: Ruma<get_backup_info::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_backup_info::v3::Response> {
|
) -> Result<get_backup_info::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let algorithm = db
|
let algorithm = services()
|
||||||
.key_backups
|
.key_backups
|
||||||
.get_backup(sender_user, &body.version)?
|
.get_backup(sender_user, &body.version)?
|
||||||
.ok_or(Error::BadRequest(
|
.ok_or(Error::BadRequest(
|
||||||
|
@ -86,8 +79,13 @@ pub async fn get_backup_info_route(
|
||||||
|
|
||||||
Ok(get_backup_info::v3::Response {
|
Ok(get_backup_info::v3::Response {
|
||||||
algorithm,
|
algorithm,
|
||||||
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
count: (services()
|
||||||
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
.key_backups
|
||||||
|
.count_keys(sender_user, &body.version)? as u32)
|
||||||
|
.into(),
|
||||||
|
etag: services()
|
||||||
|
.key_backups
|
||||||
|
.get_etag(sender_user, &body.version)?,
|
||||||
version: body.version.to_owned(),
|
version: body.version.to_owned(),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
@ -98,14 +96,13 @@ pub async fn get_backup_info_route(
|
||||||
///
|
///
|
||||||
/// - Deletes both information about the backup, as well as all key data related to the backup
|
/// - Deletes both information about the backup, as well as all key data related to the backup
|
||||||
pub async fn delete_backup_version_route(
|
pub async fn delete_backup_version_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<delete_backup_version::v3::Request>,
|
||||||
body: Ruma<delete_backup_version::v3::IncomingRequest>,
|
|
||||||
) -> Result<delete_backup_version::v3::Response> {
|
) -> Result<delete_backup_version::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
db.key_backups.delete_backup(sender_user, &body.version)?;
|
services()
|
||||||
|
.key_backups
|
||||||
db.flush()?;
|
.delete_backup(sender_user, &body.version)?;
|
||||||
|
|
||||||
Ok(delete_backup_version::v3::Response {})
|
Ok(delete_backup_version::v3::Response {})
|
||||||
}
|
}
|
||||||
|
@ -118,13 +115,12 @@ pub async fn delete_backup_version_route(
|
||||||
/// - Adds the keys to the backup
|
/// - Adds the keys to the backup
|
||||||
/// - Returns the new number of keys in this backup and the etag
|
/// - Returns the new number of keys in this backup and the etag
|
||||||
pub async fn add_backup_keys_route(
|
pub async fn add_backup_keys_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<add_backup_keys::v3::Request>,
|
||||||
body: Ruma<add_backup_keys::v3::IncomingRequest>,
|
|
||||||
) -> Result<add_backup_keys::v3::Response> {
|
) -> Result<add_backup_keys::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
if Some(&body.version)
|
if Some(&body.version)
|
||||||
!= db
|
!= services()
|
||||||
.key_backups
|
.key_backups
|
||||||
.get_latest_backup_version(sender_user)?
|
.get_latest_backup_version(sender_user)?
|
||||||
.as_ref()
|
.as_ref()
|
||||||
|
@ -137,22 +133,24 @@ pub async fn add_backup_keys_route(
|
||||||
|
|
||||||
for (room_id, room) in &body.rooms {
|
for (room_id, room) in &body.rooms {
|
||||||
for (session_id, key_data) in &room.sessions {
|
for (session_id, key_data) in &room.sessions {
|
||||||
db.key_backups.add_key(
|
services().key_backups.add_key(
|
||||||
sender_user,
|
sender_user,
|
||||||
&body.version,
|
&body.version,
|
||||||
room_id,
|
room_id,
|
||||||
session_id,
|
session_id,
|
||||||
key_data,
|
key_data,
|
||||||
&db.globals,
|
|
||||||
)?
|
)?
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(add_backup_keys::v3::Response {
|
Ok(add_backup_keys::v3::Response {
|
||||||
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
count: (services()
|
||||||
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
.key_backups
|
||||||
|
.count_keys(sender_user, &body.version)? as u32)
|
||||||
|
.into(),
|
||||||
|
etag: services()
|
||||||
|
.key_backups
|
||||||
|
.get_etag(sender_user, &body.version)?,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -164,13 +162,12 @@ pub async fn add_backup_keys_route(
|
||||||
/// - Adds the keys to the backup
|
/// - Adds the keys to the backup
|
||||||
/// - Returns the new number of keys in this backup and the etag
|
/// - Returns the new number of keys in this backup and the etag
|
||||||
pub async fn add_backup_keys_for_room_route(
|
pub async fn add_backup_keys_for_room_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<add_backup_keys_for_room::v3::Request>,
|
||||||
body: Ruma<add_backup_keys_for_room::v3::IncomingRequest>,
|
|
||||||
) -> Result<add_backup_keys_for_room::v3::Response> {
|
) -> Result<add_backup_keys_for_room::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
if Some(&body.version)
|
if Some(&body.version)
|
||||||
!= db
|
!= services()
|
||||||
.key_backups
|
.key_backups
|
||||||
.get_latest_backup_version(sender_user)?
|
.get_latest_backup_version(sender_user)?
|
||||||
.as_ref()
|
.as_ref()
|
||||||
|
@ -182,21 +179,23 @@ pub async fn add_backup_keys_for_room_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
for (session_id, key_data) in &body.sessions {
|
for (session_id, key_data) in &body.sessions {
|
||||||
db.key_backups.add_key(
|
services().key_backups.add_key(
|
||||||
sender_user,
|
sender_user,
|
||||||
&body.version,
|
&body.version,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
session_id,
|
session_id,
|
||||||
key_data,
|
key_data,
|
||||||
&db.globals,
|
|
||||||
)?
|
)?
|
||||||
}
|
}
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(add_backup_keys_for_room::v3::Response {
|
Ok(add_backup_keys_for_room::v3::Response {
|
||||||
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
count: (services()
|
||||||
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
.key_backups
|
||||||
|
.count_keys(sender_user, &body.version)? as u32)
|
||||||
|
.into(),
|
||||||
|
etag: services()
|
||||||
|
.key_backups
|
||||||
|
.get_etag(sender_user, &body.version)?,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -208,13 +207,12 @@ pub async fn add_backup_keys_for_room_route(
|
||||||
/// - Adds the keys to the backup
|
/// - Adds the keys to the backup
|
||||||
/// - Returns the new number of keys in this backup and the etag
|
/// - Returns the new number of keys in this backup and the etag
|
||||||
pub async fn add_backup_keys_for_session_route(
|
pub async fn add_backup_keys_for_session_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<add_backup_keys_for_session::v3::Request>,
|
||||||
body: Ruma<add_backup_keys_for_session::v3::IncomingRequest>,
|
|
||||||
) -> Result<add_backup_keys_for_session::v3::Response> {
|
) -> Result<add_backup_keys_for_session::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
if Some(&body.version)
|
if Some(&body.version)
|
||||||
!= db
|
!= services()
|
||||||
.key_backups
|
.key_backups
|
||||||
.get_latest_backup_version(sender_user)?
|
.get_latest_backup_version(sender_user)?
|
||||||
.as_ref()
|
.as_ref()
|
||||||
|
@ -225,20 +223,22 @@ pub async fn add_backup_keys_for_session_route(
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
db.key_backups.add_key(
|
services().key_backups.add_key(
|
||||||
sender_user,
|
sender_user,
|
||||||
&body.version,
|
&body.version,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
&body.session_id,
|
&body.session_id,
|
||||||
&body.session_data,
|
&body.session_data,
|
||||||
&db.globals,
|
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(add_backup_keys_for_session::v3::Response {
|
Ok(add_backup_keys_for_session::v3::Response {
|
||||||
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
count: (services()
|
||||||
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
.key_backups
|
||||||
|
.count_keys(sender_user, &body.version)? as u32)
|
||||||
|
.into(),
|
||||||
|
etag: services()
|
||||||
|
.key_backups
|
||||||
|
.get_etag(sender_user, &body.version)?,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -246,12 +246,11 @@ pub async fn add_backup_keys_for_session_route(
|
||||||
///
|
///
|
||||||
/// Retrieves all keys from the backup.
|
/// Retrieves all keys from the backup.
|
||||||
pub async fn get_backup_keys_route(
|
pub async fn get_backup_keys_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_backup_keys::v3::Request>,
|
||||||
body: Ruma<get_backup_keys::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_backup_keys::v3::Response> {
|
) -> Result<get_backup_keys::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let rooms = db.key_backups.get_all(sender_user, &body.version)?;
|
let rooms = services().key_backups.get_all(sender_user, &body.version)?;
|
||||||
|
|
||||||
Ok(get_backup_keys::v3::Response { rooms })
|
Ok(get_backup_keys::v3::Response { rooms })
|
||||||
}
|
}
|
||||||
|
@ -260,12 +259,11 @@ pub async fn get_backup_keys_route(
|
||||||
///
|
///
|
||||||
/// Retrieves all keys from the backup for a given room.
|
/// Retrieves all keys from the backup for a given room.
|
||||||
pub async fn get_backup_keys_for_room_route(
|
pub async fn get_backup_keys_for_room_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_backup_keys_for_room::v3::Request>,
|
||||||
body: Ruma<get_backup_keys_for_room::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_backup_keys_for_room::v3::Response> {
|
) -> Result<get_backup_keys_for_room::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let sessions = db
|
let sessions = services()
|
||||||
.key_backups
|
.key_backups
|
||||||
.get_room(sender_user, &body.version, &body.room_id)?;
|
.get_room(sender_user, &body.version, &body.room_id)?;
|
||||||
|
|
||||||
|
@ -276,12 +274,11 @@ pub async fn get_backup_keys_for_room_route(
|
||||||
///
|
///
|
||||||
/// Retrieves a key from the backup.
|
/// Retrieves a key from the backup.
|
||||||
pub async fn get_backup_keys_for_session_route(
|
pub async fn get_backup_keys_for_session_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_backup_keys_for_session::v3::Request>,
|
||||||
body: Ruma<get_backup_keys_for_session::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_backup_keys_for_session::v3::Response> {
|
) -> Result<get_backup_keys_for_session::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let key_data = db
|
let key_data = services()
|
||||||
.key_backups
|
.key_backups
|
||||||
.get_session(sender_user, &body.version, &body.room_id, &body.session_id)?
|
.get_session(sender_user, &body.version, &body.room_id, &body.session_id)?
|
||||||
.ok_or(Error::BadRequest(
|
.ok_or(Error::BadRequest(
|
||||||
|
@ -296,18 +293,22 @@ pub async fn get_backup_keys_for_session_route(
|
||||||
///
|
///
|
||||||
/// Delete the keys from the backup.
|
/// Delete the keys from the backup.
|
||||||
pub async fn delete_backup_keys_route(
|
pub async fn delete_backup_keys_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<delete_backup_keys::v3::Request>,
|
||||||
body: Ruma<delete_backup_keys::v3::IncomingRequest>,
|
|
||||||
) -> Result<delete_backup_keys::v3::Response> {
|
) -> Result<delete_backup_keys::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
db.key_backups.delete_all_keys(sender_user, &body.version)?;
|
services()
|
||||||
|
.key_backups
|
||||||
db.flush()?;
|
.delete_all_keys(sender_user, &body.version)?;
|
||||||
|
|
||||||
Ok(delete_backup_keys::v3::Response {
|
Ok(delete_backup_keys::v3::Response {
|
||||||
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
count: (services()
|
||||||
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
.key_backups
|
||||||
|
.count_keys(sender_user, &body.version)? as u32)
|
||||||
|
.into(),
|
||||||
|
etag: services()
|
||||||
|
.key_backups
|
||||||
|
.get_etag(sender_user, &body.version)?,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -315,19 +316,22 @@ pub async fn delete_backup_keys_route(
|
||||||
///
|
///
|
||||||
/// Delete the keys from the backup for a given room.
|
/// Delete the keys from the backup for a given room.
|
||||||
pub async fn delete_backup_keys_for_room_route(
|
pub async fn delete_backup_keys_for_room_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<delete_backup_keys_for_room::v3::Request>,
|
||||||
body: Ruma<delete_backup_keys_for_room::v3::IncomingRequest>,
|
|
||||||
) -> Result<delete_backup_keys_for_room::v3::Response> {
|
) -> Result<delete_backup_keys_for_room::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
db.key_backups
|
services()
|
||||||
|
.key_backups
|
||||||
.delete_room_keys(sender_user, &body.version, &body.room_id)?;
|
.delete_room_keys(sender_user, &body.version, &body.room_id)?;
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(delete_backup_keys_for_room::v3::Response {
|
Ok(delete_backup_keys_for_room::v3::Response {
|
||||||
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
count: (services()
|
||||||
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
.key_backups
|
||||||
|
.count_keys(sender_user, &body.version)? as u32)
|
||||||
|
.into(),
|
||||||
|
etag: services()
|
||||||
|
.key_backups
|
||||||
|
.get_etag(sender_user, &body.version)?,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -335,18 +339,24 @@ pub async fn delete_backup_keys_for_room_route(
|
||||||
///
|
///
|
||||||
/// Delete a key from the backup.
|
/// Delete a key from the backup.
|
||||||
pub async fn delete_backup_keys_for_session_route(
|
pub async fn delete_backup_keys_for_session_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<delete_backup_keys_for_session::v3::Request>,
|
||||||
body: Ruma<delete_backup_keys_for_session::v3::IncomingRequest>,
|
|
||||||
) -> Result<delete_backup_keys_for_session::v3::Response> {
|
) -> Result<delete_backup_keys_for_session::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
db.key_backups
|
services().key_backups.delete_room_key(
|
||||||
.delete_room_key(sender_user, &body.version, &body.room_id, &body.session_id)?;
|
sender_user,
|
||||||
|
&body.version,
|
||||||
db.flush()?;
|
&body.room_id,
|
||||||
|
&body.session_id,
|
||||||
|
)?;
|
||||||
|
|
||||||
Ok(delete_backup_keys_for_session::v3::Response {
|
Ok(delete_backup_keys_for_session::v3::Response {
|
||||||
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
count: (services()
|
||||||
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
.key_backups
|
||||||
|
.count_keys(sender_user, &body.version)? as u32)
|
||||||
|
.into(),
|
||||||
|
etag: services()
|
||||||
|
.key_backups
|
||||||
|
.get_etag(sender_user, &body.version)?,
|
||||||
})
|
})
|
||||||
}
|
}
|
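The backup handlers above no longer take a `db: DatabaseGuard` argument or call `db.flush()`; they reach the key-backup store through a global `services()` accessor. The following is only a hedged sketch of what such an accessor can look like, using a `OnceLock`-based registry; the real `Services` type is defined outside this patch, so every name below other than `services()` and `key_backups` is an assumption.

    // Illustrative sketch, not code from this commit.
    use std::sync::OnceLock;

    #[derive(Default)]
    pub struct KeyBackups; // stand-in for the real key-backup service

    #[derive(Default)]
    pub struct Services {
        pub key_backups: KeyBackups,
    }

    static SERVICES: OnceLock<Services> = OnceLock::new();

    /// Global accessor used instead of threading a database guard through every route.
    pub fn services() -> &'static Services {
        SERVICES.get_or_init(Services::default)
    }

    fn main() {
        // A handler now writes `services().key_backups` where it used to write `db.key_backups`.
        let _backups: &KeyBackups = &services().key_backups;
    }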
@@ -1,4 +1,4 @@
-use crate::{database::DatabaseGuard, Result, Ruma};
+use crate::{services, Result, Ruma};
 use ruma::api::client::discovery::get_capabilities::{
     self, Capabilities, RoomVersionStability, RoomVersionsCapability,
 };
@@ -8,26 +8,19 @@ use std::collections::BTreeMap;
 ///
 /// Get information on the supported feature set and other relevent capabilities of this server.
 pub async fn get_capabilities_route(
-    db: DatabaseGuard,
-    _body: Ruma<get_capabilities::v3::IncomingRequest>,
+    _body: Ruma<get_capabilities::v3::Request>,
 ) -> Result<get_capabilities::v3::Response> {
     let mut available = BTreeMap::new();
-    if db.globals.allow_unstable_room_versions() {
-        for room_version in &db.globals.unstable_room_versions {
-            available.insert(room_version.clone(), RoomVersionStability::Stable);
-        }
-    } else {
-        for room_version in &db.globals.unstable_room_versions {
+    for room_version in &services().globals.unstable_room_versions {
         available.insert(room_version.clone(), RoomVersionStability::Unstable);
     }
-    }
-    for room_version in &db.globals.stable_room_versions {
+    for room_version in &services().globals.stable_room_versions {
         available.insert(room_version.clone(), RoomVersionStability::Stable);
     }

     let mut capabilities = Capabilities::new();
     capabilities.room_versions = RoomVersionsCapability {
-        default: db.globals.default_room_version(),
+        default: services().globals.default_room_version(),
         available,
     };

@@ -1,4 +1,4 @@
-use crate::{database::DatabaseGuard, Error, Result, Ruma};
+use crate::{services, Error, Result, Ruma};
 use ruma::{
     api::client::{
         config::{
@@ -17,8 +17,7 @@ use serde_json::{json, value::RawValue as RawJsonValue};
 ///
 /// Sets some account data for the sender user.
 pub async fn set_global_account_data_route(
-    db: DatabaseGuard,
-    body: Ruma<set_global_account_data::v3::IncomingRequest>,
+    body: Ruma<set_global_account_data::v3::Request>,
 ) -> Result<set_global_account_data::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");

@@ -27,7 +26,7 @@ pub async fn set_global_account_data_route(

     let event_type = body.event_type.to_string();

-    db.account_data.update(
+    services().account_data.update(
         None,
         sender_user,
         event_type.clone().into(),
@@ -35,11 +34,8 @@ pub async fn set_global_account_data_route(
             "type": event_type,
             "content": data,
         }),
-        &db.globals,
     )?;

-    db.flush()?;
-
     Ok(set_global_account_data::v3::Response {})
 }

@@ -47,8 +43,7 @@ pub async fn set_global_account_data_route(
 ///
 /// Sets some room account data for the sender user.
 pub async fn set_room_account_data_route(
-    db: DatabaseGuard,
-    body: Ruma<set_room_account_data::v3::IncomingRequest>,
+    body: Ruma<set_room_account_data::v3::Request>,
 ) -> Result<set_room_account_data::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");

@@ -57,7 +52,7 @@ pub async fn set_room_account_data_route(

     let event_type = body.event_type.to_string();

-    db.account_data.update(
+    services().account_data.update(
         Some(&body.room_id),
         sender_user,
         event_type.clone().into(),
@@ -65,11 +60,8 @@ pub async fn set_room_account_data_route(
             "type": event_type,
             "content": data,
         }),
-        &db.globals,
     )?;

-    db.flush()?;
-
     Ok(set_room_account_data::v3::Response {})
 }

@@ -77,12 +69,11 @@ pub async fn set_room_account_data_route(
 ///
 /// Gets some account data for the sender user.
 pub async fn get_global_account_data_route(
-    db: DatabaseGuard,
-    body: Ruma<get_global_account_data::v3::IncomingRequest>,
+    body: Ruma<get_global_account_data::v3::Request>,
 ) -> Result<get_global_account_data::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");

-    let event: Box<RawJsonValue> = db
+    let event: Box<RawJsonValue> = services()
         .account_data
         .get(None, sender_user, body.event_type.clone().into())?
         .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?;
@@ -98,12 +89,11 @@ pub async fn get_global_account_data_route(
 ///
 /// Gets some room account data for the sender user.
 pub async fn get_room_account_data_route(
-    db: DatabaseGuard,
-    body: Ruma<get_room_account_data::v3::IncomingRequest>,
+    body: Ruma<get_room_account_data::v3::Request>,
 ) -> Result<get_room_account_data::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");

-    let event: Box<RawJsonValue> = db
+    let event: Box<RawJsonValue> = services()
         .account_data
         .get(
             Some(&body.room_id),
@@ -1,4 +1,4 @@
-use crate::{database::DatabaseGuard, Error, Result, Ruma};
+use crate::{services, Error, Result, Ruma};
 use ruma::{
     api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions},
     events::StateEventType,
@@ -13,8 +13,7 @@ use tracing::error;
 /// - Only works if the user is joined (TODO: always allow, but only show events if the user was
 /// joined, depending on history_visibility)
 pub async fn get_context_route(
-    db: DatabaseGuard,
-    body: Ruma<get_context::v3::IncomingRequest>,
+    body: Ruma<get_context::v3::Request>,
 ) -> Result<get_context::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
     let sender_device = body.sender_device.as_ref().expect("user is authenticated");
@@ -28,18 +27,20 @@ pub async fn get_context_route(

     let mut lazy_loaded = HashSet::new();

-    let base_pdu_id = db
+    let base_pdu_id = services()
         .rooms
+        .timeline
         .get_pdu_id(&body.event_id)?
         .ok_or(Error::BadRequest(
             ErrorKind::NotFound,
             "Base event id not found.",
         ))?;

-    let base_token = db.rooms.pdu_count(&base_pdu_id)?;
+    let base_token = services().rooms.timeline.pdu_count(&base_pdu_id)?;

-    let base_event = db
+    let base_event = services()
         .rooms
+        .timeline
         .get_pdu_from_id(&base_pdu_id)?
         .ok_or(Error::BadRequest(
             ErrorKind::NotFound,
@@ -48,14 +49,18 @@ pub async fn get_context_route(

     let room_id = base_event.room_id.clone();

-    if !db.rooms.is_joined(sender_user, &room_id)? {
+    if !services()
+        .rooms
+        .state_cache
+        .is_joined(sender_user, &room_id)?
+    {
         return Err(Error::BadRequest(
             ErrorKind::Forbidden,
             "You don't have permission to view this room.",
         ));
     }

-    if !db.rooms.lazy_load_was_sent_before(
+    if !services().rooms.lazy_loading.lazy_load_was_sent_before(
         sender_user,
         sender_device,
         &room_id,
@@ -67,8 +72,9 @@ pub async fn get_context_route(

     let base_event = base_event.to_room_event();

-    let events_before: Vec<_> = db
+    let events_before: Vec<_> = services()
         .rooms
+        .timeline
         .pdus_until(sender_user, &room_id, base_token)?
         .take(
             u32::try_from(body.limit).map_err(|_| {
@@ -80,7 +86,7 @@ pub async fn get_context_route(
         .collect();

     for (_, event) in &events_before {
-        if !db.rooms.lazy_load_was_sent_before(
+        if !services().rooms.lazy_loading.lazy_load_was_sent_before(
             sender_user,
             sender_device,
             &room_id,
@@ -93,7 +99,7 @@ pub async fn get_context_route(

     let start_token = events_before
         .last()
-        .and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok())
+        .and_then(|(pdu_id, _)| services().rooms.timeline.pdu_count(pdu_id).ok())
         .map(|count| count.to_string());

     let events_before: Vec<_> = events_before
@@ -101,8 +107,9 @@ pub async fn get_context_route(
         .map(|(_, pdu)| pdu.to_room_event())
         .collect();

-    let events_after: Vec<_> = db
+    let events_after: Vec<_> = services()
         .rooms
+        .timeline
         .pdus_after(sender_user, &room_id, base_token)?
         .take(
             u32::try_from(body.limit).map_err(|_| {
@@ -114,7 +121,7 @@ pub async fn get_context_route(
         .collect();

     for (_, event) in &events_after {
-        if !db.rooms.lazy_load_was_sent_before(
+        if !services().rooms.lazy_loading.lazy_load_was_sent_before(
             sender_user,
             sender_device,
             &room_id,
@@ -125,23 +132,28 @@ pub async fn get_context_route(
         }
     }

-    let shortstatehash = match db.rooms.pdu_shortstatehash(
+    let shortstatehash = match services().rooms.state_accessor.pdu_shortstatehash(
         events_after
             .last()
             .map_or(&*body.event_id, |(_, e)| &*e.event_id),
     )? {
         Some(s) => s,
-        None => db
+        None => services()
             .rooms
-            .current_shortstatehash(&room_id)?
+            .state
+            .get_room_shortstatehash(&room_id)?
             .expect("All rooms have state"),
     };

-    let state_ids = db.rooms.state_full_ids(shortstatehash).await?;
+    let state_ids = services()
+        .rooms
+        .state_accessor
+        .state_full_ids(shortstatehash)
+        .await?;

     let end_token = events_after
         .last()
-        .and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok())
+        .and_then(|(pdu_id, _)| services().rooms.timeline.pdu_count(pdu_id).ok())
         .map(|count| count.to_string());

     let events_after: Vec<_> = events_after
@@ -152,10 +164,13 @@ pub async fn get_context_route(
     let mut state = Vec::new();

     for (shortstatekey, id) in state_ids {
-        let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?;
+        let (event_type, state_key) = services()
+            .rooms
+            .short
+            .get_statekey_from_short(shortstatekey)?;

         if event_type != StateEventType::RoomMember {
-            let pdu = match db.rooms.get_pdu(&id)? {
+            let pdu = match services().rooms.timeline.get_pdu(&id)? {
                 Some(pdu) => pdu,
                 None => {
                     error!("Pdu in state not found: {}", id);
@@ -164,7 +179,7 @@ pub async fn get_context_route(
             };
             state.push(pdu.to_state_event());
         } else if !lazy_load_enabled || lazy_loaded.contains(&state_key) {
-            let pdu = match db.rooms.get_pdu(&id)? {
+            let pdu = match services().rooms.timeline.get_pdu(&id)? {
                 Some(pdu) => pdu,
                 None => {
                     error!("Pdu in state not found: {}", id);
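In get_context_route the former monolithic `db.rooms` interface is now reached through named sub-services (`timeline`, `state_cache`, `state_accessor`, `short`, `lazy_loading`). The snippet below is only a hedged illustration of that grouping; all type names are placeholders, since the real service definitions are not part of this diff.

    // Sketch only: the kind of grouping implied by the `services().rooms.<sub_service>` calls above.
    #[derive(Default)]
    pub struct Timeline;
    #[derive(Default)]
    pub struct StateCache;
    #[derive(Default)]
    pub struct StateAccessor;
    #[derive(Default)]
    pub struct Short;
    #[derive(Default)]
    pub struct LazyLoading;

    #[derive(Default)]
    pub struct Rooms {
        pub timeline: Timeline,            // PDU storage: get_pdu, pdus_until, pdus_after, ...
        pub state_cache: StateCache,       // membership lookups such as is_joined
        pub state_accessor: StateAccessor, // state queries such as state_full_ids
        pub short: Short,                  // shortstatekey <-> (event type, state key) mapping
        pub lazy_loading: LazyLoading,     // per-device lazy-loading bookkeeping
    }

    fn main() {
        let rooms = Rooms::default();
        let _ = (&rooms.timeline, &rooms.state_cache, &rooms.state_accessor);
    }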
@@ -1,4 +1,4 @@
-use crate::{database::DatabaseGuard, utils, Error, Result, Ruma};
+use crate::{services, utils, Error, Result, Ruma};
 use ruma::api::client::{
     device::{self, delete_device, delete_devices, get_device, get_devices, update_device},
     error::ErrorKind,
@@ -11,12 +11,11 @@ use super::SESSION_ID_LENGTH;
 ///
 /// Get metadata on all devices of the sender user.
 pub async fn get_devices_route(
-    db: DatabaseGuard,
     body: Ruma<get_devices::v3::Request>,
 ) -> Result<get_devices::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");

-    let devices: Vec<device::Device> = db
+    let devices: Vec<device::Device> = services()
         .users
         .all_devices_metadata(sender_user)
         .filter_map(|r| r.ok()) // Filter out buggy devices
@@ -29,12 +28,11 @@ pub async fn get_devices_route(
 ///
 /// Get metadata on a single device of the sender user.
 pub async fn get_device_route(
-    db: DatabaseGuard,
-    body: Ruma<get_device::v3::IncomingRequest>,
+    body: Ruma<get_device::v3::Request>,
 ) -> Result<get_device::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");

-    let device = db
+    let device = services()
         .users
         .get_device_metadata(sender_user, &body.body.device_id)?
         .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?;
@@ -46,23 +44,21 @@ pub async fn get_device_route(
 ///
 /// Updates the metadata on a given device of the sender user.
 pub async fn update_device_route(
-    db: DatabaseGuard,
-    body: Ruma<update_device::v3::IncomingRequest>,
+    body: Ruma<update_device::v3::Request>,
 ) -> Result<update_device::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");

-    let mut device = db
+    let mut device = services()
         .users
         .get_device_metadata(sender_user, &body.device_id)?
         .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?;

     device.display_name = body.display_name.clone();

-    db.users
+    services()
+        .users
         .update_device_metadata(sender_user, &body.device_id, &device)?;

-    db.flush()?;
-
     Ok(update_device::v3::Response {})
 }

@@ -76,8 +72,7 @@ pub async fn update_device_route(
 /// - Forgets to-device events
 /// - Triggers device list updates
 pub async fn delete_device_route(
-    db: DatabaseGuard,
-    body: Ruma<delete_device::v3::IncomingRequest>,
+    body: Ruma<delete_device::v3::Request>,
 ) -> Result<delete_device::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
     let sender_device = body.sender_device.as_ref().expect("user is authenticated");
@@ -94,30 +89,27 @@ pub async fn delete_device_route(
     };

     if let Some(auth) = &body.auth {
-        let (worked, uiaainfo) = db.uiaa.try_auth(
-            sender_user,
-            sender_device,
-            auth,
-            &uiaainfo,
-            &db.users,
-            &db.globals,
-        )?;
+        let (worked, uiaainfo) =
+            services()
+                .uiaa
+                .try_auth(sender_user, sender_device, auth, &uiaainfo)?;
         if !worked {
             return Err(Error::Uiaa(uiaainfo));
         }
         // Success!
     } else if let Some(json) = body.json_body {
         uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
-        db.uiaa
+        services()
+            .uiaa
             .create(sender_user, sender_device, &uiaainfo, &json)?;
         return Err(Error::Uiaa(uiaainfo));
     } else {
         return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
     }

-    db.users.remove_device(sender_user, &body.device_id)?;
-
-    db.flush()?;
+    services()
+        .users
+        .remove_device(sender_user, &body.device_id)?;

     Ok(delete_device::v3::Response {})
 }
@@ -134,8 +126,7 @@ pub async fn delete_device_route(
 /// - Forgets to-device events
 /// - Triggers device list updates
 pub async fn delete_devices_route(
-    db: DatabaseGuard,
-    body: Ruma<delete_devices::v3::IncomingRequest>,
+    body: Ruma<delete_devices::v3::Request>,
 ) -> Result<delete_devices::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
     let sender_device = body.sender_device.as_ref().expect("user is authenticated");
@@ -152,21 +143,18 @@ pub async fn delete_devices_route(
     };

     if let Some(auth) = &body.auth {
-        let (worked, uiaainfo) = db.uiaa.try_auth(
-            sender_user,
-            sender_device,
-            auth,
-            &uiaainfo,
-            &db.users,
-            &db.globals,
-        )?;
+        let (worked, uiaainfo) =
+            services()
+                .uiaa
+                .try_auth(sender_user, sender_device, auth, &uiaainfo)?;
         if !worked {
             return Err(Error::Uiaa(uiaainfo));
         }
         // Success!
     } else if let Some(json) = body.json_body {
         uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
-        db.uiaa
+        services()
+            .uiaa
             .create(sender_user, sender_device, &uiaainfo, &json)?;
         return Err(Error::Uiaa(uiaainfo));
     } else {
@@ -174,10 +162,8 @@ pub async fn delete_devices_route(
     }

     for device_id in &body.devices {
-        db.users.remove_device(sender_user, device_id)?
+        services().users.remove_device(sender_user, device_id)?
     }

-    db.flush()?;
-
     Ok(delete_devices::v3::Response {})
 }
@@ -1,4 +1,4 @@
-use crate::{database::DatabaseGuard, Database, Error, Result, Ruma};
+use crate::{services, Error, Result, Ruma};
 use ruma::{
     api::{
         client::{
@@ -11,14 +11,12 @@ use ruma::{
         },
         federation,
     },
-    directory::{
-        Filter, IncomingFilter, IncomingRoomNetwork, PublicRoomJoinRule, PublicRoomsChunk,
-        RoomNetwork,
-    },
+    directory::{Filter, PublicRoomJoinRule, PublicRoomsChunk, RoomNetwork},
     events::{
         room::{
             avatar::RoomAvatarEventContent,
             canonical_alias::RoomCanonicalAliasEventContent,
+            create::RoomCreateEventContent,
             guest_access::{GuestAccess, RoomGuestAccessEventContent},
             history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
             join_rules::{JoinRule, RoomJoinRulesEventContent},
@@ -29,7 +27,7 @@ use ruma::{
     },
     ServerName, UInt,
 };
-use tracing::{info, warn};
+use tracing::{error, info, warn};

 /// # `POST /_matrix/client/r0/publicRooms`
 ///
@@ -37,11 +35,9 @@ use tracing::{info, warn};
 ///
 /// - Rooms are ordered by the number of joined members
 pub async fn get_public_rooms_filtered_route(
-    db: DatabaseGuard,
-    body: Ruma<get_public_rooms_filtered::v3::IncomingRequest>,
+    body: Ruma<get_public_rooms_filtered::v3::Request>,
 ) -> Result<get_public_rooms_filtered::v3::Response> {
     get_public_rooms_filtered_helper(
-        &db,
         body.server.as_deref(),
         body.limit,
         body.since.as_deref(),
@@ -57,16 +53,14 @@ pub async fn get_public_rooms_filtered_route(
 ///
 /// - Rooms are ordered by the number of joined members
 pub async fn get_public_rooms_route(
-    db: DatabaseGuard,
-    body: Ruma<get_public_rooms::v3::IncomingRequest>,
+    body: Ruma<get_public_rooms::v3::Request>,
 ) -> Result<get_public_rooms::v3::Response> {
     let response = get_public_rooms_filtered_helper(
-        &db,
         body.server.as_deref(),
         body.limit,
         body.since.as_deref(),
-        &IncomingFilter::default(),
-        &IncomingRoomNetwork::Matrix,
+        &Filter::default(),
+        &RoomNetwork::Matrix,
     )
     .await?;

@@ -84,17 +78,21 @@ pub async fn get_public_rooms_route(
 ///
 /// - TODO: Access control checks
 pub async fn set_room_visibility_route(
-    db: DatabaseGuard,
-    body: Ruma<set_room_visibility::v3::IncomingRequest>,
+    body: Ruma<set_room_visibility::v3::Request>,
 ) -> Result<set_room_visibility::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");

+    if !services().rooms.metadata.exists(&body.room_id)? {
+        // Return 404 if the room doesn't exist
+        return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found"));
+    }
+
     match &body.visibility {
         room::Visibility::Public => {
-            db.rooms.set_public(&body.room_id, true)?;
+            services().rooms.directory.set_public(&body.room_id)?;
             info!("{} made {} public", sender_user, body.room_id);
         }
-        room::Visibility::Private => db.rooms.set_public(&body.room_id, false)?,
+        room::Visibility::Private => services().rooms.directory.set_not_public(&body.room_id)?,
         _ => {
             return Err(Error::BadRequest(
                 ErrorKind::InvalidParam,
@@ -103,8 +101,6 @@ pub async fn set_room_visibility_route(
         }
     }

-    db.flush()?;
-
     Ok(set_room_visibility::v3::Response {})
 }

@@ -112,11 +108,15 @@ pub async fn set_room_visibility_route(
 ///
 /// Gets the visibility of a given room in the room directory.
 pub async fn get_room_visibility_route(
-    db: DatabaseGuard,
-    body: Ruma<get_room_visibility::v3::IncomingRequest>,
+    body: Ruma<get_room_visibility::v3::Request>,
 ) -> Result<get_room_visibility::v3::Response> {
+    if !services().rooms.metadata.exists(&body.room_id)? {
+        // Return 404 if the room doesn't exist
+        return Err(Error::BadRequest(ErrorKind::NotFound, "Room not found"));
+    }
+
     Ok(get_room_visibility::v3::Response {
-        visibility: if db.rooms.is_public_room(&body.room_id)? {
+        visibility: if services().rooms.directory.is_public_room(&body.room_id)? {
             room::Visibility::Public
         } else {
             room::Visibility::Private
@@ -125,25 +125,25 @@ pub async fn get_room_visibility_route(
 }

 pub(crate) async fn get_public_rooms_filtered_helper(
-    db: &Database,
     server: Option<&ServerName>,
     limit: Option<UInt>,
     since: Option<&str>,
-    filter: &IncomingFilter,
-    _network: &IncomingRoomNetwork,
+    filter: &Filter,
+    _network: &RoomNetwork,
 ) -> Result<get_public_rooms_filtered::v3::Response> {
-    if let Some(other_server) = server.filter(|server| *server != db.globals.server_name().as_str())
+    if let Some(other_server) =
+        server.filter(|server| *server != services().globals.server_name().as_str())
     {
-        let response = db
+        let response = services()
             .sending
             .send_federation_request(
-                &db.globals,
                 other_server,
                 federation::directory::get_public_rooms_filtered::v1::Request {
                     limit,
-                    since,
+                    since: since.map(ToOwned::to_owned),
                     filter: Filter {
-                        generic_search_term: filter.generic_search_term.as_deref(),
+                        generic_search_term: filter.generic_search_term.clone(),
+                        room_types: filter.room_types.clone(),
                     },
                     room_network: RoomNetwork::Matrix,
                 },
@@ -184,15 +184,17 @@ pub(crate) async fn get_public_rooms_filtered_helper(
         }
     }

-    let mut all_rooms: Vec<_> = db
+    let mut all_rooms: Vec<_> = services()
         .rooms
+        .directory
         .public_rooms()
         .map(|room_id| {
             let room_id = room_id?;

             let chunk = PublicRoomsChunk {
-                canonical_alias: db
+                canonical_alias: services()
                     .rooms
+                    .state_accessor
                     .room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")?
                     .map_or(Ok(None), |s| {
                         serde_json::from_str(s.content.get())
@@ -201,8 +203,9 @@ pub(crate) async fn get_public_rooms_filtered_helper(
                             Error::bad_database("Invalid canonical alias event in database.")
                         })
                     })?,
-                name: db
+                name: services()
                     .rooms
+                    .state_accessor
                     .room_state_get(&room_id, &StateEventType::RoomName, "")?
                     .map_or(Ok(None), |s| {
                         serde_json::from_str(s.content.get())
@@ -211,8 +214,9 @@ pub(crate) async fn get_public_rooms_filtered_helper(
                             Error::bad_database("Invalid room name event in database.")
                         })
                     })?,
-                num_joined_members: db
+                num_joined_members: services()
                     .rooms
+                    .state_cache
                     .room_joined_count(&room_id)?
                     .unwrap_or_else(|| {
                         warn!("Room {} has no member count", room_id);
@@ -220,8 +224,9 @@ pub(crate) async fn get_public_rooms_filtered_helper(
                     })
                     .try_into()
                     .expect("user count should not be that big"),
-                topic: db
+                topic: services()
                     .rooms
+                    .state_accessor
                     .room_state_get(&room_id, &StateEventType::RoomTopic, "")?
                     .map_or(Ok(None), |s| {
                         serde_json::from_str(s.content.get())
@@ -230,8 +235,9 @@ pub(crate) async fn get_public_rooms_filtered_helper(
                             Error::bad_database("Invalid room topic event in database.")
                         })
                     })?,
-                world_readable: db
+                world_readable: services()
                     .rooms
+                    .state_accessor
                     .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")?
                     .map_or(Ok(false), |s| {
                         serde_json::from_str(s.content.get())
@@ -244,8 +250,9 @@ pub(crate) async fn get_public_rooms_filtered_helper(
                             )
                         })
                     })?,
-                guest_can_join: db
+                guest_can_join: services()
                     .rooms
+                    .state_accessor
                     .room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")?
                     .map_or(Ok(false), |s| {
                         serde_json::from_str(s.content.get())
@@ -256,8 +263,9 @@ pub(crate) async fn get_public_rooms_filtered_helper(
                             Error::bad_database("Invalid room guest access event in database.")
                         })
                     })?,
-                avatar_url: db
+                avatar_url: services()
                     .rooms
+                    .state_accessor
                     .room_state_get(&room_id, &StateEventType::RoomAvatar, "")?
                     .map(|s| {
                         serde_json::from_str(s.content.get())
@@ -269,8 +277,9 @@ pub(crate) async fn get_public_rooms_filtered_helper(
                     .transpose()?
                     // url is now an Option<String> so we must flatten
                     .flatten(),
-                join_rule: db
+                join_rule: services()
                     .rooms
+                    .state_accessor
                     .room_state_get(&room_id, &StateEventType::RoomJoinRules, "")?
                     .map(|s| {
                         serde_json::from_str(s.content.get())
@@ -279,15 +288,28 @@ pub(crate) async fn get_public_rooms_filtered_helper(
                                 JoinRule::Knock => Some(PublicRoomJoinRule::Knock),
                                 _ => None,
                             })
-                            .map_err(|_| {
-                                Error::bad_database("Invalid room join rule event in database.")
+                            .map_err(|e| {
+                                error!("Invalid room join rule event in database: {}", e);
+                                Error::BadDatabase("Invalid room join rule event in database.")
                             })
                     })
                     .transpose()?
                     .flatten()
-                    .ok_or(Error::bad_database(
-                        "Invalid room join rule event in database.",
-                    ))?,
+                    .ok_or_else(|| Error::bad_database("Missing room join rule event for room."))?,
+                room_type: services()
+                    .rooms
+                    .state_accessor
+                    .room_state_get(&room_id, &StateEventType::RoomCreate, "")?
+                    .map(|s| {
+                        serde_json::from_str::<RoomCreateEventContent>(s.content.get()).map_err(
+                            |e| {
+                                error!("Invalid room create event in database: {}", e);
+                                Error::BadDatabase("Invalid room create event in database.")
+                            },
+                        )
+                    })
+                    .transpose()?
+                    .and_then(|e| e.room_type),
                 room_id,
             };
             Ok(chunk)
@@ -339,7 +361,7 @@ pub(crate) async fn get_public_rooms_filtered_helper(
     let prev_batch = if num_since == 0 {
         None
     } else {
-        Some(format!("p{}", num_since))
+        Some(format!("p{num_since}"))
     };

     let next_batch = if chunk.len() < limit as usize {
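The join-rule and room-create branches above now log the underlying serde error before mapping it to a generic database error, instead of silently discarding it. Below is only a minimal sketch of that pattern; the `Error` enum is a stand-in rather than the crate's real error type, and `tracing` plus `serde_json` are assumed dependencies.

    // Sketch of the "log the cause, return a generic error" pattern used above.
    use tracing::error;

    #[derive(Debug)]
    pub enum Error {
        BadDatabase(&'static str),
    }

    pub fn parse_state_event(raw: &str) -> Result<serde_json::Value, Error> {
        serde_json::from_str(raw).map_err(|e| {
            // Keep the detailed serde error in the server log...
            error!("Invalid state event in database: {}", e);
            // ...but hand the caller only a stable, generic message.
            Error::BadDatabase("Invalid state event in database.")
        })
    }

    fn main() {
        assert!(parse_state_event("{\"join_rule\": \"public\"}").is_ok());
        assert!(parse_state_event("not json").is_err());
    }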
@ -1,4 +1,4 @@
|
||||||
use crate::{database::DatabaseGuard, Error, Result, Ruma};
|
use crate::{services, Error, Result, Ruma};
|
||||||
use ruma::api::client::{
|
use ruma::api::client::{
|
||||||
error::ErrorKind,
|
error::ErrorKind,
|
||||||
filter::{create_filter, get_filter},
|
filter::{create_filter, get_filter},
|
||||||
|
@ -10,11 +10,10 @@ use ruma::api::client::{
|
||||||
///
|
///
|
||||||
/// - A user can only access their own filters
|
/// - A user can only access their own filters
|
||||||
pub async fn get_filter_route(
|
pub async fn get_filter_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_filter::v3::Request>,
|
||||||
body: Ruma<get_filter::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_filter::v3::Response> {
|
) -> Result<get_filter::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let filter = match db.users.get_filter(sender_user, &body.filter_id)? {
|
let filter = match services().users.get_filter(sender_user, &body.filter_id)? {
|
||||||
Some(filter) => filter,
|
Some(filter) => filter,
|
||||||
None => return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found.")),
|
None => return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found.")),
|
||||||
};
|
};
|
||||||
|
@ -26,11 +25,10 @@ pub async fn get_filter_route(
|
||||||
///
|
///
|
||||||
/// Creates a new filter to be used by other endpoints.
|
/// Creates a new filter to be used by other endpoints.
|
||||||
pub async fn create_filter_route(
|
pub async fn create_filter_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<create_filter::v3::Request>,
|
||||||
body: Ruma<create_filter::v3::IncomingRequest>,
|
|
||||||
) -> Result<create_filter::v3::Response> {
|
) -> Result<create_filter::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
Ok(create_filter::v3::Response::new(
|
Ok(create_filter::v3::Response::new(
|
||||||
db.users.create_filter(sender_user, &body.filter)?,
|
services().users.create_filter(sender_user, &body.filter)?,
|
||||||
))
|
))
|
||||||
}
|
}
|
|
@ -1,5 +1,5 @@
|
||||||
use super::SESSION_ID_LENGTH;
|
use super::SESSION_ID_LENGTH;
|
||||||
use crate::{database::DatabaseGuard, utils, Database, Error, Result, Ruma};
|
use crate::{services, utils, Error, Result, Ruma};
|
||||||
use futures_util::{stream::FuturesUnordered, StreamExt};
|
use futures_util::{stream::FuturesUnordered, StreamExt};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::{
|
api::{
|
||||||
|
@ -14,7 +14,7 @@ use ruma::{
|
||||||
federation,
|
federation,
|
||||||
},
|
},
|
||||||
serde::Raw,
|
serde::Raw,
|
||||||
DeviceId, DeviceKeyAlgorithm, UserId,
|
DeviceKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId,
|
||||||
};
|
};
|
||||||
use serde_json::json;
|
use serde_json::json;
|
||||||
use std::collections::{BTreeMap, HashMap, HashSet};
|
use std::collections::{BTreeMap, HashMap, HashSet};
|
||||||
|
@ -26,39 +26,35 @@ use std::collections::{BTreeMap, HashMap, HashSet};
|
||||||
/// - Adds one time keys
|
/// - Adds one time keys
|
||||||
/// - If there are no device keys yet: Adds device keys (TODO: merge with existing keys?)
|
/// - If there are no device keys yet: Adds device keys (TODO: merge with existing keys?)
|
||||||
pub async fn upload_keys_route(
|
pub async fn upload_keys_route(
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<upload_keys::v3::Request>,
|
body: Ruma<upload_keys::v3::Request>,
|
||||||
) -> Result<upload_keys::v3::Response> {
|
) -> Result<upload_keys::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
for (key_key, key_value) in &body.one_time_keys {
|
for (key_key, key_value) in &body.one_time_keys {
|
||||||
db.users
|
services()
|
||||||
.add_one_time_key(sender_user, sender_device, key_key, key_value, &db.globals)?;
|
.users
|
||||||
|
.add_one_time_key(sender_user, sender_device, key_key, key_value)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(device_keys) = &body.device_keys {
|
if let Some(device_keys) = &body.device_keys {
|
||||||
// TODO: merge this and the existing event?
|
// TODO: merge this and the existing event?
|
||||||
// This check is needed to assure that signatures are kept
|
// This check is needed to assure that signatures are kept
|
||||||
if db
|
if services()
|
||||||
.users
|
.users
|
||||||
.get_device_keys(sender_user, sender_device)?
|
.get_device_keys(sender_user, sender_device)?
|
||||||
.is_none()
|
.is_none()
|
||||||
{
|
{
|
||||||
db.users.add_device_keys(
|
services()
|
||||||
sender_user,
|
.users
|
||||||
sender_device,
|
.add_device_keys(sender_user, sender_device, device_keys)?;
|
||||||
device_keys,
|
|
||||||
&db.rooms,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(upload_keys::v3::Response {
|
Ok(upload_keys::v3::Response {
|
||||||
one_time_key_counts: db.users.count_one_time_keys(sender_user, sender_device)?,
|
one_time_key_counts: services()
|
||||||
|
.users
|
||||||
|
.count_one_time_keys(sender_user, sender_device)?,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -69,19 +65,11 @@ pub async fn upload_keys_route(
|
||||||
/// - Always fetches users from other servers over federation
|
/// - Always fetches users from other servers over federation
|
||||||
/// - Gets master keys, self-signing keys, user signing keys and device keys.
|
/// - Gets master keys, self-signing keys, user signing keys and device keys.
|
||||||
/// - The master and self-signing keys contain signatures that the user is allowed to see
|
/// - The master and self-signing keys contain signatures that the user is allowed to see
|
||||||
pub async fn get_keys_route(
|
pub async fn get_keys_route(body: Ruma<get_keys::v3::Request>) -> Result<get_keys::v3::Response> {
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<get_keys::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_keys::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let response = get_keys_helper(
|
let response =
|
||||||
Some(sender_user),
|
get_keys_helper(Some(sender_user), &body.device_keys, |u| u == sender_user).await?;
|
||||||
&body.device_keys,
|
|
||||||
|u| u == sender_user,
|
|
||||||
&db,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
Ok(response)
|
Ok(response)
|
||||||
}
|
}
|
||||||
|
@ -90,12 +78,9 @@ pub async fn get_keys_route(
|
||||||
///
|
///
|
||||||
/// Claims one-time keys
|
/// Claims one-time keys
|
||||||
pub async fn claim_keys_route(
|
pub async fn claim_keys_route(
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<claim_keys::v3::Request>,
|
body: Ruma<claim_keys::v3::Request>,
|
||||||
) -> Result<claim_keys::v3::Response> {
|
) -> Result<claim_keys::v3::Response> {
|
||||||
let response = claim_keys_helper(&body.one_time_keys, &db).await?;
|
let response = claim_keys_helper(&body.one_time_keys).await?;
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(response)
|
Ok(response)
|
||||||
}
|
}
|
||||||
|
@ -106,8 +91,7 @@ pub async fn claim_keys_route(
|
||||||
///
|
///
|
||||||
/// - Requires UIAA to verify password
|
/// - Requires UIAA to verify password
|
||||||
pub async fn upload_signing_keys_route(
|
pub async fn upload_signing_keys_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<upload_signing_keys::v3::Request>,
|
||||||
body: Ruma<upload_signing_keys::v3::IncomingRequest>,
|
|
||||||
) -> Result<upload_signing_keys::v3::Response> {
|
) -> Result<upload_signing_keys::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
@ -124,21 +108,18 @@ pub async fn upload_signing_keys_route(
|
||||||
};
|
};
|
||||||
|
|
||||||
if let Some(auth) = &body.auth {
|
if let Some(auth) = &body.auth {
|
||||||
let (worked, uiaainfo) = db.uiaa.try_auth(
|
let (worked, uiaainfo) =
|
||||||
sender_user,
|
services()
|
||||||
sender_device,
|
.uiaa
|
||||||
auth,
|
.try_auth(sender_user, sender_device, auth, &uiaainfo)?;
|
||||||
&uiaainfo,
|
|
||||||
&db.users,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
if !worked {
|
if !worked {
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
}
|
}
|
||||||
// Success!
|
// Success!
|
||||||
} else if let Some(json) = body.json_body {
|
} else if let Some(json) = body.json_body {
|
||||||
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
||||||
db.uiaa
|
services()
|
||||||
|
.uiaa
|
||||||
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
} else {
|
} else {
|
||||||
|
@ -146,18 +127,14 @@ pub async fn upload_signing_keys_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(master_key) = &body.master_key {
|
if let Some(master_key) = &body.master_key {
|
||||||
db.users.add_cross_signing_keys(
|
services().users.add_cross_signing_keys(
|
||||||
sender_user,
|
sender_user,
|
||||||
master_key,
|
master_key,
|
||||||
&body.self_signing_key,
|
&body.self_signing_key,
|
||||||
&body.user_signing_key,
|
&body.user_signing_key,
|
||||||
&db.rooms,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(upload_signing_keys::v3::Response {})
|
Ok(upload_signing_keys::v3::Response {})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -165,16 +142,28 @@ pub async fn upload_signing_keys_route(
|
||||||
///
|
///
|
||||||
/// Uploads end-to-end key signatures from the sender user.
|
/// Uploads end-to-end key signatures from the sender user.
|
||||||
pub async fn upload_signatures_route(
|
pub async fn upload_signatures_route(
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<upload_signatures::v3::Request>,
|
body: Ruma<upload_signatures::v3::Request>,
|
||||||
) -> Result<upload_signatures::v3::Response> {
|
) -> Result<upload_signatures::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
for (user_id, signed_keys) in &body.signed_keys {
|
for (user_id, keys) in &body.signed_keys {
|
||||||
for (key_id, signed_key) in signed_keys {
|
for (key_id, key) in keys {
|
||||||
let signed_key = serde_json::to_value(signed_key).unwrap();
|
let key = serde_json::to_value(key)
|
||||||
|
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid key JSON"))?;
|
||||||
|
|
||||||
for signature in signed_key
|
let is_signed_key = match key.get("usage") {
|
||||||
|
Some(usage) => usage
|
||||||
|
.as_array()
|
||||||
|
.map(|usage| !usage.contains(&json!("master")))
|
||||||
|
.unwrap_or(false),
|
||||||
|
None => true,
|
||||||
|
};
|
||||||
|
|
||||||
|
if !is_signed_key {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
for signature in key
|
||||||
.get("signatures")
|
.get("signatures")
|
||||||
.ok_or(Error::BadRequest(
|
.ok_or(Error::BadRequest(
|
||||||
ErrorKind::InvalidParam,
|
ErrorKind::InvalidParam,
|
||||||
|
@ -205,20 +194,13 @@ pub async fn upload_signatures_route(
|
||||||
))?
|
))?
|
||||||
.to_owned(),
|
.to_owned(),
|
||||||
);
|
);
|
||||||
db.users.sign_key(
|
services()
|
||||||
user_id,
|
.users
|
||||||
key_id,
|
.sign_key(user_id, key_id, signature, sender_user)?;
|
||||||
signature,
|
|
||||||
sender_user,
|
|
||||||
&db.rooms,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(upload_signatures::v3::Response {
|
Ok(upload_signatures::v3::Response {
|
||||||
failures: BTreeMap::new(), // TODO: integrate
|
failures: BTreeMap::new(), // TODO: integrate
|
||||||
})
|
})
|
||||||
|
@ -230,15 +212,15 @@ pub async fn upload_signatures_route(
|
||||||
///
|
///
|
||||||
/// - TODO: left users
|
/// - TODO: left users
|
||||||
pub async fn get_key_changes_route(
|
pub async fn get_key_changes_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_key_changes::v3::Request>,
|
||||||
body: Ruma<get_key_changes::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_key_changes::v3::Response> {
|
) -> Result<get_key_changes::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let mut device_list_updates = HashSet::new();
|
let mut device_list_updates = HashSet::new();
|
||||||
|
|
||||||
device_list_updates.extend(
|
device_list_updates.extend(
|
||||||
db.users
|
services()
|
||||||
|
.users
|
||||||
.keys_changed(
|
.keys_changed(
|
||||||
sender_user.as_str(),
|
sender_user.as_str(),
|
||||||
body.from
|
body.from
|
||||||
|
@ -253,11 +235,17 @@ pub async fn get_key_changes_route(
|
||||||
.filter_map(|r| r.ok()),
|
.filter_map(|r| r.ok()),
|
||||||
);
|
);
|
||||||
|
|
||||||
for room_id in db.rooms.rooms_joined(sender_user).filter_map(|r| r.ok()) {
|
for room_id in services()
|
||||||
|
.rooms
|
||||||
|
.state_cache
|
||||||
|
.rooms_joined(sender_user)
|
||||||
|
.filter_map(|r| r.ok())
|
||||||
|
{
|
||||||
device_list_updates.extend(
|
device_list_updates.extend(
|
||||||
db.users
|
services()
|
||||||
|
.users
|
||||||
.keys_changed(
|
.keys_changed(
|
||||||
&room_id.to_string(),
|
room_id.as_ref(),
|
||||||
body.from.parse().map_err(|_| {
|
body.from.parse().map_err(|_| {
|
||||||
Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`.")
|
Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`.")
|
||||||
})?,
|
})?,
|
||||||
|
@ -276,9 +264,8 @@ pub async fn get_key_changes_route(
|
||||||
|
|
||||||
pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
||||||
sender_user: Option<&UserId>,
|
sender_user: Option<&UserId>,
|
||||||
device_keys_input: &BTreeMap<Box<UserId>, Vec<Box<DeviceId>>>,
|
device_keys_input: &BTreeMap<OwnedUserId, Vec<OwnedDeviceId>>,
|
||||||
allowed_signatures: F,
|
allowed_signatures: F,
|
||||||
db: &Database,
|
|
||||||
) -> Result<get_keys::v3::Response> {
|
) -> Result<get_keys::v3::Response> {
|
||||||
let mut master_keys = BTreeMap::new();
|
let mut master_keys = BTreeMap::new();
|
||||||
let mut self_signing_keys = BTreeMap::new();
|
let mut self_signing_keys = BTreeMap::new();
|
||||||
|
@ -288,9 +275,9 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
||||||
let mut get_over_federation = HashMap::new();
|
let mut get_over_federation = HashMap::new();
|
||||||
|
|
||||||
for (user_id, device_ids) in device_keys_input {
|
for (user_id, device_ids) in device_keys_input {
|
||||||
let user_id: &UserId = &**user_id;
|
let user_id: &UserId = user_id;
|
||||||
|
|
||||||
if user_id.server_name() != db.globals.server_name() {
|
if user_id.server_name() != services().globals.server_name() {
|
||||||
get_over_federation
|
get_over_federation
|
||||||
.entry(user_id.server_name())
|
.entry(user_id.server_name())
|
||||||
.or_insert_with(Vec::new)
|
.or_insert_with(Vec::new)
|
||||||
|
@ -300,10 +287,10 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
||||||
|
|
||||||
if device_ids.is_empty() {
|
if device_ids.is_empty() {
|
||||||
let mut container = BTreeMap::new();
|
let mut container = BTreeMap::new();
|
||||||
for device_id in db.users.all_device_ids(user_id) {
|
for device_id in services().users.all_device_ids(user_id) {
|
||||||
let device_id = device_id?;
|
let device_id = device_id?;
|
||||||
if let Some(mut keys) = db.users.get_device_keys(user_id, &device_id)? {
|
if let Some(mut keys) = services().users.get_device_keys(user_id, &device_id)? {
|
||||||
let metadata = db
|
let metadata = services()
|
||||||
.users
|
.users
|
||||||
.get_device_metadata(user_id, &device_id)?
|
.get_device_metadata(user_id, &device_id)?
|
||||||
.ok_or_else(|| {
|
.ok_or_else(|| {
|
||||||
|
@ -319,13 +306,14 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
||||||
} else {
|
} else {
|
||||||
for device_id in device_ids {
|
for device_id in device_ids {
|
||||||
let mut container = BTreeMap::new();
|
let mut container = BTreeMap::new();
|
||||||
if let Some(mut keys) = db.users.get_device_keys(user_id, device_id)? {
|
if let Some(mut keys) = services().users.get_device_keys(user_id, device_id)? {
|
||||||
let metadata = db.users.get_device_metadata(user_id, device_id)?.ok_or(
|
let metadata = services()
|
||||||
Error::BadRequest(
|
.users
|
||||||
|
.get_device_metadata(user_id, device_id)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
ErrorKind::InvalidParam,
|
ErrorKind::InvalidParam,
|
||||||
"Tried to get keys for nonexistent device.",
|
"Tried to get keys for nonexistent device.",
|
||||||
),
|
))?;
|
||||||
)?;
|
|
||||||
|
|
||||||
add_unsigned_device_display_name(&mut keys, metadata)
|
add_unsigned_device_display_name(&mut keys, metadata)
|
||||||
.map_err(|_| Error::bad_database("invalid device keys in database"))?;
|
.map_err(|_| Error::bad_database("invalid device keys in database"))?;
|
||||||
|
@ -335,17 +323,20 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(master_key) = db.users.get_master_key(user_id, &allowed_signatures)? {
|
if let Some(master_key) = services()
|
||||||
|
.users
|
||||||
|
.get_master_key(user_id, &allowed_signatures)?
|
||||||
|
{
|
||||||
master_keys.insert(user_id.to_owned(), master_key);
|
master_keys.insert(user_id.to_owned(), master_key);
|
||||||
}
|
}
|
||||||
if let Some(self_signing_key) = db
|
if let Some(self_signing_key) = services()
|
||||||
.users
|
.users
|
||||||
.get_self_signing_key(user_id, &allowed_signatures)?
|
.get_self_signing_key(user_id, &allowed_signatures)?
|
||||||
{
|
{
|
||||||
self_signing_keys.insert(user_id.to_owned(), self_signing_key);
|
self_signing_keys.insert(user_id.to_owned(), self_signing_key);
|
||||||
}
|
}
|
||||||
if Some(user_id) == sender_user {
|
if Some(user_id) == sender_user {
|
||||||
if let Some(user_signing_key) = db.users.get_user_signing_key(user_id)? {
|
if let Some(user_signing_key) = services().users.get_user_signing_key(user_id)? {
|
||||||
user_signing_keys.insert(user_id.to_owned(), user_signing_key);
|
user_signing_keys.insert(user_id.to_owned(), user_signing_key);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -362,9 +353,9 @@ pub(crate) async fn get_keys_helper<F: Fn(&UserId) -> bool>(
|
||||||
}
|
}
|
||||||
(
|
(
|
||||||
server,
|
server,
|
||||||
db.sending
|
services()
|
||||||
|
.sending
|
||||||
.send_federation_request(
|
.send_federation_request(
|
||||||
&db.globals,
|
|
||||||
server,
|
server,
|
||||||
federation::keys::get_keys::v1::Request {
|
federation::keys::get_keys::v1::Request {
|
||||||
device_keys: device_keys_input_fed,
|
device_keys: device_keys_input_fed,
|
||||||
|
@ -416,15 +407,14 @@ fn add_unsigned_device_display_name(
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) async fn claim_keys_helper(
|
pub(crate) async fn claim_keys_helper(
|
||||||
one_time_keys_input: &BTreeMap<Box<UserId>, BTreeMap<Box<DeviceId>, DeviceKeyAlgorithm>>,
|
one_time_keys_input: &BTreeMap<OwnedUserId, BTreeMap<OwnedDeviceId, DeviceKeyAlgorithm>>,
|
||||||
db: &Database,
|
|
||||||
) -> Result<claim_keys::v3::Response> {
|
) -> Result<claim_keys::v3::Response> {
|
||||||
let mut one_time_keys = BTreeMap::new();
|
let mut one_time_keys = BTreeMap::new();
|
||||||
|
|
||||||
let mut get_over_federation = BTreeMap::new();
|
let mut get_over_federation = BTreeMap::new();
|
||||||
|
|
||||||
for (user_id, map) in one_time_keys_input {
|
for (user_id, map) in one_time_keys_input {
|
||||||
if user_id.server_name() != db.globals.server_name() {
|
if user_id.server_name() != services().globals.server_name() {
|
||||||
get_over_federation
|
get_over_federation
|
||||||
.entry(user_id.server_name())
|
.entry(user_id.server_name())
|
||||||
.or_insert_with(Vec::new)
|
.or_insert_with(Vec::new)
|
||||||
|
@ -434,8 +424,9 @@ pub(crate) async fn claim_keys_helper(
|
||||||
let mut container = BTreeMap::new();
|
let mut container = BTreeMap::new();
|
||||||
for (device_id, key_algorithm) in map {
|
for (device_id, key_algorithm) in map {
|
||||||
if let Some(one_time_keys) =
|
if let Some(one_time_keys) =
|
||||||
db.users
|
services()
|
||||||
.take_one_time_key(user_id, device_id, key_algorithm, &db.globals)?
|
.users
|
||||||
|
.take_one_time_key(user_id, device_id, key_algorithm)?
|
||||||
{
|
{
|
||||||
let mut c = BTreeMap::new();
|
let mut c = BTreeMap::new();
|
||||||
c.insert(one_time_keys.0, one_time_keys.1);
|
c.insert(one_time_keys.0, one_time_keys.1);
|
||||||
|
@ -447,28 +438,38 @@ pub(crate) async fn claim_keys_helper(
|
||||||
|
|
||||||
let mut failures = BTreeMap::new();
|
let mut failures = BTreeMap::new();
|
||||||
|
|
||||||
for (server, vec) in get_over_federation {
|
let mut futures: FuturesUnordered<_> = get_over_federation
|
||||||
|
.into_iter()
|
||||||
|
.map(|(server, vec)| async move {
|
||||||
let mut one_time_keys_input_fed = BTreeMap::new();
|
let mut one_time_keys_input_fed = BTreeMap::new();
|
||||||
for (user_id, keys) in vec {
|
for (user_id, keys) in vec {
|
||||||
one_time_keys_input_fed.insert(user_id.clone(), keys.clone());
|
one_time_keys_input_fed.insert(user_id.clone(), keys.clone());
|
||||||
}
|
}
|
||||||
// Ignore failures
|
(
|
||||||
if let Ok(keys) = db
|
server,
|
||||||
|
services()
|
||||||
.sending
|
.sending
|
||||||
.send_federation_request(
|
.send_federation_request(
|
||||||
&db.globals,
|
|
||||||
server,
|
server,
|
||||||
federation::keys::claim_keys::v1::Request {
|
federation::keys::claim_keys::v1::Request {
|
||||||
one_time_keys: one_time_keys_input_fed,
|
one_time_keys: one_time_keys_input_fed,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
.await
|
.await,
|
||||||
{
|
)
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
while let Some((server, response)) = futures.next().await {
|
||||||
|
match response {
|
||||||
|
Ok(keys) => {
|
||||||
one_time_keys.extend(keys.one_time_keys);
|
one_time_keys.extend(keys.one_time_keys);
|
||||||
} else {
|
}
|
||||||
|
Err(_e) => {
|
||||||
failures.insert(server.to_string(), json!({}));
|
failures.insert(server.to_string(), json!({}));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
Ok(claim_keys::v3::Response {
|
Ok(claim_keys::v3::Response {
|
||||||
failures,
|
failures,
|
|
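The last hunk above turns the sequential per-server `/claim` loop into a `FuturesUnordered` fan-out, so federation requests to different servers run concurrently and are harvested in completion order. As a rough sketch of just that pattern (not Conduit's actual sending service), the following standalone example uses a hypothetical `fake_fetch` in place of `services().sending.send_federation_request(...)` and assumes the `futures-util` and `tokio` crates are available.

```rust
use futures_util::{stream::FuturesUnordered, StreamExt};
use std::collections::BTreeMap;

// Stand-in for a federation request; fails for one server to exercise both paths.
async fn fake_fetch(server: &str) -> Result<usize, String> {
    if server == "bad.example" {
        Err("unreachable".to_owned())
    } else {
        Ok(server.len())
    }
}

#[tokio::main]
async fn main() {
    let servers = ["matrix.org", "bad.example", "conduit.rs"];

    // One future per server; all of them make progress concurrently.
    let mut futures: FuturesUnordered<_> = servers
        .iter()
        .map(|&server| async move { (server, fake_fetch(server).await) })
        .collect();

    let mut ok = BTreeMap::new();
    let mut failures = BTreeMap::new();

    // Results arrive in completion order, not submission order.
    while let Some((server, response)) = futures.next().await {
        match response {
            Ok(value) => {
                ok.insert(server.to_owned(), value);
            }
            Err(e) => {
                failures.insert(server.to_owned(), e);
            }
        }
    }

    println!("ok: {ok:?}, failed: {failures:?}");
}
```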
@@ -1,7 +1,4 @@
-use crate::{
-    database::{media::FileMeta, DatabaseGuard},
-    utils, Error, Result, Ruma,
-};
+use crate::{service::media::FileMeta, services, utils, Error, Result, Ruma};
 use ruma::api::client::{
     error::ErrorKind,
     media::{
@@ -16,11 +13,10 @@ const MXC_LENGTH: usize = 32;
 ///
 /// Returns max upload size.
 pub async fn get_media_config_route(
-    db: DatabaseGuard,
     _body: Ruma<get_media_config::v3::Request>,
 ) -> Result<get_media_config::v3::Response> {
     Ok(get_media_config::v3::Response {
-        upload_size: db.globals.max_request_size().into(),
+        upload_size: services().globals.max_request_size().into(),
     })
 }
@@ -31,31 +27,27 @@ pub async fn get_media_config_route(
 /// - Some metadata will be saved in the database
 /// - Media will be saved in the media/ directory
 pub async fn create_content_route(
-    db: DatabaseGuard,
-    body: Ruma<create_content::v3::IncomingRequest>,
+    body: Ruma<create_content::v3::Request>,
 ) -> Result<create_content::v3::Response> {
     let mxc = format!(
         "mxc://{}/{}",
-        db.globals.server_name(),
+        services().globals.server_name(),
         utils::random_string(MXC_LENGTH)
     );
 
-    db.media
+    services()
+        .media
         .create(
             mxc.clone(),
-            &db.globals,
-            &body
-                .filename
+            body.filename
                 .as_ref()
                 .map(|filename| "inline; filename=".to_owned() + filename)
                 .as_deref(),
-            &body.content_type.as_deref(),
+            body.content_type.as_deref(),
             &body.file,
         )
         .await?;
 
-    db.flush()?;
-
     Ok(create_content::v3::Response {
         content_uri: mxc.try_into().expect("Invalid mxc:// URI"),
         blurhash: None,
@@ -63,30 +55,28 @@ pub async fn create_content_route(
 }
 
 pub async fn get_remote_content(
-    db: &DatabaseGuard,
     mxc: &str,
     server_name: &ruma::ServerName,
-    media_id: &str,
+    media_id: String,
 ) -> Result<get_content::v3::Response, Error> {
-    let content_response = db
+    let content_response = services()
         .sending
         .send_federation_request(
-            &db.globals,
             server_name,
             get_content::v3::Request {
                 allow_remote: false,
-                server_name,
+                server_name: server_name.to_owned(),
                 media_id,
             },
         )
         .await?;
 
-    db.media
+    services()
+        .media
         .create(
-            mxc.to_string(),
-            &db.globals,
-            &content_response.content_disposition.as_deref(),
-            &content_response.content_type.as_deref(),
+            mxc.to_owned(),
+            content_response.content_disposition.as_deref(),
+            content_response.content_type.as_deref(),
             &content_response.file,
         )
         .await?;
@@ -100,8 +90,7 @@ pub async fn get_remote_content(
 ///
 /// - Only allows federation if `allow_remote` is true
 pub async fn get_content_route(
-    db: DatabaseGuard,
-    body: Ruma<get_content::v3::IncomingRequest>,
+    body: Ruma<get_content::v3::Request>,
 ) -> Result<get_content::v3::Response> {
     let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
 
@@ -109,16 +98,17 @@ pub async fn get_content_route(
         content_disposition,
         content_type,
         file,
-    }) = db.media.get(&db.globals, &mxc).await?
+    }) = services().media.get(mxc.clone()).await?
     {
         Ok(get_content::v3::Response {
             file,
             content_type,
             content_disposition,
+            cross_origin_resource_policy: Some("cross-origin".to_owned()),
         })
-    } else if &*body.server_name != db.globals.server_name() && body.allow_remote {
+    } else if &*body.server_name != services().globals.server_name() && body.allow_remote {
         let remote_content_response =
-            get_remote_content(&db, &mxc, &body.server_name, &body.media_id).await?;
+            get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?;
         Ok(remote_content_response)
     } else {
         Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
@@ -131,8 +121,7 @@ pub async fn get_content_route(
 ///
 /// - Only allows federation if `allow_remote` is true
 pub async fn get_content_as_filename_route(
-    db: DatabaseGuard,
-    body: Ruma<get_content_as_filename::v3::IncomingRequest>,
+    body: Ruma<get_content_as_filename::v3::Request>,
 ) -> Result<get_content_as_filename::v3::Response> {
     let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
 
@@ -140,21 +129,23 @@ pub async fn get_content_as_filename_route(
         content_disposition: _,
         content_type,
         file,
-    }) = db.media.get(&db.globals, &mxc).await?
+    }) = services().media.get(mxc.clone()).await?
     {
         Ok(get_content_as_filename::v3::Response {
             file,
             content_type,
             content_disposition: Some(format!("inline; filename={}", body.filename)),
+            cross_origin_resource_policy: Some("cross-origin".to_owned()),
         })
-    } else if &*body.server_name != db.globals.server_name() && body.allow_remote {
+    } else if &*body.server_name != services().globals.server_name() && body.allow_remote {
         let remote_content_response =
-            get_remote_content(&db, &mxc, &body.server_name, &body.media_id).await?;
+            get_remote_content(&mxc, &body.server_name, body.media_id.clone()).await?;
 
         Ok(get_content_as_filename::v3::Response {
             content_disposition: Some(format!("inline: filename={}", body.filename)),
             content_type: remote_content_response.content_type,
             file: remote_content_response.file,
+            cross_origin_resource_policy: Some("cross-origin".to_owned()),
         })
     } else {
         Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
@@ -167,18 +158,16 @@ pub async fn get_content_as_filename_route(
 ///
 /// - Only allows federation if `allow_remote` is true
 pub async fn get_content_thumbnail_route(
-    db: DatabaseGuard,
-    body: Ruma<get_content_thumbnail::v3::IncomingRequest>,
+    body: Ruma<get_content_thumbnail::v3::Request>,
 ) -> Result<get_content_thumbnail::v3::Response> {
     let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
 
     if let Some(FileMeta {
         content_type, file, ..
-    }) = db
+    }) = services()
         .media
         .get_thumbnail(
-            &mxc,
-            &db.globals,
+            mxc.clone(),
             body.width
                 .try_into()
                 .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
@@ -188,30 +177,33 @@ pub async fn get_content_thumbnail_route(
         )
         .await?
     {
-        Ok(get_content_thumbnail::v3::Response { file, content_type })
-    } else if &*body.server_name != db.globals.server_name() && body.allow_remote {
-        let get_thumbnail_response = db
+        Ok(get_content_thumbnail::v3::Response {
+            file,
+            content_type,
+            cross_origin_resource_policy: Some("cross-origin".to_owned()),
+        })
+    } else if &*body.server_name != services().globals.server_name() && body.allow_remote {
+        let get_thumbnail_response = services()
             .sending
             .send_federation_request(
-                &db.globals,
                 &body.server_name,
                 get_content_thumbnail::v3::Request {
                     allow_remote: false,
                     height: body.height,
                     width: body.width,
                     method: body.method.clone(),
-                    server_name: &body.server_name,
-                    media_id: &body.media_id,
+                    server_name: body.server_name.clone(),
+                    media_id: body.media_id.clone(),
                 },
             )
            .await?;
 
-        db.media
+        services()
+            .media
             .upload_thumbnail(
                 mxc,
-                &db.globals,
-                &None,
-                &get_thumbnail_response.content_type,
+                None,
+                get_thumbnail_response.content_type.as_deref(),
                 body.width.try_into().expect("all UInts are valid u32s"),
                 body.height.try_into().expect("all UInts are valid u32s"),
                 &get_thumbnail_response.file,
(File diff suppressed because it is too large.)
@@ -1,4 +1,4 @@
-use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, Error, Result, Ruma};
+use crate::{service::pdu::PduBuilder, services, utils, Error, Result, Ruma};
 use ruma::{
     api::client::{
         error::ErrorKind,
@@ -19,14 +19,14 @@ use std::{
 /// - The only requirement for the content is that it has to be valid json
 /// - Tries to send the event into the room, auth rules will determine if it is allowed
 pub async fn send_message_event_route(
-    db: DatabaseGuard,
-    body: Ruma<send_message_event::v3::IncomingRequest>,
+    body: Ruma<send_message_event::v3::Request>,
 ) -> Result<send_message_event::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
     let sender_device = body.sender_device.as_deref();
 
     let mutex_state = Arc::clone(
-        db.globals
+        services()
+            .globals
             .roomid_mutex_state
             .write()
             .unwrap()
@@ -37,7 +37,7 @@ pub async fn send_message_event_route(
 
     // Forbid m.room.encrypted if encryption is disabled
     if RoomEventType::RoomEncrypted == body.event_type.to_string().into()
-        && !db.globals.allow_encryption()
+        && !services().globals.allow_encryption()
     {
         return Err(Error::BadRequest(
             ErrorKind::Forbidden,
@@ -47,7 +47,8 @@ pub async fn send_message_event_route(
 
     // Check if this is a new transaction id
     if let Some(response) =
-        db.transaction_ids
+        services()
+            .transaction_ids
             .existing_txnid(sender_user, sender_device, &body.txn_id)?
     {
         // The client might have sent a txnid of the /sendToDevice endpoint
@@ -69,7 +70,7 @@ pub async fn send_message_event_route(
     let mut unsigned = BTreeMap::new();
     unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into());
 
-    let event_id = db.rooms.build_and_append_pdu(
+    let event_id = services().rooms.timeline.build_and_append_pdu(
         PduBuilder {
             event_type: body.event_type.to_string().into(),
             content: serde_json::from_str(body.body.body.json().get())
@@ -80,11 +81,10 @@ pub async fn send_message_event_route(
         },
         sender_user,
         &body.room_id,
-        &db,
         &state_lock,
     )?;
 
-    db.transaction_ids.add_txnid(
+    services().transaction_ids.add_txnid(
         sender_user,
         sender_device,
         &body.txn_id,
@@ -93,8 +93,6 @@ pub async fn send_message_event_route(
 
     drop(state_lock);
 
-    db.flush()?;
-
     Ok(send_message_event::v3::Response::new(
         (*event_id).to_owned(),
     ))
@@ -107,13 +105,16 @@ pub async fn send_message_event_route(
 /// - Only works if the user is joined (TODO: always allow, but only show events where the user was
 /// joined, depending on history_visibility)
 pub async fn get_message_events_route(
-    db: DatabaseGuard,
-    body: Ruma<get_message_events::v3::IncomingRequest>,
+    body: Ruma<get_message_events::v3::Request>,
 ) -> Result<get_message_events::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
     let sender_device = body.sender_device.as_ref().expect("user is authenticated");
 
-    if !db.rooms.is_joined(sender_user, &body.room_id)? {
+    if !services()
+        .rooms
+        .state_cache
+        .is_joined(sender_user, &body.room_id)?
+    {
         return Err(Error::BadRequest(
             ErrorKind::Forbidden,
             "You don't have permission to view this room.",
@@ -126,15 +127,19 @@ pub async fn get_message_events_route(
             .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from` value."))?,
 
         None => match body.dir {
-            get_message_events::v3::Direction::Forward => 0,
-            get_message_events::v3::Direction::Backward => u64::MAX,
+            ruma::api::client::Direction::Forward => 0,
+            ruma::api::client::Direction::Backward => u64::MAX,
         },
     };
 
     let to = body.to.as_ref().map(|t| t.parse());
 
-    db.rooms
-        .lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)?;
+    services().rooms.lazy_loading.lazy_load_confirm_delivery(
+        sender_user,
+        sender_device,
+        &body.room_id,
+        from,
+    )?;
 
     // Use limit or else 10
     let limit = body.limit.try_into().map_or(10_usize, |l: u32| l as usize);
@@ -146,14 +151,17 @@ pub async fn get_message_events_route(
     let mut lazy_loaded = HashSet::new();
 
     match body.dir {
-        get_message_events::v3::Direction::Forward => {
-            let events_after: Vec<_> = db
+        ruma::api::client::Direction::Forward => {
+            let events_after: Vec<_> = services()
                 .rooms
+                .timeline
                 .pdus_after(sender_user, &body.room_id, from)?
                 .take(limit)
                 .filter_map(|r| r.ok()) // Filter out buggy events
                 .filter_map(|(pdu_id, pdu)| {
-                    db.rooms
+                    services()
+                        .rooms
+                        .timeline
                         .pdu_count(&pdu_id)
                         .map(|pdu_count| (pdu_count, pdu))
                         .ok()
@@ -162,7 +170,10 @@ pub async fn get_message_events_route(
                 .collect();
 
             for (_, event) in &events_after {
-                if !db.rooms.lazy_load_was_sent_before(
+                /* TODO: Remove this when these are resolved:
+                 * https://github.com/vector-im/element-android/issues/3417
+                 * https://github.com/vector-im/element-web/issues/21034
+                if !services().rooms.lazy_loading.lazy_load_was_sent_before(
                     sender_user,
                     sender_device,
                     &body.room_id,
@@ -170,6 +181,8 @@ pub async fn get_message_events_route(
                 )? {
                     lazy_loaded.insert(event.sender.clone());
                 }
+                */
+                lazy_loaded.insert(event.sender.clone());
             }
 
             next_token = events_after.last().map(|(count, _)| count).copied();
@@ -183,14 +196,17 @@ pub async fn get_message_events_route(
             resp.end = next_token.map(|count| count.to_string());
             resp.chunk = events_after;
         }
-        get_message_events::v3::Direction::Backward => {
-            let events_before: Vec<_> = db
+        ruma::api::client::Direction::Backward => {
+            let events_before: Vec<_> = services()
                 .rooms
+                .timeline
                 .pdus_until(sender_user, &body.room_id, from)?
                 .take(limit)
                 .filter_map(|r| r.ok()) // Filter out buggy events
                 .filter_map(|(pdu_id, pdu)| {
-                    db.rooms
+                    services()
+                        .rooms
+                        .timeline
                         .pdu_count(&pdu_id)
                         .map(|pdu_count| (pdu_count, pdu))
                         .ok()
@@ -199,7 +215,10 @@ pub async fn get_message_events_route(
                 .collect();
 
             for (_, event) in &events_before {
-                if !db.rooms.lazy_load_was_sent_before(
+                /* TODO: Remove this when these are resolved:
+                 * https://github.com/vector-im/element-android/issues/3417
+                 * https://github.com/vector-im/element-web/issues/21034
+                if !services().rooms.lazy_loading.lazy_load_was_sent_before(
                     sender_user,
                     sender_device,
                     &body.room_id,
@@ -207,6 +226,8 @@ pub async fn get_message_events_route(
                 )? {
                     lazy_loaded.insert(event.sender.clone());
                 }
+                */
+                lazy_loaded.insert(event.sender.clone());
            }
 
             next_token = events_before.last().map(|(count, _)| count).copied();
@@ -224,16 +245,19 @@ pub async fn get_message_events_route(
 
     resp.state = Vec::new();
     for ll_id in &lazy_loaded {
-        if let Some(member_event) =
-            db.rooms
-                .room_state_get(&body.room_id, &StateEventType::RoomMember, ll_id.as_str())?
-        {
+        if let Some(member_event) = services().rooms.state_accessor.room_state_get(
+            &body.room_id,
+            &StateEventType::RoomMember,
+            ll_id.as_str(),
+        )? {
             resp.state.push(member_event.to_state_event());
         }
     }
 
+    // TODO: enable again when we are sure clients can handle it
+    /*
     if let Some(next_token) = next_token {
-        db.rooms.lazy_load_mark_sent(
+        services().rooms.lazy_loading.lazy_load_mark_sent(
             sender_user,
             sender_device,
             &body.room_id,
@@ -241,6 +265,7 @@ pub async fn get_message_events_route(
             next_token,
         );
     }
+    */
 
     Ok(resp)
 }
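The `existing_txnid` / `add_txnid` pair in `send_message_event_route` above is a replay guard: if a client retries a request with the same transaction id, the previously stored event id is returned instead of appending a new PDU. Purely as an illustration of that idea (an in-memory map standing in for Conduit's database-backed transaction-id table, with hypothetical names), a minimal sketch:

```rust
use std::collections::HashMap;

#[derive(Default)]
struct TransactionIds {
    // (user_id, device_id, txn_id) -> event_id
    seen: HashMap<(String, String, String), String>,
}

impl TransactionIds {
    fn existing_txnid(&self, user: &str, device: &str, txn_id: &str) -> Option<&String> {
        self.seen
            .get(&(user.to_owned(), device.to_owned(), txn_id.to_owned()))
    }

    fn add_txnid(&mut self, user: &str, device: &str, txn_id: &str, event_id: &str) {
        self.seen.insert(
            (user.to_owned(), device.to_owned(), txn_id.to_owned()),
            event_id.to_owned(),
        );
    }
}

fn main() {
    let mut txns = TransactionIds::default();
    let (user, device, txn) = ("@alice:example.org", "DEVICEID", "txn-1");

    // First attempt: no record yet, so a new event is created and remembered.
    let event_id = match txns.existing_txnid(user, device, txn) {
        Some(existing) => existing.clone(),
        None => {
            let new_id = "$event-1".to_owned(); // stands in for build_and_append_pdu
            txns.add_txnid(user, device, txn, &new_id);
            new_id
        }
    };

    // Retried request with the same txn id returns the same event id.
    assert_eq!(txns.existing_txnid(user, device, txn), Some(&event_id));
}
```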
@@ -63,6 +63,6 @@ pub use user_directory::*;
 pub use voip::*;
 
 pub const DEVICE_ID_LENGTH: usize = 10;
-pub const TOKEN_LENGTH: usize = 256;
-pub const SESSION_ID_LENGTH: usize = 256;
+pub const TOKEN_LENGTH: usize = 32;
+pub const SESSION_ID_LENGTH: usize = 32;
 pub const AUTO_GEN_PASSWORD_LENGTH: usize = 15;
@@ -1,4 +1,4 @@
-use crate::{database::DatabaseGuard, utils, Result, Ruma};
+use crate::{services, utils, Result, Ruma};
 use ruma::api::client::presence::{get_presence, set_presence};
 use std::time::Duration;
 
@@ -6,22 +6,21 @@ use std::time::Duration;
 ///
 /// Sets the presence state of the sender user.
 pub async fn set_presence_route(
-    db: DatabaseGuard,
-    body: Ruma<set_presence::v3::IncomingRequest>,
+    body: Ruma<set_presence::v3::Request>,
 ) -> Result<set_presence::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
 
-    for room_id in db.rooms.rooms_joined(sender_user) {
+    for room_id in services().rooms.state_cache.rooms_joined(sender_user) {
         let room_id = room_id?;
 
-        db.rooms.edus.update_presence(
+        services().rooms.edus.presence.update_presence(
             sender_user,
             &room_id,
             ruma::events::presence::PresenceEvent {
                 content: ruma::events::presence::PresenceEventContent {
-                    avatar_url: db.users.avatar_url(sender_user)?,
+                    avatar_url: services().users.avatar_url(sender_user)?,
                     currently_active: None,
-                    displayname: db.users.displayname(sender_user)?,
+                    displayname: services().users.displayname(sender_user)?,
                     last_active_ago: Some(
                         utils::millis_since_unix_epoch()
                             .try_into()
@@ -32,12 +31,9 @@ pub async fn set_presence_route(
                 },
                 sender: sender_user.clone(),
             },
-            &db.globals,
         )?;
     }
 
-    db.flush()?;
-
     Ok(set_presence::v3::Response {})
 }
@@ -47,22 +43,23 @@ pub async fn set_presence_route(
 ///
 /// - Only works if you share a room with the user
 pub async fn get_presence_route(
-    db: DatabaseGuard,
-    body: Ruma<get_presence::v3::IncomingRequest>,
+    body: Ruma<get_presence::v3::Request>,
 ) -> Result<get_presence::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
 
     let mut presence_event = None;
 
-    for room_id in db
+    for room_id in services()
         .rooms
+        .user
         .get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])?
     {
         let room_id = room_id?;
 
-        if let Some(presence) = db
+        if let Some(presence) = services()
             .rooms
             .edus
+            .presence
             .get_last_presence_event(sender_user, &room_id)?
         {
             presence_event = Some(presence);
@@ -1,4 +1,4 @@
-use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, Error, Result, Ruma};
+use crate::{service::pdu::PduBuilder, services, utils, Error, Result, Ruma};
 use ruma::{
     api::{
         client::{
@@ -20,17 +20,18 @@ use std::sync::Arc;
 ///
 /// - Also makes sure other users receive the update using presence EDUs
 pub async fn set_displayname_route(
-    db: DatabaseGuard,
-    body: Ruma<set_display_name::v3::IncomingRequest>,
+    body: Ruma<set_display_name::v3::Request>,
 ) -> Result<set_display_name::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
 
-    db.users
+    services()
+        .users
         .set_displayname(sender_user, body.displayname.clone())?;
 
     // Send a new membership event and presence update into all joined rooms
-    let all_rooms_joined: Vec<_> = db
+    let all_rooms_joined: Vec<_> = services()
         .rooms
+        .state_cache
         .rooms_joined(sender_user)
         .filter_map(|r| r.ok())
         .map(|room_id| {
@@ -40,7 +41,9 @@ pub async fn set_displayname_route(
                     content: to_raw_value(&RoomMemberEventContent {
                         displayname: body.displayname.clone(),
                         ..serde_json::from_str(
-                            db.rooms
+                            services()
+                                .rooms
+                                .state_accessor
                                 .room_state_get(
                                     &room_id,
                                     &StateEventType::RoomMember,
@@ -70,7 +73,8 @@ pub async fn set_displayname_route(
 
     for (pdu_builder, room_id) in all_rooms_joined {
         let mutex_state = Arc::clone(
-            db.globals
+            services()
+                .globals
                 .roomid_mutex_state
                 .write()
                 .unwrap()
@@ -79,19 +83,22 @@ pub async fn set_displayname_route(
         );
         let state_lock = mutex_state.lock().await;
 
-        let _ = db
-            .rooms
-            .build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock);
+        let _ = services().rooms.timeline.build_and_append_pdu(
+            pdu_builder,
+            sender_user,
+            &room_id,
+            &state_lock,
+        );
 
         // Presence update
-        db.rooms.edus.update_presence(
+        services().rooms.edus.presence.update_presence(
             sender_user,
             &room_id,
             ruma::events::presence::PresenceEvent {
                 content: ruma::events::presence::PresenceEventContent {
-                    avatar_url: db.users.avatar_url(sender_user)?,
+                    avatar_url: services().users.avatar_url(sender_user)?,
                     currently_active: None,
-                    displayname: db.users.displayname(sender_user)?,
+                    displayname: services().users.displayname(sender_user)?,
                     last_active_ago: Some(
                         utils::millis_since_unix_epoch()
                             .try_into()
@@ -102,12 +109,9 @@ pub async fn set_displayname_route(
                 },
                 sender: sender_user.clone(),
             },
-            &db.globals,
         )?;
     }
 
-    db.flush()?;
-
     Ok(set_display_name::v3::Response {})
 }
@@ -117,18 +121,16 @@ pub async fn set_displayname_route(
 ///
 /// - If user is on another server: Fetches displayname over federation
 pub async fn get_displayname_route(
-    db: DatabaseGuard,
-    body: Ruma<get_display_name::v3::IncomingRequest>,
+    body: Ruma<get_display_name::v3::Request>,
 ) -> Result<get_display_name::v3::Response> {
-    if body.user_id.server_name() != db.globals.server_name() {
-        let response = db
+    if body.user_id.server_name() != services().globals.server_name() {
+        let response = services()
            .sending
            .send_federation_request(
-                &db.globals,
                body.user_id.server_name(),
                federation::query::get_profile_information::v1::Request {
-                    user_id: &body.user_id,
-                    field: Some(&ProfileField::DisplayName),
+                    user_id: body.user_id.clone(),
+                    field: Some(ProfileField::DisplayName),
                },
            )
            .await?;
@@ -139,7 +141,7 @@ pub async fn get_displayname_route(
     }
 
     Ok(get_display_name::v3::Response {
-        displayname: db.users.displayname(&body.user_id)?,
+        displayname: services().users.displayname(&body.user_id)?,
     })
 }
@@ -149,19 +151,22 @@ pub async fn get_displayname_route(
 ///
 /// - Also makes sure other users receive the update using presence EDUs
 pub async fn set_avatar_url_route(
-    db: DatabaseGuard,
-    body: Ruma<set_avatar_url::v3::IncomingRequest>,
+    body: Ruma<set_avatar_url::v3::Request>,
 ) -> Result<set_avatar_url::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
 
-    db.users
+    services()
+        .users
         .set_avatar_url(sender_user, body.avatar_url.clone())?;
 
-    db.users.set_blurhash(sender_user, body.blurhash.clone())?;
+    services()
+        .users
+        .set_blurhash(sender_user, body.blurhash.clone())?;
 
     // Send a new membership event and presence update into all joined rooms
-    let all_joined_rooms: Vec<_> = db
+    let all_joined_rooms: Vec<_> = services()
         .rooms
+        .state_cache
         .rooms_joined(sender_user)
         .filter_map(|r| r.ok())
         .map(|room_id| {
@@ -171,7 +176,9 @@ pub async fn set_avatar_url_route(
                     content: to_raw_value(&RoomMemberEventContent {
                         avatar_url: body.avatar_url.clone(),
                         ..serde_json::from_str(
-                            db.rooms
+                            services()
+                                .rooms
+                                .state_accessor
                                 .room_state_get(
                                     &room_id,
                                     &StateEventType::RoomMember,
@@ -201,7 +208,8 @@ pub async fn set_avatar_url_route(
 
     for (pdu_builder, room_id) in all_joined_rooms {
         let mutex_state = Arc::clone(
-            db.globals
+            services()
+                .globals
                 .roomid_mutex_state
                 .write()
                 .unwrap()
@@ -210,19 +218,22 @@ pub async fn set_avatar_url_route(
         );
         let state_lock = mutex_state.lock().await;
 
-        let _ = db
-            .rooms
-            .build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock);
+        let _ = services().rooms.timeline.build_and_append_pdu(
+            pdu_builder,
+            sender_user,
+            &room_id,
+            &state_lock,
+        );
 
         // Presence update
-        db.rooms.edus.update_presence(
+        services().rooms.edus.presence.update_presence(
             sender_user,
             &room_id,
             ruma::events::presence::PresenceEvent {
                 content: ruma::events::presence::PresenceEventContent {
-                    avatar_url: db.users.avatar_url(sender_user)?,
+                    avatar_url: services().users.avatar_url(sender_user)?,
                     currently_active: None,
-                    displayname: db.users.displayname(sender_user)?,
+                    displayname: services().users.displayname(sender_user)?,
                     last_active_ago: Some(
                         utils::millis_since_unix_epoch()
                             .try_into()
@@ -233,12 +244,9 @@ pub async fn set_avatar_url_route(
                 },
                 sender: sender_user.clone(),
             },
-            &db.globals,
        )?;
     }
 
-    db.flush()?;
-
     Ok(set_avatar_url::v3::Response {})
 }
@@ -248,18 +256,16 @@ pub async fn set_avatar_url_route(
 ///
 /// - If user is on another server: Fetches avatar_url and blurhash over federation
 pub async fn get_avatar_url_route(
-    db: DatabaseGuard,
-    body: Ruma<get_avatar_url::v3::IncomingRequest>,
+    body: Ruma<get_avatar_url::v3::Request>,
 ) -> Result<get_avatar_url::v3::Response> {
-    if body.user_id.server_name() != db.globals.server_name() {
-        let response = db
+    if body.user_id.server_name() != services().globals.server_name() {
+        let response = services()
            .sending
            .send_federation_request(
-                &db.globals,
                body.user_id.server_name(),
                federation::query::get_profile_information::v1::Request {
-                    user_id: &body.user_id,
-                    field: Some(&ProfileField::AvatarUrl),
+                    user_id: body.user_id.clone(),
+                    field: Some(ProfileField::AvatarUrl),
                },
            )
            .await?;
@@ -271,8 +277,8 @@ pub async fn get_avatar_url_route(
     }
 
     Ok(get_avatar_url::v3::Response {
-        avatar_url: db.users.avatar_url(&body.user_id)?,
-        blurhash: db.users.blurhash(&body.user_id)?,
+        avatar_url: services().users.avatar_url(&body.user_id)?,
+        blurhash: services().users.blurhash(&body.user_id)?,
     })
 }
@@ -282,17 +288,15 @@ pub async fn get_avatar_url_route(
 ///
 /// - If user is on another server: Fetches profile over federation
|
/// - If user is on another server: Fetches profile over federation
|
||||||
pub async fn get_profile_route(
|
pub async fn get_profile_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_profile::v3::Request>,
|
||||||
body: Ruma<get_profile::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_profile::v3::Response> {
|
) -> Result<get_profile::v3::Response> {
|
||||||
if body.user_id.server_name() != db.globals.server_name() {
|
if body.user_id.server_name() != services().globals.server_name() {
|
||||||
let response = db
|
let response = services()
|
||||||
.sending
|
.sending
|
||||||
.send_federation_request(
|
.send_federation_request(
|
||||||
&db.globals,
|
|
||||||
body.user_id.server_name(),
|
body.user_id.server_name(),
|
||||||
federation::query::get_profile_information::v1::Request {
|
federation::query::get_profile_information::v1::Request {
|
||||||
user_id: &body.user_id,
|
user_id: body.user_id.clone(),
|
||||||
field: None,
|
field: None,
|
||||||
},
|
},
|
||||||
)
|
)
|
||||||
|
@ -305,7 +309,7 @@ pub async fn get_profile_route(
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
if !db.users.exists(&body.user_id)? {
|
if !services().users.exists(&body.user_id)? {
|
||||||
// Return 404 if this user doesn't exist
|
// Return 404 if this user doesn't exist
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::NotFound,
|
ErrorKind::NotFound,
|
||||||
|
@ -314,8 +318,8 @@ pub async fn get_profile_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(get_profile::v3::Response {
|
Ok(get_profile::v3::Response {
|
||||||
avatar_url: db.users.avatar_url(&body.user_id)?,
|
avatar_url: services().users.avatar_url(&body.user_id)?,
|
||||||
blurhash: db.users.blurhash(&body.user_id)?,
|
blurhash: services().users.blurhash(&body.user_id)?,
|
||||||
displayname: db.users.displayname(&body.user_id)?,
|
displayname: services().users.displayname(&body.user_id)?,
|
||||||
})
|
})
|
||||||
}
|
}
|
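The profile routes above all follow the same mechanical change: the per-request `db: DatabaseGuard` parameter disappears and handlers reach shared state through a process-wide `services()` accessor instead. A minimal sketch of that accessor pattern, assuming a once-initialized global container (the names `Services`, `SERVICES`, and `init` are illustrative stand-ins, not Conduit's actual API):

```rust
use std::sync::OnceLock;

struct Users;
impl Users {
    fn displayname(&self, _user_id: &str) -> Option<String> {
        Some("Alice".to_owned()) // placeholder lookup
    }
}

struct Services {
    users: Users,
}

static SERVICES: OnceLock<Services> = OnceLock::new();

/// Install the service container once at startup.
fn init() {
    SERVICES.set(Services { users: Users }).ok();
}

/// Global accessor used by handlers instead of a `db` parameter.
fn services() -> &'static Services {
    SERVICES.get().expect("services() called before init()")
}

fn main() {
    init();
    // A handler can now reach shared state without threading an argument through:
    println!("{:?}", services().users.displayname("@alice:example.org"));
}
```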
@@ -1,27 +1,26 @@
-use crate::{database::DatabaseGuard, Error, Result, Ruma};
+use crate::{services, Error, Result, Ruma};
 use ruma::{
     api::client::{
         error::ErrorKind,
         push::{
             delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled,
             get_pushrules_all, set_pusher, set_pushrule, set_pushrule_actions,
-            set_pushrule_enabled, RuleKind,
+            set_pushrule_enabled, RuleKind, RuleScope,
         },
     },
     events::{push_rules::PushRulesEvent, GlobalAccountDataEventType},
-    push::{ConditionalPushRuleInit, PatternedPushRuleInit, SimplePushRuleInit},
+    push::{ConditionalPushRuleInit, NewPushRule, PatternedPushRuleInit, SimplePushRuleInit},
 };

 /// # `GET /_matrix/client/r0/pushrules`
 ///
 /// Retrieves the push rules event for this user.
 pub async fn get_pushrules_all_route(
-    db: DatabaseGuard,
     body: Ruma<get_pushrules_all::v3::Request>,
 ) -> Result<get_pushrules_all::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");

-    let event: PushRulesEvent = db
+    let event = services()
         .account_data
         .get(
             None,
@@ -33,8 +32,12 @@ pub async fn get_pushrules_all_route(
             "PushRules event not found.",
         ))?;

+    let account_data = serde_json::from_str::<PushRulesEvent>(event.get())
+        .map_err(|_| Error::bad_database("Invalid account data event in db."))?
+        .content;
+
     Ok(get_pushrules_all::v3::Response {
-        global: event.content.global,
+        global: account_data.global,
     })
 }

@@ -42,12 +45,11 @@ pub async fn get_pushrules_all_route(
 ///
 /// Retrieves a single specified push rule for this user.
 pub async fn get_pushrule_route(
-    db: DatabaseGuard,
-    body: Ruma<get_pushrule::v3::IncomingRequest>,
+    body: Ruma<get_pushrule::v3::Request>,
 ) -> Result<get_pushrule::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");

-    let event: PushRulesEvent = db
+    let event = services()
         .account_data
         .get(
             None,
@@ -59,7 +61,11 @@ pub async fn get_pushrule_route(
             "PushRules event not found.",
         ))?;

-    let global = event.content.global;
+    let account_data = serde_json::from_str::<PushRulesEvent>(event.get())
+        .map_err(|_| Error::bad_database("Invalid account data event in db."))?
+        .content;
+
+    let global = account_data.global;
     let rule = match body.kind {
         RuleKind::Override => global
             .override_
@@ -98,20 +104,19 @@ pub async fn get_pushrule_route(
 ///
 /// Creates a single specified push rule for this user.
 pub async fn set_pushrule_route(
-    db: DatabaseGuard,
-    body: Ruma<set_pushrule::v3::IncomingRequest>,
+    body: Ruma<set_pushrule::v3::Request>,
 ) -> Result<set_pushrule::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
     let body = body.body;

-    if body.scope != "global" {
+    if body.scope != RuleScope::Global {
         return Err(Error::BadRequest(
             ErrorKind::InvalidParam,
             "Scopes other than 'global' are not supported.",
         ));
     }

-    let mut event: PushRulesEvent = db
+    let event = services()
         .account_data
         .get(
             None,
@@ -123,79 +128,78 @@ pub async fn set_pushrule_route(
             "PushRules event not found.",
         ))?;

-    let global = &mut event.content.global;
-    match body.kind {
-        RuleKind::Override => {
+    let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
+        .map_err(|_| Error::bad_database("Invalid account data event in db."))?;
+
+    let global = &mut account_data.content.global;
+    match body.rule {
+        NewPushRule::Override(rule) => {
             global.override_.replace(
                 ConditionalPushRuleInit {
-                    actions: body.actions,
+                    actions: rule.actions,
                     default: false,
                     enabled: true,
-                    rule_id: body.rule_id,
-                    conditions: body.conditions,
+                    rule_id: rule.rule_id,
+                    conditions: rule.conditions,
                 }
                 .into(),
             );
         }
-        RuleKind::Underride => {
+        NewPushRule::Underride(rule) => {
             global.underride.replace(
                 ConditionalPushRuleInit {
-                    actions: body.actions,
+                    actions: rule.actions,
                     default: false,
                     enabled: true,
-                    rule_id: body.rule_id,
-                    conditions: body.conditions,
+                    rule_id: rule.rule_id,
+                    conditions: rule.conditions,
                 }
                 .into(),
             );
         }
-        RuleKind::Sender => {
+        NewPushRule::Sender(rule) => {
             global.sender.replace(
                 SimplePushRuleInit {
-                    actions: body.actions,
+                    actions: rule.actions,
                     default: false,
                     enabled: true,
-                    rule_id: body.rule_id,
+                    rule_id: rule.rule_id,
                 }
                 .into(),
             );
         }
-        RuleKind::Room => {
+        NewPushRule::Room(rule) => {
             global.room.replace(
                 SimplePushRuleInit {
-                    actions: body.actions,
+                    actions: rule.actions,
                     default: false,
                     enabled: true,
-                    rule_id: body.rule_id,
+                    rule_id: rule.rule_id,
                 }
                 .into(),
             );
         }
-        RuleKind::Content => {
+        NewPushRule::Content(rule) => {
             global.content.replace(
                 PatternedPushRuleInit {
-                    actions: body.actions,
+                    actions: rule.actions,
                     default: false,
                     enabled: true,
-                    rule_id: body.rule_id,
-                    pattern: body.pattern.unwrap_or_default(),
+                    rule_id: rule.rule_id,
+                    pattern: rule.pattern,
                 }
                 .into(),
             );
         }
-        _ => {}
     }

-    db.account_data.update(
+    services().account_data.update(
         None,
         sender_user,
         GlobalAccountDataEventType::PushRules.to_string().into(),
-        &event,
-        &db.globals,
+        &serde_json::to_value(account_data).expect("to json value always works"),
     )?;

-    db.flush()?;
-
     Ok(set_pushrule::v3::Response {})
 }

@@ -203,19 +207,18 @@ pub async fn set_pushrule_route(
 ///
 /// Gets the actions of a single specified push rule for this user.
 pub async fn get_pushrule_actions_route(
-    db: DatabaseGuard,
-    body: Ruma<get_pushrule_actions::v3::IncomingRequest>,
+    body: Ruma<get_pushrule_actions::v3::Request>,
 ) -> Result<get_pushrule_actions::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");

-    if body.scope != "global" {
+    if body.scope != RuleScope::Global {
         return Err(Error::BadRequest(
             ErrorKind::InvalidParam,
             "Scopes other than 'global' are not supported.",
         ));
     }

-    let mut event: PushRulesEvent = db
+    let event = services()
         .account_data
         .get(
             None,
@@ -227,7 +230,11 @@ pub async fn get_pushrule_actions_route(
             "PushRules event not found.",
         ))?;

-    let global = &mut event.content.global;
+    let account_data = serde_json::from_str::<PushRulesEvent>(event.get())
+        .map_err(|_| Error::bad_database("Invalid account data event in db."))?
+        .content;
+
+    let global = account_data.global;
     let actions = match body.kind {
         RuleKind::Override => global
             .override_
@@ -252,8 +259,6 @@ pub async fn get_pushrule_actions_route(
         _ => None,
     };

-    db.flush()?;
-
     Ok(get_pushrule_actions::v3::Response {
         actions: actions.unwrap_or_default(),
     })
@@ -263,19 +268,18 @@ pub async fn get_pushrule_actions_route(
 ///
 /// Sets the actions of a single specified push rule for this user.
 pub async fn set_pushrule_actions_route(
-    db: DatabaseGuard,
-    body: Ruma<set_pushrule_actions::v3::IncomingRequest>,
+    body: Ruma<set_pushrule_actions::v3::Request>,
 ) -> Result<set_pushrule_actions::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");

-    if body.scope != "global" {
+    if body.scope != RuleScope::Global {
         return Err(Error::BadRequest(
             ErrorKind::InvalidParam,
             "Scopes other than 'global' are not supported.",
         ));
     }

-    let mut event: PushRulesEvent = db
+    let event = services()
         .account_data
         .get(
             None,
@@ -287,7 +291,10 @@ pub async fn set_pushrule_actions_route(
             "PushRules event not found.",
         ))?;

-    let global = &mut event.content.global;
+    let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
+        .map_err(|_| Error::bad_database("Invalid account data event in db."))?;
+
+    let global = &mut account_data.content.global;
     match body.kind {
         RuleKind::Override => {
             if let Some(mut rule) = global.override_.get(body.rule_id.as_str()).cloned() {
@@ -322,16 +329,13 @@ pub async fn set_pushrule_actions_route(
         _ => {}
     };

-    db.account_data.update(
+    services().account_data.update(
         None,
         sender_user,
         GlobalAccountDataEventType::PushRules.to_string().into(),
-        &event,
-        &db.globals,
+        &serde_json::to_value(account_data).expect("to json value always works"),
     )?;

-    db.flush()?;
-
     Ok(set_pushrule_actions::v3::Response {})
 }

@@ -339,19 +343,18 @@ pub async fn set_pushrule_actions_route(
 ///
 /// Gets the enabled status of a single specified push rule for this user.
 pub async fn get_pushrule_enabled_route(
-    db: DatabaseGuard,
-    body: Ruma<get_pushrule_enabled::v3::IncomingRequest>,
+    body: Ruma<get_pushrule_enabled::v3::Request>,
 ) -> Result<get_pushrule_enabled::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");

-    if body.scope != "global" {
+    if body.scope != RuleScope::Global {
         return Err(Error::BadRequest(
             ErrorKind::InvalidParam,
             "Scopes other than 'global' are not supported.",
         ));
     }

-    let mut event: PushRulesEvent = db
+    let event = services()
         .account_data
         .get(
             None,
@@ -363,7 +366,10 @@ pub async fn get_pushrule_enabled_route(
             "PushRules event not found.",
         ))?;

-    let global = &mut event.content.global;
+    let account_data = serde_json::from_str::<PushRulesEvent>(event.get())
+        .map_err(|_| Error::bad_database("Invalid account data event in db."))?;
+
+    let global = account_data.content.global;
     let enabled = match body.kind {
         RuleKind::Override => global
             .override_
@@ -393,8 +399,6 @@ pub async fn get_pushrule_enabled_route(
         _ => false,
     };

-    db.flush()?;
-
     Ok(get_pushrule_enabled::v3::Response { enabled })
 }

@@ -402,19 +406,18 @@ pub async fn get_pushrule_enabled_route(
 ///
 /// Sets the enabled status of a single specified push rule for this user.
 pub async fn set_pushrule_enabled_route(
-    db: DatabaseGuard,
-    body: Ruma<set_pushrule_enabled::v3::IncomingRequest>,
+    body: Ruma<set_pushrule_enabled::v3::Request>,
 ) -> Result<set_pushrule_enabled::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");

-    if body.scope != "global" {
+    if body.scope != RuleScope::Global {
         return Err(Error::BadRequest(
             ErrorKind::InvalidParam,
             "Scopes other than 'global' are not supported.",
         ));
     }

-    let mut event: PushRulesEvent = db
+    let event = services()
         .account_data
         .get(
             None,
@@ -426,7 +429,10 @@ pub async fn set_pushrule_enabled_route(
             "PushRules event not found.",
         ))?;

-    let global = &mut event.content.global;
+    let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
+        .map_err(|_| Error::bad_database("Invalid account data event in db."))?;
+
+    let global = &mut account_data.content.global;
     match body.kind {
         RuleKind::Override => {
             if let Some(mut rule) = global.override_.get(body.rule_id.as_str()).cloned() {
@@ -466,16 +472,13 @@ pub async fn set_pushrule_enabled_route(
         _ => {}
     }

-    db.account_data.update(
+    services().account_data.update(
         None,
         sender_user,
         GlobalAccountDataEventType::PushRules.to_string().into(),
-        &event,
-        &db.globals,
+        &serde_json::to_value(account_data).expect("to json value always works"),
     )?;

-    db.flush()?;
-
     Ok(set_pushrule_enabled::v3::Response {})
 }

@@ -483,19 +486,18 @@ pub async fn set_pushrule_enabled_route(
 ///
 /// Deletes a single specified push rule for this user.
 pub async fn delete_pushrule_route(
-    db: DatabaseGuard,
-    body: Ruma<delete_pushrule::v3::IncomingRequest>,
+    body: Ruma<delete_pushrule::v3::Request>,
 ) -> Result<delete_pushrule::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");

-    if body.scope != "global" {
+    if body.scope != RuleScope::Global {
         return Err(Error::BadRequest(
             ErrorKind::InvalidParam,
             "Scopes other than 'global' are not supported.",
         ));
     }

-    let mut event: PushRulesEvent = db
+    let event = services()
         .account_data
         .get(
             None,
@@ -507,7 +509,10 @@ pub async fn delete_pushrule_route(
             "PushRules event not found.",
         ))?;

-    let global = &mut event.content.global;
+    let mut account_data = serde_json::from_str::<PushRulesEvent>(event.get())
+        .map_err(|_| Error::bad_database("Invalid account data event in db."))?;
+
+    let global = &mut account_data.content.global;
     match body.kind {
         RuleKind::Override => {
             if let Some(rule) = global.override_.get(body.rule_id.as_str()).cloned() {
@@ -537,16 +542,13 @@ pub async fn delete_pushrule_route(
         _ => {}
     }

-    db.account_data.update(
+    services().account_data.update(
         None,
         sender_user,
         GlobalAccountDataEventType::PushRules.to_string().into(),
-        &event,
-        &db.globals,
+        &serde_json::to_value(account_data).expect("to json value always works"),
     )?;

-    db.flush()?;
-
     Ok(delete_pushrule::v3::Response {})
 }

@@ -554,13 +556,12 @@ pub async fn delete_pushrule_route(
 ///
 /// Gets all currently active pushers for the sender user.
 pub async fn get_pushers_route(
-    db: DatabaseGuard,
     body: Ruma<get_pushers::v3::Request>,
 ) -> Result<get_pushers::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");

     Ok(get_pushers::v3::Response {
-        pushers: db.pusher.get_pushers(sender_user)?,
+        pushers: services().pusher.get_pushers(sender_user)?,
     })
 }

@@ -570,15 +571,13 @@ pub async fn get_pushers_route(
 ///
 /// - TODO: Handle `append`
 pub async fn set_pushers_route(
-    db: DatabaseGuard,
     body: Ruma<set_pusher::v3::Request>,
 ) -> Result<set_pusher::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
-    let pusher = body.pusher.clone();

-    db.pusher.set_pusher(sender_user, pusher)?;
-
-    db.flush()?;
+    services()
+        .pusher
+        .set_pusher(sender_user, body.action.clone())?;

     Ok(set_pusher::v3::Response::default())
 }
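A recurring pattern in the push-rule routes above is that the account-data store now returns a raw JSON event, and each route deserializes it into the concrete event type, mutates it, and writes it back as a plain JSON value. A minimal sketch of that round trip, where `PushRulesLike` is a stand-in type for ruma's `PushRulesEvent`, not the real one:

```rust
use serde::{Deserialize, Serialize};
use serde_json::value::RawValue;

#[derive(Serialize, Deserialize, Debug)]
struct PushRulesLike {
    content: Content,
}

#[derive(Serialize, Deserialize, Debug)]
struct Content {
    global: Vec<String>, // stand-in for the real ruleset
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // What a `.get(...)` on the store might hand back: an untyped raw event.
    let raw: Box<RawValue> = serde_json::from_str(r#"{"content":{"global":["rule_a"]}}"#)?;

    // Route side: parse into the concrete event and mutate it.
    let mut account_data: PushRulesLike = serde_json::from_str(raw.get())?;
    account_data.content.global.push("rule_b".to_owned());

    // Write back as a plain JSON value, as the `update(...)` call expects.
    let value = serde_json::to_value(&account_data)?;
    println!("{value}");
    Ok(())
}
```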
@@ -0,0 +1,162 @@
+use crate::{services, Error, Result, Ruma};
+use ruma::{
+    api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt},
+    events::{
+        receipt::{ReceiptThread, ReceiptType},
+        RoomAccountDataEventType,
+    },
+    MilliSecondsSinceUnixEpoch,
+};
+use std::collections::BTreeMap;
+
+/// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers`
+///
+/// Sets different types of read markers.
+///
+/// - Updates fully-read account data event to `fully_read`
+/// - If `read_receipt` is set: Update private marker and public read receipt EDU
+pub async fn set_read_marker_route(
+    body: Ruma<set_read_marker::v3::Request>,
+) -> Result<set_read_marker::v3::Response> {
+    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+    if let Some(fully_read) = &body.fully_read {
+        let fully_read_event = ruma::events::fully_read::FullyReadEvent {
+            content: ruma::events::fully_read::FullyReadEventContent {
+                event_id: fully_read.clone(),
+            },
+        };
+        services().account_data.update(
+            Some(&body.room_id),
+            sender_user,
+            RoomAccountDataEventType::FullyRead,
+            &serde_json::to_value(fully_read_event).expect("to json value always works"),
+        )?;
+    }
+
+    if body.private_read_receipt.is_some() || body.read_receipt.is_some() {
+        services()
+            .rooms
+            .user
+            .reset_notification_counts(sender_user, &body.room_id)?;
+    }
+
+    if let Some(event) = &body.private_read_receipt {
+        services().rooms.edus.read_receipt.private_read_set(
+            &body.room_id,
+            sender_user,
+            services()
+                .rooms
+                .timeline
+                .get_pdu_count(event)?
+                .ok_or(Error::BadRequest(
+                    ErrorKind::InvalidParam,
+                    "Event does not exist.",
+                ))?,
+        )?;
+    }
+
+    if let Some(event) = &body.read_receipt {
+        let mut user_receipts = BTreeMap::new();
+        user_receipts.insert(
+            sender_user.clone(),
+            ruma::events::receipt::Receipt {
+                ts: Some(MilliSecondsSinceUnixEpoch::now()),
+                thread: ReceiptThread::Unthreaded,
+            },
+        );
+
+        let mut receipts = BTreeMap::new();
+        receipts.insert(ReceiptType::Read, user_receipts);
+
+        let mut receipt_content = BTreeMap::new();
+        receipt_content.insert(event.to_owned(), receipts);
+
+        services().rooms.edus.read_receipt.readreceipt_update(
+            sender_user,
+            &body.room_id,
+            ruma::events::receipt::ReceiptEvent {
+                content: ruma::events::receipt::ReceiptEventContent(receipt_content),
+                room_id: body.room_id.clone(),
+            },
+        )?;
+    }
+
+    Ok(set_read_marker::v3::Response {})
+}
+
+/// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}`
+///
+/// Sets private read marker and public read receipt EDU.
+pub async fn create_receipt_route(
+    body: Ruma<create_receipt::v3::Request>,
+) -> Result<create_receipt::v3::Response> {
+    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+    if matches!(
+        &body.receipt_type,
+        create_receipt::v3::ReceiptType::Read | create_receipt::v3::ReceiptType::ReadPrivate
+    ) {
+        services()
+            .rooms
+            .user
+            .reset_notification_counts(sender_user, &body.room_id)?;
+    }
+
+    match body.receipt_type {
+        create_receipt::v3::ReceiptType::FullyRead => {
+            let fully_read_event = ruma::events::fully_read::FullyReadEvent {
+                content: ruma::events::fully_read::FullyReadEventContent {
+                    event_id: body.event_id.clone(),
+                },
+            };
+            services().account_data.update(
+                Some(&body.room_id),
+                sender_user,
+                RoomAccountDataEventType::FullyRead,
+                &serde_json::to_value(fully_read_event).expect("to json value always works"),
+            )?;
+        }
+        create_receipt::v3::ReceiptType::Read => {
+            let mut user_receipts = BTreeMap::new();
+            user_receipts.insert(
+                sender_user.clone(),
+                ruma::events::receipt::Receipt {
+                    ts: Some(MilliSecondsSinceUnixEpoch::now()),
+                    thread: ReceiptThread::Unthreaded,
+                },
+            );
+            let mut receipts = BTreeMap::new();
+            receipts.insert(ReceiptType::Read, user_receipts);
+
+            let mut receipt_content = BTreeMap::new();
+            receipt_content.insert(body.event_id.to_owned(), receipts);
+
+            services().rooms.edus.read_receipt.readreceipt_update(
+                sender_user,
+                &body.room_id,
+                ruma::events::receipt::ReceiptEvent {
+                    content: ruma::events::receipt::ReceiptEventContent(receipt_content),
+                    room_id: body.room_id.clone(),
+                },
+            )?;
+        }
+        create_receipt::v3::ReceiptType::ReadPrivate => {
+            services().rooms.edus.read_receipt.private_read_set(
+                &body.room_id,
+                sender_user,
+                services()
+                    .rooms
+                    .timeline
+                    .get_pdu_count(&body.event_id)?
+                    .ok_or(Error::BadRequest(
+                        ErrorKind::InvalidParam,
+                        "Event does not exist.",
+                    ))?,
+            )?;
+        }
+        _ => return Err(Error::bad_database("Unsupported receipt type")),
+    }
+
+    Ok(create_receipt::v3::Response {})
+}
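Both receipt handlers in the new file above build the same nested map before handing it to `readreceipt_update`: event id, then receipt type, then user id, then the receipt payload. A small sketch of just that data shape, using plain strings and a placeholder struct in place of ruma's typed identifiers and `Receipt`:

```rust
use std::collections::BTreeMap;

#[derive(Debug)]
struct Receipt {
    ts_ms: u64,
}

type UserReceipts = BTreeMap<String, Receipt>;
type Receipts = BTreeMap<String, UserReceipts>;
type ReceiptContent = BTreeMap<String, Receipts>;

fn main() {
    let mut user_receipts = UserReceipts::new();
    user_receipts.insert(
        "@alice:example.org".to_owned(),
        Receipt { ts_ms: 1_700_000_000_000 },
    );

    let mut receipts = Receipts::new();
    receipts.insert("m.read".to_owned(), user_receipts);

    let mut receipt_content = ReceiptContent::new();
    receipt_content.insert("$event_id".to_owned(), receipts);

    // This is the structure the route wraps into a ReceiptEventContent
    // before calling readreceipt_update(...).
    println!("{receipt_content:#?}");
}
```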
@@ -1,6 +1,6 @@
 use std::sync::Arc;

-use crate::{database::DatabaseGuard, pdu::PduBuilder, Result, Ruma};
+use crate::{service::pdu::PduBuilder, services, Result, Ruma};
 use ruma::{
     api::client::redact::redact_event,
     events::{room::redaction::RoomRedactionEventContent, RoomEventType},
@@ -14,14 +14,14 @@ use serde_json::value::to_raw_value;
 ///
 /// - TODO: Handle txn id
 pub async fn redact_event_route(
-    db: DatabaseGuard,
-    body: Ruma<redact_event::v3::IncomingRequest>,
+    body: Ruma<redact_event::v3::Request>,
 ) -> Result<redact_event::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
     let body = body.body;

     let mutex_state = Arc::clone(
-        db.globals
+        services()
+            .globals
             .roomid_mutex_state
             .write()
             .unwrap()
@@ -30,7 +30,7 @@ pub async fn redact_event_route(
     );
     let state_lock = mutex_state.lock().await;

-    let event_id = db.rooms.build_and_append_pdu(
+    let event_id = services().rooms.timeline.build_and_append_pdu(
         PduBuilder {
             event_type: RoomEventType::RoomRedaction,
             content: to_raw_value(&RoomRedactionEventContent {
@@ -43,14 +43,11 @@ pub async fn redact_event_route(
         },
         sender_user,
         &body.room_id,
-        &db,
         &state_lock,
     )?;

     drop(state_lock);

-    db.flush()?;
-
     let event_id = (*event_id).to_owned();
     Ok(redact_event::v3::Response { event_id })
 }
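The redaction route, like the profile and room-creation routes, clones a per-room mutex out of `roomid_mutex_state` and holds it while appending the PDU. A minimal sketch of that per-room lock map, assuming std primitives in place of the async mutex the server actually uses:

```rust
use std::collections::HashMap;
use std::sync::{Arc, Mutex, RwLock};

#[derive(Default)]
struct Globals {
    // One mutex per room id; the map itself is guarded by an RwLock.
    roomid_mutex_state: RwLock<HashMap<String, Arc<Mutex<()>>>>,
}

impl Globals {
    // Clone out the Arc for this room, creating the mutex on first use.
    fn room_mutex(&self, room_id: &str) -> Arc<Mutex<()>> {
        Arc::clone(
            self.roomid_mutex_state
                .write()
                .unwrap()
                .entry(room_id.to_owned())
                .or_default(),
        )
    }
}

fn main() {
    let globals = Globals::default();
    let mutex_state = globals.room_mutex("!room:example.org");
    let _state_lock = mutex_state.lock().unwrap();
    // ... build and append the PDU while this room's state is locked ...
}
```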
@@ -1,4 +1,4 @@
-use crate::{database::DatabaseGuard, utils::HtmlEscape, Error, Result, Ruma};
+use crate::{services, utils::HtmlEscape, Error, Result, Ruma};
 use ruma::{
     api::client::{error::ErrorKind, room::report_content},
     events::room::message,
@@ -10,12 +10,11 @@ use ruma::{
 /// Reports an inappropriate event to homeserver admins
 ///
 pub async fn report_event_route(
-    db: DatabaseGuard,
-    body: Ruma<report_content::v3::IncomingRequest>,
+    body: Ruma<report_content::v3::Request>,
 ) -> Result<report_content::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");

-    let pdu = match db.rooms.get_pdu(&body.event_id)? {
+    let pdu = match services().rooms.timeline.get_pdu(&body.event_id)? {
         Some(pdu) => pdu,
         _ => {
             return Err(Error::BadRequest(
@@ -39,7 +38,7 @@ pub async fn report_event_route(
         ));
     };

-    db.admin
+    services().admin
         .send_message(message::RoomMessageEventContent::text_html(
             format!(
                 "Report received from: {}\n\n\
@@ -66,7 +65,5 @@ pub async fn report_event_route(
             ),
         ));

-    db.flush()?;
-
     Ok(report_content::v3::Response {})
 }
|
@ -1,5 +1,5 @@
|
||||||
use crate::{
|
use crate::{
|
||||||
client_server::invite_helper, database::DatabaseGuard, pdu::PduBuilder, Error, Result, Ruma,
|
api::client_server::invite_helper, service::pdu::PduBuilder, services, Error, Result, Ruma,
|
||||||
};
|
};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::{
|
api::client::{
|
||||||
|
@ -22,8 +22,8 @@ use ruma::{
|
||||||
RoomEventType, StateEventType,
|
RoomEventType, StateEventType,
|
||||||
},
|
},
|
||||||
int,
|
int,
|
||||||
serde::{CanonicalJsonObject, JsonObject},
|
serde::JsonObject,
|
||||||
RoomAliasId, RoomId,
|
CanonicalJsonObject, OwnedRoomAliasId, RoomAliasId, RoomId,
|
||||||
};
|
};
|
||||||
use serde_json::{json, value::to_raw_value};
|
use serde_json::{json, value::to_raw_value};
|
||||||
use std::{cmp::max, collections::BTreeMap, sync::Arc};
|
use std::{cmp::max, collections::BTreeMap, sync::Arc};
|
||||||
|
@ -46,19 +46,19 @@ use tracing::{info, warn};
|
||||||
/// - Send events implied by `name` and `topic`
|
/// - Send events implied by `name` and `topic`
|
||||||
/// - Send invite events
|
/// - Send invite events
|
||||||
pub async fn create_room_route(
|
pub async fn create_room_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<create_room::v3::Request>,
|
||||||
body: Ruma<create_room::v3::IncomingRequest>,
|
|
||||||
) -> Result<create_room::v3::Response> {
|
) -> Result<create_room::v3::Response> {
|
||||||
use create_room::v3::RoomPreset;
|
use create_room::v3::RoomPreset;
|
||||||
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let room_id = RoomId::new(db.globals.server_name());
|
let room_id = RoomId::new(services().globals.server_name());
|
||||||
|
|
||||||
db.rooms.get_or_create_shortroomid(&room_id, &db.globals)?;
|
services().rooms.short.get_or_create_shortroomid(&room_id)?;
|
||||||
|
|
||||||
let mutex_state = Arc::clone(
|
let mutex_state = Arc::clone(
|
||||||
db.globals
|
services()
|
||||||
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -67,9 +67,9 @@ pub async fn create_room_route(
|
||||||
);
|
);
|
||||||
let state_lock = mutex_state.lock().await;
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
if !db.globals.allow_room_creation()
|
if !services().globals.allow_room_creation()
|
||||||
&& !body.from_appservice
|
&& !body.from_appservice
|
||||||
&& !db.users.is_admin(sender_user, &db.rooms, &db.globals)?
|
&& !services().users.is_admin(sender_user)?
|
||||||
{
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
|
@ -77,18 +77,24 @@ pub async fn create_room_route(
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
-    let alias: Option<Box<RoomAliasId>> =
+    let alias: Option<OwnedRoomAliasId> =
         body.room_alias_name
             .as_ref()
             .map_or(Ok(None), |localpart| {
                 // TODO: Check for invalid characters and maximum length
-                let alias =
-                    RoomAliasId::parse(format!("#{}:{}", localpart, db.globals.server_name()))
-                        .map_err(|_| {
-                            Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias.")
-                        })?;
+                let alias = RoomAliasId::parse(format!(
+                    "#{}:{}",
+                    localpart,
+                    services().globals.server_name()
+                ))
+                .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?;

-                if db.rooms.id_from_alias(&alias)?.is_some() {
+                if services()
+                    .rooms
+                    .alias
+                    .resolve_local_alias(&alias)?
+                    .is_some()
+                {
Err(Error::BadRequest(
|
Err(Error::BadRequest(
|
||||||
ErrorKind::RoomInUse,
|
ErrorKind::RoomInUse,
|
||||||
"Room alias already exists.",
|
"Room alias already exists.",
|
||||||
|
@ -100,7 +106,11 @@ pub async fn create_room_route(
|
||||||
|
|
||||||
let room_version = match body.room_version.clone() {
|
let room_version = match body.room_version.clone() {
|
||||||
Some(room_version) => {
|
Some(room_version) => {
|
||||||
if db.rooms.is_supported_version(&db, &room_version) {
|
if services()
|
||||||
|
.globals
|
||||||
|
.supported_room_versions()
|
||||||
|
.contains(&room_version)
|
||||||
|
{
|
||||||
room_version
|
room_version
|
||||||
} else {
|
} else {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
|
@ -109,7 +119,7 @@ pub async fn create_room_route(
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
None => db.globals.default_room_version(),
|
None => services().globals.default_room_version(),
|
||||||
};
|
};
|
||||||
|
|
||||||
let content = match &body.creation_content {
|
let content = match &body.creation_content {
|
||||||
|
@ -163,7 +173,7 @@ pub async fn create_room_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
// 1. The room create event
|
// 1. The room create event
|
||||||
db.rooms.build_and_append_pdu(
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomCreate,
|
event_type: RoomEventType::RoomCreate,
|
||||||
content: to_raw_value(&content).expect("event is valid, we just created it"),
|
content: to_raw_value(&content).expect("event is valid, we just created it"),
|
||||||
|
@ -173,21 +183,20 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// 2. Let the room creator join
|
// 2. Let the room creator join
|
||||||
db.rooms.build_and_append_pdu(
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomMember,
|
event_type: RoomEventType::RoomMember,
|
||||||
content: to_raw_value(&RoomMemberEventContent {
|
content: to_raw_value(&RoomMemberEventContent {
|
||||||
membership: MembershipState::Join,
|
membership: MembershipState::Join,
|
||||||
displayname: db.users.displayname(sender_user)?,
|
displayname: services().users.displayname(sender_user)?,
|
||||||
avatar_url: db.users.avatar_url(sender_user)?,
|
avatar_url: services().users.avatar_url(sender_user)?,
|
||||||
is_direct: Some(body.is_direct),
|
is_direct: Some(body.is_direct),
|
||||||
third_party_invite: None,
|
third_party_invite: None,
|
||||||
blurhash: db.users.blurhash(sender_user)?,
|
blurhash: services().users.blurhash(sender_user)?,
|
||||||
reason: None,
|
reason: None,
|
||||||
join_authorized_via_users_server: None,
|
join_authorized_via_users_server: None,
|
||||||
})
|
})
|
||||||
|
@ -198,17 +207,13 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// 3. Power levels
|
// 3. Power levels
|
||||||
|
|
||||||
// Figure out preset. We need it for preset specific events
|
// Figure out preset. We need it for preset specific events
|
||||||
let preset = body
|
let preset = body.preset.clone().unwrap_or(match &body.visibility {
|
||||||
.preset
|
|
||||||
.clone()
|
|
||||||
.unwrap_or_else(|| match &body.visibility {
|
|
||||||
room::Visibility::Private => RoomPreset::PrivateChat,
|
room::Visibility::Private => RoomPreset::PrivateChat,
|
||||||
room::Visibility::Public => RoomPreset::PublicChat,
|
room::Visibility::Public => RoomPreset::PublicChat,
|
||||||
_ => RoomPreset::PrivateChat, // Room visibility should not be custom
|
_ => RoomPreset::PrivateChat, // Room visibility should not be custom
|
||||||
|
@ -240,7 +245,7 @@ pub async fn create_room_route(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
db.rooms.build_and_append_pdu(
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomPowerLevels,
|
event_type: RoomEventType::RoomPowerLevels,
|
||||||
content: to_raw_value(&power_levels_content)
|
content: to_raw_value(&power_levels_content)
|
||||||
|
@ -251,13 +256,12 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// 4. Canonical room alias
|
// 4. Canonical room alias
|
||||||
if let Some(room_alias_id) = &alias {
|
if let Some(room_alias_id) = &alias {
|
||||||
db.rooms.build_and_append_pdu(
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomCanonicalAlias,
|
event_type: RoomEventType::RoomCanonicalAlias,
|
||||||
content: to_raw_value(&RoomCanonicalAliasEventContent {
|
content: to_raw_value(&RoomCanonicalAliasEventContent {
|
||||||
|
@ -271,7 +275,6 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
}
|
}
|
||||||
|
@ -279,7 +282,7 @@ pub async fn create_room_route(
|
||||||
// 5. Events set by preset
|
// 5. Events set by preset
|
||||||
|
|
||||||
// 5.1 Join Rules
|
// 5.1 Join Rules
|
||||||
db.rooms.build_and_append_pdu(
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomJoinRules,
|
event_type: RoomEventType::RoomJoinRules,
|
||||||
content: to_raw_value(&RoomJoinRulesEventContent::new(match preset {
|
content: to_raw_value(&RoomJoinRulesEventContent::new(match preset {
|
||||||
|
@ -294,12 +297,11 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// 5.2 History Visibility
|
// 5.2 History Visibility
|
||||||
db.rooms.build_and_append_pdu(
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomHistoryVisibility,
|
event_type: RoomEventType::RoomHistoryVisibility,
|
||||||
content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
|
content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
|
||||||
|
@ -312,12 +314,11 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// 5.3 Guest Access
|
// 5.3 Guest Access
|
||||||
db.rooms.build_and_append_pdu(
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomGuestAccess,
|
event_type: RoomEventType::RoomGuestAccess,
|
||||||
content: to_raw_value(&RoomGuestAccessEventContent::new(match preset {
|
content: to_raw_value(&RoomGuestAccessEventContent::new(match preset {
|
||||||
|
@ -331,7 +332,6 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
|
@ -346,18 +346,23 @@ pub async fn create_room_route(
|
||||||
pdu_builder.state_key.get_or_insert_with(|| "".to_owned());
|
pdu_builder.state_key.get_or_insert_with(|| "".to_owned());
|
||||||
|
|
||||||
// Silently skip encryption events if they are not allowed
|
// Silently skip encryption events if they are not allowed
|
||||||
if pdu_builder.event_type == RoomEventType::RoomEncryption && !db.globals.allow_encryption()
|
if pdu_builder.event_type == RoomEventType::RoomEncryption
|
||||||
|
&& !services().globals.allow_encryption()
|
||||||
{
|
{
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
db.rooms
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
.build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock)?;
|
pdu_builder,
|
||||||
|
sender_user,
|
||||||
|
&room_id,
|
||||||
|
&state_lock,
|
||||||
|
)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
// 7. Events implied by name and topic
|
// 7. Events implied by name and topic
|
||||||
if let Some(name) = &body.name {
|
if let Some(name) = &body.name {
|
||||||
db.rooms.build_and_append_pdu(
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomName,
|
event_type: RoomEventType::RoomName,
|
||||||
content: to_raw_value(&RoomNameEventContent::new(Some(name.clone())))
|
content: to_raw_value(&RoomNameEventContent::new(Some(name.clone())))
|
||||||
|
@ -368,13 +373,12 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(topic) = &body.topic {
|
if let Some(topic) = &body.topic {
|
||||||
db.rooms.build_and_append_pdu(
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomTopic,
|
event_type: RoomEventType::RoomTopic,
|
||||||
content: to_raw_value(&RoomTopicEventContent {
|
content: to_raw_value(&RoomTopicEventContent {
|
||||||
|
@ -387,7 +391,6 @@ pub async fn create_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&room_id,
|
&room_id,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
}
|
}
|
||||||
|
@ -395,22 +398,20 @@ pub async fn create_room_route(
|
||||||
// 8. Events implied by invite (and TODO: invite_3pid)
|
// 8. Events implied by invite (and TODO: invite_3pid)
|
||||||
drop(state_lock);
|
drop(state_lock);
|
||||||
for user_id in &body.invite {
|
for user_id in &body.invite {
|
||||||
let _ = invite_helper(sender_user, user_id, &room_id, &db, body.is_direct).await;
|
let _ = invite_helper(sender_user, user_id, &room_id, None, body.is_direct).await;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Homeserver specific stuff
|
// Homeserver specific stuff
|
||||||
if let Some(alias) = alias {
|
if let Some(alias) = alias {
|
||||||
db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?;
|
services().rooms.alias.set_alias(&alias, &room_id)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
if body.visibility == room::Visibility::Public {
|
if body.visibility == room::Visibility::Public {
|
||||||
db.rooms.set_public(&room_id, true)?;
|
services().rooms.directory.set_public(&room_id)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
info!("{} created a room", sender_user);
|
info!("{} created a room", sender_user);
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(create_room::v3::Response::new(room_id))
|
Ok(create_room::v3::Response::new(room_id))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -420,12 +421,15 @@ pub async fn create_room_route(
|
||||||
///
|
///
|
||||||
/// - You have to currently be joined to the room (TODO: Respect history visibility)
|
/// - You have to currently be joined to the room (TODO: Respect history visibility)
|
||||||
pub async fn get_room_event_route(
|
pub async fn get_room_event_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_room_event::v3::Request>,
|
||||||
body: Ruma<get_room_event::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_room_event::v3::Response> {
|
) -> Result<get_room_event::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
if !db.rooms.is_joined(sender_user, &body.room_id)? {
|
if !services()
|
||||||
|
.rooms
|
||||||
|
.state_cache
|
||||||
|
.is_joined(sender_user, &body.room_id)?
|
||||||
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
"You don't have permission to view this room.",
|
"You don't have permission to view this room.",
|
||||||
|
@ -433,8 +437,9 @@ pub async fn get_room_event_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(get_room_event::v3::Response {
|
Ok(get_room_event::v3::Response {
|
||||||
event: db
|
event: services()
|
||||||
.rooms
|
.rooms
|
||||||
|
.timeline
|
||||||
.get_pdu(&body.event_id)?
|
.get_pdu(&body.event_id)?
|
||||||
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?
|
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?
|
||||||
.to_room_event(),
|
.to_room_event(),
|
||||||
|
@ -447,12 +452,15 @@ pub async fn get_room_event_route(
|
||||||
///
|
///
|
||||||
/// - Only users joined to the room are allowed to call this TODO: Allow any user to call it if history_visibility is world readable
|
/// - Only users joined to the room are allowed to call this TODO: Allow any user to call it if history_visibility is world readable
|
||||||
pub async fn get_room_aliases_route(
|
pub async fn get_room_aliases_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<aliases::v3::Request>,
|
||||||
body: Ruma<aliases::v3::IncomingRequest>,
|
|
||||||
) -> Result<aliases::v3::Response> {
|
) -> Result<aliases::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
if !db.rooms.is_joined(sender_user, &body.room_id)? {
|
if !services()
|
||||||
|
.rooms
|
||||||
|
.state_cache
|
||||||
|
.is_joined(sender_user, &body.room_id)?
|
||||||
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
"You don't have permission to view this room.",
|
"You don't have permission to view this room.",
|
||||||
|
@ -460,9 +468,10 @@ pub async fn get_room_aliases_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(aliases::v3::Response {
|
Ok(aliases::v3::Response {
|
||||||
aliases: db
|
aliases: services()
|
||||||
.rooms
|
.rooms
|
||||||
.room_aliases(&body.room_id)
|
.alias
|
||||||
|
.local_aliases_for_room(&body.room_id)
|
||||||
.filter_map(|a| a.ok())
|
.filter_map(|a| a.ok())
|
||||||
.collect(),
|
.collect(),
|
||||||
})
|
})
|
||||||
|
@ -479,12 +488,15 @@ pub async fn get_room_aliases_route(
|
||||||
/// - Moves local aliases
|
/// - Moves local aliases
|
||||||
/// - Modifies old room power levels to prevent users from speaking
|
/// - Modifies old room power levels to prevent users from speaking
|
||||||
pub async fn upgrade_room_route(
|
pub async fn upgrade_room_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<upgrade_room::v3::Request>,
|
||||||
body: Ruma<upgrade_room::v3::IncomingRequest>,
|
|
||||||
) -> Result<upgrade_room::v3::Response> {
|
) -> Result<upgrade_room::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
if !db.rooms.is_supported_version(&db, &body.new_version) {
|
if !services()
|
||||||
|
.globals
|
||||||
|
.supported_room_versions()
|
||||||
|
.contains(&body.new_version)
|
||||||
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::UnsupportedRoomVersion,
|
ErrorKind::UnsupportedRoomVersion,
|
||||||
"This server does not support that room version.",
|
"This server does not support that room version.",
|
||||||
|
@ -492,12 +504,15 @@ pub async fn upgrade_room_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create a replacement room
|
// Create a replacement room
|
||||||
let replacement_room = RoomId::new(db.globals.server_name());
|
let replacement_room = RoomId::new(services().globals.server_name());
|
||||||
db.rooms
|
services()
|
||||||
.get_or_create_shortroomid(&replacement_room, &db.globals)?;
|
.rooms
|
||||||
|
.short
|
||||||
|
.get_or_create_shortroomid(&replacement_room)?;
|
||||||
|
|
||||||
let mutex_state = Arc::clone(
|
let mutex_state = Arc::clone(
|
||||||
db.globals
|
services()
|
||||||
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -508,7 +523,7 @@ pub async fn upgrade_room_route(
|
||||||
|
|
||||||
// Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further
|
// Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further
|
||||||
// Fail if the sender does not have the required permissions
|
// Fail if the sender does not have the required permissions
|
||||||
let tombstone_event_id = db.rooms.build_and_append_pdu(
|
let tombstone_event_id = services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomTombstone,
|
event_type: RoomEventType::RoomTombstone,
|
||||||
content: to_raw_value(&RoomTombstoneEventContent {
|
content: to_raw_value(&RoomTombstoneEventContent {
|
||||||
|
@ -522,14 +537,14 @@ pub async fn upgrade_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// Change lock to replacement room
|
// Change lock to replacement room
|
||||||
drop(state_lock);
|
drop(state_lock);
|
||||||
let mutex_state = Arc::clone(
|
let mutex_state = Arc::clone(
|
||||||
db.globals
|
services()
|
||||||
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -540,7 +555,9 @@ pub async fn upgrade_room_route(
|
||||||
|
|
||||||
// Get the old room creation event
|
// Get the old room creation event
|
||||||
let mut create_event_content = serde_json::from_str::<CanonicalJsonObject>(
|
let mut create_event_content = serde_json::from_str::<CanonicalJsonObject>(
|
||||||
db.rooms
|
services()
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
.room_state_get(&body.room_id, &StateEventType::RoomCreate, "")?
|
.room_state_get(&body.room_id, &StateEventType::RoomCreate, "")?
|
||||||
.ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
|
.ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
|
||||||
.content
|
.content
|
||||||
|
@ -588,7 +605,7 @@ pub async fn upgrade_room_route(
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
db.rooms.build_and_append_pdu(
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomCreate,
|
event_type: RoomEventType::RoomCreate,
|
||||||
content: to_raw_value(&create_event_content)
|
content: to_raw_value(&create_event_content)
|
||||||
|
@ -599,21 +616,20 @@ pub async fn upgrade_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&replacement_room,
|
&replacement_room,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
// Join the new room
|
// Join the new room
|
||||||
db.rooms.build_and_append_pdu(
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomMember,
|
event_type: RoomEventType::RoomMember,
|
||||||
content: to_raw_value(&RoomMemberEventContent {
|
content: to_raw_value(&RoomMemberEventContent {
|
||||||
membership: MembershipState::Join,
|
membership: MembershipState::Join,
|
||||||
displayname: db.users.displayname(sender_user)?,
|
displayname: services().users.displayname(sender_user)?,
|
||||||
avatar_url: db.users.avatar_url(sender_user)?,
|
avatar_url: services().users.avatar_url(sender_user)?,
|
||||||
is_direct: None,
|
is_direct: None,
|
||||||
third_party_invite: None,
|
third_party_invite: None,
|
||||||
blurhash: db.users.blurhash(sender_user)?,
|
blurhash: services().users.blurhash(sender_user)?,
|
||||||
reason: None,
|
reason: None,
|
||||||
join_authorized_via_users_server: None,
|
join_authorized_via_users_server: None,
|
||||||
})
|
})
|
||||||
|
@ -624,7 +640,6 @@ pub async fn upgrade_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&replacement_room,
|
&replacement_room,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
|
@ -643,12 +658,17 @@ pub async fn upgrade_room_route(
|
||||||
|
|
||||||
// Replicate transferable state events to the new room
|
// Replicate transferable state events to the new room
|
||||||
for event_type in transferable_state_events {
|
for event_type in transferable_state_events {
|
||||||
let event_content = match db.rooms.room_state_get(&body.room_id, &event_type, "")? {
|
let event_content =
|
||||||
|
match services()
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.room_state_get(&body.room_id, &event_type, "")?
|
||||||
|
{
|
||||||
Some(v) => v.content.clone(),
|
Some(v) => v.content.clone(),
|
||||||
None => continue, // Skipping missing events.
|
None => continue, // Skipping missing events.
|
||||||
};
|
};
|
||||||
|
|
||||||
db.rooms.build_and_append_pdu(
|
services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: event_type.to_string().into(),
|
event_type: event_type.to_string().into(),
|
||||||
content: event_content,
|
content: event_content,
|
||||||
|
@ -658,20 +678,28 @@ pub async fn upgrade_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&replacement_room,
|
&replacement_room,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Moves any local aliases to the new room
|
// Moves any local aliases to the new room
|
||||||
for alias in db.rooms.room_aliases(&body.room_id).filter_map(|r| r.ok()) {
|
for alias in services()
|
||||||
db.rooms
|
.rooms
|
||||||
.set_alias(&alias, Some(&replacement_room), &db.globals)?;
|
.alias
|
||||||
|
.local_aliases_for_room(&body.room_id)
|
||||||
|
.filter_map(|r| r.ok())
|
||||||
|
{
|
||||||
|
services()
|
||||||
|
.rooms
|
||||||
|
.alias
|
||||||
|
.set_alias(&alias, &replacement_room)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Get the old room power levels
|
// Get the old room power levels
|
||||||
let mut power_levels_event_content: RoomPowerLevelsEventContent = serde_json::from_str(
|
let mut power_levels_event_content: RoomPowerLevelsEventContent = serde_json::from_str(
|
||||||
db.rooms
|
services()
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
.room_state_get(&body.room_id, &StateEventType::RoomPowerLevels, "")?
|
.room_state_get(&body.room_id, &StateEventType::RoomPowerLevels, "")?
|
||||||
.ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
|
.ok_or_else(|| Error::bad_database("Found room without m.room.create event."))?
|
||||||
.content
|
.content
|
||||||
|
@ -685,7 +713,7 @@ pub async fn upgrade_room_route(
|
||||||
power_levels_event_content.invite = new_level;
|
power_levels_event_content.invite = new_level;
|
||||||
|
|
||||||
// Modify the power levels in the old room to prevent sending of events and inviting new users
|
// Modify the power levels in the old room to prevent sending of events and inviting new users
|
||||||
let _ = db.rooms.build_and_append_pdu(
|
let _ = services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: RoomEventType::RoomPowerLevels,
|
event_type: RoomEventType::RoomPowerLevels,
|
||||||
content: to_raw_value(&power_levels_event_content)
|
content: to_raw_value(&power_levels_event_content)
|
||||||
|
@ -696,14 +724,11 @@ pub async fn upgrade_room_route(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
&db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
||||||
drop(state_lock);
|
drop(state_lock);
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
// Return the replacement room id
|
// Return the replacement room id
|
||||||
Ok(upgrade_room::v3::Response { replacement_room })
|
Ok(upgrade_room::v3::Response { replacement_room })
|
||||||
}
|
}
|
|
@ -1,4 +1,4 @@
|
||||||
use crate::{database::DatabaseGuard, Error, Result, Ruma};
|
use crate::{services, Error, Result, Ruma};
|
||||||
use ruma::api::client::{
|
use ruma::api::client::{
|
||||||
error::ErrorKind,
|
error::ErrorKind,
|
||||||
search::search_events::{
|
search::search_events::{
|
||||||
|
@ -15,8 +15,7 @@ use std::collections::BTreeMap;
|
||||||
///
|
///
|
||||||
/// - Only works if the user is currently joined to the room (TODO: Respect history visibility)
|
/// - Only works if the user is currently joined to the room (TODO: Respect history visibility)
|
||||||
pub async fn search_events_route(
|
pub async fn search_events_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<search_events::v3::Request>,
|
||||||
body: Ruma<search_events::v3::IncomingRequest>,
|
|
||||||
) -> Result<search_events::v3::Response> {
|
) -> Result<search_events::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
@ -24,7 +23,9 @@ pub async fn search_events_route(
|
||||||
let filter = &search_criteria.filter;
|
let filter = &search_criteria.filter;
|
||||||
|
|
||||||
let room_ids = filter.rooms.clone().unwrap_or_else(|| {
|
let room_ids = filter.rooms.clone().unwrap_or_else(|| {
|
||||||
db.rooms
|
services()
|
||||||
|
.rooms
|
||||||
|
.state_cache
|
||||||
.rooms_joined(sender_user)
|
.rooms_joined(sender_user)
|
||||||
.filter_map(|r| r.ok())
|
.filter_map(|r| r.ok())
|
||||||
.collect()
|
.collect()
|
||||||
|
@ -35,15 +36,20 @@ pub async fn search_events_route(
|
||||||
let mut searches = Vec::new();
|
let mut searches = Vec::new();
|
||||||
|
|
||||||
for room_id in room_ids {
|
for room_id in room_ids {
|
||||||
if !db.rooms.is_joined(sender_user, &room_id)? {
|
if !services()
|
||||||
|
.rooms
|
||||||
|
.state_cache
|
||||||
|
.is_joined(sender_user, &room_id)?
|
||||||
|
{
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
"You don't have permission to view this room.",
|
"You don't have permission to view this room.",
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Some(search) = db
|
if let Some(search) = services()
|
||||||
.rooms
|
.rooms
|
||||||
|
.search
|
||||||
.search_pdus(&room_id, &search_criteria.search_term)?
|
.search_pdus(&room_id, &search_criteria.search_term)?
|
||||||
{
|
{
|
||||||
searches.push(search.0.peekable());
|
searches.push(search.0.peekable());
|
||||||
|
@ -85,8 +91,9 @@ pub async fn search_events_route(
|
||||||
start: None,
|
start: None,
|
||||||
},
|
},
|
||||||
rank: None,
|
rank: None,
|
||||||
result: db
|
result: services()
|
||||||
.rooms
|
.rooms
|
||||||
|
.timeline
|
||||||
.get_pdu_from_id(result)?
|
.get_pdu_from_id(result)?
|
||||||
.map(|pdu| pdu.to_room_event()),
|
.map(|pdu| pdu.to_room_event()),
|
||||||
})
|
})
|
||||||
|
@ -96,7 +103,7 @@ pub async fn search_events_route(
|
||||||
.take(limit)
|
.take(limit)
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
let next_batch = if results.len() < limit as usize {
|
let next_batch = if results.len() < limit {
|
||||||
None
|
None
|
||||||
} else {
|
} else {
|
||||||
Some((skip + limit).to_string())
|
Some((skip + limit).to_string())
|
|
@ -1,10 +1,10 @@
|
||||||
use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH};
|
use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH};
|
||||||
use crate::{database::DatabaseGuard, utils, Error, Result, Ruma};
|
use crate::{services, utils, Error, Result, Ruma};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::{
|
api::client::{
|
||||||
error::ErrorKind,
|
error::ErrorKind,
|
||||||
session::{get_login_types, login, logout, logout_all},
|
session::{get_login_types, login, logout, logout_all},
|
||||||
uiaa::IncomingUserIdentifier,
|
uiaa::UserIdentifier,
|
||||||
},
|
},
|
||||||
UserId,
|
UserId,
|
||||||
};
|
};
|
||||||
|
@ -22,7 +22,7 @@ struct Claims {
|
||||||
/// Get the supported login types of this server. One of these should be used as the `type` field
|
/// Get the supported login types of this server. One of these should be used as the `type` field
|
||||||
/// when logging in.
|
/// when logging in.
|
||||||
pub async fn get_login_types_route(
|
pub async fn get_login_types_route(
|
||||||
_body: Ruma<get_login_types::v3::IncomingRequest>,
|
_body: Ruma<get_login_types::v3::Request>,
|
||||||
) -> Result<get_login_types::v3::Response> {
|
) -> Result<get_login_types::v3::Response> {
|
||||||
Ok(get_login_types::v3::Response::new(vec![
|
Ok(get_login_types::v3::Response::new(vec![
|
||||||
get_login_types::v3::LoginType::Password(Default::default()),
|
get_login_types::v3::LoginType::Password(Default::default()),
|
||||||
|
@ -40,28 +40,28 @@ pub async fn get_login_types_route(
|
||||||
///
|
///
|
||||||
/// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see
|
/// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see
|
||||||
/// supported login types.
|
/// supported login types.
|
||||||
pub async fn login_route(
|
pub async fn login_route(body: Ruma<login::v3::Request>) -> Result<login::v3::Response> {
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<login::v3::IncomingRequest>,
|
|
||||||
) -> Result<login::v3::Response> {
|
|
||||||
// Validate login method
|
// Validate login method
|
||||||
// TODO: Other login methods
|
// TODO: Other login methods
|
||||||
let user_id = match &body.login_info {
|
let user_id = match &body.login_info {
|
||||||
login::v3::IncomingLoginInfo::Password(login::v3::IncomingPassword {
|
login::v3::LoginInfo::Password(login::v3::Password {
|
||||||
identifier,
|
identifier,
|
||||||
password,
|
password,
|
||||||
}) => {
|
}) => {
|
||||||
let username = if let IncomingUserIdentifier::UserIdOrLocalpart(user_id) = identifier {
|
let username = if let UserIdentifier::UserIdOrLocalpart(user_id) = identifier {
|
||||||
user_id.to_lowercase()
|
user_id.to_lowercase()
|
||||||
} else {
|
} else {
|
||||||
return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
|
return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type."));
|
||||||
};
|
};
|
||||||
let user_id =
|
let user_id =
|
||||||
UserId::parse_with_server_name(username.to_owned(), db.globals.server_name())
|
UserId::parse_with_server_name(username, services().globals.server_name())
|
||||||
.map_err(|_| {
|
.map_err(|_| {
|
||||||
Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
|
Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.")
|
||||||
})?;
|
})?;
|
||||||
let hash = db.users.password_hash(&user_id)?.ok_or(Error::BadRequest(
|
let hash = services()
|
||||||
|
.users
|
||||||
|
.password_hash(&user_id)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
"Wrong username or password.",
|
"Wrong username or password.",
|
||||||
))?;
|
))?;
|
||||||
|
@ -84,16 +84,16 @@ pub async fn login_route(
|
||||||
|
|
||||||
user_id
|
user_id
|
||||||
}
|
}
|
||||||
login::v3::IncomingLoginInfo::Token(login::v3::IncomingToken { token }) => {
|
login::v3::LoginInfo::Token(login::v3::Token { token }) => {
|
||||||
if let Some(jwt_decoding_key) = db.globals.jwt_decoding_key() {
|
if let Some(jwt_decoding_key) = services().globals.jwt_decoding_key() {
|
||||||
let token = jsonwebtoken::decode::<Claims>(
|
let token = jsonwebtoken::decode::<Claims>(
|
||||||
token,
|
token,
|
||||||
jwt_decoding_key,
|
jwt_decoding_key,
|
||||||
&jsonwebtoken::Validation::default(),
|
&jsonwebtoken::Validation::default(),
|
||||||
)
|
)
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Token is invalid."))?;
|
.map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Token is invalid."))?;
|
||||||
let username = token.claims.sub;
|
let username = token.claims.sub.to_lowercase();
|
||||||
UserId::parse_with_server_name(username, db.globals.server_name()).map_err(
|
UserId::parse_with_server_name(username, services().globals.server_name()).map_err(
|
||||||
|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."),
|
|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."),
|
||||||
)?
|
)?
|
||||||
} else {
|
} else {
|
||||||
|
@ -122,15 +122,16 @@ pub async fn login_route(
|
||||||
|
|
||||||
// Determine if device_id was provided and exists in the db for this user
|
// Determine if device_id was provided and exists in the db for this user
|
||||||
let device_exists = body.device_id.as_ref().map_or(false, |device_id| {
|
let device_exists = body.device_id.as_ref().map_or(false, |device_id| {
|
||||||
db.users
|
services()
|
||||||
|
.users
|
||||||
.all_device_ids(&user_id)
|
.all_device_ids(&user_id)
|
||||||
.any(|x| x.as_ref().map_or(false, |v| v == device_id))
|
.any(|x| x.as_ref().map_or(false, |v| v == device_id))
|
||||||
});
|
});
|
||||||
|
|
||||||
if device_exists {
|
if device_exists {
|
||||||
db.users.set_token(&user_id, &device_id, &token)?;
|
services().users.set_token(&user_id, &device_id, &token)?;
|
||||||
} else {
|
} else {
|
||||||
db.users.create_device(
|
services().users.create_device(
|
||||||
&user_id,
|
&user_id,
|
||||||
&device_id,
|
&device_id,
|
||||||
&token,
|
&token,
|
||||||
|
@ -140,14 +141,14 @@ pub async fn login_route(
|
||||||
|
|
||||||
info!("{} logged in", user_id);
|
info!("{} logged in", user_id);
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(login::v3::Response {
|
Ok(login::v3::Response {
|
||||||
user_id,
|
user_id,
|
||||||
access_token: token,
|
access_token: token,
|
||||||
home_server: Some(db.globals.server_name().to_owned()),
|
home_server: Some(services().globals.server_name().to_owned()),
|
||||||
device_id,
|
device_id,
|
||||||
well_known: None,
|
well_known: None,
|
||||||
|
refresh_token: None,
|
||||||
|
expires_in: None,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -159,16 +160,11 @@ pub async fn login_route(
|
||||||
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
|
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
|
||||||
/// - Forgets to-device events
|
/// - Forgets to-device events
|
||||||
/// - Triggers device list updates
|
/// - Triggers device list updates
|
||||||
pub async fn logout_route(
|
pub async fn logout_route(body: Ruma<logout::v3::Request>) -> Result<logout::v3::Response> {
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<logout::v3::Request>,
|
|
||||||
) -> Result<logout::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
db.users.remove_device(sender_user, sender_device)?;
|
services().users.remove_device(sender_user, sender_device)?;
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(logout::v3::Response::new())
|
Ok(logout::v3::Response::new())
|
||||||
}
|
}
|
||||||
|
@ -185,16 +181,13 @@ pub async fn logout_route(
|
||||||
/// Note: This is equivalent to calling [`GET /_matrix/client/r0/logout`](fn.logout_route.html)
|
/// Note: This is equivalent to calling [`GET /_matrix/client/r0/logout`](fn.logout_route.html)
|
||||||
/// from each device of this user.
|
/// from each device of this user.
|
||||||
pub async fn logout_all_route(
|
pub async fn logout_all_route(
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<logout_all::v3::Request>,
|
body: Ruma<logout_all::v3::Request>,
|
||||||
) -> Result<logout_all::v3::Response> {
|
) -> Result<logout_all::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
for device_id in db.users.all_device_ids(sender_user).flatten() {
|
for device_id in services().users.all_device_ids(sender_user).flatten() {
|
||||||
db.users.remove_device(sender_user, &device_id)?;
|
services().users.remove_device(sender_user, &device_id)?;
|
||||||
}
|
}
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(logout_all::v3::Response::new())
|
Ok(logout_all::v3::Response::new())
|
||||||
}
|
}
|
|
@ -1,8 +1,6 @@
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
|
||||||
use crate::{
|
use crate::{service::pdu::PduBuilder, services, Error, Result, Ruma, RumaResponse};
|
||||||
database::DatabaseGuard, pdu::PduBuilder, Database, Error, Result, Ruma, RumaResponse,
|
|
||||||
};
|
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::{
|
api::client::{
|
||||||
error::ErrorKind,
|
error::ErrorKind,
|
||||||
|
@ -27,13 +25,11 @@ use ruma::{
|
||||||
/// - Tries to send the event into the room, auth rules will determine if it is allowed
|
/// - Tries to send the event into the room, auth rules will determine if it is allowed
|
||||||
/// - If event is new canonical_alias: Rejects if alias is incorrect
|
/// - If event is new canonical_alias: Rejects if alias is incorrect
|
||||||
pub async fn send_state_event_for_key_route(
|
pub async fn send_state_event_for_key_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<send_state_event::v3::Request>,
|
||||||
body: Ruma<send_state_event::v3::IncomingRequest>,
|
|
||||||
) -> Result<send_state_event::v3::Response> {
|
) -> Result<send_state_event::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
let event_id = send_state_event_for_key_helper(
|
let event_id = send_state_event_for_key_helper(
|
||||||
&db,
|
|
||||||
sender_user,
|
sender_user,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
&body.event_type,
|
&body.event_type,
|
||||||
|
@ -42,8 +38,6 @@ pub async fn send_state_event_for_key_route(
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
let event_id = (*event_id).to_owned();
|
let event_id = (*event_id).to_owned();
|
||||||
Ok(send_state_event::v3::Response { event_id })
|
Ok(send_state_event::v3::Response { event_id })
|
||||||
}
|
}
|
||||||
|
@ -56,13 +50,12 @@ pub async fn send_state_event_for_key_route(
|
||||||
/// - Tries to send the event into the room, auth rules will determine if it is allowed
|
/// - Tries to send the event into the room, auth rules will determine if it is allowed
|
||||||
/// - If event is new canonical_alias: Rejects if alias is incorrect
|
/// - If event is new canonical_alias: Rejects if alias is incorrect
|
||||||
pub async fn send_state_event_for_empty_key_route(
|
pub async fn send_state_event_for_empty_key_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<send_state_event::v3::Request>,
|
||||||
body: Ruma<send_state_event::v3::IncomingRequest>,
|
|
||||||
) -> Result<RumaResponse<send_state_event::v3::Response>> {
|
) -> Result<RumaResponse<send_state_event::v3::Response>> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
// Forbid m.room.encryption if encryption is disabled
|
// Forbid m.room.encryption if encryption is disabled
|
||||||
if body.event_type == StateEventType::RoomEncryption && !db.globals.allow_encryption() {
|
if body.event_type == StateEventType::RoomEncryption && !services().globals.allow_encryption() {
|
||||||
return Err(Error::BadRequest(
|
return Err(Error::BadRequest(
|
||||||
ErrorKind::Forbidden,
|
ErrorKind::Forbidden,
|
||||||
"Encryption has been disabled",
|
"Encryption has been disabled",
|
||||||
|
@ -70,7 +63,6 @@ pub async fn send_state_event_for_empty_key_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
let event_id = send_state_event_for_key_helper(
|
let event_id = send_state_event_for_key_helper(
|
||||||
&db,
|
|
||||||
sender_user,
|
sender_user,
|
||||||
&body.room_id,
|
&body.room_id,
|
||||||
&body.event_type.to_string().into(),
|
&body.event_type.to_string().into(),
|
||||||
|
@ -79,8 +71,6 @@ pub async fn send_state_event_for_empty_key_route(
|
||||||
)
|
)
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
let event_id = (*event_id).to_owned();
|
let event_id = (*event_id).to_owned();
|
||||||
Ok(send_state_event::v3::Response { event_id }.into())
|
Ok(send_state_event::v3::Response { event_id }.into())
|
||||||
}
|
}
|
||||||
|
@ -91,17 +81,21 @@ pub async fn send_state_event_for_empty_key_route(
|
||||||
///
|
///
|
||||||
/// - If not joined: Only works if current room history visibility is world readable
|
/// - If not joined: Only works if current room history visibility is world readable
|
||||||
pub async fn get_state_events_route(
|
pub async fn get_state_events_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_state_events::v3::Request>,
|
||||||
body: Ruma<get_state_events::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_state_events::v3::Response> {
|
) -> Result<get_state_events::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
#[allow(clippy::blocks_in_if_conditions)]
|
#[allow(clippy::blocks_in_if_conditions)]
|
||||||
// Users not in the room should not be able to access the state unless history_visibility is
|
// Users not in the room should not be able to access the state unless history_visibility is
|
||||||
// WorldReadable
|
// WorldReadable
|
||||||
if !db.rooms.is_joined(sender_user, &body.room_id)?
|
if !services()
|
||||||
|
.rooms
|
||||||
|
.state_cache
|
||||||
|
.is_joined(sender_user, &body.room_id)?
|
||||||
&& !matches!(
|
&& !matches!(
|
||||||
db.rooms
|
services()
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
.room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")?
|
.room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")?
|
||||||
.map(|event| {
|
.map(|event| {
|
||||||
serde_json::from_str(event.content.get())
|
serde_json::from_str(event.content.get())
|
||||||
|
@ -122,8 +116,9 @@ pub async fn get_state_events_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
Ok(get_state_events::v3::Response {
|
Ok(get_state_events::v3::Response {
|
||||||
room_state: db
|
room_state: services()
|
||||||
.rooms
|
.rooms
|
||||||
|
.state_accessor
|
||||||
.room_state_full(&body.room_id)
|
.room_state_full(&body.room_id)
|
||||||
.await?
|
.await?
|
||||||
.values()
|
.values()
|
||||||
|
@ -138,17 +133,21 @@ pub async fn get_state_events_route(
|
||||||
///
|
///
|
||||||
/// - If not joined: Only works if current room history visibility is world readable
|
/// - If not joined: Only works if current room history visibility is world readable
|
||||||
pub async fn get_state_events_for_key_route(
|
pub async fn get_state_events_for_key_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_state_events_for_key::v3::Request>,
|
||||||
body: Ruma<get_state_events_for_key::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_state_events_for_key::v3::Response> {
|
) -> Result<get_state_events_for_key::v3::Response> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
#[allow(clippy::blocks_in_if_conditions)]
|
#[allow(clippy::blocks_in_if_conditions)]
|
||||||
// Users not in the room should not be able to access the state unless history_visibility is
|
// Users not in the room should not be able to access the state unless history_visibility is
|
||||||
// WorldReadable
|
// WorldReadable
|
||||||
if !db.rooms.is_joined(sender_user, &body.room_id)?
|
if !services()
|
||||||
|
.rooms
|
||||||
|
.state_cache
|
||||||
|
.is_joined(sender_user, &body.room_id)?
|
||||||
&& !matches!(
|
&& !matches!(
|
||||||
db.rooms
|
services()
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
.room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")?
|
.room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")?
|
||||||
.map(|event| {
|
.map(|event| {
|
||||||
serde_json::from_str(event.content.get())
|
serde_json::from_str(event.content.get())
|
||||||
|
@ -168,8 +167,9 @@ pub async fn get_state_events_for_key_route(
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let event = db
|
let event = services()
|
||||||
.rooms
|
.rooms
|
||||||
|
.state_accessor
|
||||||
.room_state_get(&body.room_id, &body.event_type, &body.state_key)?
|
.room_state_get(&body.room_id, &body.event_type, &body.state_key)?
|
||||||
.ok_or(Error::BadRequest(
|
.ok_or(Error::BadRequest(
|
||||||
ErrorKind::NotFound,
|
ErrorKind::NotFound,
|
||||||
|
@ -188,17 +188,21 @@ pub async fn get_state_events_for_key_route(
|
||||||
///
|
///
|
||||||
/// - If not joined: Only works if current room history visibility is world readable
|
/// - If not joined: Only works if current room history visibility is world readable
|
||||||
pub async fn get_state_events_for_empty_key_route(
|
pub async fn get_state_events_for_empty_key_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<get_state_events_for_key::v3::Request>,
|
||||||
body: Ruma<get_state_events_for_key::v3::IncomingRequest>,
|
|
||||||
) -> Result<RumaResponse<get_state_events_for_key::v3::Response>> {
|
) -> Result<RumaResponse<get_state_events_for_key::v3::Response>> {
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
#[allow(clippy::blocks_in_if_conditions)]
|
#[allow(clippy::blocks_in_if_conditions)]
|
||||||
// Users not in the room should not be able to access the state unless history_visibility is
|
// Users not in the room should not be able to access the state unless history_visibility is
|
||||||
// WorldReadable
|
// WorldReadable
|
||||||
if !db.rooms.is_joined(sender_user, &body.room_id)?
|
if !services()
|
||||||
|
.rooms
|
||||||
|
.state_cache
|
||||||
|
.is_joined(sender_user, &body.room_id)?
|
||||||
&& !matches!(
|
&& !matches!(
|
||||||
db.rooms
|
services()
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
.room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")?
|
.room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")?
|
||||||
.map(|event| {
|
.map(|event| {
|
||||||
serde_json::from_str(event.content.get())
|
serde_json::from_str(event.content.get())
|
||||||
|
@ -218,8 +222,9 @@ pub async fn get_state_events_for_empty_key_route(
|
||||||
));
|
));
|
||||||
}
|
}
|
||||||
|
|
||||||
let event = db
|
let event = services()
|
||||||
.rooms
|
.rooms
|
||||||
|
.state_accessor
|
||||||
.room_state_get(&body.room_id, &body.event_type, "")?
|
.room_state_get(&body.room_id, &body.event_type, "")?
|
||||||
.ok_or(Error::BadRequest(
|
.ok_or(Error::BadRequest(
|
||||||
ErrorKind::NotFound,
|
ErrorKind::NotFound,
|
||||||
|
@ -234,7 +239,6 @@ pub async fn get_state_events_for_empty_key_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn send_state_event_for_key_helper(
|
async fn send_state_event_for_key_helper(
|
||||||
db: &Database,
|
|
||||||
sender: &UserId,
|
sender: &UserId,
|
||||||
room_id: &RoomId,
|
room_id: &RoomId,
|
||||||
event_type: &StateEventType,
|
event_type: &StateEventType,
|
||||||
|
@ -255,10 +259,11 @@ async fn send_state_event_for_key_helper(
|
||||||
}
|
}
|
||||||
|
|
||||||
for alias in aliases {
|
for alias in aliases {
|
||||||
if alias.server_name() != db.globals.server_name()
|
if alias.server_name() != services().globals.server_name()
|
||||||
|| db
|
|| services()
|
||||||
.rooms
|
.rooms
|
||||||
.id_from_alias(&alias)?
|
.alias
|
||||||
|
.resolve_local_alias(&alias)?
|
||||||
.filter(|room| room == room_id) // Make sure it's the right room
|
.filter(|room| room == room_id) // Make sure it's the right room
|
||||||
.is_none()
|
.is_none()
|
||||||
{
|
{
|
||||||
|
@ -272,7 +277,8 @@ async fn send_state_event_for_key_helper(
|
||||||
}
|
}
|
||||||
|
|
||||||
let mutex_state = Arc::clone(
|
let mutex_state = Arc::clone(
|
||||||
db.globals
|
services()
|
||||||
|
.globals
|
||||||
.roomid_mutex_state
|
.roomid_mutex_state
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -281,7 +287,7 @@ async fn send_state_event_for_key_helper(
|
||||||
);
|
);
|
||||||
let state_lock = mutex_state.lock().await;
|
let state_lock = mutex_state.lock().await;
|
||||||
|
|
||||||
let event_id = db.rooms.build_and_append_pdu(
|
let event_id = services().rooms.timeline.build_and_append_pdu(
|
||||||
PduBuilder {
|
PduBuilder {
|
||||||
event_type: event_type.to_string().into(),
|
event_type: event_type.to_string().into(),
|
||||||
content: serde_json::from_str(json.json().get()).expect("content is valid json"),
|
content: serde_json::from_str(json.json().get()).expect("content is valid json"),
|
||||||
|
@ -291,7 +297,6 @@ async fn send_state_event_for_key_helper(
|
||||||
},
|
},
|
||||||
sender_user,
|
sender_user,
|
||||||
room_id,
|
room_id,
|
||||||
db,
|
|
||||||
&state_lock,
|
&state_lock,
|
||||||
)?;
|
)?;
|
||||||
|
|
|
@ -1,8 +1,8 @@
|
||||||
use crate::{database::DatabaseGuard, Database, Error, Result, Ruma, RumaResponse};
|
use crate::{services, Error, Result, Ruma, RumaResponse};
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::{
|
api::client::{
|
||||||
filter::{IncomingFilterDefinition, LazyLoadOptions},
|
filter::{FilterDefinition, LazyLoadOptions},
|
||||||
sync::sync_events,
|
sync::sync_events::{self, DeviceLists, UnreadNotificationsCount},
|
||||||
uiaa::UiaaResponse,
|
uiaa::UiaaResponse,
|
||||||
},
|
},
|
||||||
events::{
|
events::{
|
||||||
|
@ -10,7 +10,7 @@ use ruma::{
|
||||||
RoomEventType, StateEventType,
|
RoomEventType, StateEventType,
|
||||||
},
|
},
|
||||||
serde::Raw,
|
serde::Raw,
|
||||||
DeviceId, RoomId, UserId,
|
OwnedDeviceId, OwnedUserId, RoomId, UserId,
|
||||||
};
|
};
|
||||||
use std::{
|
use std::{
|
||||||
collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
|
collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
|
||||||
|
@ -55,16 +55,13 @@ use tracing::error;
|
||||||
/// - Sync is handled in an async task, multiple requests from the same device with the same
|
/// - Sync is handled in an async task, multiple requests from the same device with the same
|
||||||
/// `since` will be cached
|
/// `since` will be cached
|
||||||
pub async fn sync_events_route(
|
pub async fn sync_events_route(
|
||||||
db: DatabaseGuard,
|
body: Ruma<sync_events::v3::Request>,
|
||||||
body: Ruma<sync_events::v3::IncomingRequest>,
|
|
||||||
) -> Result<sync_events::v3::Response, RumaResponse<UiaaResponse>> {
|
) -> Result<sync_events::v3::Response, RumaResponse<UiaaResponse>> {
|
||||||
let sender_user = body.sender_user.expect("user is authenticated");
|
let sender_user = body.sender_user.expect("user is authenticated");
|
||||||
let sender_device = body.sender_device.expect("user is authenticated");
|
let sender_device = body.sender_device.expect("user is authenticated");
|
||||||
let body = body.body;
|
let body = body.body;
|
||||||
|
|
||||||
let arc_db = Arc::new(db);
|
let mut rx = match services()
|
||||||
|
|
||||||
let mut rx = match arc_db
|
|
||||||
.globals
|
.globals
|
||||||
.sync_receivers
|
.sync_receivers
|
||||||
.write()
|
.write()
|
||||||
|
@ -77,7 +74,6 @@ pub async fn sync_events_route(
|
||||||
v.insert((body.since.to_owned(), rx.clone()));
|
v.insert((body.since.to_owned(), rx.clone()));
|
||||||
|
|
||||||
tokio::spawn(sync_helper_wrapper(
|
tokio::spawn(sync_helper_wrapper(
|
||||||
Arc::clone(&arc_db),
|
|
||||||
sender_user.clone(),
|
sender_user.clone(),
|
||||||
sender_device.clone(),
|
sender_device.clone(),
|
||||||
body,
|
body,
|
||||||
|
@ -93,7 +89,6 @@ pub async fn sync_events_route(
|
||||||
o.insert((body.since.clone(), rx.clone()));
|
o.insert((body.since.clone(), rx.clone()));
|
||||||
|
|
||||||
tokio::spawn(sync_helper_wrapper(
|
tokio::spawn(sync_helper_wrapper(
|
||||||
Arc::clone(&arc_db),
|
|
||||||
sender_user.clone(),
|
sender_user.clone(),
|
||||||
sender_device.clone(),
|
sender_device.clone(),
|
||||||
body,
|
body,
|
||||||
|
@ -127,25 +122,18 @@ pub async fn sync_events_route(
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn sync_helper_wrapper(
|
async fn sync_helper_wrapper(
|
||||||
db: Arc<DatabaseGuard>,
|
sender_user: OwnedUserId,
|
||||||
sender_user: Box<UserId>,
|
sender_device: OwnedDeviceId,
|
||||||
sender_device: Box<DeviceId>,
|
body: sync_events::v3::Request,
|
||||||
body: sync_events::v3::IncomingRequest,
|
|
||||||
tx: Sender<Option<Result<sync_events::v3::Response>>>,
|
tx: Sender<Option<Result<sync_events::v3::Response>>>,
|
||||||
) {
|
) {
|
||||||
let since = body.since.clone();
|
let since = body.since.clone();
|
||||||
|
|
||||||
let r = sync_helper(
|
let r = sync_helper(sender_user.clone(), sender_device.clone(), body).await;
|
||||||
Arc::clone(&db),
|
|
||||||
sender_user.clone(),
|
|
||||||
sender_device.clone(),
|
|
||||||
body,
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
|
|
||||||
if let Ok((_, caching_allowed)) = r {
|
if let Ok((_, caching_allowed)) = r {
|
||||||
if !caching_allowed {
|
if !caching_allowed {
|
||||||
match db
|
match services()
|
||||||
.globals
|
.globals
|
||||||
.sync_receivers
|
.sync_receivers
|
||||||
.write()
|
.write()
|
||||||
|
@ -163,38 +151,34 @@ async fn sync_helper_wrapper(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
drop(db);
|
|
||||||
|
|
||||||
let _ = tx.send(Some(r.map(|(r, _)| r)));
|
let _ = tx.send(Some(r.map(|(r, _)| r)));
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn sync_helper(
|
async fn sync_helper(
|
||||||
db: Arc<DatabaseGuard>,
|
sender_user: OwnedUserId,
|
||||||
sender_user: Box<UserId>,
|
sender_device: OwnedDeviceId,
|
||||||
sender_device: Box<DeviceId>,
|
body: sync_events::v3::Request,
|
||||||
body: sync_events::v3::IncomingRequest,
|
|
||||||
// bool = caching allowed
|
// bool = caching allowed
|
||||||
) -> Result<(sync_events::v3::Response, bool), Error> {
|
) -> Result<(sync_events::v3::Response, bool), Error> {
|
||||||
use sync_events::v3::{
|
use sync_events::v3::{
|
||||||
DeviceLists, Ephemeral, GlobalAccountData, IncomingFilter, InviteState, InvitedRoom,
|
Ephemeral, Filter, GlobalAccountData, InviteState, InvitedRoom, JoinedRoom, LeftRoom,
|
||||||
JoinedRoom, LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State, Timeline,
|
Presence, RoomAccountData, RoomSummary, Rooms, State, Timeline, ToDevice,
|
||||||
ToDevice, UnreadNotificationsCount,
|
|
||||||
};
|
};
|
||||||
|
|
||||||
// TODO: match body.set_presence {
|
// TODO: match body.set_presence {
|
||||||
db.rooms.edus.ping_presence(&sender_user)?;
|
services().rooms.edus.presence.ping_presence(&sender_user)?;
|
||||||
|
|
||||||
// Setup watchers, so if there's no response, we can wait for them
|
// Setup watchers, so if there's no response, we can wait for them
|
||||||
let watcher = db.watch(&sender_user, &sender_device);
|
let watcher = services().globals.watch(&sender_user, &sender_device);
|
||||||
|
|
||||||
let next_batch = db.globals.current_count()?;
|
let next_batch = services().globals.current_count()?;
|
||||||
let next_batch_string = next_batch.to_string();
|
let next_batch_string = next_batch.to_string();
|
||||||
|
|
||||||
// Load filter
|
// Load filter
|
||||||
let filter = match body.filter {
|
let filter = match body.filter {
|
||||||
None => IncomingFilterDefinition::default(),
|
None => FilterDefinition::default(),
|
||||||
Some(IncomingFilter::FilterDefinition(filter)) => filter,
|
Some(Filter::FilterDefinition(filter)) => filter,
|
||||||
Some(IncomingFilter::FilterId(filter_id)) => db
|
Some(Filter::FilterId(filter_id)) => services()
|
||||||
.users
|
.users
|
||||||
.get_filter(&sender_user, &filter_id)?
|
.get_filter(&sender_user, &filter_id)?
|
||||||
.unwrap_or_default(),
|
.unwrap_or_default(),
|
||||||
|
@ -221,12 +205,17 @@ async fn sync_helper(
|
||||||
|
|
||||||
// Look for device list updates of this account
|
// Look for device list updates of this account
|
||||||
device_list_updates.extend(
|
device_list_updates.extend(
|
||||||
db.users
|
services()
|
||||||
.keys_changed(&sender_user.to_string(), since, None)
|
.users
|
||||||
|
.keys_changed(sender_user.as_ref(), since, None)
|
||||||
.filter_map(|r| r.ok()),
|
.filter_map(|r| r.ok()),
|
||||||
);
|
);
|
||||||
|
|
||||||
let all_joined_rooms = db.rooms.rooms_joined(&sender_user).collect::<Vec<_>>();
|
let all_joined_rooms = services()
|
||||||
|
.rooms
|
||||||
|
.state_cache
|
||||||
|
.rooms_joined(&sender_user)
|
||||||
|
.collect::<Vec<_>>();
|
||||||
for room_id in all_joined_rooms {
|
for room_id in all_joined_rooms {
|
||||||
let room_id = room_id?;
|
let room_id = room_id?;
|
||||||
|
|
||||||
|
@ -234,7 +223,8 @@ async fn sync_helper(
|
||||||
// Get and drop the lock to wait for remaining operations to finish
|
// Get and drop the lock to wait for remaining operations to finish
|
||||||
// This will make sure the we have all events until next_batch
|
// This will make sure the we have all events until next_batch
|
||||||
let mutex_insert = Arc::clone(
|
let mutex_insert = Arc::clone(
|
||||||
db.globals
|
services()
|
||||||
|
.globals
|
||||||
.roomid_mutex_insert
|
.roomid_mutex_insert
|
||||||
.write()
|
.write()
|
||||||
.unwrap()
|
.unwrap()
|
||||||
|
@ -247,9 +237,15 @@ async fn sync_helper(
|
||||||
|
|
||||||
let timeline_pdus;
|
let timeline_pdus;
|
||||||
let limited;
|
let limited;
|
||||||
if db.rooms.last_timeline_count(&sender_user, &room_id)? > since {
|
if services()
|
||||||
let mut non_timeline_pdus = db
|
|
||||||
.rooms
|
.rooms
|
||||||
|
.timeline
|
||||||
|
.last_timeline_count(&sender_user, &room_id)?
|
||||||
|
> since
|
||||||
|
{
|
||||||
|
let mut non_timeline_pdus = services()
|
||||||
|
.rooms
|
||||||
|
.timeline
|
||||||
.pdus_until(&sender_user, &room_id, u64::MAX)?
|
.pdus_until(&sender_user, &room_id, u64::MAX)?
|
||||||
.filter_map(|r| {
|
.filter_map(|r| {
|
||||||
// Filter out buggy events
|
// Filter out buggy events
|
||||||
|
@ -259,7 +255,9 @@ async fn sync_helper(
|
||||||
r.ok()
|
r.ok()
|
||||||
})
|
})
|
||||||
.take_while(|(pduid, _)| {
|
.take_while(|(pduid, _)| {
|
||||||
db.rooms
|
services()
|
||||||
|
.rooms
|
||||||
|
.timeline
|
||||||
.pdu_count(pduid)
|
.pdu_count(pduid)
|
||||||
.map_or(false, |count| count > since)
|
.map_or(false, |count| count > since)
|
||||||
});
|
});
|
||||||
|
@ -282,10 +280,10 @@ async fn sync_helper(
|
||||||
}
|
}
|
||||||
|
|
||||||
let send_notification_counts = !timeline_pdus.is_empty()
|
let send_notification_counts = !timeline_pdus.is_empty()
|
||||||
|| db
|
|| services()
|
||||||
.rooms
|
.rooms
|
||||||
.edus
|
.user
|
||||||
.last_privateread_update(&sender_user, &room_id)?
|
.last_notification_read(&sender_user, &room_id)?
|
||||||
> since;
|
> since;
|
||||||
|
|
||||||
let mut timeline_users = HashSet::new();
|
let mut timeline_users = HashSet::new();
|
||||||
|
@ -293,24 +291,40 @@ async fn sync_helper(
|
||||||
timeline_users.insert(event.sender.as_str().to_owned());
|
timeline_users.insert(event.sender.as_str().to_owned());
|
||||||
}
|
}
|
||||||
|
|
||||||
db.rooms
|
services().rooms.lazy_loading.lazy_load_confirm_delivery(
|
||||||
.lazy_load_confirm_delivery(&sender_user, &sender_device, &room_id, since)?;
|
&sender_user,
|
||||||
|
&sender_device,
|
||||||
|
&room_id,
|
||||||
|
since,
|
||||||
|
)?;
|
||||||
|
|
||||||
// Database queries:
|
// Database queries:
|
||||||
|
|
||||||
let current_shortstatehash = if let Some(s) = db.rooms.current_shortstatehash(&room_id)? {
|
let current_shortstatehash =
|
||||||
|
if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? {
|
||||||
s
|
s
|
||||||
} else {
|
} else {
|
||||||
error!("Room {} has no state", room_id);
|
error!("Room {} has no state", room_id);
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
|
|
||||||
let since_shortstatehash = db.rooms.get_token_shortstatehash(&room_id, since)?;
|
let since_shortstatehash = services()
|
||||||
|
.rooms
|
||||||
|
.user
|
||||||
|
.get_token_shortstatehash(&room_id, since)?;
|
||||||
|
|
||||||
// Calculates joined_member_count, invited_member_count and heroes
|
// Calculates joined_member_count, invited_member_count and heroes
|
||||||
let calculate_counts = || {
|
let calculate_counts = || {
|
||||||
let joined_member_count = db.rooms.room_joined_count(&room_id)?.unwrap_or(0);
|
let joined_member_count = services()
|
||||||
let invited_member_count = db.rooms.room_invited_count(&room_id)?.unwrap_or(0);
|
.rooms
|
||||||
|
.state_cache
|
||||||
|
.room_joined_count(&room_id)?
|
||||||
|
.unwrap_or(0);
|
||||||
|
let invited_member_count = services()
|
||||||
|
.rooms
|
||||||
|
.state_cache
|
||||||
|
.room_invited_count(&room_id)?
|
||||||
|
.unwrap_or(0);
|
||||||
|
|
||||||
// Recalculate heroes (first 5 members)
|
// Recalculate heroes (first 5 members)
|
||||||
let mut heroes = Vec::new();
|
let mut heroes = Vec::new();
|
||||||
|
@ -319,8 +333,9 @@ async fn sync_helper(
|
||||||
// Go through all PDUs and for each member event, check if the user is still joined or
|
// Go through all PDUs and for each member event, check if the user is still joined or
|
||||||
// invited until we have 5 or we reach the end
|
// invited until we have 5 or we reach the end
|
||||||
|
|
||||||
for hero in db
|
for hero in services()
|
||||||
.rooms
|
.rooms
|
||||||
|
.timeline
|
||||||
.all_pdus(&sender_user, &room_id)?
|
.all_pdus(&sender_user, &room_id)?
|
||||||
.filter_map(|pdu| pdu.ok()) // Ignore all broken pdus
|
.filter_map(|pdu| pdu.ok()) // Ignore all broken pdus
|
||||||
.filter(|(_, pdu)| pdu.kind == RoomEventType::RoomMember)
|
.filter(|(_, pdu)| pdu.kind == RoomEventType::RoomMember)
|
||||||
|
@ -339,8 +354,11 @@ async fn sync_helper(
|
||||||
if matches!(
|
if matches!(
|
||||||
content.membership,
|
content.membership,
|
||||||
MembershipState::Join | MembershipState::Invite
|
MembershipState::Join | MembershipState::Invite
|
||||||
) && (db.rooms.is_joined(&user_id, &room_id)?
|
) && (services().rooms.state_cache.is_joined(&user_id, &room_id)?
|
||||||
|| db.rooms.is_invited(&user_id, &room_id)?)
|
|| services()
|
||||||
|
.rooms
|
||||||
|
.state_cache
|
||||||
|
.is_invited(&user_id, &room_id)?)
|
||||||
{
|
{
|
||||||
Ok::<_, Error>(Some(state_key.clone()))
|
Ok::<_, Error>(Some(state_key.clone()))
|
||||||
} else {
|
} else {
|
||||||
|
@ -370,28 +388,57 @@ async fn sync_helper(
|
||||||
))
|
))
|
||||||
};
|
};
|
||||||
|
|
||||||
|
let since_sender_member: Option<RoomMemberEventContent> = since_shortstatehash
|
||||||
|
.and_then(|shortstatehash| {
|
||||||
|
services()
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.state_get(
|
||||||
|
shortstatehash,
|
||||||
|
&StateEventType::RoomMember,
|
||||||
|
sender_user.as_str(),
|
||||||
|
)
|
||||||
|
.transpose()
|
||||||
|
})
|
||||||
|
.transpose()?
|
||||||
|
.and_then(|pdu| {
|
||||||
|
serde_json::from_str(pdu.content.get())
|
||||||
|
.map_err(|_| Error::bad_database("Invalid PDU in database."))
|
||||||
|
.ok()
|
||||||
|
});
|
||||||
|
|
||||||
|
let joined_since_last_sync =
|
||||||
|
since_sender_member.map_or(true, |member| member.membership != MembershipState::Join);
|
||||||
|
|
||||||
let (
|
let (
|
||||||
heroes,
|
heroes,
|
||||||
joined_member_count,
|
joined_member_count,
|
||||||
invited_member_count,
|
invited_member_count,
|
||||||
joined_since_last_sync,
|
joined_since_last_sync,
|
||||||
state_events,
|
state_events,
|
||||||
) = if since_shortstatehash.is_none() {
|
) = if since_shortstatehash.is_none() || joined_since_last_sync {
|
||||||
// Probably since = 0, we will do an initial sync
|
// Probably since = 0, we will do an initial sync
|
||||||
|
|
||||||
let (joined_member_count, invited_member_count, heroes) = calculate_counts()?;
|
let (joined_member_count, invited_member_count, heroes) = calculate_counts()?;
|
||||||
|
|
||||||
let current_state_ids = db.rooms.state_full_ids(current_shortstatehash).await?;
|
let current_state_ids = services()
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.state_full_ids(current_shortstatehash)
|
||||||
|
.await?;
|
||||||
|
|
||||||
let mut state_events = Vec::new();
|
let mut state_events = Vec::new();
|
||||||
let mut lazy_loaded = HashSet::new();
|
let mut lazy_loaded = HashSet::new();
|
||||||
|
|
||||||
let mut i = 0;
|
let mut i = 0;
|
||||||
for (shortstatekey, id) in current_state_ids {
|
for (shortstatekey, id) in current_state_ids {
|
||||||
let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?;
|
let (event_type, state_key) = services()
|
||||||
|
.rooms
|
||||||
|
.short
|
||||||
|
.get_statekey_from_short(shortstatekey)?;
|
||||||
|
|
||||||
if event_type != StateEventType::RoomMember {
|
if event_type != StateEventType::RoomMember {
|
||||||
let pdu = match db.rooms.get_pdu(&id)? {
|
let pdu = match services().rooms.timeline.get_pdu(&id)? {
|
||||||
Some(pdu) => pdu,
|
Some(pdu) => pdu,
|
||||||
None => {
|
None => {
|
||||||
error!("Pdu in state not found: {}", id);
|
error!("Pdu in state not found: {}", id);
|
||||||
|
@ -407,8 +454,10 @@ async fn sync_helper(
|
||||||
} else if !lazy_load_enabled
|
} else if !lazy_load_enabled
|
||||||
|| body.full_state
|
|| body.full_state
|
||||||
|| timeline_users.contains(&state_key)
|
|| timeline_users.contains(&state_key)
|
||||||
|
// TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565
|
||||||
|
|| *sender_user == state_key
|
||||||
{
|
{
|
||||||
let pdu = match db.rooms.get_pdu(&id)? {
|
let pdu = match services().rooms.timeline.get_pdu(&id)? {
|
||||||
Some(pdu) => pdu,
|
Some(pdu) => pdu,
|
||||||
None => {
|
None => {
|
||||||
error!("Pdu in state not found: {}", id);
|
error!("Pdu in state not found: {}", id);
|
||||||
|
@ -417,7 +466,7 @@ async fn sync_helper(
|
||||||
};
|
};
|
||||||
|
|
||||||
// This check is in case a bad user ID made it into the database
|
// This check is in case a bad user ID made it into the database
|
||||||
if let Ok(uid) = UserId::parse(state_key.as_ref()) {
|
if let Ok(uid) = UserId::parse(&state_key) {
|
||||||
lazy_loaded.insert(uid);
|
lazy_loaded.insert(uid);
|
||||||
}
|
}
|
||||||
state_events.push(pdu);
|
state_events.push(pdu);
|
||||||
|
@ -430,12 +479,15 @@ async fn sync_helper(
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reset lazy loading because this is an initial sync
|
// Reset lazy loading because this is an initial sync
|
||||||
db.rooms
|
services().rooms.lazy_loading.lazy_load_reset(
|
||||||
.lazy_load_reset(&sender_user, &sender_device, &room_id)?;
|
&sender_user,
|
||||||
|
&sender_device,
|
||||||
|
&room_id,
|
||||||
|
)?;
|
||||||
|
|
||||||
// The state_events above should contain all timeline_users, let's mark them as lazy
|
// The state_events above should contain all timeline_users, let's mark them as lazy
|
||||||
// loaded.
|
// loaded.
|
||||||
db.rooms.lazy_load_mark_sent(
|
services().rooms.lazy_loading.lazy_load_mark_sent(
|
||||||
&sender_user,
|
&sender_user,
|
||||||
&sender_device,
|
&sender_device,
|
||||||
&room_id,
|
&room_id,
|
||||||
|
@ -457,32 +509,24 @@ async fn sync_helper(
|
||||||
// Incremental /sync
|
// Incremental /sync
|
||||||
let since_shortstatehash = since_shortstatehash.unwrap();
|
let since_shortstatehash = since_shortstatehash.unwrap();
|
||||||
|
|
||||||
let since_sender_member: Option<RoomMemberEventContent> = db
|
|
||||||
.rooms
|
|
||||||
.state_get(
|
|
||||||
since_shortstatehash,
|
|
||||||
&StateEventType::RoomMember,
|
|
||||||
sender_user.as_str(),
|
|
||||||
)?
|
|
||||||
.and_then(|pdu| {
|
|
||||||
serde_json::from_str(pdu.content.get())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid PDU in database."))
|
|
||||||
.ok()
|
|
||||||
});
|
|
||||||
|
|
||||||
let joined_since_last_sync = since_sender_member
|
|
||||||
.map_or(true, |member| member.membership != MembershipState::Join);
|
|
||||||
|
|
||||||
let mut state_events = Vec::new();
|
let mut state_events = Vec::new();
|
||||||
let mut lazy_loaded = HashSet::new();
|
let mut lazy_loaded = HashSet::new();
|
||||||
|
|
||||||
if since_shortstatehash != current_shortstatehash {
|
if since_shortstatehash != current_shortstatehash {
|
||||||
let current_state_ids = db.rooms.state_full_ids(current_shortstatehash).await?;
|
let current_state_ids = services()
|
||||||
let since_state_ids = db.rooms.state_full_ids(since_shortstatehash).await?;
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.state_full_ids(current_shortstatehash)
|
||||||
|
.await?;
|
||||||
|
let since_state_ids = services()
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.state_full_ids(since_shortstatehash)
|
||||||
|
.await?;
|
||||||
|
|
||||||
for (key, id) in current_state_ids {
|
for (key, id) in current_state_ids {
|
||||||
if body.full_state || since_state_ids.get(&key) != Some(&id) {
|
if body.full_state || since_state_ids.get(&key) != Some(&id) {
|
||||||
let pdu = match db.rooms.get_pdu(&id)? {
|
let pdu = match services().rooms.timeline.get_pdu(&id)? {
|
||||||
Some(pdu) => pdu,
|
Some(pdu) => pdu,
|
||||||
None => {
|
None => {
|
||||||
error!("Pdu in state not found: {}", id);
|
error!("Pdu in state not found: {}", id);
|
@@ -515,14 +559,14 @@ async fn sync_helper(
                         continue;
                     }

-                    if !db.rooms.lazy_load_was_sent_before(
+                    if !services().rooms.lazy_loading.lazy_load_was_sent_before(
                         &sender_user,
                         &sender_device,
                         &room_id,
                         &event.sender,
                     )? || lazy_load_send_redundant
                     {
-                        if let Some(member_event) = db.rooms.room_state_get(
+                        if let Some(member_event) = services().rooms.state_accessor.room_state_get(
                             &room_id,
                             &StateEventType::RoomMember,
                             event.sender.as_str(),
@@ -533,7 +577,7 @@ async fn sync_helper(
                 }
             }

-            db.rooms.lazy_load_mark_sent(
+            services().rooms.lazy_loading.lazy_load_mark_sent(
                 &sender_user,
                 &sender_device,
                 &room_id,
@@ -541,14 +585,17 @@ async fn sync_helper(
                 next_batch,
             );

-            let encrypted_room = db
+            let encrypted_room = services()
                 .rooms
+                .state_accessor
                 .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")?
                 .is_some();

-            let since_encryption =
-                db.rooms
-                    .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "")?;
+            let since_encryption = services().rooms.state_accessor.state_get(
+                since_shortstatehash,
+                &StateEventType::RoomEncryption,
+                "",
+            )?;

             // Calculations:
             let new_encrypted_room = encrypted_room && since_encryption.is_none();
@@ -580,7 +627,7 @@ async fn sync_helper(
                         match new_membership {
                             MembershipState::Join => {
                                 // A new user joined an encrypted room
-                                if !share_encrypted_room(&db, &sender_user, &user_id, &room_id)? {
+                                if !share_encrypted_room(&sender_user, &user_id, &room_id)? {
                                     device_list_updates.insert(user_id);
                                 }
                             }
@@ -597,7 +644,9 @@ async fn sync_helper(
             if joined_since_last_sync && encrypted_room || new_encrypted_room {
                 // If the user is in a new encrypted room, give them all joined users
                 device_list_updates.extend(
-                    db.rooms
+                    services()
+                        .rooms
+                        .state_cache
                         .room_members(&room_id)
                         .flatten()
                         .filter(|user_id| {
@@ -606,8 +655,7 @@ async fn sync_helper(
                         })
                         .filter(|user_id| {
                             // Only send keys if the sender doesn't share an encrypted room with the target already
-                            !share_encrypted_room(&db, &sender_user, user_id, &room_id)
-                                .unwrap_or(false)
+                            !share_encrypted_room(&sender_user, user_id, &room_id).unwrap_or(false)
                         }),
                 );
             }
@@ -629,14 +677,17 @@ async fn sync_helper(

         // Look for device list updates in this room
         device_list_updates.extend(
-            db.users
-                .keys_changed(&room_id.to_string(), since, None)
+            services()
+                .users
+                .keys_changed(room_id.as_ref(), since, None)
                 .filter_map(|r| r.ok()),
         );

         let notification_count = if send_notification_counts {
             Some(
-                db.rooms
+                services()
+                    .rooms
+                    .user
                     .notification_count(&sender_user, &room_id)?
                     .try_into()
                     .expect("notification count can't go that high"),
@@ -647,7 +698,9 @@ async fn sync_helper(

         let highlight_count = if send_notification_counts {
             Some(
-                db.rooms
+                services()
+                    .rooms
+                    .user
                     .highlight_count(&sender_user, &room_id)?
                     .try_into()
                     .expect("highlight count can't go that high"),
@@ -659,7 +712,9 @@ async fn sync_helper(
         let prev_batch = timeline_pdus
             .first()
             .map_or(Ok::<_, Error>(None), |(pdu_id, _)| {
-                Ok(Some(db.rooms.pdu_count(pdu_id)?.to_string()))
+                Ok(Some(
+                    services().rooms.timeline.pdu_count(pdu_id)?.to_string(),
+                ))
             })?;

         let room_events: Vec<_> = timeline_pdus
@@ -667,18 +722,19 @@ async fn sync_helper(
             .map(|(_, pdu)| pdu.to_sync_room_event())
             .collect();

-        let mut edus: Vec<_> = db
+        let mut edus: Vec<_> = services()
             .rooms
             .edus
+            .read_receipt
             .readreceipts_since(&room_id, since)
             .filter_map(|r| r.ok()) // Filter out buggy events
             .map(|(_, _, v)| v)
             .collect();

-        if db.rooms.edus.last_typing_update(&room_id, &db.globals)? > since {
+        if services().rooms.edus.typing.last_typing_update(&room_id)? > since {
             edus.push(
                 serde_json::from_str(
-                    &serde_json::to_string(&db.rooms.edus.typings_all(&room_id)?)
+                    &serde_json::to_string(&services().rooms.edus.typing.typings_all(&room_id)?)
                         .expect("event is valid, we just created it"),
                 )
                 .expect("event is valid, we just created it"),
@@ -686,12 +742,15 @@ async fn sync_helper(
         }

         // Save the state after this sync so we can send the correct state diff next sync
-        db.rooms
-            .associate_token_shortstatehash(&room_id, next_batch, current_shortstatehash)?;
+        services().rooms.user.associate_token_shortstatehash(
+            &room_id,
+            next_batch,
+            current_shortstatehash,
+        )?;

         let joined_room = JoinedRoom {
             account_data: RoomAccountData {
-                events: db
+                events: services()
                     .account_data
                     .changes_since(Some(&room_id), &sender_user, since)?
                     .into_iter()
@@ -723,6 +782,7 @@ async fn sync_helper(
                     .collect(),
             },
             ephemeral: Ephemeral { events: edus },
+            unread_thread_notifications: BTreeMap::new(),
         };

         if !joined_room.is_empty() {
@@ -730,10 +790,11 @@ async fn sync_helper(
         }

         // Take presence updates from this room
-        for (user_id, presence) in
-            db.rooms
-                .edus
-                .presence_since(&room_id, since, &db.rooms, &db.globals)?
+        for (user_id, presence) in services()
+            .rooms
+            .edus
+            .presence
+            .presence_since(&room_id, since)?
         {
             match presence_updates.entry(user_id) {
                 Entry::Vacant(v) => {
@@ -765,14 +826,21 @@ async fn sync_helper(
     }

     let mut left_rooms = BTreeMap::new();
-    let all_left_rooms: Vec<_> = db.rooms.rooms_left(&sender_user).collect();
+    let all_left_rooms: Vec<_> = services()
+        .rooms
+        .state_cache
+        .rooms_left(&sender_user)
+        .collect();
     for result in all_left_rooms {
-        let (room_id, left_state_events) = result?;
+        let (room_id, _) = result?;
+
+        let mut left_state_events = Vec::new();

         {
             // Get and drop the lock to wait for remaining operations to finish
             let mutex_insert = Arc::clone(
-                db.globals
+                services()
+                    .globals
                     .roomid_mutex_insert
                     .write()
                     .unwrap()
@@ -783,13 +851,98 @@ async fn sync_helper(
             drop(insert_lock);
         }

-        let left_count = db.rooms.get_left_count(&room_id, &sender_user)?;
+        let left_count = services()
+            .rooms
+            .state_cache
+            .get_left_count(&room_id, &sender_user)?;

         // Left before last sync
         if Some(since) >= left_count {
             continue;
         }

+        if !services().rooms.metadata.exists(&room_id)? {
+            // This is just a rejected invite, not a room we know
+            continue;
+        }
+
+        let since_shortstatehash = services()
+            .rooms
+            .user
+            .get_token_shortstatehash(&room_id, since)?;
+
+        let since_state_ids = match since_shortstatehash {
+            Some(s) => services().rooms.state_accessor.state_full_ids(s).await?,
+            None => HashMap::new(),
+        };
+
+        let left_event_id = match services().rooms.state_accessor.room_state_get_id(
+            &room_id,
+            &StateEventType::RoomMember,
+            sender_user.as_str(),
+        )? {
+            Some(e) => e,
+            None => {
+                error!("Left room but no left state event");
+                continue;
+            }
+        };
+
+        let left_shortstatehash = match services()
+            .rooms
+            .state_accessor
+            .pdu_shortstatehash(&left_event_id)?
+        {
+            Some(s) => s,
+            None => {
+                error!("Leave event has no state");
+                continue;
+            }
+        };
+
+        let mut left_state_ids = services()
+            .rooms
+            .state_accessor
+            .state_full_ids(left_shortstatehash)
+            .await?;
+
+        let leave_shortstatekey = services()
+            .rooms
+            .short
+            .get_or_create_shortstatekey(&StateEventType::RoomMember, sender_user.as_str())?;
+
+        left_state_ids.insert(leave_shortstatekey, left_event_id);
+
+        let mut i = 0;
+        for (key, id) in left_state_ids {
+            if body.full_state || since_state_ids.get(&key) != Some(&id) {
+                let (event_type, state_key) =
+                    services().rooms.short.get_statekey_from_short(key)?;
+
+                if !lazy_load_enabled
+                    || event_type != StateEventType::RoomMember
+                    || body.full_state
+                    // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565
+                    || *sender_user == state_key
+                {
+                    let pdu = match services().rooms.timeline.get_pdu(&id)? {
+                        Some(pdu) => pdu,
+                        None => {
+                            error!("Pdu in state not found: {}", id);
+                            continue;
+                        }
+                    };
+
+                    left_state_events.push(pdu.to_sync_state_event());
+
+                    i += 1;
+                    if i % 100 == 0 {
+                        tokio::task::yield_now().await;
+                    }
+                }
+            }
+        }
+
         left_rooms.insert(
             room_id.clone(),
             LeftRoom {
@@ -807,14 +960,19 @@ async fn sync_helper(
     }

     let mut invited_rooms = BTreeMap::new();
-    let all_invited_rooms: Vec<_> = db.rooms.rooms_invited(&sender_user).collect();
+    let all_invited_rooms: Vec<_> = services()
+        .rooms
+        .state_cache
+        .rooms_invited(&sender_user)
+        .collect();
     for result in all_invited_rooms {
         let (room_id, invite_state_events) = result?;

         {
             // Get and drop the lock to wait for remaining operations to finish
             let mutex_insert = Arc::clone(
-                db.globals
+                services()
+                    .globals
                     .roomid_mutex_insert
                     .write()
                     .unwrap()
@@ -825,7 +983,10 @@ async fn sync_helper(
             drop(insert_lock);
         }

-        let invite_count = db.rooms.get_invite_count(&room_id, &sender_user)?;
+        let invite_count = services()
+            .rooms
+            .state_cache
+            .get_invite_count(&room_id, &sender_user)?;

         // Invited before last sync
         if Some(since) >= invite_count {
@@ -843,13 +1004,16 @@ async fn sync_helper(
     }

     for user_id in left_encrypted_users {
-        let still_share_encrypted_room = db
+        let still_share_encrypted_room = services()
             .rooms
+            .user
             .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])?
             .filter_map(|r| r.ok())
             .filter_map(|other_room_id| {
                 Some(
-                    db.rooms
+                    services()
+                        .rooms
+                        .state_accessor
                         .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "")
                         .ok()?
                         .is_some(),
@@ -864,7 +1028,8 @@ async fn sync_helper(
     }

     // Remove all to-device events the device received *last time*
-    db.users
+    services()
+        .users
         .remove_to_device_events(&sender_user, &sender_device, since)?;

     let response = sync_events::v3::Response {
@@ -877,12 +1042,12 @@ async fn sync_helper(
         },
         presence: Presence {
             events: presence_updates
-                .into_iter()
-                .map(|(_, v)| Raw::new(&v).expect("PresenceEvent always serializes successfully"))
+                .into_values()
+                .map(|v| Raw::new(&v).expect("PresenceEvent always serializes successfully"))
                 .collect(),
         },
         account_data: GlobalAccountData {
-            events: db
+            events: services()
                 .account_data
                 .changes_since(None, &sender_user, since)?
                 .into_iter()
@@ -897,9 +1062,11 @@ async fn sync_helper(
             changed: device_list_updates.into_iter().collect(),
             left: device_list_left.into_iter().collect(),
         },
-        device_one_time_keys_count: db.users.count_one_time_keys(&sender_user, &sender_device)?,
+        device_one_time_keys_count: services()
+            .users
+            .count_one_time_keys(&sender_user, &sender_device)?,
         to_device: ToDevice {
-            events: db
+            events: services()
                 .users
                 .get_to_device_events(&sender_user, &sender_device)?,
         },
@@ -928,21 +1095,22 @@ async fn sync_helper(
     }
 }

-#[tracing::instrument(skip(db))]
 fn share_encrypted_room(
-    db: &Database,
     sender_user: &UserId,
     user_id: &UserId,
     ignore_room: &RoomId,
 ) -> Result<bool> {
-    Ok(db
+    Ok(services()
         .rooms
+        .user
         .get_shared_rooms(vec![sender_user.to_owned(), user_id.to_owned()])?
         .filter_map(|r| r.ok())
         .filter(|room_id| room_id != ignore_room)
         .filter_map(|other_room_id| {
             Some(
-                db.rooms
+                services()
+                    .rooms
+                    .state_accessor
                     .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "")
                     .ok()?
                     .is_some(),
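
The hunks above replace the old `db: DatabaseGuard` handler argument with calls to a global `services()` accessor. The snippet below is only a hedged sketch of one common way to back such an accessor (a process-wide, set-once handle); Conduit's real Services tree and initialization are not shown in this diff, so the names Services, Globals and init here are illustrative assumptions.

    use std::sync::OnceLock;

    pub struct Globals {
        pub server_name: String,
    }

    pub struct Services {
        pub globals: Globals,
        // rooms, users, account_data, ... would live here in a real service tree.
    }

    // Set exactly once at startup, then read from every request handler.
    static SERVICES: OnceLock<Services> = OnceLock::new();

    pub fn init(services: Services) {
        // A second call simply leaves the first value in place.
        let _ = SERVICES.set(services);
    }

    pub fn services() -> &'static Services {
        SERVICES.get().expect("services() called before initialization")
    }
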
@@ -0,0 +1,126 @@
+use crate::{services, Error, Result, Ruma};
+use ruma::{
+    api::client::tag::{create_tag, delete_tag, get_tags},
+    events::{
+        tag::{TagEvent, TagEventContent},
+        RoomAccountDataEventType,
+    },
+};
+use std::collections::BTreeMap;
+
+/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}`
+///
+/// Adds a tag to the room.
+///
+/// - Inserts the tag into the tag event of the room account data.
+pub async fn update_tag_route(
+    body: Ruma<create_tag::v3::Request>,
+) -> Result<create_tag::v3::Response> {
+    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+    let event = services().account_data.get(
+        Some(&body.room_id),
+        sender_user,
+        RoomAccountDataEventType::Tag,
+    )?;
+
+    let mut tags_event = event
+        .map(|e| {
+            serde_json::from_str(e.get())
+                .map_err(|_| Error::bad_database("Invalid account data event in db."))
+        })
+        .unwrap_or_else(|| {
+            Ok(TagEvent {
+                content: TagEventContent {
+                    tags: BTreeMap::new(),
+                },
+            })
+        })?;
+
+    tags_event
+        .content
+        .tags
+        .insert(body.tag.clone().into(), body.tag_info.clone());
+
+    services().account_data.update(
+        Some(&body.room_id),
+        sender_user,
+        RoomAccountDataEventType::Tag,
+        &serde_json::to_value(tags_event).expect("to json value always works"),
+    )?;
+
+    Ok(create_tag::v3::Response {})
+}
+
+/// # `DELETE /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}`
+///
+/// Deletes a tag from the room.
+///
+/// - Removes the tag from the tag event of the room account data.
+pub async fn delete_tag_route(
+    body: Ruma<delete_tag::v3::Request>,
+) -> Result<delete_tag::v3::Response> {
+    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+    let event = services().account_data.get(
+        Some(&body.room_id),
+        sender_user,
+        RoomAccountDataEventType::Tag,
+    )?;
+
+    let mut tags_event = event
+        .map(|e| {
+            serde_json::from_str(e.get())
+                .map_err(|_| Error::bad_database("Invalid account data event in db."))
+        })
+        .unwrap_or_else(|| {
+            Ok(TagEvent {
+                content: TagEventContent {
+                    tags: BTreeMap::new(),
+                },
+            })
+        })?;
+
+    tags_event.content.tags.remove(&body.tag.clone().into());
+
+    services().account_data.update(
+        Some(&body.room_id),
+        sender_user,
+        RoomAccountDataEventType::Tag,
+        &serde_json::to_value(tags_event).expect("to json value always works"),
+    )?;
+
+    Ok(delete_tag::v3::Response {})
+}
+
+/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags`
+///
+/// Returns tags on the room.
+///
+/// - Gets the tag event of the room account data.
+pub async fn get_tags_route(body: Ruma<get_tags::v3::Request>) -> Result<get_tags::v3::Response> {
+    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+    let event = services().account_data.get(
+        Some(&body.room_id),
+        sender_user,
+        RoomAccountDataEventType::Tag,
+    )?;
+
+    let tags_event = event
+        .map(|e| {
+            serde_json::from_str(e.get())
+                .map_err(|_| Error::bad_database("Invalid account data event in db."))
+        })
+        .unwrap_or_else(|| {
+            Ok(TagEvent {
+                content: TagEventContent {
+                    tags: BTreeMap::new(),
+                },
+            })
+        })?;
+
+    Ok(get_tags::v3::Response {
+        tags: tags_event.content.tags,
+    })
+}
@@ -7,7 +7,7 @@ use std::collections::BTreeMap;
 ///
 /// TODO: Fetches all metadata about protocols supported by the homeserver.
 pub async fn get_protocols_route(
-    _body: Ruma<get_protocols::v3::IncomingRequest>,
+    _body: Ruma<get_protocols::v3::Request>,
 ) -> Result<get_protocols::v3::Response> {
     // TODO
     Ok(get_protocols::v3::Response {
@@ -1,7 +1,7 @@
 use ruma::events::ToDeviceEventType;
 use std::collections::BTreeMap;

-use crate::{database::DatabaseGuard, Error, Result, Ruma};
+use crate::{services, Error, Result, Ruma};
 use ruma::{
     api::{
         client::{error::ErrorKind, to_device::send_event_to_device},
@@ -14,14 +14,13 @@ use ruma::{
 ///
 /// Send a to-device event to a set of client devices.
 pub async fn send_event_to_device_route(
-    db: DatabaseGuard,
-    body: Ruma<send_event_to_device::v3::IncomingRequest>,
+    body: Ruma<send_event_to_device::v3::Request>,
 ) -> Result<send_event_to_device::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
     let sender_device = body.sender_device.as_deref();

     // Check if this is a new transaction id
-    if db
+    if services()
         .transaction_ids
         .existing_txnid(sender_user, sender_device, &body.txn_id)?
         .is_some()
@@ -31,44 +30,46 @@ pub async fn send_event_to_device_route(

     for (target_user_id, map) in &body.messages {
         for (target_device_id_maybe, event) in map {
-            if target_user_id.server_name() != db.globals.server_name() {
+            if target_user_id.server_name() != services().globals.server_name() {
                 let mut map = BTreeMap::new();
                 map.insert(target_device_id_maybe.clone(), event.clone());
                 let mut messages = BTreeMap::new();
                 messages.insert(target_user_id.clone(), map);
+                let count = services().globals.next_count()?;

-                db.sending.send_reliable_edu(
+                services().sending.send_reliable_edu(
                     target_user_id.server_name(),
                     serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice(
                         DirectDeviceContent {
                             sender: sender_user.clone(),
                             ev_type: ToDeviceEventType::from(&*body.event_type),
-                            message_id: body.txn_id.to_owned(),
+                            message_id: count.to_string().into(),
                             messages,
                         },
                     ))
                     .expect("DirectToDevice EDU can be serialized"),
-                    db.globals.next_count()?,
+                    count,
                 )?;

                 continue;
             }

             match target_device_id_maybe {
-                DeviceIdOrAllDevices::DeviceId(target_device_id) => db.users.add_to_device_event(
-                    sender_user,
-                    target_user_id,
-                    &target_device_id,
-                    &body.event_type,
-                    event.deserialize_as().map_err(|_| {
-                        Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid")
-                    })?,
-                    &db.globals,
-                )?,
+                DeviceIdOrAllDevices::DeviceId(target_device_id) => {
+                    services().users.add_to_device_event(
+                        sender_user,
+                        target_user_id,
+                        target_device_id,
+                        &body.event_type,
+                        event.deserialize_as().map_err(|_| {
+                            Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid")
+                        })?,
+                    )?
+                }

                 DeviceIdOrAllDevices::AllDevices => {
-                    for target_device_id in db.users.all_device_ids(target_user_id) {
-                        db.users.add_to_device_event(
+                    for target_device_id in services().users.all_device_ids(target_user_id) {
+                        services().users.add_to_device_event(
                             sender_user,
                             target_user_id,
                             &target_device_id?,
@@ -76,7 +77,6 @@ pub async fn send_event_to_device_route(
                             event.deserialize_as().map_err(|_| {
                                 Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid")
                             })?,
-                            &db.globals,
                         )?;
                     }
                 }
@@ -85,10 +85,9 @@ pub async fn send_event_to_device_route(
     }

     // Save transaction id with empty data
-    db.transaction_ids
+    services()
+        .transaction_ids
         .add_txnid(sender_user, sender_device, &body.txn_id, &[])?;

-    db.flush()?;
-
     Ok(send_event_to_device::v3::Response {})
 }
@@ -1,18 +1,21 @@
-use crate::{database::DatabaseGuard, utils, Error, Result, Ruma};
+use crate::{services, utils, Error, Result, Ruma};
 use ruma::api::client::{error::ErrorKind, typing::create_typing_event};

 /// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}`
 ///
 /// Sets the typing state of the sender user.
 pub async fn create_typing_event_route(
-    db: DatabaseGuard,
-    body: Ruma<create_typing_event::v3::IncomingRequest>,
+    body: Ruma<create_typing_event::v3::Request>,
 ) -> Result<create_typing_event::v3::Response> {
     use create_typing_event::v3::Typing;

     let sender_user = body.sender_user.as_ref().expect("user is authenticated");

-    if !db.rooms.is_joined(sender_user, &body.room_id)? {
+    if !services()
+        .rooms
+        .state_cache
+        .is_joined(sender_user, &body.room_id)?
+    {
         return Err(Error::BadRequest(
             ErrorKind::Forbidden,
             "You are not in this room.",
@@ -20,16 +23,17 @@ pub async fn create_typing_event_route(
     }

     if let Typing::Yes(duration) = body.state {
-        db.rooms.edus.typing_add(
+        services().rooms.edus.typing.typing_add(
             sender_user,
             &body.room_id,
             duration.as_millis() as u64 + utils::millis_since_unix_epoch(),
-            &db.globals,
         )?;
     } else {
-        db.rooms
+        services()
+            .rooms
             .edus
-            .typing_remove(sender_user, &body.room_id, &db.globals)?;
+            .typing
+            .typing_remove(sender_user, &body.room_id)?;
     }

     Ok(create_typing_event::v3::Response {})
@@ -15,7 +15,7 @@ use crate::{Result, Ruma};
 /// Note: Unstable features are used while developing new features. Clients should avoid using
 /// unstable features in their stable releases
 pub async fn get_supported_versions_route(
-    _body: Ruma<get_supported_versions::IncomingRequest>,
+    _body: Ruma<get_supported_versions::Request>,
 ) -> Result<get_supported_versions::Response> {
     let resp = get_supported_versions::Response {
         versions: vec![
@@ -1,4 +1,4 @@
-use crate::{database::DatabaseGuard, Result, Ruma};
+use crate::{services, Result, Ruma};
 use ruma::{
     api::client::user_directory::search_users,
     events::{
@@ -14,20 +14,19 @@ use ruma::{
 /// - Hides any local users that aren't in any public rooms (i.e. those that have the join rule set to public)
 /// and don't share a room with the sender
 pub async fn search_users_route(
-    db: DatabaseGuard,
-    body: Ruma<search_users::v3::IncomingRequest>,
+    body: Ruma<search_users::v3::Request>,
 ) -> Result<search_users::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");
     let limit = u64::from(body.limit) as usize;

-    let mut users = db.users.iter().filter_map(|user_id| {
+    let mut users = services().users.iter().filter_map(|user_id| {
         // Filter out buggy users (they should not exist, but you never know...)
         let user_id = user_id.ok()?;

         let user = search_users::v3::User {
             user_id: user_id.clone(),
-            display_name: db.users.displayname(&user_id).ok()?,
-            avatar_url: db.users.avatar_url(&user_id).ok()?,
+            display_name: services().users.displayname(&user_id).ok()?,
+            avatar_url: services().users.avatar_url(&user_id).ok()?,
         };

         let user_id_matches = user
@@ -49,12 +48,15 @@ pub async fn search_users_route(
             return None;
         }

-        let user_is_in_public_rooms =
-            db.rooms
+        let user_is_in_public_rooms = services()
+            .rooms
+            .state_cache
             .rooms_joined(&user_id)
             .filter_map(|r| r.ok())
             .any(|room| {
-                db.rooms
+                services()
+                    .rooms
+                    .state_accessor
                     .room_state_get(&room, &StateEventType::RoomJoinRules, "")
                     .map_or(false, |event| {
                         event.map_or(false, |event| {
@@ -70,9 +72,10 @@ pub async fn search_users_route(
             return Some(user);
         }

-        let user_is_in_shared_rooms = db
+        let user_is_in_shared_rooms = services()
             .rooms
-            .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])
+            .user
+            .get_shared_rooms(vec![sender_user.clone(), user_id])
             .ok()?
             .next()
             .is_some();
@@ -1,5 +1,5 @@
-use crate::{database::DatabaseGuard, Result, Ruma};
-use hmac::{Hmac, Mac, NewMac};
+use crate::{services, Result, Ruma};
+use hmac::{Hmac, Mac};
 use ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch};
 use sha1::Sha1;
 use std::time::{Duration, SystemTime};
@@ -10,16 +10,15 @@ type HmacSha1 = Hmac<Sha1>;
 ///
 /// TODO: Returns information about the recommended turn server.
 pub async fn turn_server_route(
-    db: DatabaseGuard,
-    body: Ruma<get_turn_server_info::v3::IncomingRequest>,
+    body: Ruma<get_turn_server_info::v3::Request>,
 ) -> Result<get_turn_server_info::v3::Response> {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");

-    let turn_secret = db.globals.turn_secret();
+    let turn_secret = services().globals.turn_secret().clone();

     let (username, password) = if !turn_secret.is_empty() {
         let expiry = SecondsSinceUnixEpoch::from_system_time(
-            SystemTime::now() + Duration::from_secs(db.globals.turn_ttl()),
+            SystemTime::now() + Duration::from_secs(services().globals.turn_ttl()),
         )
         .expect("time is valid");

@@ -34,15 +33,15 @@ pub async fn turn_server_route(
         (username, password)
     } else {
         (
-            db.globals.turn_username().clone(),
-            db.globals.turn_password().clone(),
+            services().globals.turn_username().clone(),
+            services().globals.turn_password().clone(),
         )
     };

     Ok(get_turn_server_info::v3::Response {
         username,
         password,
-        uris: db.globals.turn_uris().to_vec(),
-        ttl: Duration::from_secs(db.globals.turn_ttl()),
+        uris: services().globals.turn_uris().to_vec(),
+        ttl: Duration::from_secs(services().globals.turn_ttl()),
     })
 }
@@ -0,0 +1,4 @@
+pub mod appservice_server;
+pub mod client_server;
+pub mod ruma_wrapper;
+pub mod server_server;
@@ -17,14 +17,13 @@ use bytes::{BufMut, Bytes, BytesMut};
 use http::StatusCode;
 use ruma::{
     api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse},
-    signatures::CanonicalJsonValue,
-    DeviceId, ServerName, UserId,
+    CanonicalJsonValue, OwnedDeviceId, OwnedServerName, UserId,
 };
 use serde::Deserialize;
 use tracing::{debug, error, warn};

 use super::{Ruma, RumaResponse};
-use crate::{database::DatabaseGuard, server_server, Error, Result};
+use crate::{services, Error, Result};

 #[async_trait]
 impl<T, B> FromRequest<B> for Ruma<T>
@@ -44,7 +43,6 @@ where
         }

         let metadata = T::METADATA;
-        let db = DatabaseGuard::from_request(req).await?;
         let auth_header = Option::<TypedHeader<Authorization<Bearer>>>::from_request(req).await?;
         let path_params = Path::<Vec<String>>::from_request(req).await?;

@@ -71,7 +69,7 @@ where

         let mut json_body = serde_json::from_slice::<CanonicalJsonValue>(&body).ok();

-        let appservices = db.appservice.all().unwrap();
+        let appservices = services().appservice.all().unwrap();
         let appservice_registration = appservices.iter().find(|(_id, registration)| {
             registration
                 .get("as_token")
@@ -82,7 +80,7 @@ where
         let (sender_user, sender_device, sender_servername, from_appservice) =
             if let Some((_id, registration)) = appservice_registration {
                 match metadata.authentication {
-                    AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => {
+                    AuthScheme::AccessToken => {
                         let user_id = query_params.user_id.map_or_else(
                             || {
                                 UserId::parse_with_server_name(
@@ -91,14 +89,14 @@ where
                                         .unwrap()
                                         .as_str()
                                         .unwrap(),
-                                    db.globals.server_name(),
+                                    services().globals.server_name(),
                                 )
                                 .unwrap()
                             },
                             |s| UserId::parse(s).unwrap(),
                         );

-                        if !db.users.exists(&user_id).unwrap() {
+                        if !services().users.exists(&user_id).unwrap() {
                             return Err(Error::BadRequest(
                                 ErrorKind::Forbidden,
                                 "User does not exist.",
@@ -113,7 +111,7 @@ where
                 }
             } else {
                 match metadata.authentication {
-                    AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => {
+                    AuthScheme::AccessToken => {
                         let token = match token {
                             Some(token) => token,
                             _ => {
@@ -124,7 +122,7 @@ where
                             }
                         };

-                        match db.users.find_from_token(token).unwrap() {
+                        match services().users.find_from_token(token).unwrap() {
                             None => {
                                 return Err(Error::BadRequest(
                                     ErrorKind::UnknownToken { soft_logout: false },
@@ -133,7 +131,7 @@ where
                             }
                             Some((user_id, device_id)) => (
                                 Some(user_id),
-                                Some(Box::<DeviceId>::from(device_id)),
+                                Some(OwnedDeviceId::from(device_id)),
                                 None,
                                 false,
                             ),
@@ -185,7 +183,7 @@ where
                     (
                         "destination".to_owned(),
                         CanonicalJsonValue::String(
-                            db.globals.server_name().as_str().to_owned(),
+                            services().globals.server_name().as_str().to_owned(),
                         ),
                     ),
                     (
@@ -198,11 +196,10 @@ where
                     request_map.insert("content".to_owned(), json_body.clone());
                 };

-                let keys_result = server_server::fetch_signing_keys(
-                    &db,
-                    &x_matrix.origin,
-                    vec![x_matrix.key.to_owned()],
-                )
-                .await;
+                let keys_result = services()
+                    .rooms
+                    .event_handler
+                    .fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_owned()])
+                    .await;

                 let keys = match keys_result {
@@ -251,7 +248,7 @@ where

         if let Some(CanonicalJsonValue::Object(json_body)) = &mut json_body {
             let user_id = sender_user.clone().unwrap_or_else(|| {
-                UserId::parse_with_server_name("", db.globals.server_name())
+                UserId::parse_with_server_name("", services().globals.server_name())
                     .expect("we know this is valid")
             });

@@ -261,7 +258,7 @@ where
                 .and_then(|auth| auth.get("session"))
                 .and_then(|session| session.as_str())
                 .and_then(|session| {
-                    db.uiaa.get_uiaa_request(
+                    services().uiaa.get_uiaa_request(
                         &user_id,
                         &sender_device.clone().unwrap_or_else(|| "".into()),
                         session,
@@ -284,7 +281,7 @@ where
         debug!("{:?}", http_request);

         let body = T::try_from_http_request(http_request, &path_params).map_err(|e| {
-            warn!("{:?}", e);
+            warn!("{:?}\n{:?}", e, json_body);
             Error::BadRequest(ErrorKind::BadJson, "Failed to deserialize request.")
         })?;

@@ -300,7 +297,7 @@ where
 }

 struct XMatrix {
-    origin: Box<ServerName>,
+    origin: OwnedServerName,
     key: String, // KeyName?
     sig: String,
 }
@@ -311,8 +308,7 @@ impl Credentials for XMatrix {
     fn decode(value: &http::HeaderValue) -> Option<Self> {
         debug_assert!(
             value.as_bytes().starts_with(b"X-Matrix "),
-            "HeaderValue to decode should start with \"X-Matrix ..\", received = {:?}",
-            value,
+            "HeaderValue to decode should start with \"X-Matrix ..\", received = {value:?}",
         );

         let parameters = str::from_utf8(&value.as_bytes()["X-Matrix ".len()..])
@@ -1,6 +1,7 @@
 use crate::Error;
 use ruma::{
-    api::client::uiaa::UiaaResponse, signatures::CanonicalJsonValue, DeviceId, ServerName, UserId,
+    api::client::uiaa::UiaaResponse, CanonicalJsonValue, OwnedDeviceId, OwnedServerName,
+    OwnedUserId,
 };
 use std::ops::Deref;

@@ -10,9 +11,9 @@ mod axum;
 /// Extractor for Ruma request structs
 pub struct Ruma<T> {
     pub body: T,
-    pub sender_user: Option<Box<UserId>>,
-    pub sender_device: Option<Box<DeviceId>>,
-    pub sender_servername: Option<Box<ServerName>>,
+    pub sender_user: Option<OwnedUserId>,
+    pub sender_device: Option<OwnedDeviceId>,
+    pub sender_servername: Option<OwnedServerName>,
     // This is None when body is not a valid string
     pub json_body: Option<CanonicalJsonValue>,
     pub from_appservice: bool,
File diff suppressed because it is too large
File diff suppressed because it is too large
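
The hunks above also track ruma's move from Box<UserId>/Box<DeviceId>/Box<ServerName> to dedicated owned types. A small hedged example of the new spelling; the exact UserId::parse signature is assumed from current ruma releases rather than taken from this diff.

    use ruma::{OwnedUserId, UserId};

    fn parse_user(raw: &str) -> Option<OwnedUserId> {
        // Parsing yields an owned id; borrow it as &UserId where a reference is needed.
        let user: OwnedUserId = UserId::parse(raw).ok()?;
        let _as_ref: &UserId = &user;
        Some(user)
    }
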
|
@ -1,127 +0,0 @@
|
||||||
use crate::{database::DatabaseGuard, Error, Result, Ruma};
|
|
||||||
use ruma::{
|
|
||||||
api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt},
|
|
||||||
events::RoomAccountDataEventType,
|
|
||||||
receipt::ReceiptType,
|
|
||||||
MilliSecondsSinceUnixEpoch,
|
|
||||||
};
|
|
||||||
use std::collections::BTreeMap;
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers`
|
|
||||||
///
|
|
||||||
/// Sets different types of read markers.
|
|
||||||
///
|
|
||||||
/// - Updates fully-read account data event to `fully_read`
|
|
||||||
/// - If `read_receipt` is set: Update private marker and public read receipt EDU
|
|
||||||
pub async fn set_read_marker_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<set_read_marker::v3::IncomingRequest>,
|
|
||||||
) -> Result<set_read_marker::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let fully_read_event = ruma::events::fully_read::FullyReadEvent {
|
|
||||||
content: ruma::events::fully_read::FullyReadEventContent {
|
|
||||||
event_id: body.fully_read.clone(),
|
|
||||||
},
|
|
||||||
};
|
|
||||||
db.account_data.update(
|
|
||||||
Some(&body.room_id),
|
|
||||||
sender_user,
|
|
||||||
RoomAccountDataEventType::FullyRead,
|
|
||||||
&fully_read_event,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
if let Some(event) = &body.read_receipt {
|
|
||||||
db.rooms.edus.private_read_set(
|
|
||||||
&body.room_id,
|
|
||||||
sender_user,
|
|
||||||
db.rooms.get_pdu_count(event)?.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Event does not exist.",
|
|
||||||
))?,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
db.rooms
|
|
||||||
.reset_notification_counts(sender_user, &body.room_id)?;
|
|
||||||
|
|
||||||
let mut user_receipts = BTreeMap::new();
|
|
||||||
user_receipts.insert(
|
|
||||||
sender_user.clone(),
|
|
||||||
ruma::events::receipt::Receipt {
|
|
||||||
ts: Some(MilliSecondsSinceUnixEpoch::now()),
|
|
||||||
},
|
|
||||||
);
|
|
||||||
|
|
||||||
let mut receipts = BTreeMap::new();
|
|
||||||
receipts.insert(ReceiptType::Read, user_receipts);
|
|
||||||
|
|
||||||
let mut receipt_content = BTreeMap::new();
|
|
||||||
receipt_content.insert(event.to_owned(), receipts);
|
|
||||||
|
|
||||||
db.rooms.edus.readreceipt_update(
|
|
||||||
sender_user,
|
|
||||||
&body.room_id,
|
|
||||||
ruma::events::receipt::ReceiptEvent {
|
|
||||||
content: ruma::events::receipt::ReceiptEventContent(receipt_content),
|
|
||||||
room_id: body.room_id.clone(),
|
|
||||||
},
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(set_read_marker::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}`
|
|
||||||
///
|
|
||||||
/// Sets private read marker and public read receipt EDU.
|
|
||||||
pub async fn create_receipt_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<create_receipt::v3::IncomingRequest>,
|
|
||||||
) -> Result<create_receipt::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
db.rooms.edus.private_read_set(
|
|
||||||
&body.room_id,
|
|
||||||
sender_user,
|
|
||||||
db.rooms
|
|
||||||
.get_pdu_count(&body.event_id)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"Event does not exist.",
|
|
||||||
))?,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
db.rooms
|
|
||||||
.reset_notification_counts(sender_user, &body.room_id)?;
|
|
||||||
|
|
||||||
let mut user_receipts = BTreeMap::new();
|
|
||||||
user_receipts.insert(
|
|
||||||
sender_user.clone(),
|
|
||||||
ruma::events::receipt::Receipt {
|
|
||||||
ts: Some(MilliSecondsSinceUnixEpoch::now()),
|
|
||||||
},
|
|
||||||
);
|
|
||||||
let mut receipts = BTreeMap::new();
|
|
||||||
receipts.insert(ReceiptType::Read, user_receipts);
|
|
||||||
|
|
||||||
let mut receipt_content = BTreeMap::new();
|
|
||||||
receipt_content.insert(body.event_id.to_owned(), receipts);
|
|
||||||
|
|
||||||
db.rooms.edus.readreceipt_update(
|
|
||||||
sender_user,
|
|
||||||
&body.room_id,
|
|
||||||
ruma::events::receipt::ReceiptEvent {
|
|
||||||
content: ruma::events::receipt::ReceiptEventContent(receipt_content),
|
|
||||||
room_id: body.room_id.clone(),
|
|
||||||
},
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(create_receipt::v3::Response {})
|
|
||||||
}
|
|
|
@ -1,117 +0,0 @@
|
||||||
use crate::{database::DatabaseGuard, Result, Ruma};
|
|
||||||
use ruma::{
|
|
||||||
api::client::tag::{create_tag, delete_tag, get_tags},
|
|
||||||
events::{
|
|
||||||
tag::{TagEvent, TagEventContent},
|
|
||||||
RoomAccountDataEventType,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
use std::collections::BTreeMap;
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}`
|
|
||||||
///
|
|
||||||
/// Adds a tag to the room.
|
|
||||||
///
|
|
||||||
/// - Inserts the tag into the tag event of the room account data.
|
|
||||||
pub async fn update_tag_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<create_tag::v3::IncomingRequest>,
|
|
||||||
) -> Result<create_tag::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let mut tags_event = db
|
|
||||||
.account_data
|
|
||||||
.get(
|
|
||||||
Some(&body.room_id),
|
|
||||||
sender_user,
|
|
||||||
RoomAccountDataEventType::Tag,
|
|
||||||
)?
|
|
||||||
.unwrap_or_else(|| TagEvent {
|
|
||||||
content: TagEventContent {
|
|
||||||
tags: BTreeMap::new(),
|
|
||||||
},
|
|
||||||
});
|
|
||||||
tags_event
|
|
||||||
.content
|
|
||||||
.tags
|
|
||||||
.insert(body.tag.clone().into(), body.tag_info.clone());
|
|
||||||
|
|
||||||
db.account_data.update(
|
|
||||||
Some(&body.room_id),
|
|
||||||
sender_user,
|
|
||||||
RoomAccountDataEventType::Tag,
|
|
||||||
&tags_event,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(create_tag::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `DELETE /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}`
|
|
||||||
///
|
|
||||||
/// Deletes a tag from the room.
|
|
||||||
///
|
|
||||||
/// - Removes the tag from the tag event of the room account data.
|
|
||||||
pub async fn delete_tag_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<delete_tag::v3::IncomingRequest>,
|
|
||||||
) -> Result<delete_tag::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let mut tags_event = db
|
|
||||||
.account_data
|
|
||||||
.get(
|
|
||||||
Some(&body.room_id),
|
|
||||||
sender_user,
|
|
||||||
RoomAccountDataEventType::Tag,
|
|
||||||
)?
|
|
||||||
.unwrap_or_else(|| TagEvent {
|
|
||||||
content: TagEventContent {
|
|
||||||
tags: BTreeMap::new(),
|
|
||||||
},
|
|
||||||
});
|
|
||||||
tags_event.content.tags.remove(&body.tag.clone().into());
|
|
||||||
|
|
||||||
db.account_data.update(
|
|
||||||
Some(&body.room_id),
|
|
||||||
sender_user,
|
|
||||||
RoomAccountDataEventType::Tag,
|
|
||||||
&tags_event,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(delete_tag::v3::Response {})
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags`
|
|
||||||
///
|
|
||||||
/// Returns tags on the room.
|
|
||||||
///
|
|
||||||
/// - Gets the tag event of the room account data.
|
|
||||||
pub async fn get_tags_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<get_tags::v3::IncomingRequest>,
|
|
||||||
) -> Result<get_tags::v3::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
Ok(get_tags::v3::Response {
|
|
||||||
tags: db
|
|
||||||
.account_data
|
|
||||||
.get(
|
|
||||||
Some(&body.room_id),
|
|
||||||
sender_user,
|
|
||||||
RoomAccountDataEventType::Tag,
|
|
||||||
)?
|
|
||||||
.unwrap_or_else(|| TagEvent {
|
|
||||||
content: TagEventContent {
|
|
||||||
tags: BTreeMap::new(),
|
|
||||||
},
|
|
||||||
})
|
|
||||||
.content
|
|
||||||
.tags,
|
|
||||||
})
|
|
||||||
}
|
|
|
@@ -4,7 +4,7 @@ use std::{
     net::{IpAddr, Ipv4Addr},
 };

-use ruma::{RoomVersionId, ServerName};
+use ruma::{OwnedServerName, RoomVersionId};
 use serde::{de::IgnoredAny, Deserialize};
 use tracing::warn;

@@ -20,7 +20,7 @@ pub struct Config {
     pub port: u16,
     pub tls: Option<TlsConfig>,

-    pub server_name: Box<ServerName>,
+    pub server_name: OwnedServerName,
     #[serde(default = "default_database_backend")]
     pub database_backend: String,
     pub database_path: String,
@@ -40,6 +40,8 @@ pub struct Config {
     pub max_request_size: u32,
     #[serde(default = "default_max_concurrent_requests")]
     pub max_concurrent_requests: u16,
+    #[serde(default = "default_max_fetch_prev_events")]
+    pub max_fetch_prev_events: u16,
     #[serde(default = "false_fn")]
     pub allow_registration: bool,
     #[serde(default = "true_fn")]
@@ -60,7 +62,7 @@ pub struct Config {
     pub proxy: ProxyConfig,
     pub jwt_secret: Option<String>,
     #[serde(default = "Vec::new")]
-    pub trusted_servers: Vec<Box<ServerName>>,
+    pub trusted_servers: Vec<OwnedServerName>,
     #[serde(default = "default_log")]
     pub log: String,
     #[serde(default)]
@@ -183,7 +185,7 @@ impl fmt::Display for Config {
             ("Turn TTL", &self.turn_ttl.to_string()),
             ("Turn URIs", {
                 let mut lst = vec![];
-                for item in self.turn_uris.to_vec().into_iter().enumerate() {
+                for item in self.turn_uris.iter().cloned().enumerate() {
                     let (_, uri): (usize, String) = item;
                     lst.push(uri);
                 }
@@ -191,13 +193,13 @@ impl fmt::Display for Config {
             }),
         ];

-        let mut msg: String = "Active config values:\n\n".to_string();
+        let mut msg: String = "Active config values:\n\n".to_owned();

        for line in lines.into_iter().enumerate() {
            msg += &format!("{}: {}\n", line.1 .0, line.1 .1);
        }

-        write!(f, "{}", msg)
+        write!(f, "{msg}")
    }
}

@@ -222,7 +224,7 @@ fn default_database_backend() -> String {
 }

 fn default_db_cache_capacity_mb() -> f64 {
-    10.0
+    1000.0
 }

 fn default_conduit_cache_capacity_modifier() -> f64 {
@@ -230,7 +232,7 @@ fn default_conduit_cache_capacity_modifier() -> f64 {
 }

 fn default_rocksdb_max_open_files() -> i32 {
-    20
+    1000
 }

 fn default_pdu_cache_capacity() -> u32 {
@@ -238,7 +240,7 @@ fn default_pdu_cache_capacity() -> u32 {
 }

 fn default_cleanup_second_interval() -> u32 {
-    1 * 60 // every minute
+    60 // every minute
 }

 fn default_max_request_size() -> u32 {
@@ -249,8 +251,12 @@ fn default_max_concurrent_requests() -> u16 {
     100
 }

+fn default_max_fetch_prev_events() -> u16 {
+    100_u16
+}
+
 fn default_log() -> String {
-    "info,state_res=warn,_=off,sled=off".to_owned()
+    "warn,state_res=warn,_=off,sled=off".to_owned()
 }

 fn default_turn_ttl() -> u64 {
@@ -258,6 +264,6 @@ fn default_turn_ttl() -> u64 {
 }

 // I know, it's a great name
-fn default_default_room_version() -> RoomVersionId {
-    RoomVersionId::V6
+pub fn default_default_room_version() -> RoomVersionId {
+    RoomVersionId::V9
 }
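The default functions changed above are wired in through serde's default = "..." attributes, so they only apply when a key is missing from the config file. A minimal, hedged sketch of that mechanism follows; MiniConfig is an illustrative stand-in (not Conduit's real Config struct) and it assumes the serde and toml crates are available.

use serde::Deserialize;

fn default_max_fetch_prev_events() -> u16 {
    100_u16
}

fn default_db_cache_capacity_mb() -> f64 {
    1000.0
}

#[derive(Debug, Deserialize)]
struct MiniConfig {
    server_name: String,
    #[serde(default = "default_max_fetch_prev_events")]
    max_fetch_prev_events: u16,
    #[serde(default = "default_db_cache_capacity_mb")]
    db_cache_capacity_mb: f64,
}

fn main() {
    // Keys absent from the input fall back to the default fns, which is why
    // changing those fns changes behaviour for existing config files.
    let cfg: MiniConfig = toml::from_str(r#"server_name = "example.com""#).unwrap();
    assert_eq!(cfg.max_fetch_prev_events, 100);
    assert_eq!(cfg.db_cache_capacity_mb, 1000.0);
    println!("{cfg:?}");
}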
src/database.rs (1017 lines changed): file diff suppressed because it is too large.
@@ -26,11 +26,11 @@ pub mod persy;
 ))]
 pub mod watchers;

-pub trait DatabaseEngine: Send + Sync {
+pub trait KeyValueDatabaseEngine: Send + Sync {
     fn open(config: &Config) -> Result<Self>
     where
         Self: Sized;
-    fn open_tree(&self, name: &'static str) -> Result<Arc<dyn Tree>>;
+    fn open_tree(&self, name: &'static str) -> Result<Arc<dyn KvTree>>;
     fn flush(&self) -> Result<()>;
     fn cleanup(&self) -> Result<()> {
         Ok(())
@@ -40,7 +40,7 @@ pub trait DatabaseEngine: Send + Sync {
     }
 }

-pub trait Tree: Send + Sync {
+pub trait KvTree: Send + Sync {
     fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>>;

     fn insert(&self, key: &[u8], value: &[u8]) -> Result<()>;
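The rename above (DatabaseEngine to KeyValueDatabaseEngine, Tree to KvTree) does not change the contract the storage backends implement: open named trees and do byte-key/byte-value operations on them. A minimal in-memory sketch of a cut-down version of that trait follows; MemoryTree and the reduced trait are hypothetical illustrations, since the real KvTree has more methods (remove, iteration, watch_prefix, and so on).

use std::{
    collections::BTreeMap,
    sync::{Arc, RwLock},
};

type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;

// Cut-down stand-in for the KvTree trait above, for illustration only.
trait KvTree: Send + Sync {
    fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>>;
    fn insert(&self, key: &[u8], value: &[u8]) -> Result<()>;
}

#[derive(Default)]
struct MemoryTree {
    map: RwLock<BTreeMap<Vec<u8>, Vec<u8>>>,
}

impl KvTree for MemoryTree {
    fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
        Ok(self.map.read().unwrap().get(key).cloned())
    }

    fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> {
        self.map.write().unwrap().insert(key.to_vec(), value.to_vec());
        Ok(())
    }
}

fn main() -> Result<()> {
    let tree: Arc<dyn KvTree> = Arc::new(MemoryTree::default());
    tree.insert(b"key", b"value")?;
    assert_eq!(tree.get(b"key")?, Some(b"value".to_vec()));
    Ok(())
}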
@@ -1,6 +1,6 @@
 use crate::{
     database::{
-        abstraction::{watchers::Watchers, DatabaseEngine, Tree},
+        abstraction::{watchers::Watchers, KeyValueDatabaseEngine, KvTree},
         Config,
     },
     Result,
@@ -15,7 +15,7 @@ pub struct Engine {
     persy: Persy,
 }

-impl DatabaseEngine for Arc<Engine> {
+impl KeyValueDatabaseEngine for Arc<Engine> {
     fn open(config: &Config) -> Result<Self> {
         let mut cfg = persy::Config::new();
         cfg.change_cache_size((config.db_cache_capacity_mb * 1024.0 * 1024.0) as u64);
@@ -27,7 +27,7 @@ impl DatabaseEngine for Arc<Engine> {
         Ok(Arc::new(Engine { persy }))
     }

-    fn open_tree(&self, name: &'static str) -> Result<Arc<dyn Tree>> {
+    fn open_tree(&self, name: &'static str) -> Result<Arc<dyn KvTree>> {
         // Create if it doesn't exist
         if !self.persy.exists_index(name)? {
             let mut tx = self.persy.begin()?;
@@ -61,7 +61,7 @@ impl PersyTree {
     }
 }

-impl Tree for PersyTree {
+impl KvTree for PersyTree {
     fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
         let result = self
             .persy
@@ -1,4 +1,4 @@
-use super::{super::Config, watchers::Watchers, DatabaseEngine, Tree};
+use super::{super::Config, watchers::Watchers, KeyValueDatabaseEngine, KvTree};
 use crate::{utils, Result};
 use std::{
     future::Future,
@@ -51,7 +51,7 @@ fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::O
     db_opts
 }

-impl DatabaseEngine for Arc<Engine> {
+impl KeyValueDatabaseEngine for Arc<Engine> {
     fn open(config: &Config) -> Result<Self> {
         let cache_capacity_bytes = (config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize;
         let rocksdb_cache = rocksdb::Cache::new_lru_cache(cache_capacity_bytes).unwrap();
@@ -83,7 +83,7 @@ impl DatabaseEngine for Arc<Engine> {
         }))
     }

-    fn open_tree(&self, name: &'static str) -> Result<Arc<dyn Tree>> {
+    fn open_tree(&self, name: &'static str) -> Result<Arc<dyn KvTree>> {
         if !self.old_cfs.contains(&name.to_owned()) {
             // Create if it didn't exist
             let _ = self
@@ -129,7 +129,7 @@ impl RocksDbEngineTree<'_> {
     }
 }

-impl Tree for RocksDbEngineTree<'_> {
+impl KvTree for RocksDbEngineTree<'_> {
     fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
         Ok(self.db.rocks.get_cf(&self.cf(), key)?)
     }
@@ -161,6 +161,7 @@ impl Tree for RocksDbEngineTree<'_> {
             self.db
                 .rocks
                 .iterator_cf(&self.cf(), rocksdb::IteratorMode::Start)
+                //.map(|r| r.unwrap())
                 .map(|(k, v)| (Vec::from(k), Vec::from(v))),
         )
     }
@@ -184,6 +185,7 @@ impl Tree for RocksDbEngineTree<'_> {
                 },
             ),
         )
+        //.map(|r| r.unwrap())
         .map(|(k, v)| (Vec::from(k), Vec::from(v))),
         )
     }
@@ -191,7 +193,7 @@ impl Tree for RocksDbEngineTree<'_> {
     fn increment(&self, key: &[u8]) -> Result<Vec<u8>> {
         let lock = self.write_lock.write().unwrap();

-        let old = self.db.rocks.get_cf(&self.cf(), &key)?;
+        let old = self.db.rocks.get_cf(&self.cf(), key)?;
         let new = utils::increment(old.as_deref()).unwrap();
         self.db.rocks.put_cf(&self.cf(), key, &new)?;

@@ -224,6 +226,7 @@ impl Tree for RocksDbEngineTree<'_> {
             &self.cf(),
             rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward),
         )
+        //.map(|r| r.unwrap())
         .map(|(k, v)| (Vec::from(k), Vec::from(v)))
         .take_while(move |(k, _)| k.starts_with(&prefix)),
         )
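The prefix scans used by the RocksDB tree above (seek to the prefix with IteratorMode::From, then take_while the keys still start with it) rely only on keys being stored in sorted order. Here is a backend-agnostic sketch of the same pattern over a plain sorted map; the separator byte and key names are illustrative.

use std::collections::BTreeMap;

// Seek to the first key >= prefix, then keep entries while they still start
// with the prefix. This is the shape of scan_prefix in the backends above.
fn scan_prefix<'a>(
    map: &'a BTreeMap<Vec<u8>, Vec<u8>>,
    prefix: Vec<u8>,
) -> impl Iterator<Item = (&'a [u8], &'a [u8])> + 'a {
    map.range(prefix.clone()..)
        .take_while(move |(k, _)| k.starts_with(&prefix))
        .map(|(k, v)| (k.as_slice(), v.as_slice()))
}

fn main() {
    let mut map = BTreeMap::new();
    map.insert(b"room1\xffa".to_vec(), b"1".to_vec());
    map.insert(b"room1\xffb".to_vec(), b"2".to_vec());
    map.insert(b"room2\xffa".to_vec(), b"3".to_vec());

    let hits: Vec<_> = scan_prefix(&map, b"room1\xff".to_vec()).collect();
    assert_eq!(hits.len(), 2);
}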
@@ -1,4 +1,4 @@
-use super::{watchers::Watchers, DatabaseEngine, Tree};
+use super::{watchers::Watchers, KeyValueDatabaseEngine, KvTree};
 use crate::{database::Config, Result};
 use parking_lot::{Mutex, MutexGuard};
 use rusqlite::{Connection, DatabaseName::Main, OptionalExtension};
@@ -48,13 +48,13 @@ pub struct Engine {

 impl Engine {
     fn prepare_conn(path: &Path, cache_size_kb: u32) -> Result<Connection> {
-        let conn = Connection::open(&path)?;
+        let conn = Connection::open(path)?;

-        conn.pragma_update(Some(Main), "page_size", &2048)?;
-        conn.pragma_update(Some(Main), "journal_mode", &"WAL")?;
-        conn.pragma_update(Some(Main), "synchronous", &"NORMAL")?;
-        conn.pragma_update(Some(Main), "cache_size", &(-i64::from(cache_size_kb)))?;
-        conn.pragma_update(Some(Main), "wal_autocheckpoint", &0)?;
+        conn.pragma_update(Some(Main), "page_size", 2048)?;
+        conn.pragma_update(Some(Main), "journal_mode", "WAL")?;
+        conn.pragma_update(Some(Main), "synchronous", "NORMAL")?;
+        conn.pragma_update(Some(Main), "cache_size", -i64::from(cache_size_kb))?;
+        conn.pragma_update(Some(Main), "wal_autocheckpoint", 0)?;

         Ok(conn)
     }
@@ -75,12 +75,12 @@ impl Engine {

     pub fn flush_wal(self: &Arc<Self>) -> Result<()> {
         self.write_lock()
-            .pragma_update(Some(Main), "wal_checkpoint", &"RESTART")?;
+            .pragma_update(Some(Main), "wal_checkpoint", "RESTART")?;
         Ok(())
     }
 }

-impl DatabaseEngine for Arc<Engine> {
+impl KeyValueDatabaseEngine for Arc<Engine> {
     fn open(config: &Config) -> Result<Self> {
         let path = Path::new(&config.database_path).join("conduit.db");

@@ -105,8 +105,8 @@ impl DatabaseEngine for Arc<Engine> {
         Ok(arc)
     }

-    fn open_tree(&self, name: &str) -> Result<Arc<dyn Tree>> {
-        self.write_lock().execute(&format!("CREATE TABLE IF NOT EXISTS {} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )", name), [])?;
+    fn open_tree(&self, name: &str) -> Result<Arc<dyn KvTree>> {
+        self.write_lock().execute(&format!("CREATE TABLE IF NOT EXISTS {name} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )"), [])?;

         Ok(Arc::new(SqliteTable {
             engine: Arc::clone(self),
@@ -135,7 +135,6 @@ type TupleOfBytes = (Vec<u8>, Vec<u8>);

 impl SqliteTable {
     fn get_with_guard(&self, guard: &Connection, key: &[u8]) -> Result<Option<Vec<u8>>> {
-        //dbg!(&self.name);
         Ok(guard
             .prepare(format!("SELECT value FROM {} WHERE key = ?", self.name).as_str())?
             .query_row([key], |row| row.get(0))
@@ -143,7 +142,6 @@ impl SqliteTable {
     }

     fn insert_with_guard(&self, guard: &Connection, key: &[u8], value: &[u8]) -> Result<()> {
-        //dbg!(&self.name);
         guard.execute(
             format!(
                 "INSERT OR REPLACE INTO {} (key, value) VALUES (?, ?)",
@@ -176,10 +174,7 @@ impl SqliteTable {
             statement
                 .query_map([], |row| Ok((row.get_unwrap(0), row.get_unwrap(1))))
                 .unwrap()
-                .map(move |r| {
-                    //dbg!(&name);
-                    r.unwrap()
-                }),
+                .map(move |r| r.unwrap()),
         );

         Box::new(PreparedStatementIterator {
@@ -189,7 +184,7 @@ impl SqliteTable {
     }
 }

-impl Tree for SqliteTable {
+impl KvTree for SqliteTable {
     fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
         self.get_with_guard(self.engine.read_lock(), key)
     }
@@ -276,10 +271,7 @@ impl Tree for SqliteTable {
             statement
                 .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1))))
                 .unwrap()
-                .map(move |r| {
-                    //dbg!(&name);
-                    r.unwrap()
-                }),
+                .map(move |r| r.unwrap()),
         );
         Box::new(PreparedStatementIterator {
             iterator,
@@ -301,10 +293,7 @@ impl Tree for SqliteTable {
             statement
                 .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1))))
                 .unwrap()
-                .map(move |r| {
-                    //dbg!(&name);
-                    r.unwrap()
-                }),
+                .map(move |r| r.unwrap()),
        );

        Box::new(PreparedStatementIterator {
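The pragma changes above drop the extra borrows because recent rusqlite versions take the pragma value by value as impl ToSql. A small hedged sketch of the same setup against an in-memory connection follows; it assumes a recent rusqlite release, and note that the WAL journal mode only has an effect on on-disk databases.

use rusqlite::{Connection, DatabaseName::Main};

fn main() -> rusqlite::Result<()> {
    // Same pragma setup as prepare_conn above, shown against an in-memory
    // database (journal_mode = WAL is a no-op there, but harmless).
    let conn = Connection::open_in_memory()?;

    conn.pragma_update(Some(Main), "page_size", 2048)?;
    conn.pragma_update(Some(Main), "journal_mode", "WAL")?;
    conn.pragma_update(Some(Main), "synchronous", "NORMAL")?;
    conn.pragma_update(Some(Main), "cache_size", -64_000_i64)?;
    conn.pragma_update(Some(Main), "wal_autocheckpoint", 0)?;

    Ok(())
}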
File diff suppressed because it is too large.
@@ -1,30 +1,23 @@
-use crate::{utils, Error, Result};
+use std::collections::HashMap;

 use ruma::{
     api::client::error::ErrorKind,
     events::{AnyEphemeralRoomEvent, RoomAccountDataEventType},
     serde::Raw,
     RoomId, UserId,
 };
-use serde::{de::DeserializeOwned, Serialize};
-use std::{collections::HashMap, sync::Arc};

-use super::abstraction::Tree;
+use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};

-pub struct AccountData {
-    pub(super) roomuserdataid_accountdata: Arc<dyn Tree>, // RoomUserDataId = Room + User + Count + Type
-    pub(super) roomusertype_roomuserdataid: Arc<dyn Tree>, // RoomUserType = Room + User + Type
-}
-
-impl AccountData {
+impl service::account_data::Data for KeyValueDatabase {
     /// Places one event in the account data of the user and removes the previous entry.
-    #[tracing::instrument(skip(self, room_id, user_id, event_type, data, globals))]
-    pub fn update<T: Serialize>(
+    #[tracing::instrument(skip(self, room_id, user_id, event_type, data))]
+    fn update(
         &self,
         room_id: Option<&RoomId>,
         user_id: &UserId,
         event_type: RoomAccountDataEventType,
-        data: &T,
-        globals: &super::globals::Globals,
+        data: &serde_json::Value,
     ) -> Result<()> {
         let mut prefix = room_id
             .map(|r| r.to_string())
@@ -36,15 +29,14 @@ impl AccountData {
         prefix.push(0xff);

         let mut roomuserdataid = prefix.clone();
-        roomuserdataid.extend_from_slice(&globals.next_count()?.to_be_bytes());
+        roomuserdataid.extend_from_slice(&services().globals.next_count()?.to_be_bytes());
         roomuserdataid.push(0xff);
         roomuserdataid.extend_from_slice(event_type.to_string().as_bytes());

         let mut key = prefix;
         key.extend_from_slice(event_type.to_string().as_bytes());

-        let json = serde_json::to_value(data).expect("all types here can be serialized"); // TODO: maybe add error handling
-        if json.get("type").is_none() || json.get("content").is_none() {
+        if data.get("type").is_none() || data.get("content").is_none() {
             return Err(Error::BadRequest(
                 ErrorKind::InvalidParam,
                 "Account data doesn't have all required fields.",
@@ -53,7 +45,7 @@ impl AccountData {

         self.roomuserdataid_accountdata.insert(
             &roomuserdataid,
-            &serde_json::to_vec(&json).expect("to_vec always works on json values"),
+            &serde_json::to_vec(&data).expect("to_vec always works on json values"),
         )?;

         let prev = self.roomusertype_roomuserdataid.get(&key)?;
@@ -71,12 +63,12 @@ impl AccountData {

     /// Searches the account data for a specific kind.
     #[tracing::instrument(skip(self, room_id, user_id, kind))]
-    pub fn get<T: DeserializeOwned>(
+    fn get(
         &self,
         room_id: Option<&RoomId>,
         user_id: &UserId,
         kind: RoomAccountDataEventType,
-    ) -> Result<Option<T>> {
+    ) -> Result<Option<Box<serde_json::value::RawValue>>> {
         let mut key = room_id
             .map(|r| r.to_string())
             .unwrap_or_default()
@@ -104,7 +96,7 @@ impl AccountData {

     /// Returns all changes to the account data that happened after `since`.
     #[tracing::instrument(skip(self, room_id, user_id, since))]
-    pub fn changes_since(
+    fn changes_since(
         &self,
         room_id: Option<&RoomId>,
         user_id: &UserId,
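The account-data implementation above keeps two keys per update: roomusertype_roomuserdataid (room + user + type) pointing at the latest roomuserdataid (room + user + count + type), with 0xff as the field separator. A std-only sketch of that key layout follows; the identifiers and the fixed count are placeholders for illustration.

fn main() {
    let room_id = "!room:example.com";
    let user_id = "@alice:example.com";
    let event_type = "m.tag";
    let count: u64 = 42; // stand-in for services().globals.next_count()

    // prefix = room + 0xff + user + 0xff
    let mut prefix = room_id.as_bytes().to_vec();
    prefix.push(0xff);
    prefix.extend_from_slice(user_id.as_bytes());
    prefix.push(0xff);

    // roomusertype key = prefix + type: points at the newest entry for this type.
    let mut key = prefix.clone();
    key.extend_from_slice(event_type.as_bytes());

    // roomuserdataid = prefix + count(8, BE) + 0xff + type: one entry per update,
    // so newer entries sort after older ones within the same prefix.
    let mut roomuserdataid = prefix.clone();
    roomuserdataid.extend_from_slice(&count.to_be_bytes());
    roomuserdataid.push(0xff);
    roomuserdataid.extend_from_slice(event_type.as_bytes());

    assert!(key.starts_with(&prefix));
    assert!(roomuserdataid.starts_with(&prefix));
}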
@@ -1,20 +1,8 @@
-use crate::{utils, Error, Result};
-use std::{
-    collections::HashMap,
-    sync::{Arc, RwLock},
-};
+use crate::{database::KeyValueDatabase, service, utils, Error, Result};

-use super::abstraction::Tree;
-
-pub struct Appservice {
-    pub(super) cached_registrations: Arc<RwLock<HashMap<String, serde_yaml::Value>>>,
-    pub(super) id_appserviceregistrations: Arc<dyn Tree>,
-}
-
-impl Appservice {
+impl service::appservice::Data for KeyValueDatabase {
     /// Registers an appservice and returns the ID to the caller
-    ///
-    pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result<String> {
+    fn register_appservice(&self, yaml: serde_yaml::Value) -> Result<String> {
         // TODO: Rumaify
         let id = yaml.get("id").unwrap().as_str().unwrap();
         self.id_appserviceregistrations.insert(
@@ -34,7 +22,7 @@ impl Appservice {
     /// # Arguments
     ///
     /// * `service_name` - the name you send to register the service previously
-    pub fn unregister_appservice(&self, service_name: &str) -> Result<()> {
+    fn unregister_appservice(&self, service_name: &str) -> Result<()> {
         self.id_appserviceregistrations
             .remove(service_name.as_bytes())?;
         self.cached_registrations
@@ -44,7 +32,7 @@ impl Appservice {
         Ok(())
     }

-    pub fn get_registration(&self, id: &str) -> Result<Option<serde_yaml::Value>> {
+    fn get_registration(&self, id: &str) -> Result<Option<serde_yaml::Value>> {
         self.cached_registrations
             .read()
             .unwrap()
@@ -66,14 +54,17 @@ impl Appservice {
         )
     }

-    pub fn iter_ids(&self) -> Result<impl Iterator<Item = Result<String>> + '_> {
-        Ok(self.id_appserviceregistrations.iter().map(|(id, _)| {
-            utils::string_from_bytes(&id)
-                .map_err(|_| Error::bad_database("Invalid id bytes in id_appserviceregistrations."))
-        }))
+    fn iter_ids<'a>(&'a self) -> Result<Box<dyn Iterator<Item = Result<String>> + 'a>> {
+        Ok(Box::new(self.id_appserviceregistrations.iter().map(
+            |(id, _)| {
+                utils::string_from_bytes(&id).map_err(|_| {
+                    Error::bad_database("Invalid id bytes in id_appserviceregistrations.")
+                })
+            },
+        )))
     }

-    pub fn all(&self) -> Result<Vec<(String, serde_yaml::Value)>> {
+    fn all(&self) -> Result<Vec<(String, serde_yaml::Value)>> {
         self.iter_ids()?
             .filter_map(|id| id.ok())
             .map(move |id| {
@@ -0,0 +1,233 @@
use std::collections::BTreeMap;

use async_trait::async_trait;
use futures_util::{stream::FuturesUnordered, StreamExt};
use ruma::{
    api::federation::discovery::{ServerSigningKeys, VerifyKey},
    signatures::Ed25519KeyPair,
    DeviceId, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName, UserId,
};

use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};

pub const COUNTER: &[u8] = b"c";

#[async_trait]
impl service::globals::Data for KeyValueDatabase {
    fn next_count(&self) -> Result<u64> {
        utils::u64_from_bytes(&self.global.increment(COUNTER)?)
            .map_err(|_| Error::bad_database("Count has invalid bytes."))
    }

    fn current_count(&self) -> Result<u64> {
        self.global.get(COUNTER)?.map_or(Ok(0_u64), |bytes| {
            utils::u64_from_bytes(&bytes)
                .map_err(|_| Error::bad_database("Count has invalid bytes."))
        })
    }

    async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> {
        let userid_bytes = user_id.as_bytes().to_vec();
        let mut userid_prefix = userid_bytes.clone();
        userid_prefix.push(0xff);

        let mut userdeviceid_prefix = userid_prefix.clone();
        userdeviceid_prefix.extend_from_slice(device_id.as_bytes());
        userdeviceid_prefix.push(0xff);

        let mut futures = FuturesUnordered::new();

        // Return when *any* user changed his key
        // TODO: only send for user they share a room with
        futures.push(self.todeviceid_events.watch_prefix(&userdeviceid_prefix));

        futures.push(self.userroomid_joined.watch_prefix(&userid_prefix));
        futures.push(self.userroomid_invitestate.watch_prefix(&userid_prefix));
        futures.push(self.userroomid_leftstate.watch_prefix(&userid_prefix));
        futures.push(
            self.userroomid_notificationcount
                .watch_prefix(&userid_prefix),
        );
        futures.push(self.userroomid_highlightcount.watch_prefix(&userid_prefix));

        // Events for rooms we are in
        for room_id in services()
            .rooms
            .state_cache
            .rooms_joined(user_id)
            .filter_map(|r| r.ok())
        {
            let short_roomid = services()
                .rooms
                .short
                .get_shortroomid(&room_id)
                .ok()
                .flatten()
                .expect("room exists")
                .to_be_bytes()
                .to_vec();

            let roomid_bytes = room_id.as_bytes().to_vec();
            let mut roomid_prefix = roomid_bytes.clone();
            roomid_prefix.push(0xff);

            // PDUs
            futures.push(self.pduid_pdu.watch_prefix(&short_roomid));

            // EDUs
            futures.push(self.roomid_lasttypingupdate.watch_prefix(&roomid_bytes));

            futures.push(self.readreceiptid_readreceipt.watch_prefix(&roomid_prefix));

            // Key changes
            futures.push(self.keychangeid_userid.watch_prefix(&roomid_prefix));

            // Room account data
            let mut roomuser_prefix = roomid_prefix.clone();
            roomuser_prefix.extend_from_slice(&userid_prefix);

            futures.push(
                self.roomusertype_roomuserdataid
                    .watch_prefix(&roomuser_prefix),
            );
        }

        let mut globaluserdata_prefix = vec![0xff];
        globaluserdata_prefix.extend_from_slice(&userid_prefix);

        futures.push(
            self.roomusertype_roomuserdataid
                .watch_prefix(&globaluserdata_prefix),
        );

        // More key changes (used when user is not joined to any rooms)
        futures.push(self.keychangeid_userid.watch_prefix(&userid_prefix));

        // One time keys
        futures.push(self.userid_lastonetimekeyupdate.watch_prefix(&userid_bytes));

        futures.push(Box::pin(services().globals.rotate.watch()));

        // Wait until one of them finds something
        futures.next().await;

        Ok(())
    }

    fn cleanup(&self) -> Result<()> {
        self._db.cleanup()
    }

    fn memory_usage(&self) -> Result<String> {
        self._db.memory_usage()
    }

    fn load_keypair(&self) -> Result<Ed25519KeyPair> {
        let keypair_bytes = self.global.get(b"keypair")?.map_or_else(
            || {
                let keypair = utils::generate_keypair();
                self.global.insert(b"keypair", &keypair)?;
                Ok::<_, Error>(keypair)
            },
            |s| Ok(s.to_vec()),
        )?;

        let mut parts = keypair_bytes.splitn(2, |&b| b == 0xff);

        utils::string_from_bytes(
            // 1. version
            parts
                .next()
                .expect("splitn always returns at least one element"),
        )
        .map_err(|_| Error::bad_database("Invalid version bytes in keypair."))
        .and_then(|version| {
            // 2. key
            parts
                .next()
                .ok_or_else(|| Error::bad_database("Invalid keypair format in database."))
                .map(|key| (version, key))
        })
        .and_then(|(version, key)| {
            Ed25519KeyPair::from_der(key, version)
                .map_err(|_| Error::bad_database("Private or public keys are invalid."))
        })
    }
    fn remove_keypair(&self) -> Result<()> {
        self.global.remove(b"keypair")
    }

    fn add_signing_key(
        &self,
        origin: &ServerName,
        new_keys: ServerSigningKeys,
    ) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
        // Not atomic, but this is not critical
        let signingkeys = self.server_signingkeys.get(origin.as_bytes())?;

        let mut keys = signingkeys
            .and_then(|keys| serde_json::from_slice(&keys).ok())
            .unwrap_or_else(|| {
                // Just insert "now", it doesn't matter
                ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now())
            });

        let ServerSigningKeys {
            verify_keys,
            old_verify_keys,
            ..
        } = new_keys;

        keys.verify_keys.extend(verify_keys.into_iter());
        keys.old_verify_keys.extend(old_verify_keys.into_iter());

        self.server_signingkeys.insert(
            origin.as_bytes(),
            &serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"),
        )?;

        let mut tree = keys.verify_keys;
        tree.extend(
            keys.old_verify_keys
                .into_iter()
                .map(|old| (old.0, VerifyKey::new(old.1.key))),
        );

        Ok(tree)
    }

    /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server.
    fn signing_keys_for(
        &self,
        origin: &ServerName,
    ) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
        let signingkeys = self
            .server_signingkeys
            .get(origin.as_bytes())?
            .and_then(|bytes| serde_json::from_slice(&bytes).ok())
            .map(|keys: ServerSigningKeys| {
                let mut tree = keys.verify_keys;
                tree.extend(
                    keys.old_verify_keys
                        .into_iter()
                        .map(|old| (old.0, VerifyKey::new(old.1.key))),
                );
                tree
            })
            .unwrap_or_else(BTreeMap::new);

        Ok(signingkeys)
    }

    fn database_version(&self) -> Result<u64> {
        self.global.get(b"version")?.map_or(Ok(0), |version| {
            utils::u64_from_bytes(&version)
                .map_err(|_| Error::bad_database("Database version id is invalid."))
        })
    }

    fn bump_database_version(&self, new_version: u64) -> Result<()> {
        self.global.insert(b"version", &new_version.to_be_bytes())?;
        Ok(())
    }
}
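The next_count / current_count pair above implements the server-wide monotonic counter: a single value under the key b"c" that is stored as an 8-byte big-endian u64 and bumped on every write. Below is a std-only sketch of that pattern over a plain map; it assumes utils::increment simply parses the old bytes as a big-endian u64 and adds one, which is an assumption, not a statement about the real helper.

use std::collections::HashMap;

// Stand-in for utils::increment: read the old big-endian u64 (if any), add one.
fn increment(old: Option<&[u8]>) -> Vec<u8> {
    let number = match old.map(TryInto::try_into) {
        Some(Ok(bytes)) => u64::from_be_bytes(bytes) + 1,
        _ => 1, // first use, or unreadable value
    };
    number.to_be_bytes().to_vec()
}

fn main() {
    let mut global: HashMap<Vec<u8>, Vec<u8>> = HashMap::new();

    for _ in 0..3 {
        let new = increment(global.get(b"c".as_slice()).map(Vec::as_slice));
        global.insert(b"c".to_vec(), new);
    }

    let count = u64::from_be_bytes(global[b"c".as_slice()].as_slice().try_into().unwrap());
    assert_eq!(count, 3);
}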
@@ -1,30 +1,23 @@
-use crate::{utils, Error, Result};
+use std::collections::BTreeMap;

 use ruma::{
     api::client::{
         backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup},
         error::ErrorKind,
     },
     serde::Raw,
-    RoomId, UserId,
+    OwnedRoomId, RoomId, UserId,
 };
-use std::{collections::BTreeMap, sync::Arc};

-use super::abstraction::Tree;
+use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};

-pub struct KeyBackups {
-    pub(super) backupid_algorithm: Arc<dyn Tree>, // BackupId = UserId + Version(Count)
-    pub(super) backupid_etag: Arc<dyn Tree>, // BackupId = UserId + Version(Count)
-    pub(super) backupkeyid_backup: Arc<dyn Tree>, // BackupKeyId = UserId + Version + RoomId + SessionId
-}
-
-impl KeyBackups {
-    pub fn create_backup(
+impl service::key_backups::Data for KeyValueDatabase {
+    fn create_backup(
         &self,
         user_id: &UserId,
         backup_metadata: &Raw<BackupAlgorithm>,
-        globals: &super::globals::Globals,
     ) -> Result<String> {
-        let version = globals.next_count()?.to_string();
+        let version = services().globals.next_count()?.to_string();

         let mut key = user_id.as_bytes().to_vec();
         key.push(0xff);
@@ -35,11 +28,11 @@ impl KeyBackups {
             &serde_json::to_vec(backup_metadata).expect("BackupAlgorithm::to_vec always works"),
         )?;
         self.backupid_etag
-            .insert(&key, &globals.next_count()?.to_be_bytes())?;
+            .insert(&key, &services().globals.next_count()?.to_be_bytes())?;
         Ok(version)
     }

-    pub fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> {
+    fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> {
         let mut key = user_id.as_bytes().to_vec();
         key.push(0xff);
         key.extend_from_slice(version.as_bytes());
@@ -56,12 +49,11 @@ impl KeyBackups {
         Ok(())
     }

-    pub fn update_backup(
+    fn update_backup(
         &self,
         user_id: &UserId,
         version: &str,
         backup_metadata: &Raw<BackupAlgorithm>,
-        globals: &super::globals::Globals,
     ) -> Result<String> {
         let mut key = user_id.as_bytes().to_vec();
         key.push(0xff);
@@ -77,11 +69,11 @@ impl KeyBackups {
         self.backupid_algorithm
             .insert(&key, backup_metadata.json().get().as_bytes())?;
         self.backupid_etag
-            .insert(&key, &globals.next_count()?.to_be_bytes())?;
+            .insert(&key, &services().globals.next_count()?.to_be_bytes())?;
         Ok(version.to_owned())
     }

-    pub fn get_latest_backup_version(&self, user_id: &UserId) -> Result<Option<String>> {
+    fn get_latest_backup_version(&self, user_id: &UserId) -> Result<Option<String>> {
         let mut prefix = user_id.as_bytes().to_vec();
         prefix.push(0xff);
         let mut last_possible_key = prefix.clone();
@@ -102,7 +94,7 @@ impl KeyBackups {
         .transpose()
     }

-    pub fn get_latest_backup(
+    fn get_latest_backup(
         &self,
         user_id: &UserId,
     ) -> Result<Option<(String, Raw<BackupAlgorithm>)>> {
@@ -133,11 +125,7 @@ impl KeyBackups {
         .transpose()
     }

-    pub fn get_backup(
-        &self,
-        user_id: &UserId,
-        version: &str,
-    ) -> Result<Option<Raw<BackupAlgorithm>>> {
+    fn get_backup(&self, user_id: &UserId, version: &str) -> Result<Option<Raw<BackupAlgorithm>>> {
         let mut key = user_id.as_bytes().to_vec();
         key.push(0xff);
         key.extend_from_slice(version.as_bytes());
@@ -150,14 +138,13 @@ impl KeyBackups {
         })
     }

-    pub fn add_key(
+    fn add_key(
         &self,
         user_id: &UserId,
         version: &str,
         room_id: &RoomId,
         session_id: &str,
         key_data: &Raw<KeyBackupData>,
-        globals: &super::globals::Globals,
     ) -> Result<()> {
         let mut key = user_id.as_bytes().to_vec();
         key.push(0xff);
@@ -171,7 +158,7 @@ impl KeyBackups {
         }

         self.backupid_etag
-            .insert(&key, &globals.next_count()?.to_be_bytes())?;
+            .insert(&key, &services().globals.next_count()?.to_be_bytes())?;

         key.push(0xff);
         key.extend_from_slice(room_id.as_bytes());
@@ -184,7 +171,7 @@ impl KeyBackups {
         Ok(())
     }

-    pub fn count_keys(&self, user_id: &UserId, version: &str) -> Result<usize> {
+    fn count_keys(&self, user_id: &UserId, version: &str) -> Result<usize> {
         let mut prefix = user_id.as_bytes().to_vec();
         prefix.push(0xff);
         prefix.extend_from_slice(version.as_bytes());
@@ -192,7 +179,7 @@ impl KeyBackups {
         Ok(self.backupkeyid_backup.scan_prefix(prefix).count())
     }

-    pub fn get_etag(&self, user_id: &UserId, version: &str) -> Result<String> {
+    fn get_etag(&self, user_id: &UserId, version: &str) -> Result<String> {
         let mut key = user_id.as_bytes().to_vec();
         key.push(0xff);
         key.extend_from_slice(version.as_bytes());
@@ -207,17 +194,17 @@ impl KeyBackups {
         .to_string())
     }

-    pub fn get_all(
+    fn get_all(
         &self,
         user_id: &UserId,
         version: &str,
-    ) -> Result<BTreeMap<Box<RoomId>, RoomKeyBackup>> {
+    ) -> Result<BTreeMap<OwnedRoomId, RoomKeyBackup>> {
         let mut prefix = user_id.as_bytes().to_vec();
         prefix.push(0xff);
         prefix.extend_from_slice(version.as_bytes());
         prefix.push(0xff);

-        let mut rooms = BTreeMap::<Box<RoomId>, RoomKeyBackup>::new();
+        let mut rooms = BTreeMap::<OwnedRoomId, RoomKeyBackup>::new();

         for result in self
             .backupkeyid_backup
@@ -263,7 +250,7 @@ impl KeyBackups {
         Ok(rooms)
     }

-    pub fn get_room(
+    fn get_room(
         &self,
         user_id: &UserId,
         version: &str,
@@ -300,7 +287,7 @@ impl KeyBackups {
         .collect())
     }

-    pub fn get_session(
+    fn get_session(
         &self,
         user_id: &UserId,
         version: &str,
@@ -325,7 +312,7 @@ impl KeyBackups {
         .transpose()
     }

-    pub fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> {
+    fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> {
         let mut key = user_id.as_bytes().to_vec();
         key.push(0xff);
         key.extend_from_slice(version.as_bytes());
@@ -338,12 +325,7 @@ impl KeyBackups {
         Ok(())
     }

-    pub fn delete_room_keys(
-        &self,
-        user_id: &UserId,
-        version: &str,
-        room_id: &RoomId,
-    ) -> Result<()> {
+    fn delete_room_keys(&self, user_id: &UserId, version: &str, room_id: &RoomId) -> Result<()> {
         let mut key = user_id.as_bytes().to_vec();
         key.push(0xff);
         key.extend_from_slice(version.as_bytes());
@@ -358,7 +340,7 @@ impl KeyBackups {
         Ok(())
     }

-    pub fn delete_room_key(
+    fn delete_room_key(
         &self,
         user_id: &UserId,
         version: &str,
@@ -0,0 +1,82 @@
use ruma::api::client::error::ErrorKind;

use crate::{database::KeyValueDatabase, service, utils, Error, Result};

impl service::media::Data for KeyValueDatabase {
    fn create_file_metadata(
        &self,
        mxc: String,
        width: u32,
        height: u32,
        content_disposition: Option<&str>,
        content_type: Option<&str>,
    ) -> Result<Vec<u8>> {
        let mut key = mxc.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(&width.to_be_bytes());
        key.extend_from_slice(&height.to_be_bytes());
        key.push(0xff);
        key.extend_from_slice(
            content_disposition
                .as_ref()
                .map(|f| f.as_bytes())
                .unwrap_or_default(),
        );
        key.push(0xff);
        key.extend_from_slice(
            content_type
                .as_ref()
                .map(|c| c.as_bytes())
                .unwrap_or_default(),
        );

        self.mediaid_file.insert(&key, &[])?;

        Ok(key)
    }

    fn search_file_metadata(
        &self,
        mxc: String,
        width: u32,
        height: u32,
    ) -> Result<(Option<String>, Option<String>, Vec<u8>)> {
        let mut prefix = mxc.as_bytes().to_vec();
        prefix.push(0xff);
        prefix.extend_from_slice(&width.to_be_bytes());
        prefix.extend_from_slice(&height.to_be_bytes());
        prefix.push(0xff);

        let (key, _) = self
            .mediaid_file
            .scan_prefix(prefix)
            .next()
            .ok_or(Error::BadRequest(ErrorKind::NotFound, "Media not found"))?;

        let mut parts = key.rsplit(|&b| b == 0xff);

        let content_type = parts
            .next()
            .map(|bytes| {
                utils::string_from_bytes(bytes).map_err(|_| {
                    Error::bad_database("Content type in mediaid_file is invalid unicode.")
                })
            })
            .transpose()?;

        let content_disposition_bytes = parts
            .next()
            .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?;

        let content_disposition = if content_disposition_bytes.is_empty() {
            None
        } else {
            Some(
                utils::string_from_bytes(content_disposition_bytes).map_err(|_| {
                    Error::bad_database("Content Disposition in mediaid_file is invalid unicode.")
                })?,
            )
        };
        Ok((content_disposition, content_type, key))
    }
}
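search_file_metadata above recovers the content type and disposition by walking the key back to front with rsplit on the 0xff separator. A std-only sketch of building such a key and parsing it back follows; the MXC URI, dimensions, and content type are made-up example values.

// mediaid_file key layout used above:
//   mxc + 0xff + width(4, BE) + height(4, BE) + 0xff + disposition + 0xff + content_type
fn main() {
    let mxc = "mxc://example.com/abc123";
    let (width, height) = (96_u32, 96_u32);
    let content_disposition = ""; // empty means "none"
    let content_type = "image/png";

    let mut key = mxc.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(&width.to_be_bytes());
    key.extend_from_slice(&height.to_be_bytes());
    key.push(0xff);
    key.extend_from_slice(content_disposition.as_bytes());
    key.push(0xff);
    key.extend_from_slice(content_type.as_bytes());

    // Walk the fields back to front, as search_file_metadata does.
    let mut parts = key.rsplit(|&b| b == 0xff);
    assert_eq!(parts.next(), Some(content_type.as_bytes()));
    assert_eq!(parts.next(), Some(content_disposition.as_bytes()));
}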
@@ -0,0 +1,13 @@
mod account_data;
//mod admin;
mod appservice;
mod globals;
mod key_backups;
mod media;
//mod pdu;
mod pusher;
mod rooms;
mod sending;
mod transaction_ids;
mod uiaa;
mod users;
@@ -0,0 +1,79 @@
use ruma::{
    api::client::push::{set_pusher, Pusher},
    UserId,
};

use crate::{database::KeyValueDatabase, service, utils, Error, Result};

impl service::pusher::Data for KeyValueDatabase {
    fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::PusherAction) -> Result<()> {
        match &pusher {
            set_pusher::v3::PusherAction::Post(data) => {
                let mut key = sender.as_bytes().to_vec();
                key.push(0xff);
                key.extend_from_slice(data.pusher.ids.pushkey.as_bytes());
                self.senderkey_pusher.insert(
                    &key,
                    &serde_json::to_vec(&pusher).expect("Pusher is valid JSON value"),
                )?;
                Ok(())
            }
            set_pusher::v3::PusherAction::Delete(ids) => {
                let mut key = sender.as_bytes().to_vec();
                key.push(0xff);
                key.extend_from_slice(ids.pushkey.as_bytes());
                self.senderkey_pusher
                    .remove(&key)
                    .map(|_| ())
                    .map_err(Into::into)
            }
        }
    }

    fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result<Option<Pusher>> {
        let mut senderkey = sender.as_bytes().to_vec();
        senderkey.push(0xff);
        senderkey.extend_from_slice(pushkey.as_bytes());

        self.senderkey_pusher
            .get(&senderkey)?
            .map(|push| {
                serde_json::from_slice(&push)
                    .map_err(|_| Error::bad_database("Invalid Pusher in db."))
            })
            .transpose()
    }

    fn get_pushers(&self, sender: &UserId) -> Result<Vec<Pusher>> {
        let mut prefix = sender.as_bytes().to_vec();
        prefix.push(0xff);

        self.senderkey_pusher
            .scan_prefix(prefix)
            .map(|(_, push)| {
                serde_json::from_slice(&push)
                    .map_err(|_| Error::bad_database("Invalid Pusher in db."))
            })
            .collect()
    }

    fn get_pushkeys<'a>(
        &'a self,
        sender: &UserId,
    ) -> Box<dyn Iterator<Item = Result<String>> + 'a> {
        let mut prefix = sender.as_bytes().to_vec();
        prefix.push(0xff);

        Box::new(self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| {
            let mut parts = k.splitn(2, |&b| b == 0xff);
            let _senderkey = parts.next();
            let push_key = parts
                .next()
                .ok_or_else(|| Error::bad_database("Invalid senderkey_pusher in db"))?;
            let push_key_string = utils::string_from_bytes(push_key)
                .map_err(|_| Error::bad_database("Invalid pusher bytes in senderkey_pusher"))?;

            Ok(push_key_string)
        }))
    }
}
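get_pushkeys above recovers the pushkey from each senderkey_pusher key by splitting on the first 0xff only, so a pushkey containing further 0xff-free text is returned intact. A small std-only sketch of that splitn(2, ...) parse; the sender and pushkey values are illustrative.

// senderkey_pusher key layout used above: sender + 0xff + pushkey.
fn main() {
    let sender = "@alice:example.com";
    let pushkey = "a-push-key";

    let mut key = sender.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(pushkey.as_bytes());

    // splitn(2, ...) splits at the first separator only.
    let mut parts = key.splitn(2, |&b| b == 0xff);
    let sender_part = parts.next().expect("splitn yields at least one part");
    let pushkey_part = parts.next().expect("key contains a separator");

    assert_eq!(sender_part, sender.as_bytes());
    assert_eq!(String::from_utf8(pushkey_part.to_vec()).unwrap(), pushkey);
}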
@@ -0,0 +1,60 @@
use ruma::{api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId};

use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};

impl service::rooms::alias::Data for KeyValueDatabase {
    fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> {
        self.alias_roomid
            .insert(alias.alias().as_bytes(), room_id.as_bytes())?;
        let mut aliasid = room_id.as_bytes().to_vec();
        aliasid.push(0xff);
        aliasid.extend_from_slice(&services().globals.next_count()?.to_be_bytes());
        self.aliasid_alias.insert(&aliasid, alias.as_bytes())?;
        Ok(())
    }

    fn remove_alias(&self, alias: &RoomAliasId) -> Result<()> {
        if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? {
            let mut prefix = room_id.to_vec();
            prefix.push(0xff);

            for (key, _) in self.aliasid_alias.scan_prefix(prefix) {
                self.aliasid_alias.remove(&key)?;
            }
            self.alias_roomid.remove(alias.alias().as_bytes())?;
        } else {
            return Err(Error::BadRequest(
                ErrorKind::NotFound,
                "Alias does not exist.",
            ));
        }
        Ok(())
    }

    fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result<Option<OwnedRoomId>> {
        self.alias_roomid
            .get(alias.alias().as_bytes())?
            .map(|bytes| {
                RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| {
                    Error::bad_database("Room ID in alias_roomid is invalid unicode.")
                })?)
                .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid."))
            })
            .transpose()
    }

    fn local_aliases_for_room<'a>(
        &'a self,
        room_id: &RoomId,
    ) -> Box<dyn Iterator<Item = Result<OwnedRoomAliasId>> + 'a> {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);

        Box::new(self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| {
            utils::string_from_bytes(&bytes)
                .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))?
                .try_into()
                .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias."))
        }))
    }
}
@@ -0,0 +1,61 @@
use std::{collections::HashSet, mem::size_of, sync::Arc};

use crate::{database::KeyValueDatabase, service, utils, Result};

impl service::rooms::auth_chain::Data for KeyValueDatabase {
    fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result<Option<Arc<HashSet<u64>>>> {
        // Check RAM cache
        if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) {
            return Ok(Some(Arc::clone(result)));
        }

        // We only save auth chains for single events in the db
        if key.len() == 1 {
            // Check DB cache
            let chain = self
                .shorteventid_authchain
                .get(&key[0].to_be_bytes())?
                .map(|chain| {
                    chain
                        .chunks_exact(size_of::<u64>())
                        .map(|chunk| utils::u64_from_bytes(chunk).expect("byte length is correct"))
                        .collect()
                });

            if let Some(chain) = chain {
                let chain = Arc::new(chain);

                // Cache in RAM
                self.auth_chain_cache
                    .lock()
                    .unwrap()
                    .insert(vec![key[0]], Arc::clone(&chain));

                return Ok(Some(chain));
            }
        }

        Ok(None)
    }

    fn cache_auth_chain(&self, key: Vec<u64>, auth_chain: Arc<HashSet<u64>>) -> Result<()> {
        // Only persist single events in db
        if key.len() == 1 {
            self.shorteventid_authchain.insert(
                &key[0].to_be_bytes(),
                &auth_chain
                    .iter()
                    .flat_map(|s| s.to_be_bytes().to_vec())
                    .collect::<Vec<u8>>(),
            )?;
        }

        // Cache in RAM
        self.auth_chain_cache
            .lock()
            .unwrap()
            .insert(key, auth_chain);

        Ok(())
    }
}
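The auth-chain cache above persists a set of shorteventids as the plain concatenation of their 8-byte big-endian encodings and reads them back with chunks_exact. Here is a self-contained sketch of that round trip with std only; the id values are arbitrary examples.

use std::collections::HashSet;

fn main() {
    let chain: HashSet<u64> = [1, 2, 3, u64::MAX].into_iter().collect();

    // Serialize: flatten every id into its big-endian bytes, as cache_auth_chain does.
    let bytes: Vec<u8> = chain
        .iter()
        .flat_map(|s| s.to_be_bytes().to_vec())
        .collect();
    assert_eq!(bytes.len(), chain.len() * 8);

    // Deserialize: walk the buffer in exact 8-byte chunks, as the getter does.
    let decoded: HashSet<u64> = bytes
        .chunks_exact(std::mem::size_of::<u64>())
        .map(|chunk| u64::from_be_bytes(chunk.try_into().expect("byte length is correct")))
        .collect();

    assert_eq!(decoded, chain);
}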
@@ -0,0 +1,28 @@
use ruma::{OwnedRoomId, RoomId};

use crate::{database::KeyValueDatabase, service, utils, Error, Result};

impl service::rooms::directory::Data for KeyValueDatabase {
    fn set_public(&self, room_id: &RoomId) -> Result<()> {
        self.publicroomids.insert(room_id.as_bytes(), &[])
    }

    fn set_not_public(&self, room_id: &RoomId) -> Result<()> {
        self.publicroomids.remove(room_id.as_bytes())
    }

    fn is_public_room(&self, room_id: &RoomId) -> Result<bool> {
        Ok(self.publicroomids.get(room_id.as_bytes())?.is_some())
    }

    fn public_rooms<'a>(&'a self) -> Box<dyn Iterator<Item = Result<OwnedRoomId>> + 'a> {
        Box::new(self.publicroomids.iter().map(|(bytes, _)| {
            RoomId::parse(
                utils::string_from_bytes(&bytes).map_err(|_| {
                    Error::bad_database("Room ID in publicroomids is invalid unicode.")
                })?,
            )
            .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid."))
        }))
    }
}
@@ -0,0 +1,7 @@
mod presence;
mod read_receipt;
mod typing;

use crate::{database::KeyValueDatabase, service};

impl service::rooms::edus::Data for KeyValueDatabase {}
@@ -0,0 +1,152 @@
use std::collections::HashMap;

use ruma::{
    events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, RoomId, UInt, UserId,
};

use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};

impl service::rooms::edus::presence::Data for KeyValueDatabase {
    fn update_presence(
        &self,
        user_id: &UserId,
        room_id: &RoomId,
        presence: PresenceEvent,
    ) -> Result<()> {
        // TODO: Remove old entry? Or maybe just wipe completely from time to time?

        let count = services().globals.next_count()?.to_be_bytes();

        let mut presence_id = room_id.as_bytes().to_vec();
        presence_id.push(0xff);
        presence_id.extend_from_slice(&count);
        presence_id.push(0xff);
        presence_id.extend_from_slice(presence.sender.as_bytes());

        self.presenceid_presence.insert(
            &presence_id,
            &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"),
        )?;

        self.userid_lastpresenceupdate.insert(
            user_id.as_bytes(),
            &utils::millis_since_unix_epoch().to_be_bytes(),
        )?;

        Ok(())
    }

    fn ping_presence(&self, user_id: &UserId) -> Result<()> {
        self.userid_lastpresenceupdate.insert(
            user_id.as_bytes(),
            &utils::millis_since_unix_epoch().to_be_bytes(),
        )?;

        Ok(())
    }

    fn last_presence_update(&self, user_id: &UserId) -> Result<Option<u64>> {
        self.userid_lastpresenceupdate
            .get(user_id.as_bytes())?
            .map(|bytes| {
                utils::u64_from_bytes(&bytes).map_err(|_| {
                    Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.")
                })
            })
            .transpose()
    }

    fn get_presence_event(
        &self,
        room_id: &RoomId,
        user_id: &UserId,
        count: u64,
    ) -> Result<Option<PresenceEvent>> {
        let mut presence_id = room_id.as_bytes().to_vec();
        presence_id.push(0xff);
        presence_id.extend_from_slice(&count.to_be_bytes());
        presence_id.push(0xff);
        presence_id.extend_from_slice(user_id.as_bytes());

        self.presenceid_presence
            .get(&presence_id)?
            .map(|value| parse_presence_event(&value))
            .transpose()
    }

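    // Presence EDUs are keyed as `roomid 0xff count 0xff userid`, so iterating
    // from `since + 1` yields only events newer than the client's last sync.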
    fn presence_since(
        &self,
        room_id: &RoomId,
        since: u64,
    ) -> Result<HashMap<OwnedUserId, PresenceEvent>> {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);

        let mut first_possible_edu = prefix.clone();
        first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since
        let mut hashmap = HashMap::new();

        for (key, value) in self
            .presenceid_presence
            .iter_from(&first_possible_edu, false)
            .take_while(|(key, _)| key.starts_with(&prefix))
        {
            let user_id = UserId::parse(
                utils::string_from_bytes(
                    key.rsplit(|&b| b == 0xff)
                        .next()
                        .expect("rsplit always returns an element"),
                )
                .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?,
            )
            .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?;

            let presence = parse_presence_event(&value)?;

            hashmap.insert(user_id, presence);
        }

        Ok(hashmap)
    }

    /*
    fn presence_maintain(&self, db: Arc<TokioRwLock<Database>>) {
        // TODO @M0dEx: move this to a timed tasks module
        tokio::spawn(async move {
            loop {
                select! {
                    Some(user_id) = self.presence_timers.next() {
                        // TODO @M0dEx: would it be better to acquire the lock outside the loop?
                        let guard = db.read().await;

                        // TODO @M0dEx: add self.presence_timers
                        // TODO @M0dEx: maintain presence
                    }
                }
            }
        });
    }
    */
}

fn parse_presence_event(bytes: &[u8]) -> Result<PresenceEvent> {
    let mut presence: PresenceEvent = serde_json::from_slice(bytes)
        .map_err(|_| Error::bad_database("Invalid presence event in db."))?;

    let current_timestamp: UInt = utils::millis_since_unix_epoch()
        .try_into()
        .expect("time is valid");

    if presence.content.presence == PresenceState::Online {
        // Don't set last_active_ago when the user is online
        presence.content.last_active_ago = None;
    } else {
        // Convert from timestamp to duration
        presence.content.last_active_ago = presence
            .content
            .last_active_ago
            .map(|timestamp| current_timestamp - timestamp);
    }

    Ok(presence)
}
@@ -0,0 +1,150 @@
use std::mem;

use ruma::{
    events::receipt::ReceiptEvent, serde::Raw, CanonicalJsonObject, OwnedUserId, RoomId, UserId,
};

use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};

impl service::rooms::edus::read_receipt::Data for KeyValueDatabase {
    fn readreceipt_update(
        &self,
        user_id: &UserId,
        room_id: &RoomId,
        event: ReceiptEvent,
    ) -> Result<()> {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);

        let mut last_possible_key = prefix.clone();
        last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes());

        // Remove old entry
        if let Some((old, _)) = self
            .readreceiptid_readreceipt
            .iter_from(&last_possible_key, true)
            .take_while(|(key, _)| key.starts_with(&prefix))
            .find(|(key, _)| {
                key.rsplit(|&b| b == 0xff)
                    .next()
                    .expect("rsplit always returns an element")
                    == user_id.as_bytes()
            })
        {
            // This is the old room_latest
            self.readreceiptid_readreceipt.remove(&old)?;
        }

        let mut room_latest_id = prefix;
        room_latest_id.extend_from_slice(&services().globals.next_count()?.to_be_bytes());
        room_latest_id.push(0xff);
        room_latest_id.extend_from_slice(user_id.as_bytes());

        self.readreceiptid_readreceipt.insert(
            &room_latest_id,
            &serde_json::to_vec(&event).expect("EduEvent::to_string always works"),
        )?;

        Ok(())
    }

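    // Read receipts share the `roomid 0xff count 0xff userid` key layout; only
    // the latest receipt per user is kept, so everything after `since` can be
    // streamed directly.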
    fn readreceipts_since<'a>(
        &'a self,
        room_id: &RoomId,
        since: u64,
    ) -> Box<
        dyn Iterator<
                Item = Result<(
                    OwnedUserId,
                    u64,
                    Raw<ruma::events::AnySyncEphemeralRoomEvent>,
                )>,
            > + 'a,
    > {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);
        let prefix2 = prefix.clone();

        let mut first_possible_edu = prefix.clone();
        first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since

        Box::new(
            self.readreceiptid_readreceipt
                .iter_from(&first_possible_edu, false)
                .take_while(move |(k, _)| k.starts_with(&prefix2))
                .map(move |(k, v)| {
                    let count = utils::u64_from_bytes(
                        &k[prefix.len()..prefix.len() + mem::size_of::<u64>()],
                    )
                    .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?;
                    let user_id = UserId::parse(
                        utils::string_from_bytes(&k[prefix.len() + mem::size_of::<u64>() + 1..])
                            .map_err(|_| {
                                Error::bad_database("Invalid readreceiptid userid bytes in db.")
                            })?,
                    )
                    .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?;

                    let mut json =
                        serde_json::from_slice::<CanonicalJsonObject>(&v).map_err(|_| {
                            Error::bad_database(
                                "Read receipt in roomlatestid_roomlatest is invalid json.",
                            )
                        })?;
                    json.remove("room_id");

                    Ok((
                        user_id,
                        count,
                        Raw::from_json(
                            serde_json::value::to_raw_value(&json)
                                .expect("json is valid raw value"),
                        ),
                    ))
                }),
        )
    }

    fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> {
        let mut key = room_id.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(user_id.as_bytes());

        self.roomuserid_privateread
            .insert(&key, &count.to_be_bytes())?;

        self.roomuserid_lastprivatereadupdate
            .insert(&key, &services().globals.next_count()?.to_be_bytes())
    }

    fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result<Option<u64>> {
        let mut key = room_id.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(user_id.as_bytes());

        self.roomuserid_privateread
            .get(&key)?
            .map_or(Ok(None), |v| {
                Ok(Some(utils::u64_from_bytes(&v).map_err(|_| {
                    Error::bad_database("Invalid private read marker bytes")
                })?))
            })
    }

    fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result<u64> {
        let mut key = room_id.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(user_id.as_bytes());

        Ok(self
            .roomuserid_lastprivatereadupdate
            .get(&key)?
            .map(|bytes| {
                utils::u64_from_bytes(&bytes).map_err(|_| {
                    Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.")
                })
            })
            .transpose()?
            .unwrap_or(0))
    }
}
@@ -0,0 +1,127 @@
use std::{collections::HashSet, mem};

use ruma::{OwnedUserId, RoomId, UserId};

use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};

impl service::rooms::edus::typing::Data for KeyValueDatabase {
    fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);

        let count = services().globals.next_count()?.to_be_bytes();

        let mut room_typing_id = prefix;
        room_typing_id.extend_from_slice(&timeout.to_be_bytes());
        room_typing_id.push(0xff);
        room_typing_id.extend_from_slice(&count);

        self.typingid_userid
            .insert(&room_typing_id, user_id.as_bytes())?;

        self.roomid_lasttypingupdate
            .insert(room_id.as_bytes(), &count)?;

        Ok(())
    }

    fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);

        let user_id = user_id.to_string();

        let mut found_outdated = false;

        // Maybe there are multiple ones from calling roomtyping_add multiple times
        for outdated_edu in self
            .typingid_userid
            .scan_prefix(prefix)
            .filter(|(_, v)| &**v == user_id.as_bytes())
        {
            self.typingid_userid.remove(&outdated_edu.0)?;
            found_outdated = true;
        }

        if found_outdated {
            self.roomid_lasttypingupdate.insert(
                room_id.as_bytes(),
                &services().globals.next_count()?.to_be_bytes(),
            )?;
        }

        Ok(())
    }

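    // Typing EDUs carry their expiry timestamp in the key (`roomid 0xff timeout
    // 0xff count`); entries whose timestamp is already in the past are pruned here.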
    fn typings_maintain(&self, room_id: &RoomId) -> Result<()> {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);

        let current_timestamp = utils::millis_since_unix_epoch();

        let mut found_outdated = false;

        // Find all outdated edus before inserting a new one
        for outdated_edu in self
            .typingid_userid
            .scan_prefix(prefix)
            .map(|(key, _)| {
                Ok::<_, Error>((
                    key.clone(),
                    utils::u64_from_bytes(
                        &key.splitn(2, |&b| b == 0xff).nth(1).ok_or_else(|| {
                            Error::bad_database("RoomTyping has invalid timestamp or delimiters.")
                        })?[0..mem::size_of::<u64>()],
                    )
                    .map_err(|_| Error::bad_database("RoomTyping has invalid timestamp bytes."))?,
                ))
            })
            .filter_map(|r| r.ok())
            .take_while(|&(_, timestamp)| timestamp < current_timestamp)
        {
            // This is an outdated edu (time > timestamp)
            self.typingid_userid.remove(&outdated_edu.0)?;
            found_outdated = true;
        }

        if found_outdated {
            self.roomid_lasttypingupdate.insert(
                room_id.as_bytes(),
                &services().globals.next_count()?.to_be_bytes(),
            )?;
        }

        Ok(())
    }

    fn last_typing_update(&self, room_id: &RoomId) -> Result<u64> {
        Ok(self
            .roomid_lasttypingupdate
            .get(room_id.as_bytes())?
            .map(|bytes| {
                utils::u64_from_bytes(&bytes).map_err(|_| {
                    Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.")
                })
            })
            .transpose()?
            .unwrap_or(0))
    }

    fn typings_all(&self, room_id: &RoomId) -> Result<HashSet<OwnedUserId>> {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);

        let mut user_ids = HashSet::new();

        for (_, user_id) in self.typingid_userid.scan_prefix(prefix) {
            let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| {
                Error::bad_database("User ID in typingid_userid is invalid unicode.")
            })?)
            .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?;

            user_ids.insert(user_id);
        }

        Ok(user_ids)
    }
}
@@ -0,0 +1,65 @@
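// Lazy-loading bookkeeping: keys are `userid 0xff deviceid 0xff roomid 0xff
// lazy-loaded userid`, marking which member events a device has already received.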
use ruma::{DeviceId, RoomId, UserId};

use crate::{database::KeyValueDatabase, service, Result};

impl service::rooms::lazy_loading::Data for KeyValueDatabase {
    fn lazy_load_was_sent_before(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
        room_id: &RoomId,
        ll_user: &UserId,
    ) -> Result<bool> {
        let mut key = user_id.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(device_id.as_bytes());
        key.push(0xff);
        key.extend_from_slice(room_id.as_bytes());
        key.push(0xff);
        key.extend_from_slice(ll_user.as_bytes());
        Ok(self.lazyloadedids.get(&key)?.is_some())
    }

    fn lazy_load_confirm_delivery(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
        room_id: &RoomId,
        confirmed_user_ids: &mut dyn Iterator<Item = &UserId>,
    ) -> Result<()> {
        let mut prefix = user_id.as_bytes().to_vec();
        prefix.push(0xff);
        prefix.extend_from_slice(device_id.as_bytes());
        prefix.push(0xff);
        prefix.extend_from_slice(room_id.as_bytes());
        prefix.push(0xff);

        for ll_id in confirmed_user_ids {
            let mut key = prefix.clone();
            key.extend_from_slice(ll_id.as_bytes());
            self.lazyloadedids.insert(&key, &[])?;
        }

        Ok(())
    }

    fn lazy_load_reset(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
        room_id: &RoomId,
    ) -> Result<()> {
        let mut prefix = user_id.as_bytes().to_vec();
        prefix.push(0xff);
        prefix.extend_from_slice(device_id.as_bytes());
        prefix.push(0xff);
        prefix.extend_from_slice(room_id.as_bytes());
        prefix.push(0xff);

        for (key, _) in self.lazyloadedids.scan_prefix(prefix) {
            self.lazyloadedids.remove(&key)?;
        }

        Ok(())
    }
}
@@ -0,0 +1,45 @@
use ruma::{OwnedRoomId, RoomId};

use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};

impl service::rooms::metadata::Data for KeyValueDatabase {
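    // A room "exists" if at least one PDU is stored under its shortroomid prefix.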
    fn exists(&self, room_id: &RoomId) -> Result<bool> {
        let prefix = match services().rooms.short.get_shortroomid(room_id)? {
            Some(b) => b.to_be_bytes().to_vec(),
            None => return Ok(false),
        };

        // Look for PDUs in that room.
        Ok(self
            .pduid_pdu
            .iter_from(&prefix, false)
            .next()
            .filter(|(k, _)| k.starts_with(&prefix))
            .is_some())
    }

    fn iter_ids<'a>(&'a self) -> Box<dyn Iterator<Item = Result<OwnedRoomId>> + 'a> {
        Box::new(self.roomid_shortroomid.iter().map(|(bytes, _)| {
            RoomId::parse(
                utils::string_from_bytes(&bytes).map_err(|_| {
                    Error::bad_database("Room ID in publicroomids is invalid unicode.")
                })?,
            )
            .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid."))
        }))
    }

    fn is_disabled(&self, room_id: &RoomId) -> Result<bool> {
        Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some())
    }

    fn disable_room(&self, room_id: &RoomId, disabled: bool) -> Result<()> {
        if disabled {
            self.disabledroomids.insert(room_id.as_bytes(), &[])?;
        } else {
            self.disabledroomids.remove(room_id.as_bytes())?;
        }

        Ok(())
    }
}
@@ -0,0 +1,20 @@
mod alias;
mod auth_chain;
mod directory;
mod edus;
mod lazy_load;
mod metadata;
mod outlier;
mod pdu_metadata;
mod search;
mod short;
mod state;
mod state_accessor;
mod state_cache;
mod state_compressor;
mod timeline;
mod user;

use crate::{database::KeyValueDatabase, service};

impl service::rooms::Data for KeyValueDatabase {}
@@ -0,0 +1,28 @@
use ruma::{CanonicalJsonObject, EventId};

use crate::{database::KeyValueDatabase, service, Error, PduEvent, Result};

impl service::rooms::outlier::Data for KeyValueDatabase {
    fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result<Option<CanonicalJsonObject>> {
        self.eventid_outlierpdu
            .get(event_id.as_bytes())?
            .map_or(Ok(None), |pdu| {
                serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db."))
            })
    }

    fn get_outlier_pdu(&self, event_id: &EventId) -> Result<Option<PduEvent>> {
        self.eventid_outlierpdu
            .get(event_id.as_bytes())?
            .map_or(Ok(None), |pdu| {
                serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db."))
            })
    }

    fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> {
        self.eventid_outlierpdu.insert(
            event_id.as_bytes(),
            &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"),
        )
    }
}
@@ -0,0 +1,33 @@
use std::sync::Arc;

use ruma::{EventId, RoomId};

use crate::{database::KeyValueDatabase, service, Result};

impl service::rooms::pdu_metadata::Data for KeyValueDatabase {
    fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc<EventId>]) -> Result<()> {
        for prev in event_ids {
            let mut key = room_id.as_bytes().to_vec();
            key.extend_from_slice(prev.as_bytes());
            self.referencedevents.insert(&key, &[])?;
        }

        Ok(())
    }

    fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result<bool> {
        let mut key = room_id.as_bytes().to_vec();
        key.extend_from_slice(event_id.as_bytes());
        Ok(self.referencedevents.get(&key)?.is_some())
    }

    fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> {
        self.softfailedeventids.insert(event_id.as_bytes(), &[])
    }

    fn is_event_soft_failed(&self, event_id: &EventId) -> Result<bool> {
        self.softfailedeventids
            .get(event_id.as_bytes())
            .map(|o| o.is_some())
    }
}
@@ -0,0 +1,75 @@
use std::mem::size_of;

use ruma::RoomId;

use crate::{database::KeyValueDatabase, service, services, utils, Result};

impl service::rooms::search::Data for KeyValueDatabase {
    fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> {
        let mut batch = message_body
            .split_terminator(|c: char| !c.is_alphanumeric())
            .filter(|s| !s.is_empty())
            .filter(|word| word.len() <= 50)
            .map(str::to_lowercase)
            .map(|word| {
                let mut key = shortroomid.to_be_bytes().to_vec();
                key.extend_from_slice(word.as_bytes());
                key.push(0xff);
                key.extend_from_slice(pdu_id);
                (key, Vec::new())
            });

        self.tokenids.insert_batch(&mut batch)
    }

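    // Queries are tokenized the same way as `index_pdu` (lowercased alphanumeric
    // words); the per-word result iterators are intersected newest-first below.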
    fn search_pdus<'a>(
        &'a self,
        room_id: &RoomId,
        search_string: &str,
    ) -> Result<Option<(Box<dyn Iterator<Item = Vec<u8>> + 'a>, Vec<String>)>> {
        let prefix = services()
            .rooms
            .short
            .get_shortroomid(room_id)?
            .expect("room exists")
            .to_be_bytes()
            .to_vec();
        let prefix_clone = prefix.clone();

        let words: Vec<_> = search_string
            .split_terminator(|c: char| !c.is_alphanumeric())
            .filter(|s| !s.is_empty())
            .map(str::to_lowercase)
            .collect();

        let iterators = words.clone().into_iter().map(move |word| {
            let mut prefix2 = prefix.clone();
            prefix2.extend_from_slice(word.as_bytes());
            prefix2.push(0xff);

            let mut last_possible_id = prefix2.clone();
            last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes());

            self.tokenids
                .iter_from(&last_possible_id, true) // Newest pdus first
                .take_while(move |(k, _)| k.starts_with(&prefix2))
                .map(|(key, _)| key[key.len() - size_of::<u64>()..].to_vec())
        });

        let common_elements = match utils::common_elements(iterators, |a, b| {
            // We compare b with a because we reversed the iterator earlier
            b.cmp(a)
        }) {
            Some(it) => it,
            None => return Ok(None),
        };

        let mapped = common_elements.map(move |id| {
            let mut pduid = prefix_clone.clone();
            pduid.extend_from_slice(&id);
            pduid
        });

        Ok(Some((Box::new(mapped), words)))
    }
}
@@ -0,0 +1,218 @@
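// Short IDs: event IDs, state keys, room IDs and state hashes are mapped to u64s
// taken from `globals.next_count()` so database keys stay fixed-width; the hot
// mappings are additionally cached in RAM.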
use std::sync::Arc;

use ruma::{events::StateEventType, EventId, RoomId};

use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};

impl service::rooms::short::Data for KeyValueDatabase {
    fn get_or_create_shorteventid(&self, event_id: &EventId) -> Result<u64> {
        if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) {
            return Ok(*short);
        }

        let short = match self.eventid_shorteventid.get(event_id.as_bytes())? {
            Some(shorteventid) => utils::u64_from_bytes(&shorteventid)
                .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?,
            None => {
                let shorteventid = services().globals.next_count()?;
                self.eventid_shorteventid
                    .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?;
                self.shorteventid_eventid
                    .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?;
                shorteventid
            }
        };

        self.eventidshort_cache
            .lock()
            .unwrap()
            .insert(event_id.to_owned(), short);

        Ok(short)
    }

    fn get_shortstatekey(
        &self,
        event_type: &StateEventType,
        state_key: &str,
    ) -> Result<Option<u64>> {
        if let Some(short) = self
            .statekeyshort_cache
            .lock()
            .unwrap()
            .get_mut(&(event_type.clone(), state_key.to_owned()))
        {
            return Ok(Some(*short));
        }

        let mut statekey = event_type.to_string().as_bytes().to_vec();
        statekey.push(0xff);
        statekey.extend_from_slice(state_key.as_bytes());

        let short = self
            .statekey_shortstatekey
            .get(&statekey)?
            .map(|shortstatekey| {
                utils::u64_from_bytes(&shortstatekey)
                    .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))
            })
            .transpose()?;

        if let Some(s) = short {
            self.statekeyshort_cache
                .lock()
                .unwrap()
                .insert((event_type.clone(), state_key.to_owned()), s);
        }

        Ok(short)
    }

    fn get_or_create_shortstatekey(
        &self,
        event_type: &StateEventType,
        state_key: &str,
    ) -> Result<u64> {
        if let Some(short) = self
            .statekeyshort_cache
            .lock()
            .unwrap()
            .get_mut(&(event_type.clone(), state_key.to_owned()))
        {
            return Ok(*short);
        }

        let mut statekey = event_type.to_string().as_bytes().to_vec();
        statekey.push(0xff);
        statekey.extend_from_slice(state_key.as_bytes());

        let short = match self.statekey_shortstatekey.get(&statekey)? {
            Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey)
                .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?,
            None => {
                let shortstatekey = services().globals.next_count()?;
                self.statekey_shortstatekey
                    .insert(&statekey, &shortstatekey.to_be_bytes())?;
                self.shortstatekey_statekey
                    .insert(&shortstatekey.to_be_bytes(), &statekey)?;
                shortstatekey
            }
        };

        self.statekeyshort_cache
            .lock()
            .unwrap()
            .insert((event_type.clone(), state_key.to_owned()), short);

        Ok(short)
    }

    fn get_eventid_from_short(&self, shorteventid: u64) -> Result<Arc<EventId>> {
        if let Some(id) = self
            .shorteventid_cache
            .lock()
            .unwrap()
            .get_mut(&shorteventid)
        {
            return Ok(Arc::clone(id));
        }

        let bytes = self
            .shorteventid_eventid
            .get(&shorteventid.to_be_bytes())?
            .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?;

        let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| {
            Error::bad_database("EventID in shorteventid_eventid is invalid unicode.")
        })?)
        .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?;

        self.shorteventid_cache
            .lock()
            .unwrap()
            .insert(shorteventid, Arc::clone(&event_id));

        Ok(event_id)
    }

    fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> {
        if let Some(id) = self
            .shortstatekey_cache
            .lock()
            .unwrap()
            .get_mut(&shortstatekey)
        {
            return Ok(id.clone());
        }

        let bytes = self
            .shortstatekey_statekey
            .get(&shortstatekey.to_be_bytes())?
            .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?;

        let mut parts = bytes.splitn(2, |&b| b == 0xff);
        let eventtype_bytes = parts.next().expect("split always returns one entry");
        let statekey_bytes = parts
            .next()
            .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?;

        let event_type =
            StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| {
                Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.")
            })?)
            .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?;

        let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| {
            Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.")
        })?;

        let result = (event_type, state_key);

        self.shortstatekey_cache
            .lock()
            .unwrap()
            .insert(shortstatekey, result.clone());

        Ok(result)
    }

    /// Returns (shortstatehash, already_existed)
    fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> Result<(u64, bool)> {
        Ok(match self.statehash_shortstatehash.get(state_hash)? {
            Some(shortstatehash) => (
                utils::u64_from_bytes(&shortstatehash)
                    .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?,
                true,
            ),
            None => {
                let shortstatehash = services().globals.next_count()?;
                self.statehash_shortstatehash
                    .insert(state_hash, &shortstatehash.to_be_bytes())?;
                (shortstatehash, false)
            }
        })
    }

    fn get_shortroomid(&self, room_id: &RoomId) -> Result<Option<u64>> {
        self.roomid_shortroomid
            .get(room_id.as_bytes())?
            .map(|bytes| {
                utils::u64_from_bytes(&bytes)
                    .map_err(|_| Error::bad_database("Invalid shortroomid in db."))
            })
            .transpose()
    }

    fn get_or_create_shortroomid(&self, room_id: &RoomId) -> Result<u64> {
        Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? {
            Some(short) => utils::u64_from_bytes(&short)
                .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?,
            None => {
                let short = services().globals.next_count()?;
                self.roomid_shortroomid
                    .insert(room_id.as_bytes(), &short.to_be_bytes())?;
                short
            }
        })
    }
}
@@ -0,0 +1,73 @@
use ruma::{EventId, OwnedEventId, RoomId};
use std::collections::HashSet;

use std::sync::Arc;
use tokio::sync::MutexGuard;

use crate::{database::KeyValueDatabase, service, utils, Error, Result};

impl service::rooms::state::Data for KeyValueDatabase {
    fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result<Option<u64>> {
        self.roomid_shortstatehash
            .get(room_id.as_bytes())?
            .map_or(Ok(None), |bytes| {
                Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| {
                    Error::bad_database("Invalid shortstatehash in roomid_shortstatehash")
                })?))
            })
    }

    fn set_room_state(
        &self,
        room_id: &RoomId,
        new_shortstatehash: u64,
        _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
    ) -> Result<()> {
        self.roomid_shortstatehash
            .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?;
        Ok(())
    }

    fn set_event_state(&self, shorteventid: u64, shortstatehash: u64) -> Result<()> {
        self.shorteventid_shortstatehash
            .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?;
        Ok(())
    }

    fn get_forward_extremities(&self, room_id: &RoomId) -> Result<HashSet<Arc<EventId>>> {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);

        self.roomid_pduleaves
            .scan_prefix(prefix)
            .map(|(_, bytes)| {
                EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| {
                    Error::bad_database("EventID in roomid_pduleaves is invalid unicode.")
                })?)
                .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid."))
            })
            .collect()
    }

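    // Replaces the room's forward extremities: all existing `roomid_pduleaves`
    // entries are cleared before the new leaves are written.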
    fn set_forward_extremities<'a>(
        &self,
        room_id: &RoomId,
        event_ids: Vec<OwnedEventId>,
        _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex
    ) -> Result<()> {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);

        for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) {
            self.roomid_pduleaves.remove(&key)?;
        }

        for event_id in event_ids {
            let mut key = prefix.to_owned();
            key.extend_from_slice(event_id.as_bytes());
            self.roomid_pduleaves.insert(&key, event_id.as_bytes())?;
        }

        Ok(())
    }
}
@@ -0,0 +1,186 @@
use std::{collections::HashMap, sync::Arc};

use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result};
use async_trait::async_trait;
use ruma::{events::StateEventType, EventId, RoomId};

#[async_trait]
impl service::rooms::state_accessor::Data for KeyValueDatabase {
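    // Full state is expanded from the newest compressed snapshot; the loops yield
    // to the async runtime every 100 events so large rooms don't block the executor.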
    async fn state_full_ids(&self, shortstatehash: u64) -> Result<HashMap<u64, Arc<EventId>>> {
        let full_state = services()
            .rooms
            .state_compressor
            .load_shortstatehash_info(shortstatehash)?
            .pop()
            .expect("there is always one layer")
            .1;
        let mut result = HashMap::new();
        let mut i = 0;
        for compressed in full_state.into_iter() {
            let parsed = services()
                .rooms
                .state_compressor
                .parse_compressed_state_event(&compressed)?;
            result.insert(parsed.0, parsed.1);

            i += 1;
            if i % 100 == 0 {
                tokio::task::yield_now().await;
            }
        }
        Ok(result)
    }

    async fn state_full(
        &self,
        shortstatehash: u64,
    ) -> Result<HashMap<(StateEventType, String), Arc<PduEvent>>> {
        let full_state = services()
            .rooms
            .state_compressor
            .load_shortstatehash_info(shortstatehash)?
            .pop()
            .expect("there is always one layer")
            .1;

        let mut result = HashMap::new();
        let mut i = 0;
        for compressed in full_state {
            let (_, eventid) = services()
                .rooms
                .state_compressor
                .parse_compressed_state_event(&compressed)?;
            if let Some(pdu) = services().rooms.timeline.get_pdu(&eventid)? {
                result.insert(
                    (
                        pdu.kind.to_string().into(),
                        pdu.state_key
                            .as_ref()
                            .ok_or_else(|| Error::bad_database("State event has no state key."))?
                            .clone(),
                    ),
                    pdu,
                );
            }

            i += 1;
            if i % 100 == 0 {
                tokio::task::yield_now().await;
            }
        }

        Ok(result)
    }

    /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`).
    fn state_get_id(
        &self,
        shortstatehash: u64,
        event_type: &StateEventType,
        state_key: &str,
    ) -> Result<Option<Arc<EventId>>> {
        let shortstatekey = match services()
            .rooms
            .short
            .get_shortstatekey(event_type, state_key)?
        {
            Some(s) => s,
            None => return Ok(None),
        };
        let full_state = services()
            .rooms
            .state_compressor
            .load_shortstatehash_info(shortstatehash)?
            .pop()
            .expect("there is always one layer")
            .1;
        Ok(full_state
            .into_iter()
            .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes()))
            .and_then(|compressed| {
                services()
                    .rooms
                    .state_compressor
                    .parse_compressed_state_event(&compressed)
                    .ok()
                    .map(|(_, id)| id)
            }))
    }

    /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`).
    fn state_get(
        &self,
        shortstatehash: u64,
        event_type: &StateEventType,
        state_key: &str,
    ) -> Result<Option<Arc<PduEvent>>> {
        self.state_get_id(shortstatehash, event_type, state_key)?
            .map_or(Ok(None), |event_id| {
                services().rooms.timeline.get_pdu(&event_id)
            })
    }

    /// Returns the state hash for this pdu.
    fn pdu_shortstatehash(&self, event_id: &EventId) -> Result<Option<u64>> {
        self.eventid_shorteventid
            .get(event_id.as_bytes())?
            .map_or(Ok(None), |shorteventid| {
                self.shorteventid_shortstatehash
                    .get(&shorteventid)?
                    .map(|bytes| {
                        utils::u64_from_bytes(&bytes).map_err(|_| {
                            Error::bad_database(
                                "Invalid shortstatehash bytes in shorteventid_shortstatehash",
                            )
                        })
                    })
                    .transpose()
            })
    }

    /// Returns the full room state.
    async fn room_state_full(
        &self,
        room_id: &RoomId,
    ) -> Result<HashMap<(StateEventType, String), Arc<PduEvent>>> {
        if let Some(current_shortstatehash) =
            services().rooms.state.get_room_shortstatehash(room_id)?
        {
            self.state_full(current_shortstatehash).await
        } else {
            Ok(HashMap::new())
        }
    }

    /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`).
    fn room_state_get_id(
        &self,
        room_id: &RoomId,
        event_type: &StateEventType,
        state_key: &str,
    ) -> Result<Option<Arc<EventId>>> {
        if let Some(current_shortstatehash) =
            services().rooms.state.get_room_shortstatehash(room_id)?
        {
            self.state_get_id(current_shortstatehash, event_type, state_key)
        } else {
            Ok(None)
        }
    }

    /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`).
    fn room_state_get(
        &self,
        room_id: &RoomId,
        event_type: &StateEventType,
        state_key: &str,
    ) -> Result<Option<Arc<PduEvent>>> {
        if let Some(current_shortstatehash) =
            services().rooms.state.get_room_shortstatehash(room_id)?
        {
            self.state_get(current_shortstatehash, event_type, state_key)
        } else {
            Ok(None)
        }
    }
}
@@ -0,0 +1,622 @@
use std::{collections::HashSet, sync::Arc};

use regex::Regex;
use ruma::{
    events::{AnyStrippedStateEvent, AnySyncStateEvent},
    serde::Raw,
    OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
};

use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};

impl service::rooms::state_cache::Data for KeyValueDatabase {
    fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
        let mut userroom_id = user_id.as_bytes().to_vec();
        userroom_id.push(0xff);
        userroom_id.extend_from_slice(room_id.as_bytes());
        self.roomuseroncejoinedids.insert(&userroom_id, &[])
    }

    fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
        let mut roomuser_id = room_id.as_bytes().to_vec();
        roomuser_id.push(0xff);
        roomuser_id.extend_from_slice(user_id.as_bytes());

        let mut userroom_id = user_id.as_bytes().to_vec();
        userroom_id.push(0xff);
        userroom_id.extend_from_slice(room_id.as_bytes());

        self.userroomid_joined.insert(&userroom_id, &[])?;
        self.roomuserid_joined.insert(&roomuser_id, &[])?;
        self.userroomid_invitestate.remove(&userroom_id)?;
        self.roomuserid_invitecount.remove(&roomuser_id)?;
        self.userroomid_leftstate.remove(&userroom_id)?;
        self.roomuserid_leftcount.remove(&roomuser_id)?;

        Ok(())
    }

    fn mark_as_invited(
        &self,
        user_id: &UserId,
        room_id: &RoomId,
        last_state: Option<Vec<Raw<AnyStrippedStateEvent>>>,
    ) -> Result<()> {
        let mut roomuser_id = room_id.as_bytes().to_vec();
        roomuser_id.push(0xff);
        roomuser_id.extend_from_slice(user_id.as_bytes());

        let mut userroom_id = user_id.as_bytes().to_vec();
        userroom_id.push(0xff);
        userroom_id.extend_from_slice(room_id.as_bytes());

        self.userroomid_invitestate.insert(
            &userroom_id,
            &serde_json::to_vec(&last_state.unwrap_or_default())
                .expect("state to bytes always works"),
        )?;
        self.roomuserid_invitecount.insert(
            &roomuser_id,
            &services().globals.next_count()?.to_be_bytes(),
        )?;
        self.userroomid_joined.remove(&userroom_id)?;
        self.roomuserid_joined.remove(&roomuser_id)?;
        self.userroomid_leftstate.remove(&userroom_id)?;
        self.roomuserid_leftcount.remove(&roomuser_id)?;

        Ok(())
    }

    fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
        let mut roomuser_id = room_id.as_bytes().to_vec();
        roomuser_id.push(0xff);
        roomuser_id.extend_from_slice(user_id.as_bytes());

        let mut userroom_id = user_id.as_bytes().to_vec();
        userroom_id.push(0xff);
        userroom_id.extend_from_slice(room_id.as_bytes());

        self.userroomid_leftstate.insert(
            &userroom_id,
            &serde_json::to_vec(&Vec::<Raw<AnySyncStateEvent>>::new()).unwrap(),
        )?; // TODO
        self.roomuserid_leftcount.insert(
            &roomuser_id,
            &services().globals.next_count()?.to_be_bytes(),
        )?;
        self.userroomid_joined.remove(&userroom_id)?;
        self.roomuserid_joined.remove(&roomuser_id)?;
        self.userroomid_invitestate.remove(&userroom_id)?;
        self.roomuserid_invitecount.remove(&roomuser_id)?;

        Ok(())
    }

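    // Recomputes the room's joined/invited counts, refreshes the cache of local
    // (non-deactivated) members, and syncs `roomserverids`/`serverroomids` with
    // the servers currently in the room.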
    fn update_joined_count(&self, room_id: &RoomId) -> Result<()> {
        let mut joinedcount = 0_u64;
        let mut invitedcount = 0_u64;
        let mut joined_servers = HashSet::new();
        let mut real_users = HashSet::new();

        for joined in self.room_members(room_id).filter_map(|r| r.ok()) {
            joined_servers.insert(joined.server_name().to_owned());
            if joined.server_name() == services().globals.server_name()
                && !services().users.is_deactivated(&joined).unwrap_or(true)
            {
                real_users.insert(joined);
            }
            joinedcount += 1;
        }

        for _invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) {
            invitedcount += 1;
        }

        self.roomid_joinedcount
            .insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?;

        self.roomid_invitedcount
            .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?;

        self.our_real_users_cache
            .write()
            .unwrap()
            .insert(room_id.to_owned(), Arc::new(real_users));

        for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) {
            if !joined_servers.remove(&old_joined_server) {
                // Server not in room anymore
                let mut roomserver_id = room_id.as_bytes().to_vec();
                roomserver_id.push(0xff);
                roomserver_id.extend_from_slice(old_joined_server.as_bytes());

                let mut serverroom_id = old_joined_server.as_bytes().to_vec();
                serverroom_id.push(0xff);
                serverroom_id.extend_from_slice(room_id.as_bytes());

                self.roomserverids.remove(&roomserver_id)?;
                self.serverroomids.remove(&serverroom_id)?;
            }
        }

        // Now only new servers are in joined_servers anymore
        for server in joined_servers {
            let mut roomserver_id = room_id.as_bytes().to_vec();
            roomserver_id.push(0xff);
            roomserver_id.extend_from_slice(server.as_bytes());

            let mut serverroom_id = server.as_bytes().to_vec();
            serverroom_id.push(0xff);
            serverroom_id.extend_from_slice(room_id.as_bytes());

            self.roomserverids.insert(&roomserver_id, &[])?;
            self.serverroomids.insert(&serverroom_id, &[])?;
        }

        self.appservice_in_room_cache
            .write()
            .unwrap()
            .remove(room_id);

        Ok(())
    }

    #[tracing::instrument(skip(self, room_id))]
    fn get_our_real_users(&self, room_id: &RoomId) -> Result<Arc<HashSet<OwnedUserId>>> {
        let maybe = self
            .our_real_users_cache
            .read()
            .unwrap()
            .get(room_id)
            .cloned();
        if let Some(users) = maybe {
            Ok(users)
        } else {
            self.update_joined_count(room_id)?;
            Ok(Arc::clone(
                self.our_real_users_cache
                    .read()
                    .unwrap()
                    .get(room_id)
                    .unwrap(),
            ))
        }
    }

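    // An appservice is considered "in" a room if its sender_localpart user is
    // joined or any joined member matches one of its user namespace regexes; the
    // result is cached per room.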
    #[tracing::instrument(skip(self, room_id, appservice))]
    fn appservice_in_room(
        &self,
        room_id: &RoomId,
        appservice: &(String, serde_yaml::Value),
    ) -> Result<bool> {
        let maybe = self
            .appservice_in_room_cache
            .read()
            .unwrap()
            .get(room_id)
            .and_then(|map| map.get(&appservice.0))
            .copied();

        if let Some(b) = maybe {
            Ok(b)
        } else if let Some(namespaces) = appservice.1.get("namespaces") {
            let users = namespaces
                .get("users")
                .and_then(|users| users.as_sequence())
                .map_or_else(Vec::new, |users| {
                    users
                        .iter()
                        .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok())
                        .collect::<Vec<_>>()
                });

            let bridge_user_id = appservice
                .1
                .get("sender_localpart")
                .and_then(|string| string.as_str())
                .and_then(|string| {
                    UserId::parse_with_server_name(string, services().globals.server_name()).ok()
                });

            let in_room = bridge_user_id
                .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false))
                || self.room_members(room_id).any(|userid| {
                    userid.map_or(false, |userid| {
                        users.iter().any(|r| r.is_match(userid.as_str()))
                    })
                });

            self.appservice_in_room_cache
                .write()
                .unwrap()
                .entry(room_id.to_owned())
                .or_default()
                .insert(appservice.0.clone(), in_room);

            Ok(in_room)
        } else {
            Ok(false)
        }
    }

    /// Makes a user forget a room.
    #[tracing::instrument(skip(self))]
    fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> {
        let mut userroom_id = user_id.as_bytes().to_vec();
        userroom_id.push(0xff);
        userroom_id.extend_from_slice(room_id.as_bytes());

        let mut roomuser_id = room_id.as_bytes().to_vec();
        roomuser_id.push(0xff);
        roomuser_id.extend_from_slice(user_id.as_bytes());

        self.userroomid_leftstate.remove(&userroom_id)?;
        self.roomuserid_leftcount.remove(&roomuser_id)?;

        Ok(())
    }

    /// Returns an iterator of all servers participating in this room.
    #[tracing::instrument(skip(self))]
    fn room_servers<'a>(
        &'a self,
        room_id: &RoomId,
    ) -> Box<dyn Iterator<Item = Result<OwnedServerName>> + 'a> {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);

        Box::new(self.roomserverids.scan_prefix(prefix).map(|(key, _)| {
            ServerName::parse(
                utils::string_from_bytes(
                    key.rsplit(|&b| b == 0xff)
                        .next()
                        .expect("rsplit always returns an element"),
                )
                .map_err(|_| {
                    Error::bad_database("Server name in roomserverids is invalid unicode.")
                })?,
            )
            .map_err(|_| Error::bad_database("Server name in roomserverids is invalid."))
        }))
    }

    #[tracing::instrument(skip(self))]
    fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result<bool> {
        let mut key = server.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(room_id.as_bytes());

        self.serverroomids.get(&key).map(|o| o.is_some())
    }

    /// Returns an iterator of all rooms a server participates in (as far as we know).
    #[tracing::instrument(skip(self))]
    fn server_rooms<'a>(
        &'a self,
        server: &ServerName,
    ) -> Box<dyn Iterator<Item = Result<OwnedRoomId>> + 'a> {
        let mut prefix = server.as_bytes().to_vec();
        prefix.push(0xff);

        Box::new(self.serverroomids.scan_prefix(prefix).map(|(key, _)| {
            RoomId::parse(
                utils::string_from_bytes(
                    key.rsplit(|&b| b == 0xff)
                        .next()
                        .expect("rsplit always returns an element"),
                )
                .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?,
            )
            .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid."))
        }))
    }

    /// Returns an iterator over all joined members of a room.
    #[tracing::instrument(skip(self))]
    fn room_members<'a>(
        &'a self,
        room_id: &RoomId,
    ) -> Box<dyn Iterator<Item = Result<OwnedUserId>> + 'a> {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);

        Box::new(self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| {
            UserId::parse(
                utils::string_from_bytes(
                    key.rsplit(|&b| b == 0xff)
                        .next()
                        .expect("rsplit always returns an element"),
                )
                .map_err(|_| {
                    Error::bad_database("User ID in roomuserid_joined is invalid unicode.")
                })?,
            )
            .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid."))
        }))
    }

    #[tracing::instrument(skip(self))]
    fn room_joined_count(&self, room_id: &RoomId) -> Result<Option<u64>> {
        self.roomid_joinedcount
            .get(room_id.as_bytes())?
            .map(|b| {
                utils::u64_from_bytes(&b)
                    .map_err(|_| Error::bad_database("Invalid joinedcount in db."))
            })
            .transpose()
    }

    #[tracing::instrument(skip(self))]
    fn room_invited_count(&self, room_id: &RoomId) -> Result<Option<u64>> {
        self.roomid_invitedcount
            .get(room_id.as_bytes())?
            .map(|b| {
                utils::u64_from_bytes(&b)
                    .map_err(|_| Error::bad_database("Invalid joinedcount in db."))
            })
            .transpose()
    }

    /// Returns an iterator over all User IDs who ever joined a room.
    #[tracing::instrument(skip(self))]
    fn room_useroncejoined<'a>(
        &'a self,
        room_id: &RoomId,
    ) -> Box<dyn Iterator<Item = Result<OwnedUserId>> + 'a> {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);

        Box::new(
            self.roomuseroncejoinedids
                .scan_prefix(prefix)
                .map(|(key, _)| {
                    UserId::parse(
                        utils::string_from_bytes(
                            key.rsplit(|&b| b == 0xff)
                                .next()
                                .expect("rsplit always returns an element"),
                        )
                        .map_err(|_| {
                            Error::bad_database(
                                "User ID in room_useroncejoined is invalid unicode.",
                            )
                        })?,
                    )
                    .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid."))
                }),
        )
    }

    /// Returns an iterator over all invited members of a room.
    #[tracing::instrument(skip(self))]
    fn room_members_invited<'a>(
        &'a self,
        room_id: &RoomId,
    ) -> Box<dyn Iterator<Item = Result<OwnedUserId>> + 'a> {
        let mut prefix = room_id.as_bytes().to_vec();
        prefix.push(0xff);

        Box::new(
            self.roomuserid_invitecount
                .scan_prefix(prefix)
                .map(|(key, _)| {
                    UserId::parse(
                        utils::string_from_bytes(
                            key.rsplit(|&b| b == 0xff)
                                .next()
                                .expect("rsplit always returns an element"),
                        )
                        .map_err(|_| {
                            Error::bad_database("User ID in roomuserid_invited is invalid unicode.")
                        })?,
                    )
                    .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid."))
                }),
        )
    }

    #[tracing::instrument(skip(self))]
    fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result<Option<u64>> {
        let mut key = room_id.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(user_id.as_bytes());

        self.roomuserid_invitecount
            .get(&key)?
            .map_or(Ok(None), |bytes| {
                Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| {
                    Error::bad_database("Invalid invitecount in db.")
                })?))
            })
    }

    #[tracing::instrument(skip(self))]
    fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result<Option<u64>> {
        let mut key = room_id.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(user_id.as_bytes());

        self.roomuserid_leftcount
            .get(&key)?
            .map(|bytes| {
                utils::u64_from_bytes(&bytes)
                    .map_err(|_| Error::bad_database("Invalid leftcount in db."))
            })
            .transpose()
    }

    /// Returns an iterator over all rooms this user joined.
    #[tracing::instrument(skip(self))]
    fn rooms_joined<'a>(
        &'a self,
        user_id: &UserId,
    ) -> Box<dyn Iterator<Item = Result<OwnedRoomId>> + 'a> {
        Box::new(
            self.userroomid_joined
                .scan_prefix(user_id.as_bytes().to_vec())
                .map(|(key, _)| {
                    RoomId::parse(
                        utils::string_from_bytes(
                            key.rsplit(|&b| b == 0xff)
                                .next()
                                .expect("rsplit always returns an element"),
                        )
                        .map_err(|_| {
                            Error::bad_database("Room ID in userroomid_joined is invalid unicode.")
                        })?,
                    )
                    .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid."))
                }),
        )
    }

    /// Returns an iterator over all rooms a user was invited to.
    #[tracing::instrument(skip(self))]
    fn rooms_invited<'a>(
        &'a self,
        user_id: &UserId,
    ) -> Box<dyn Iterator<Item = Result<(OwnedRoomId, Vec<Raw<AnyStrippedStateEvent>>)>> + 'a> {
        let mut prefix = user_id.as_bytes().to_vec();
        prefix.push(0xff);

        Box::new(
            self.userroomid_invitestate
                .scan_prefix(prefix)
                .map(|(key, state)| {
                    let room_id = RoomId::parse(
                        utils::string_from_bytes(
                            key.rsplit(|&b| b == 0xff)
                                .next()
                                .expect("rsplit always returns an element"),
                        )
                        .map_err(|_| {
                            Error::bad_database("Room ID in userroomid_invited is invalid unicode.")
                        })?,
                    )
                    .map_err(|_| {
                        Error::bad_database("Room ID in userroomid_invited is invalid.")
                    })?;

                    let state = serde_json::from_slice(&state).map_err(|_| {
                        Error::bad_database("Invalid state in userroomid_invitestate.")
                    })?;

                    Ok((room_id, state))
                }),
        )
    }

    #[tracing::instrument(skip(self))]
    fn invite_state(
        &self,
        user_id: &UserId,
        room_id: &RoomId,
    ) -> Result<Option<Vec<Raw<AnyStrippedStateEvent>>>> {
        let mut key = user_id.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(room_id.as_bytes());

        self.userroomid_invitestate
            .get(&key)?
            .map(|state| {
                let state = serde_json::from_slice(&state)
                    .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?;

                Ok(state)
            })
            .transpose()
    }

    #[tracing::instrument(skip(self))]
    fn left_state(
        &self,
        user_id: &UserId,
        room_id: &RoomId,
|
||||||
|
) -> Result<Option<Vec<Raw<AnyStrippedStateEvent>>>> {
|
||||||
|
let mut key = user_id.as_bytes().to_vec();
|
||||||
|
key.push(0xff);
|
||||||
|
key.extend_from_slice(room_id.as_bytes());
|
||||||
|
|
||||||
|
self.userroomid_leftstate
|
||||||
|
.get(&key)?
|
||||||
|
.map(|state| {
|
||||||
|
let state = serde_json::from_slice(&state)
|
||||||
|
.map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?;
|
||||||
|
|
||||||
|
Ok(state)
|
||||||
|
})
|
||||||
|
.transpose()
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns an iterator over all rooms a user left.
|
||||||
|
#[tracing::instrument(skip(self))]
|
||||||
|
fn rooms_left<'a>(
|
||||||
|
&'a self,
|
||||||
|
user_id: &UserId,
|
||||||
|
) -> Box<dyn Iterator<Item = Result<(OwnedRoomId, Vec<Raw<AnySyncStateEvent>>)>> + 'a> {
|
||||||
|
let mut prefix = user_id.as_bytes().to_vec();
|
||||||
|
prefix.push(0xff);
|
||||||
|
|
||||||
|
Box::new(
|
||||||
|
self.userroomid_leftstate
|
||||||
|
.scan_prefix(prefix)
|
||||||
|
.map(|(key, state)| {
|
||||||
|
let room_id = RoomId::parse(
|
||||||
|
utils::string_from_bytes(
|
||||||
|
key.rsplit(|&b| b == 0xff)
|
||||||
|
.next()
|
||||||
|
.expect("rsplit always returns an element"),
|
||||||
|
)
|
||||||
|
.map_err(|_| {
|
||||||
|
Error::bad_database("Room ID in userroomid_invited is invalid unicode.")
|
||||||
|
})?,
|
||||||
|
)
|
||||||
|
.map_err(|_| {
|
||||||
|
Error::bad_database("Room ID in userroomid_invited is invalid.")
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let state = serde_json::from_slice(&state).map_err(|_| {
|
||||||
|
Error::bad_database("Invalid state in userroomid_leftstate.")
|
||||||
|
})?;
|
||||||
|
|
||||||
|
Ok((room_id, state))
|
||||||
|
}),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tracing::instrument(skip(self))]
|
||||||
|
fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool> {
|
||||||
|
let mut userroom_id = user_id.as_bytes().to_vec();
|
||||||
|
userroom_id.push(0xff);
|
||||||
|
userroom_id.extend_from_slice(room_id.as_bytes());
|
||||||
|
|
||||||
|
Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tracing::instrument(skip(self))]
|
||||||
|
fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool> {
|
||||||
|
let mut userroom_id = user_id.as_bytes().to_vec();
|
||||||
|
userroom_id.push(0xff);
|
||||||
|
userroom_id.extend_from_slice(room_id.as_bytes());
|
||||||
|
|
||||||
|
Ok(self.userroomid_joined.get(&userroom_id)?.is_some())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tracing::instrument(skip(self))]
|
||||||
|
fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool> {
|
||||||
|
let mut userroom_id = user_id.as_bytes().to_vec();
|
||||||
|
userroom_id.push(0xff);
|
||||||
|
userroom_id.extend_from_slice(room_id.as_bytes());
|
||||||
|
|
||||||
|
Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tracing::instrument(skip(self))]
|
||||||
|
fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result<bool> {
|
||||||
|
let mut userroom_id = user_id.as_bytes().to_vec();
|
||||||
|
userroom_id.push(0xff);
|
||||||
|
userroom_id.extend_from_slice(room_id.as_bytes());
|
||||||
|
|
||||||
|
Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some())
|
||||||
|
}
|
||||||
|
}
|
|
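All of the membership tables above share one key convention: two IDs are concatenated with a single 0xff separator byte, so a prefix scan over the first ID enumerates the second. A minimal sketch of that encoding with plain byte strings, not Conduit's UserId/RoomId types (the helper names here are illustrative, not part of the codebase):

/// Build a `userroomid`-style key: user id + 0xff + room id.
fn userroom_key(user_id: &str, room_id: &str) -> Vec<u8> {
    let mut key = user_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(room_id.as_bytes());
    key
}

/// Recover the second component, mirroring the `rsplit(|&b| b == 0xff)` calls above.
fn second_component(key: &[u8]) -> Option<&[u8]> {
    key.rsplit(|&b| b == 0xff).next()
}

fn main() {
    let key = userroom_key("@alice:example.org", "!room:example.org");
    assert_eq!(second_component(&key), Some("!room:example.org".as_bytes()));
}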
@@ -0,0 +1,61 @@
use std::{collections::HashSet, mem::size_of};

use crate::{
    database::KeyValueDatabase,
    service::{self, rooms::state_compressor::data::StateDiff},
    utils, Error, Result,
};

impl service::rooms::state_compressor::Data for KeyValueDatabase {
    fn get_statediff(&self, shortstatehash: u64) -> Result<StateDiff> {
        let value = self
            .shortstatehash_statediff
            .get(&shortstatehash.to_be_bytes())?
            .ok_or_else(|| Error::bad_database("State hash does not exist"))?;
        let parent =
            utils::u64_from_bytes(&value[0..size_of::<u64>()]).expect("bytes have right length");
        let parent = if parent != 0 { Some(parent) } else { None };

        let mut add_mode = true;
        let mut added = HashSet::new();
        let mut removed = HashSet::new();

        let mut i = size_of::<u64>();
        while let Some(v) = value.get(i..i + 2 * size_of::<u64>()) {
            if add_mode && v.starts_with(&0_u64.to_be_bytes()) {
                add_mode = false;
                i += size_of::<u64>();
                continue;
            }
            if add_mode {
                added.insert(v.try_into().expect("we checked the size above"));
            } else {
                removed.insert(v.try_into().expect("we checked the size above"));
            }
            i += 2 * size_of::<u64>();
        }

        Ok(StateDiff {
            parent,
            added,
            removed,
        })
    }

    fn save_statediff(&self, shortstatehash: u64, diff: StateDiff) -> Result<()> {
        let mut value = diff.parent.unwrap_or(0).to_be_bytes().to_vec();
        for new in &diff.added {
            value.extend_from_slice(&new[..]);
        }

        if !diff.removed.is_empty() {
            value.extend_from_slice(&0_u64.to_be_bytes());
            for removed in &diff.removed {
                value.extend_from_slice(&removed[..]);
            }
        }

        self.shortstatehash_statediff
            .insert(&shortstatehash.to_be_bytes(), &value)
    }
}
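save_statediff/get_statediff flatten a diff into one value: an 8-byte big-endian parent id (0 meaning no parent), the added entries back to back, then, only if anything was removed, a 0_u64 sentinel followed by the removed entries. A small round-trip sketch using 16-byte dummy entries; the function names and the bare Vec<[u8; 16]> type are illustrative stand-ins for Conduit's HashSet<CompressedStateEvent>:

use std::mem::size_of;

// Encode: parent (0 = none), added entries, optional 0_u64 sentinel, removed entries.
fn encode(parent: Option<u64>, added: &[[u8; 16]], removed: &[[u8; 16]]) -> Vec<u8> {
    let mut value = parent.unwrap_or(0).to_be_bytes().to_vec();
    for e in added {
        value.extend_from_slice(e);
    }
    if !removed.is_empty() {
        value.extend_from_slice(&0_u64.to_be_bytes());
        for e in removed {
            value.extend_from_slice(e);
        }
    }
    value
}

// Decode mirrors the `while let Some(v) = value.get(i..i + 2 * size_of::<u64>())` loop above.
fn decode(value: &[u8]) -> (Option<u64>, Vec<[u8; 16]>, Vec<[u8; 16]>) {
    let parent = u64::from_be_bytes(value[0..size_of::<u64>()].try_into().unwrap());
    let parent = if parent != 0 { Some(parent) } else { None };
    let (mut added, mut removed, mut add_mode) = (Vec::new(), Vec::new(), true);
    let mut i = size_of::<u64>();
    while let Some(v) = value.get(i..i + 2 * size_of::<u64>()) {
        if add_mode && v.starts_with(&0_u64.to_be_bytes()) {
            // Hit the sentinel: everything after it is a removal.
            add_mode = false;
            i += size_of::<u64>();
            continue;
        }
        let entry: [u8; 16] = v.try_into().unwrap();
        if add_mode { added.push(entry) } else { removed.push(entry) };
        i += 2 * size_of::<u64>();
    }
    (parent, added, removed)
}

fn main() {
    let value = encode(Some(7), &[[1; 16]], &[[2; 16]]);
    assert_eq!(decode(&value), (Some(7), vec![[1; 16]], vec![[2; 16]]));
}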
@@ -0,0 +1,370 @@
use std::{collections::hash_map, mem::size_of, sync::Arc};

use ruma::{
    api::client::error::ErrorKind, CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId,
};
use tracing::error;

use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result};

impl service::rooms::timeline::Data for KeyValueDatabase {
    fn first_pdu_in_room(&self, room_id: &RoomId) -> Result<Option<Arc<PduEvent>>> {
        let prefix = services()
            .rooms
            .short
            .get_shortroomid(room_id)?
            .expect("room exists")
            .to_be_bytes()
            .to_vec();

        // Look for PDUs in that room.
        self.pduid_pdu
            .iter_from(&prefix, false)
            .filter(|(k, _)| k.starts_with(&prefix))
            .map(|(_, pdu)| {
                serde_json::from_slice(&pdu)
                    .map_err(|_| Error::bad_database("Invalid first PDU in db."))
                    .map(Arc::new)
            })
            .next()
            .transpose()
    }

    fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result<u64> {
        match self
            .lasttimelinecount_cache
            .lock()
            .unwrap()
            .entry(room_id.to_owned())
        {
            hash_map::Entry::Vacant(v) => {
                if let Some(last_count) = self
                    .pdus_until(sender_user, room_id, u64::MAX)?
                    .filter_map(|r| {
                        // Filter out buggy events
                        if r.is_err() {
                            error!("Bad pdu in pdus_since: {:?}", r);
                        }
                        r.ok()
                    })
                    .map(|(pduid, _)| self.pdu_count(&pduid))
                    .next()
                {
                    Ok(*v.insert(last_count?))
                } else {
                    Ok(0)
                }
            }
            hash_map::Entry::Occupied(o) => Ok(*o.get()),
        }
    }

    /// Returns the `count` of this pdu's id.
    fn get_pdu_count(&self, event_id: &EventId) -> Result<Option<u64>> {
        self.eventid_pduid
            .get(event_id.as_bytes())?
            .map(|pdu_id| self.pdu_count(&pdu_id))
            .transpose()
    }

    /// Returns the json of a pdu.
    fn get_pdu_json(&self, event_id: &EventId) -> Result<Option<CanonicalJsonObject>> {
        self.eventid_pduid
            .get(event_id.as_bytes())?
            .map_or_else(
                || self.eventid_outlierpdu.get(event_id.as_bytes()),
                |pduid| {
                    Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| {
                        Error::bad_database("Invalid pduid in eventid_pduid.")
                    })?))
                },
            )?
            .map(|pdu| {
                serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db."))
            })
            .transpose()
    }

    /// Returns the json of a pdu.
    fn get_non_outlier_pdu_json(&self, event_id: &EventId) -> Result<Option<CanonicalJsonObject>> {
        self.eventid_pduid
            .get(event_id.as_bytes())?
            .map(|pduid| {
                self.pduid_pdu
                    .get(&pduid)?
                    .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid."))
            })
            .transpose()?
            .map(|pdu| {
                serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db."))
            })
            .transpose()
    }

    /// Returns the pdu's id.
    fn get_pdu_id(&self, event_id: &EventId) -> Result<Option<Vec<u8>>> {
        self.eventid_pduid.get(event_id.as_bytes())
    }

    /// Returns the pdu.
    ///
    /// Checks the `eventid_outlierpdu` Tree if not found in the timeline.
    fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result<Option<PduEvent>> {
        self.eventid_pduid
            .get(event_id.as_bytes())?
            .map(|pduid| {
                self.pduid_pdu
                    .get(&pduid)?
                    .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid."))
            })
            .transpose()?
            .map(|pdu| {
                serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db."))
            })
            .transpose()
    }

    /// Returns the pdu.
    ///
    /// Checks the `eventid_outlierpdu` Tree if not found in the timeline.
    fn get_pdu(&self, event_id: &EventId) -> Result<Option<Arc<PduEvent>>> {
        if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) {
            return Ok(Some(Arc::clone(p)));
        }

        if let Some(pdu) = self
            .eventid_pduid
            .get(event_id.as_bytes())?
            .map_or_else(
                || self.eventid_outlierpdu.get(event_id.as_bytes()),
                |pduid| {
                    Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| {
                        Error::bad_database("Invalid pduid in eventid_pduid.")
                    })?))
                },
            )?
            .map(|pdu| {
                serde_json::from_slice(&pdu)
                    .map_err(|_| Error::bad_database("Invalid PDU in db."))
                    .map(Arc::new)
            })
            .transpose()?
        {
            self.pdu_cache
                .lock()
                .unwrap()
                .insert(event_id.to_owned(), Arc::clone(&pdu));
            Ok(Some(pdu))
        } else {
            Ok(None)
        }
    }

    /// Returns the pdu.
    ///
    /// This does __NOT__ check the outliers `Tree`.
    fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result<Option<PduEvent>> {
        self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| {
            Ok(Some(
                serde_json::from_slice(&pdu)
                    .map_err(|_| Error::bad_database("Invalid PDU in db."))?,
            ))
        })
    }

    /// Returns the pdu as a `BTreeMap<String, CanonicalJsonValue>`.
    fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result<Option<CanonicalJsonObject>> {
        self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| {
            Ok(Some(
                serde_json::from_slice(&pdu)
                    .map_err(|_| Error::bad_database("Invalid PDU in db."))?,
            ))
        })
    }

    /// Returns the `count` of this pdu's id.
    fn pdu_count(&self, pdu_id: &[u8]) -> Result<u64> {
        utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::<u64>()..])
            .map_err(|_| Error::bad_database("PDU has invalid count bytes."))
    }

    fn append_pdu(
        &self,
        pdu_id: &[u8],
        pdu: &PduEvent,
        json: &CanonicalJsonObject,
        count: u64,
    ) -> Result<()> {
        self.pduid_pdu.insert(
            pdu_id,
            &serde_json::to_vec(json).expect("CanonicalJsonObject is always a valid"),
        )?;

        self.lasttimelinecount_cache
            .lock()
            .unwrap()
            .insert(pdu.room_id.clone(), count);

        self.eventid_pduid.insert(pdu.event_id.as_bytes(), pdu_id)?;
        self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?;

        Ok(())
    }

    /// Removes a pdu and creates a new one with the same id.
    fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> {
        if self.pduid_pdu.get(pdu_id)?.is_some() {
            self.pduid_pdu.insert(
                pdu_id,
                &serde_json::to_vec(pdu).expect("CanonicalJsonObject is always a valid"),
            )?;
            Ok(())
        } else {
            Err(Error::BadRequest(
                ErrorKind::NotFound,
                "PDU does not exist.",
            ))
        }
    }

    /// Returns an iterator over all events in a room that happened after the event with id `since`
    /// in chronological order.
    fn pdus_since<'a>(
        &'a self,
        user_id: &UserId,
        room_id: &RoomId,
        since: u64,
    ) -> Result<Box<dyn Iterator<Item = Result<(Vec<u8>, PduEvent)>> + 'a>> {
        let prefix = services()
            .rooms
            .short
            .get_shortroomid(room_id)?
            .expect("room exists")
            .to_be_bytes()
            .to_vec();

        // Skip the first pdu if it's exactly at since, because we sent that last time
        let mut first_pdu_id = prefix.clone();
        first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes());

        let user_id = user_id.to_owned();

        Ok(Box::new(
            self.pduid_pdu
                .iter_from(&first_pdu_id, false)
                .take_while(move |(k, _)| k.starts_with(&prefix))
                .map(move |(pdu_id, v)| {
                    let mut pdu = serde_json::from_slice::<PduEvent>(&v)
                        .map_err(|_| Error::bad_database("PDU in db is invalid."))?;
                    if pdu.sender != user_id {
                        pdu.remove_transaction_id()?;
                    }
                    Ok((pdu_id, pdu))
                }),
        ))
    }

    /// Returns an iterator over all events and their tokens in a room that happened before the
    /// event with id `until` in reverse-chronological order.
    fn pdus_until<'a>(
        &'a self,
        user_id: &UserId,
        room_id: &RoomId,
        until: u64,
    ) -> Result<Box<dyn Iterator<Item = Result<(Vec<u8>, PduEvent)>> + 'a>> {
        // Create the first part of the full pdu id
        let prefix = services()
            .rooms
            .short
            .get_shortroomid(room_id)?
            .expect("room exists")
            .to_be_bytes()
            .to_vec();

        let mut current = prefix.clone();
        current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until`

        let current: &[u8] = &current;

        let user_id = user_id.to_owned();

        Ok(Box::new(
            self.pduid_pdu
                .iter_from(current, true)
                .take_while(move |(k, _)| k.starts_with(&prefix))
                .map(move |(pdu_id, v)| {
                    let mut pdu = serde_json::from_slice::<PduEvent>(&v)
                        .map_err(|_| Error::bad_database("PDU in db is invalid."))?;
                    if pdu.sender != user_id {
                        pdu.remove_transaction_id()?;
                    }
                    Ok((pdu_id, pdu))
                }),
        ))
    }

    fn pdus_after<'a>(
        &'a self,
        user_id: &UserId,
        room_id: &RoomId,
        from: u64,
    ) -> Result<Box<dyn Iterator<Item = Result<(Vec<u8>, PduEvent)>> + 'a>> {
        // Create the first part of the full pdu id
        let prefix = services()
            .rooms
            .short
            .get_shortroomid(room_id)?
            .expect("room exists")
            .to_be_bytes()
            .to_vec();

        let mut current = prefix.clone();
        current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event

        let current: &[u8] = &current;

        let user_id = user_id.to_owned();

        Ok(Box::new(
            self.pduid_pdu
                .iter_from(current, false)
                .take_while(move |(k, _)| k.starts_with(&prefix))
                .map(move |(pdu_id, v)| {
                    let mut pdu = serde_json::from_slice::<PduEvent>(&v)
                        .map_err(|_| Error::bad_database("PDU in db is invalid."))?;
                    if pdu.sender != user_id {
                        pdu.remove_transaction_id()?;
                    }
                    Ok((pdu_id, pdu))
                }),
        ))
    }

    fn increment_notification_counts(
        &self,
        room_id: &RoomId,
        notifies: Vec<OwnedUserId>,
        highlights: Vec<OwnedUserId>,
    ) -> Result<()> {
        let mut notifies_batch = Vec::new();
        let mut highlights_batch = Vec::new();
        for user in notifies {
            let mut userroom_id = user.as_bytes().to_vec();
            userroom_id.push(0xff);
            userroom_id.extend_from_slice(room_id.as_bytes());
            notifies_batch.push(userroom_id);
        }
        for user in highlights {
            let mut userroom_id = user.as_bytes().to_vec();
            userroom_id.push(0xff);
            userroom_id.extend_from_slice(room_id.as_bytes());
            highlights_batch.push(userroom_id);
        }

        self.userroomid_notificationcount
            .increment_batch(&mut notifies_batch.into_iter())?;
        self.userroomid_highlightcount
            .increment_batch(&mut highlights_batch.into_iter())?;
        Ok(())
    }
}
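Every pdu id handled by this tree is the room's 8-byte big-endian shortroomid followed by the 8-byte big-endian message count, which is why pdus_since, pdus_until and pdus_after can seek with iter_from and stop with starts_with(&prefix). A sketch of that key arithmetic with plain integers (the helper names are illustrative, not Conduit's API):

/// Compose a pdu id: shortroomid (8 bytes BE) + count (8 bytes BE).
fn pdu_id(shortroomid: u64, count: u64) -> Vec<u8> {
    let mut id = shortroomid.to_be_bytes().to_vec();
    id.extend_from_slice(&count.to_be_bytes());
    id
}

/// The seek position used by `pdus_since`: the first key strictly after `since`.
fn first_pdu_after(shortroomid: u64, since: u64) -> Vec<u8> {
    pdu_id(shortroomid, since + 1)
}

fn main() {
    let room = 42_u64;
    // Big-endian counts keep lexicographic key order equal to numeric order,
    // so a forward scan from this key yields counts since+1, since+2, ...
    assert!(first_pdu_after(room, 5) < pdu_id(room, 7));
    assert!(first_pdu_after(room, 5).starts_with(&room.to_be_bytes()));
}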
@@ -0,0 +1,149 @@
use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId};

use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};

impl service::rooms::user::Data for KeyValueDatabase {
    fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
        let mut userroom_id = user_id.as_bytes().to_vec();
        userroom_id.push(0xff);
        userroom_id.extend_from_slice(room_id.as_bytes());
        let mut roomuser_id = room_id.as_bytes().to_vec();
        roomuser_id.push(0xff);
        roomuser_id.extend_from_slice(user_id.as_bytes());

        self.userroomid_notificationcount
            .insert(&userroom_id, &0_u64.to_be_bytes())?;
        self.userroomid_highlightcount
            .insert(&userroom_id, &0_u64.to_be_bytes())?;

        self.roomuserid_lastnotificationread.insert(
            &roomuser_id,
            &services().globals.next_count()?.to_be_bytes(),
        )?;

        Ok(())
    }

    fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result<u64> {
        let mut userroom_id = user_id.as_bytes().to_vec();
        userroom_id.push(0xff);
        userroom_id.extend_from_slice(room_id.as_bytes());

        self.userroomid_notificationcount
            .get(&userroom_id)?
            .map(|bytes| {
                utils::u64_from_bytes(&bytes)
                    .map_err(|_| Error::bad_database("Invalid notification count in db."))
            })
            .unwrap_or(Ok(0))
    }

    fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result<u64> {
        let mut userroom_id = user_id.as_bytes().to_vec();
        userroom_id.push(0xff);
        userroom_id.extend_from_slice(room_id.as_bytes());

        self.userroomid_highlightcount
            .get(&userroom_id)?
            .map(|bytes| {
                utils::u64_from_bytes(&bytes)
                    .map_err(|_| Error::bad_database("Invalid highlight count in db."))
            })
            .unwrap_or(Ok(0))
    }

    fn last_notification_read(&self, user_id: &UserId, room_id: &RoomId) -> Result<u64> {
        let mut key = room_id.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(user_id.as_bytes());

        Ok(self
            .roomuserid_lastnotificationread
            .get(&key)?
            .map(|bytes| {
                utils::u64_from_bytes(&bytes).map_err(|_| {
                    Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.")
                })
            })
            .transpose()?
            .unwrap_or(0))
    }

    fn associate_token_shortstatehash(
        &self,
        room_id: &RoomId,
        token: u64,
        shortstatehash: u64,
    ) -> Result<()> {
        let shortroomid = services()
            .rooms
            .short
            .get_shortroomid(room_id)?
            .expect("room exists");

        let mut key = shortroomid.to_be_bytes().to_vec();
        key.extend_from_slice(&token.to_be_bytes());

        self.roomsynctoken_shortstatehash
            .insert(&key, &shortstatehash.to_be_bytes())
    }

    fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result<Option<u64>> {
        let shortroomid = services()
            .rooms
            .short
            .get_shortroomid(room_id)?
            .expect("room exists");

        let mut key = shortroomid.to_be_bytes().to_vec();
        key.extend_from_slice(&token.to_be_bytes());

        self.roomsynctoken_shortstatehash
            .get(&key)?
            .map(|bytes| {
                utils::u64_from_bytes(&bytes).map_err(|_| {
                    Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash")
                })
            })
            .transpose()
    }

    fn get_shared_rooms<'a>(
        &'a self,
        users: Vec<OwnedUserId>,
    ) -> Result<Box<dyn Iterator<Item = Result<OwnedRoomId>> + 'a>> {
        let iterators = users.into_iter().map(move |user_id| {
            let mut prefix = user_id.as_bytes().to_vec();
            prefix.push(0xff);

            self.userroomid_joined
                .scan_prefix(prefix)
                .map(|(key, _)| {
                    let roomid_index = key
                        .iter()
                        .enumerate()
                        .find(|(_, &b)| b == 0xff)
                        .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))?
                        .0
                        + 1; // +1 because the room id starts AFTER the separator

                    let room_id = key[roomid_index..].to_vec();

                    Ok::<_, Error>(room_id)
                })
                .filter_map(|r| r.ok())
        });

        // We use the default compare function because keys are sorted correctly (not reversed)
        Ok(Box::new(
            utils::common_elements(iterators, Ord::cmp)
                .expect("users is not empty")
                .map(|bytes| {
                    RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| {
                        Error::bad_database("Invalid RoomId bytes in userroomid_joined")
                    })?)
                    .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined."))
                }),
        ))
    }
}
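get_shared_rooms leans on utils::common_elements, which intersects several already-sorted iterators of room-id keys. A self-contained sketch of the same idea for two sorted sequences (the function name is illustrative, not Conduit's helper, which takes an arbitrary number of iterators and a comparator):

/// Intersect two ascending, duplicate-free sequences in one pass.
fn common_elements_2(a: &[Vec<u8>], b: &[Vec<u8>]) -> Vec<Vec<u8>> {
    let (mut i, mut j, mut out) = (0, 0, Vec::new());
    while i < a.len() && j < b.len() {
        match a[i].cmp(&b[j]) {
            std::cmp::Ordering::Less => i += 1,
            std::cmp::Ordering::Greater => j += 1,
            std::cmp::Ordering::Equal => {
                out.push(a[i].clone());
                i += 1;
                j += 1;
            }
        }
    }
    out
}

fn main() {
    let alice = vec![b"!a".to_vec(), b"!b".to_vec(), b"!c".to_vec()];
    let bob = vec![b"!b".to_vec(), b"!c".to_vec(), b"!d".to_vec()];
    assert_eq!(common_elements_2(&alice, &bob), vec![b"!b".to_vec(), b"!c".to_vec()]);
}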
@@ -0,0 +1,205 @@
use ruma::{ServerName, UserId};

use crate::{
    database::KeyValueDatabase,
    service::{
        self,
        sending::{OutgoingKind, SendingEventType},
    },
    services, utils, Error, Result,
};

impl service::sending::Data for KeyValueDatabase {
    fn active_requests<'a>(
        &'a self,
    ) -> Box<dyn Iterator<Item = Result<(Vec<u8>, OutgoingKind, SendingEventType)>> + 'a> {
        Box::new(
            self.servercurrentevent_data
                .iter()
                .map(|(key, v)| parse_servercurrentevent(&key, v).map(|(k, e)| (key, k, e))),
        )
    }

    fn active_requests_for<'a>(
        &'a self,
        outgoing_kind: &OutgoingKind,
    ) -> Box<dyn Iterator<Item = Result<(Vec<u8>, SendingEventType)>> + 'a> {
        let prefix = outgoing_kind.get_prefix();
        Box::new(
            self.servercurrentevent_data
                .scan_prefix(prefix)
                .map(|(key, v)| parse_servercurrentevent(&key, v).map(|(_, e)| (key, e))),
        )
    }

    fn delete_active_request(&self, key: Vec<u8>) -> Result<()> {
        self.servercurrentevent_data.remove(&key)
    }

    fn delete_all_active_requests_for(&self, outgoing_kind: &OutgoingKind) -> Result<()> {
        let prefix = outgoing_kind.get_prefix();
        for (key, _) in self.servercurrentevent_data.scan_prefix(prefix) {
            self.servercurrentevent_data.remove(&key)?;
        }

        Ok(())
    }

    fn delete_all_requests_for(&self, outgoing_kind: &OutgoingKind) -> Result<()> {
        let prefix = outgoing_kind.get_prefix();
        for (key, _) in self.servercurrentevent_data.scan_prefix(prefix.clone()) {
            self.servercurrentevent_data.remove(&key).unwrap();
        }

        for (key, _) in self.servernameevent_data.scan_prefix(prefix) {
            self.servernameevent_data.remove(&key).unwrap();
        }

        Ok(())
    }

    fn queue_requests(
        &self,
        requests: &[(&OutgoingKind, SendingEventType)],
    ) -> Result<Vec<Vec<u8>>> {
        let mut batch = Vec::new();
        let mut keys = Vec::new();
        for (outgoing_kind, event) in requests {
            let mut key = outgoing_kind.get_prefix();
            if let SendingEventType::Pdu(value) = &event {
                key.extend_from_slice(value)
            } else {
                key.extend_from_slice(&services().globals.next_count()?.to_be_bytes())
            }
            let value = if let SendingEventType::Edu(value) = &event {
                &**value
            } else {
                &[]
            };
            batch.push((key.clone(), value.to_owned()));
            keys.push(key);
        }
        self.servernameevent_data
            .insert_batch(&mut batch.into_iter())?;
        Ok(keys)
    }

    fn queued_requests<'a>(
        &'a self,
        outgoing_kind: &OutgoingKind,
    ) -> Box<dyn Iterator<Item = Result<(SendingEventType, Vec<u8>)>> + 'a> {
        let prefix = outgoing_kind.get_prefix();
        return Box::new(
            self.servernameevent_data
                .scan_prefix(prefix)
                .map(|(k, v)| parse_servercurrentevent(&k, v).map(|(_, ev)| (ev, k))),
        );
    }

    fn mark_as_active(&self, events: &[(SendingEventType, Vec<u8>)]) -> Result<()> {
        for (e, key) in events {
            let value = if let SendingEventType::Edu(value) = &e {
                &**value
            } else {
                &[]
            };
            self.servercurrentevent_data.insert(key, value)?;
            self.servernameevent_data.remove(key)?;
        }

        Ok(())
    }

    fn set_latest_educount(&self, server_name: &ServerName, last_count: u64) -> Result<()> {
        self.servername_educount
            .insert(server_name.as_bytes(), &last_count.to_be_bytes())
    }

    fn get_latest_educount(&self, server_name: &ServerName) -> Result<u64> {
        self.servername_educount
            .get(server_name.as_bytes())?
            .map_or(Ok(0), |bytes| {
                utils::u64_from_bytes(&bytes)
                    .map_err(|_| Error::bad_database("Invalid u64 in servername_educount."))
            })
    }
}

#[tracing::instrument(skip(key))]
fn parse_servercurrentevent(
    key: &[u8],
    value: Vec<u8>,
) -> Result<(OutgoingKind, SendingEventType)> {
    // Appservices start with a plus
    Ok::<_, Error>(if key.starts_with(b"+") {
        let mut parts = key[1..].splitn(2, |&b| b == 0xff);

        let server = parts.next().expect("splitn always returns one element");
        let event = parts
            .next()
            .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?;

        let server = utils::string_from_bytes(server).map_err(|_| {
            Error::bad_database("Invalid server bytes in server_currenttransaction")
        })?;

        (
            OutgoingKind::Appservice(server),
            if value.is_empty() {
                SendingEventType::Pdu(event.to_vec())
            } else {
                SendingEventType::Edu(value)
            },
        )
    } else if key.starts_with(b"$") {
        let mut parts = key[1..].splitn(3, |&b| b == 0xff);

        let user = parts.next().expect("splitn always returns one element");
        let user_string = utils::string_from_bytes(user)
            .map_err(|_| Error::bad_database("Invalid user string in servercurrentevent"))?;
        let user_id = UserId::parse(user_string)
            .map_err(|_| Error::bad_database("Invalid user id in servercurrentevent"))?;

        let pushkey = parts
            .next()
            .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?;
        let pushkey_string = utils::string_from_bytes(pushkey)
            .map_err(|_| Error::bad_database("Invalid pushkey in servercurrentevent"))?;

        let event = parts
            .next()
            .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?;

        (
            OutgoingKind::Push(user_id, pushkey_string),
            if value.is_empty() {
                SendingEventType::Pdu(event.to_vec())
            } else {
                // I'm pretty sure this should never be called
                SendingEventType::Edu(value)
            },
        )
    } else {
        let mut parts = key.splitn(2, |&b| b == 0xff);

        let server = parts.next().expect("splitn always returns one element");
        let event = parts
            .next()
            .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?;

        let server = utils::string_from_bytes(server).map_err(|_| {
            Error::bad_database("Invalid server bytes in server_currenttransaction")
        })?;

        (
            OutgoingKind::Normal(ServerName::parse(server).map_err(|_| {
                Error::bad_database("Invalid server string in server_currenttransaction")
            })?),
            if value.is_empty() {
                SendingEventType::Pdu(event.to_vec())
            } else {
                SendingEventType::Edu(value)
            },
        )
    })
}
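parse_servercurrentevent distinguishes destinations purely by the key prefix: a leading '+' for an appservice id, '$' for a user id plus pushkey, and anything else for a plain server name, each followed by a 0xff separator and the event. A sketch of the corresponding prefix construction, going in the opposite direction; the enum and function below are illustrative stand-ins for Conduit's OutgoingKind::get_prefix, inferred from the parser above:

enum Destination {
    Appservice(String),
    Push(String, String), // user id, pushkey
    Normal(String),       // server name
}

/// Build the key prefix that `parse_servercurrentevent` later recognises.
fn get_prefix(dest: &Destination) -> Vec<u8> {
    let mut p = Vec::new();
    match dest {
        Destination::Appservice(id) => {
            p.push(b'+');
            p.extend_from_slice(id.as_bytes());
        }
        Destination::Push(user_id, pushkey) => {
            p.push(b'$');
            p.extend_from_slice(user_id.as_bytes());
            p.push(0xff);
            p.extend_from_slice(pushkey.as_bytes());
        }
        Destination::Normal(server) => p.extend_from_slice(server.as_bytes()),
    }
    p.push(0xff);
    p
}

fn main() {
    let prefix = get_prefix(&Destination::Normal("example.org".to_owned()));
    assert!(!prefix.starts_with(b"+") && !prefix.starts_with(b"$"));
}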
@@ -1,16 +1,9 @@
-use std::sync::Arc;
-
-use crate::Result;
use ruma::{DeviceId, TransactionId, UserId};

-use super::abstraction::Tree;
+use crate::{database::KeyValueDatabase, service, Result};

-pub struct TransactionIds {
-    pub(super) userdevicetxnid_response: Arc<dyn Tree>, // Response can be empty (/sendToDevice) or the event id (/send)
-}
-
-impl TransactionIds {
-    pub fn add_txnid(
+impl service::transaction_ids::Data for KeyValueDatabase {
+    fn add_txnid(
        &self,
        user_id: &UserId,
        device_id: Option<&DeviceId>,
@@ -28,7 +21,7 @@ impl TransactionIds {
        Ok(())
    }

-    pub fn existing_txnid(
+    fn existing_txnid(
        &self,
        user_id: &UserId,
        device_id: Option<&DeviceId>,
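The transaction-id table exists so that a client retrying a request with the same transaction id gets the stored response back instead of the action being performed twice. A tiny in-memory sketch of that idempotency check; the struct, method names and tuple key are illustrative, not the persistent userdevicetxnid_response tree used above:

use std::collections::HashMap;

/// Key: (user id, device id, transaction id); value: the response recorded the first time.
struct TxnIds {
    seen: HashMap<(String, String, String), Vec<u8>>,
}

impl TxnIds {
    fn add_txnid(&mut self, user: &str, device: &str, txn: &str, data: &[u8]) {
        self.seen
            .insert((user.into(), device.into(), txn.into()), data.to_vec());
    }

    fn existing_txnid(&self, user: &str, device: &str, txn: &str) -> Option<&[u8]> {
        self.seen
            .get(&(user.into(), device.into(), txn.into()))
            .map(|v| v.as_slice())
    }
}

fn main() {
    let mut txns = TxnIds { seen: HashMap::new() };
    txns.add_txnid("@alice:example.org", "DEV", "txn-1", b"$event_id");
    // A retried request with the same txn id is answered from the stored value.
    assert!(txns.existing_txnid("@alice:example.org", "DEV", "txn-1").is_some());
}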
@@ -0,0 +1,89 @@
use ruma::{
    api::client::{error::ErrorKind, uiaa::UiaaInfo},
    CanonicalJsonValue, DeviceId, UserId,
};

use crate::{database::KeyValueDatabase, service, Error, Result};

impl service::uiaa::Data for KeyValueDatabase {
    fn set_uiaa_request(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
        session: &str,
        request: &CanonicalJsonValue,
    ) -> Result<()> {
        self.userdevicesessionid_uiaarequest
            .write()
            .unwrap()
            .insert(
                (user_id.to_owned(), device_id.to_owned(), session.to_owned()),
                request.to_owned(),
            );

        Ok(())
    }

    fn get_uiaa_request(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
        session: &str,
    ) -> Option<CanonicalJsonValue> {
        self.userdevicesessionid_uiaarequest
            .read()
            .unwrap()
            .get(&(user_id.to_owned(), device_id.to_owned(), session.to_owned()))
            .map(|j| j.to_owned())
    }

    fn update_uiaa_session(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
        session: &str,
        uiaainfo: Option<&UiaaInfo>,
    ) -> Result<()> {
        let mut userdevicesessionid = user_id.as_bytes().to_vec();
        userdevicesessionid.push(0xff);
        userdevicesessionid.extend_from_slice(device_id.as_bytes());
        userdevicesessionid.push(0xff);
        userdevicesessionid.extend_from_slice(session.as_bytes());

        if let Some(uiaainfo) = uiaainfo {
            self.userdevicesessionid_uiaainfo.insert(
                &userdevicesessionid,
                &serde_json::to_vec(&uiaainfo).expect("UiaaInfo::to_vec always works"),
            )?;
        } else {
            self.userdevicesessionid_uiaainfo
                .remove(&userdevicesessionid)?;
        }

        Ok(())
    }

    fn get_uiaa_session(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
        session: &str,
    ) -> Result<UiaaInfo> {
        let mut userdevicesessionid = user_id.as_bytes().to_vec();
        userdevicesessionid.push(0xff);
        userdevicesessionid.extend_from_slice(device_id.as_bytes());
        userdevicesessionid.push(0xff);
        userdevicesessionid.extend_from_slice(session.as_bytes());

        serde_json::from_slice(
            &self
                .userdevicesessionid_uiaainfo
                .get(&userdevicesessionid)?
                .ok_or(Error::BadRequest(
                    ErrorKind::Forbidden,
                    "UIAA session does not exist.",
                ))?,
        )
        .map_err(|_| Error::bad_database("UiaaInfo in userdeviceid_uiaainfo is invalid."))
    }
}
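set_uiaa_request/get_uiaa_request keep the original request JSON in an in-memory map behind an RwLock, keyed by the (user, device, session) triple, while only the UiaaInfo progress is written to the persistent tree. A stripped-down sketch of that cache using plain Strings in place of ruma's types (names here are illustrative):

use std::collections::BTreeMap;
use std::sync::RwLock;

type SessionKey = (String, String, String); // (user id, device id, session)

struct UiaaRequestCache {
    requests: RwLock<BTreeMap<SessionKey, String>>, // value stands in for CanonicalJsonValue
}

impl UiaaRequestCache {
    fn set(&self, user: &str, device: &str, session: &str, request: &str) {
        self.requests.write().unwrap().insert(
            (user.to_owned(), device.to_owned(), session.to_owned()),
            request.to_owned(),
        );
    }

    fn get(&self, user: &str, device: &str, session: &str) -> Option<String> {
        self.requests
            .read()
            .unwrap()
            .get(&(user.to_owned(), device.to_owned(), session.to_owned()))
            .cloned()
    }
}

fn main() {
    let cache = UiaaRequestCache { requests: RwLock::new(BTreeMap::new()) };
    cache.set("@alice:example.org", "DEV", "sess1", "{\"type\":\"m.login.password\"}");
    assert!(cache.get("@alice:example.org", "DEV", "sess1").is_some());
}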
@ -1,50 +1,29 @@
|
||||||
use crate::{utils, Error, Result};
|
use std::{collections::BTreeMap, mem::size_of};
|
||||||
|
|
||||||
use ruma::{
|
use ruma::{
|
||||||
api::client::{device::Device, error::ErrorKind, filter::IncomingFilterDefinition},
|
api::client::{device::Device, error::ErrorKind, filter::FilterDefinition},
|
||||||
encryption::{CrossSigningKey, DeviceKeys, OneTimeKey},
|
encryption::{CrossSigningKey, DeviceKeys, OneTimeKey},
|
||||||
events::{AnyToDeviceEvent, StateEventType},
|
events::{AnyToDeviceEvent, StateEventType},
|
||||||
serde::Raw,
|
serde::Raw,
|
||||||
DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, MxcUri, RoomAliasId,
|
DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, OwnedDeviceId,
|
||||||
UInt, UserId,
|
OwnedDeviceKeyId, OwnedMxcUri, OwnedUserId, UInt, UserId,
|
||||||
};
|
};
|
||||||
use std::{collections::BTreeMap, mem, sync::Arc};
|
|
||||||
use tracing::warn;
|
use tracing::warn;
|
||||||
|
|
||||||
use super::abstraction::Tree;
|
use crate::{
|
||||||
|
database::KeyValueDatabase,
|
||||||
|
service::{self, users::clean_signatures},
|
||||||
|
services, utils, Error, Result,
|
||||||
|
};
|
||||||
|
|
||||||
pub struct Users {
|
impl service::users::Data for KeyValueDatabase {
|
||||||
pub(super) userid_password: Arc<dyn Tree>,
|
|
||||||
pub(super) userid_displayname: Arc<dyn Tree>,
|
|
||||||
pub(super) userid_avatarurl: Arc<dyn Tree>,
|
|
||||||
pub(super) userid_blurhash: Arc<dyn Tree>,
|
|
||||||
pub(super) userdeviceid_token: Arc<dyn Tree>,
|
|
||||||
pub(super) userdeviceid_metadata: Arc<dyn Tree>, // This is also used to check if a device exists
|
|
||||||
pub(super) userid_devicelistversion: Arc<dyn Tree>, // DevicelistVersion = u64
|
|
||||||
pub(super) token_userdeviceid: Arc<dyn Tree>,
|
|
||||||
|
|
||||||
pub(super) onetimekeyid_onetimekeys: Arc<dyn Tree>, // OneTimeKeyId = UserId + DeviceKeyId
|
|
||||||
pub(super) userid_lastonetimekeyupdate: Arc<dyn Tree>, // LastOneTimeKeyUpdate = Count
|
|
||||||
pub(super) keychangeid_userid: Arc<dyn Tree>, // KeyChangeId = UserId/RoomId + Count
|
|
||||||
pub(super) keyid_key: Arc<dyn Tree>, // KeyId = UserId + KeyId (depends on key type)
|
|
||||||
pub(super) userid_masterkeyid: Arc<dyn Tree>,
|
|
||||||
pub(super) userid_selfsigningkeyid: Arc<dyn Tree>,
|
|
||||||
pub(super) userid_usersigningkeyid: Arc<dyn Tree>,
|
|
||||||
|
|
||||||
pub(super) userfilterid_filter: Arc<dyn Tree>, // UserFilterId = UserId + FilterId
|
|
||||||
|
|
||||||
pub(super) todeviceid_events: Arc<dyn Tree>, // ToDeviceId = UserId + DeviceId + Count
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Users {
|
|
||||||
/// Check if a user has an account on this homeserver.
|
/// Check if a user has an account on this homeserver.
|
||||||
#[tracing::instrument(skip(self, user_id))]
|
fn exists(&self, user_id: &UserId) -> Result<bool> {
|
||||||
pub fn exists(&self, user_id: &UserId) -> Result<bool> {
|
|
||||||
Ok(self.userid_password.get(user_id.as_bytes())?.is_some())
|
Ok(self.userid_password.get(user_id.as_bytes())?.is_some())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Check if account is deactivated
|
/// Check if account is deactivated
|
||||||
#[tracing::instrument(skip(self, user_id))]
|
fn is_deactivated(&self, user_id: &UserId) -> Result<bool> {
|
||||||
pub fn is_deactivated(&self, user_id: &UserId) -> Result<bool> {
|
|
||||||
Ok(self
|
Ok(self
|
||||||
.userid_password
|
.userid_password
|
||||||
.get(user_id.as_bytes())?
|
.get(user_id.as_bytes())?
|
||||||
|
@ -55,37 +34,13 @@ impl Users {
|
||||||
.is_empty())
|
.is_empty())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Check if a user is an admin
|
|
||||||
#[tracing::instrument(skip(self, user_id, rooms, globals))]
|
|
||||||
pub fn is_admin(
|
|
||||||
&self,
|
|
||||||
user_id: &UserId,
|
|
||||||
rooms: &super::rooms::Rooms,
|
|
||||||
globals: &super::globals::Globals,
|
|
||||||
) -> Result<bool> {
|
|
||||||
let admin_room_alias_id = RoomAliasId::parse(format!("#admins:{}", globals.server_name()))
|
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?;
|
|
||||||
let admin_room_id = rooms.id_from_alias(&admin_room_alias_id)?.unwrap();
|
|
||||||
|
|
||||||
rooms.is_joined(user_id, &admin_room_id)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Create a new user account on this homeserver.
|
|
||||||
#[tracing::instrument(skip(self, user_id, password))]
|
|
||||||
pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> {
|
|
||||||
self.set_password(user_id, password)?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the number of users registered on this server.
|
/// Returns the number of users registered on this server.
|
||||||
#[tracing::instrument(skip(self))]
|
fn count(&self) -> Result<usize> {
|
||||||
pub fn count(&self) -> Result<usize> {
|
|
||||||
Ok(self.userid_password.iter().count())
|
Ok(self.userid_password.iter().count())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Find out which user an access token belongs to.
|
/// Find out which user an access token belongs to.
|
||||||
#[tracing::instrument(skip(self, token))]
|
fn find_from_token(&self, token: &str) -> Result<Option<(OwnedUserId, String)>> {
|
||||||
pub fn find_from_token(&self, token: &str) -> Result<Option<(Box<UserId>, String)>> {
|
|
||||||
self.token_userdeviceid
|
self.token_userdeviceid
|
||||||
.get(token.as_bytes())?
|
.get(token.as_bytes())?
|
||||||
.map_or(Ok(None), |bytes| {
|
.map_or(Ok(None), |bytes| {
|
||||||
|
@ -112,55 +67,29 @@ impl Users {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns an iterator over all users on this homeserver.
|
/// Returns an iterator over all users on this homeserver.
|
||||||
#[tracing::instrument(skip(self))]
|
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = Result<OwnedUserId>> + 'a> {
|
||||||
pub fn iter(&self) -> impl Iterator<Item = Result<Box<UserId>>> + '_ {
|
Box::new(self.userid_password.iter().map(|(bytes, _)| {
|
||||||
self.userid_password.iter().map(|(bytes, _)| {
|
|
||||||
UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| {
|
UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| {
|
||||||
Error::bad_database("User ID in userid_password is invalid unicode.")
|
Error::bad_database("User ID in userid_password is invalid unicode.")
|
||||||
})?)
|
})?)
|
||||||
.map_err(|_| Error::bad_database("User ID in userid_password is invalid."))
|
.map_err(|_| Error::bad_database("User ID in userid_password is invalid."))
|
||||||
})
|
}))
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns a list of local users as list of usernames.
|
/// Returns a list of local users as list of usernames.
|
||||||
///
|
///
|
||||||
/// A user account is considered `local` if the length of it's password is greater then zero.
|
/// A user account is considered `local` if the length of it's password is greater then zero.
|
||||||
#[tracing::instrument(skip(self))]
|
fn list_local_users(&self) -> Result<Vec<String>> {
|
||||||
pub fn list_local_users(&self) -> Result<Vec<String>> {
|
|
||||||
let users: Vec<String> = self
|
let users: Vec<String> = self
|
||||||
.userid_password
|
.userid_password
|
||||||
.iter()
|
.iter()
|
||||||
.filter_map(|(username, pw)| self.get_username_with_valid_password(&username, &pw))
|
.filter_map(|(username, pw)| get_username_with_valid_password(&username, &pw))
|
||||||
.collect();
|
.collect();
|
||||||
Ok(users)
|
Ok(users)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Will only return with Some(username) if the password was not empty and the
|
|
||||||
/// username could be successfully parsed.
|
|
||||||
/// If utils::string_from_bytes(...) returns an error that username will be skipped
|
|
||||||
/// and the error will be logged.
|
|
||||||
#[tracing::instrument(skip(self))]
|
|
||||||
fn get_username_with_valid_password(&self, username: &[u8], password: &[u8]) -> Option<String> {
|
|
||||||
// A valid password is not empty
|
|
||||||
if password.is_empty() {
|
|
||||||
None
|
|
||||||
} else {
|
|
||||||
match utils::string_from_bytes(username) {
|
|
||||||
Ok(u) => Some(u),
|
|
||||||
Err(e) => {
|
|
||||||
warn!(
|
|
||||||
"Failed to parse username while calling get_local_users(): {}",
|
|
||||||
e.to_string()
|
|
||||||
);
|
|
||||||
None
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns the password hash for the given user.
|
/// Returns the password hash for the given user.
|
||||||
#[tracing::instrument(skip(self, user_id))]
|
fn password_hash(&self, user_id: &UserId) -> Result<Option<String>> {
|
||||||
pub fn password_hash(&self, user_id: &UserId) -> Result<Option<String>> {
|
|
||||||
self.userid_password
|
self.userid_password
|
||||||
.get(user_id.as_bytes())?
|
.get(user_id.as_bytes())?
|
||||||
.map_or(Ok(None), |bytes| {
|
.map_or(Ok(None), |bytes| {
|
||||||
|
@ -171,10 +100,9 @@ impl Users {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Hash and set the user's password to the Argon2 hash
|
/// Hash and set the user's password to the Argon2 hash
|
||||||
#[tracing::instrument(skip(self, user_id, password))]
|
fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> {
|
||||||
pub fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> {
|
|
||||||
if let Some(password) = password {
|
if let Some(password) = password {
|
||||||
if let Ok(hash) = utils::calculate_hash(password) {
|
if let Ok(hash) = utils::calculate_password_hash(password) {
|
||||||
self.userid_password
|
self.userid_password
|
||||||
.insert(user_id.as_bytes(), hash.as_bytes())?;
|
.insert(user_id.as_bytes(), hash.as_bytes())?;
|
||||||
Ok(())
|
Ok(())
|
||||||
|
@ -191,8 +119,7 @@ impl Users {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns the displayname of a user on this homeserver.
|
/// Returns the displayname of a user on this homeserver.
|
||||||
#[tracing::instrument(skip(self, user_id))]
|
fn displayname(&self, user_id: &UserId) -> Result<Option<String>> {
|
||||||
pub fn displayname(&self, user_id: &UserId) -> Result<Option<String>> {
|
|
||||||
self.userid_displayname
|
self.userid_displayname
|
||||||
.get(user_id.as_bytes())?
|
.get(user_id.as_bytes())?
|
||||||
.map_or(Ok(None), |bytes| {
|
.map_or(Ok(None), |bytes| {
|
||||||
|
@ -203,8 +130,7 @@ impl Users {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Sets a new displayname or removes it if displayname is None. You still need to nofify all rooms of this change.
|
/// Sets a new displayname or removes it if displayname is None. You still need to nofify all rooms of this change.
|
||||||
#[tracing::instrument(skip(self, user_id, displayname))]
|
fn set_displayname(&self, user_id: &UserId, displayname: Option<String>) -> Result<()> {
|
||||||
pub fn set_displayname(&self, user_id: &UserId, displayname: Option<String>) -> Result<()> {
|
|
||||||
if let Some(displayname) = displayname {
|
if let Some(displayname) = displayname {
|
||||||
self.userid_displayname
|
self.userid_displayname
|
||||||
.insert(user_id.as_bytes(), displayname.as_bytes())?;
|
.insert(user_id.as_bytes(), displayname.as_bytes())?;
|
||||||
|
@ -216,8 +142,7 @@ impl Users {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Get the avatar_url of a user.
|
/// Get the avatar_url of a user.
|
||||||
#[tracing::instrument(skip(self, user_id))]
|
fn avatar_url(&self, user_id: &UserId) -> Result<Option<OwnedMxcUri>> {
|
||||||
pub fn avatar_url(&self, user_id: &UserId) -> Result<Option<Box<MxcUri>>> {
|
|
||||||
self.userid_avatarurl
|
self.userid_avatarurl
|
||||||
.get(user_id.as_bytes())?
|
.get(user_id.as_bytes())?
|
||||||
.map(|bytes| {
|
.map(|bytes| {
|
||||||
|
@ -230,8 +155,7 @@ impl Users {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Sets a new avatar_url or removes it if avatar_url is None.
|
/// Sets a new avatar_url or removes it if avatar_url is None.
|
||||||
#[tracing::instrument(skip(self, user_id, avatar_url))]
|
fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option<OwnedMxcUri>) -> Result<()> {
|
||||||
pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option<Box<MxcUri>>) -> Result<()> {
|
|
||||||
if let Some(avatar_url) = avatar_url {
|
if let Some(avatar_url) = avatar_url {
|
||||||
self.userid_avatarurl
|
self.userid_avatarurl
|
||||||
.insert(user_id.as_bytes(), avatar_url.to_string().as_bytes())?;
|
.insert(user_id.as_bytes(), avatar_url.to_string().as_bytes())?;
|
||||||
|
@ -243,8 +167,7 @@ impl Users {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Get the blurhash of a user.
|
/// Get the blurhash of a user.
|
||||||
#[tracing::instrument(skip(self, user_id))]
|
fn blurhash(&self, user_id: &UserId) -> Result<Option<String>> {
|
||||||
pub fn blurhash(&self, user_id: &UserId) -> Result<Option<String>> {
|
|
||||||
self.userid_blurhash
|
self.userid_blurhash
|
||||||
.get(user_id.as_bytes())?
|
.get(user_id.as_bytes())?
|
||||||
.map(|bytes| {
|
.map(|bytes| {
|
||||||
|
@ -257,8 +180,7 @@ impl Users {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Sets a new avatar_url or removes it if avatar_url is None.
|
/// Sets a new avatar_url or removes it if avatar_url is None.
|
||||||
#[tracing::instrument(skip(self, user_id, blurhash))]
|
fn set_blurhash(&self, user_id: &UserId, blurhash: Option<String>) -> Result<()> {
|
||||||
pub fn set_blurhash(&self, user_id: &UserId, blurhash: Option<String>) -> Result<()> {
|
|
||||||
if let Some(blurhash) = blurhash {
|
if let Some(blurhash) = blurhash {
|
||||||
self.userid_blurhash
|
self.userid_blurhash
|
||||||
.insert(user_id.as_bytes(), blurhash.as_bytes())?;
|
.insert(user_id.as_bytes(), blurhash.as_bytes())?;
|
||||||
|
@ -270,8 +192,7 @@ impl Users {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Adds a new device to a user.
|
/// Adds a new device to a user.
|
||||||
#[tracing::instrument(skip(self, user_id, device_id, token, initial_device_display_name))]
|
fn create_device(
|
||||||
pub fn create_device(
|
|
||||||
&self,
|
&self,
|
||||||
user_id: &UserId,
|
user_id: &UserId,
|
||||||
device_id: &DeviceId,
|
device_id: &DeviceId,
|
||||||
|
@ -305,8 +226,7 @@ impl Users {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Removes a device from a user.
|
/// Removes a device from a user.
|
||||||
#[tracing::instrument(skip(self, user_id, device_id))]
|
fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> {
|
||||||
pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> {
|
|
||||||
let mut userdeviceid = user_id.as_bytes().to_vec();
|
let mut userdeviceid = user_id.as_bytes().to_vec();
|
||||||
userdeviceid.push(0xff);
|
userdeviceid.push(0xff);
|
||||||
userdeviceid.extend_from_slice(device_id.as_bytes());
|
userdeviceid.extend_from_slice(device_id.as_bytes());
|
||||||
|
@ -336,31 +256,32 @@ impl Users {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns an iterator over all device ids of this user.
|
/// Returns an iterator over all device ids of this user.
|
||||||
#[tracing::instrument(skip(self, user_id))]
|
fn all_device_ids<'a>(
|
||||||
pub fn all_device_ids<'a>(
|
|
||||||
&'a self,
|
&'a self,
|
||||||
user_id: &UserId,
|
user_id: &UserId,
|
||||||
) -> impl Iterator<Item = Result<Box<DeviceId>>> + 'a {
|
) -> Box<dyn Iterator<Item = Result<OwnedDeviceId>> + 'a> {
|
||||||
let mut prefix = user_id.as_bytes().to_vec();
|
let mut prefix = user_id.as_bytes().to_vec();
|
||||||
prefix.push(0xff);
|
prefix.push(0xff);
|
||||||
// All devices have metadata
|
// All devices have metadata
|
||||||
|
Box::new(
|
||||||
self.userdeviceid_metadata
|
self.userdeviceid_metadata
|
||||||
.scan_prefix(prefix)
|
.scan_prefix(prefix)
|
||||||
.map(|(bytes, _)| {
|
.map(|(bytes, _)| {
|
||||||
Ok(utils::string_from_bytes(
|
Ok(utils::string_from_bytes(
|
||||||
bytes
|
bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| {
|
||||||
.rsplit(|&b| b == 0xff)
|
Error::bad_database("UserDevice ID in db is invalid.")
|
||||||
.next()
|
})?,
|
||||||
.ok_or_else(|| Error::bad_database("UserDevice ID in db is invalid."))?,
|
|
||||||
)
|
)
|
||||||
.map_err(|_| Error::bad_database("Device ID in userdeviceid_metadata is invalid."))?
|
.map_err(|_| {
|
||||||
|
Error::bad_database("Device ID in userdeviceid_metadata is invalid.")
|
||||||
|
})?
|
||||||
.into())
|
.into())
|
||||||
})
|
}),
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Replaces the access token of one device.
|
/// Replaces the access token of one device.
|
||||||
#[tracing::instrument(skip(self, user_id, device_id, token))]
|
fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> {
|
||||||
pub fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> {
|
|
||||||
let mut userdeviceid = user_id.as_bytes().to_vec();
|
let mut userdeviceid = user_id.as_bytes().to_vec();
|
||||||
userdeviceid.push(0xff);
|
userdeviceid.push(0xff);
|
||||||
userdeviceid.extend_from_slice(device_id.as_bytes());
|
userdeviceid.extend_from_slice(device_id.as_bytes());
|
||||||
@@ -383,21 +304,12 @@ impl Users {
        Ok(())
    }

-    #[tracing::instrument(skip(
-        self,
-        user_id,
-        device_id,
-        one_time_key_key,
-        one_time_key_value,
-        globals
-    ))]
-    pub fn add_one_time_key(
+    fn add_one_time_key(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
        one_time_key_key: &DeviceKeyId,
        one_time_key_value: &Raw<OneTimeKey>,
-        globals: &super::globals::Globals,
    ) -> Result<()> {
        let mut key = user_id.as_bytes().to_vec();
        key.push(0xff);
@@ -421,14 +333,15 @@ impl Users {
            &serde_json::to_vec(&one_time_key_value).expect("OneTimeKey::to_vec always works"),
        )?;

-        self.userid_lastonetimekeyupdate
-            .insert(user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?;
+        self.userid_lastonetimekeyupdate.insert(
+            user_id.as_bytes(),
+            &services().globals.next_count()?.to_be_bytes(),
+        )?;

        Ok(())
    }

-    #[tracing::instrument(skip(self, user_id))]
-    pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result<u64> {
+    fn last_one_time_keys_update(&self, user_id: &UserId) -> Result<u64> {
        self.userid_lastonetimekeyupdate
            .get(user_id.as_bytes())?
            .map(|bytes| {
@@ -439,14 +352,12 @@ impl Users {
            .unwrap_or(Ok(0))
    }

-    #[tracing::instrument(skip(self, user_id, device_id, key_algorithm, globals))]
-    pub fn take_one_time_key(
+    fn take_one_time_key(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
        key_algorithm: &DeviceKeyAlgorithm,
-        globals: &super::globals::Globals,
-    ) -> Result<Option<(Box<DeviceKeyId>, Raw<OneTimeKey>)>> {
+    ) -> Result<Option<(OwnedDeviceKeyId, Raw<OneTimeKey>)>> {
        let mut prefix = user_id.as_bytes().to_vec();
        prefix.push(0xff);
        prefix.extend_from_slice(device_id.as_bytes());
@@ -455,8 +366,10 @@ impl Users {
        prefix.extend_from_slice(key_algorithm.as_ref().as_bytes());
        prefix.push(b':');

-        self.userid_lastonetimekeyupdate
-            .insert(user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?;
+        self.userid_lastonetimekeyupdate.insert(
+            user_id.as_bytes(),
+            &services().globals.next_count()?.to_be_bytes(),
+        )?;

        self.onetimekeyid_onetimekeys
            .scan_prefix(prefix)
@@ -466,21 +379,19 @@ impl Users {

                Ok((
                    serde_json::from_slice(
-                        &*key
-                            .rsplit(|&b| b == 0xff)
+                        key.rsplit(|&b| b == 0xff)
                            .next()
                            .ok_or_else(|| Error::bad_database("OneTimeKeyId in db is invalid."))?,
                    )
                    .map_err(|_| Error::bad_database("OneTimeKeyId in db is invalid."))?,
-                    serde_json::from_slice(&*value)
+                    serde_json::from_slice(&value)
                        .map_err(|_| Error::bad_database("OneTimeKeys in db are invalid."))?,
                ))
            })
            .transpose()
    }

-    #[tracing::instrument(skip(self, user_id, device_id))]
-    pub fn count_one_time_keys(
+    fn count_one_time_keys(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
@@ -496,8 +407,8 @@ impl Users {
            .scan_prefix(userdeviceid)
            .map(|(bytes, _)| {
                Ok::<_, Error>(
-                    serde_json::from_slice::<Box<DeviceKeyId>>(
-                        &*bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| {
+                    serde_json::from_slice::<OwnedDeviceKeyId>(
+                        bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| {
                            Error::bad_database("OneTimeKey ID in db is invalid.")
                        })?,
                    )
@@ -512,14 +423,11 @@ impl Users {
        Ok(counts)
    }

-    #[tracing::instrument(skip(self, user_id, device_id, device_keys, rooms, globals))]
-    pub fn add_device_keys(
+    fn add_device_keys(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
        device_keys: &Raw<DeviceKeys>,
-        rooms: &super::rooms::Rooms,
-        globals: &super::globals::Globals,
    ) -> Result<()> {
        let mut userdeviceid = user_id.as_bytes().to_vec();
        userdeviceid.push(0xff);
@@ -530,27 +438,17 @@ impl Users {
            &serde_json::to_vec(&device_keys).expect("DeviceKeys::to_vec always works"),
        )?;

-        self.mark_device_key_update(user_id, rooms, globals)?;
+        self.mark_device_key_update(user_id)?;

        Ok(())
    }

-    #[tracing::instrument(skip(
-        self,
-        master_key,
-        self_signing_key,
-        user_signing_key,
-        rooms,
-        globals
-    ))]
-    pub fn add_cross_signing_keys(
+    fn add_cross_signing_keys(
        &self,
        user_id: &UserId,
        master_key: &Raw<CrossSigningKey>,
        self_signing_key: &Option<Raw<CrossSigningKey>>,
        user_signing_key: &Option<Raw<CrossSigningKey>>,
-        rooms: &super::rooms::Rooms,
-        globals: &super::globals::Globals,
    ) -> Result<()> {
        // TODO: Check signatures

@@ -653,20 +551,17 @@ impl Users {
                .insert(user_id.as_bytes(), &user_signing_key_key)?;
        }

-        self.mark_device_key_update(user_id, rooms, globals)?;
+        self.mark_device_key_update(user_id)?;

        Ok(())
    }

-    #[tracing::instrument(skip(self, target_id, key_id, signature, sender_id, rooms, globals))]
-    pub fn sign_key(
+    fn sign_key(
        &self,
        target_id: &UserId,
        key_id: &str,
        signature: (String, String),
        sender_id: &UserId,
-        rooms: &super::rooms::Rooms,
-        globals: &super::globals::Globals,
    ) -> Result<()> {
        let mut key = target_id.as_bytes().to_vec();
        key.push(0xff);
@@ -684,7 +579,7 @@ impl Users {
            .ok_or_else(|| Error::bad_database("key in keyid_key has no signatures field."))?
            .as_object_mut()
            .ok_or_else(|| Error::bad_database("key in keyid_key has invalid signatures field."))?
-            .entry(sender_id.to_owned())
+            .entry(sender_id.to_string())
            .or_insert_with(|| serde_json::Map::new().into());

        signatures
@@ -698,18 +593,17 @@ impl Users {
        )?;

        // TODO: Should we notify about this change?
-        self.mark_device_key_update(target_id, rooms, globals)?;
+        self.mark_device_key_update(target_id)?;

        Ok(())
    }

-    #[tracing::instrument(skip(self, user_or_room_id, from, to))]
-    pub fn keys_changed<'a>(
+    fn keys_changed<'a>(
        &'a self,
        user_or_room_id: &str,
        from: u64,
        to: Option<u64>,
-    ) -> impl Iterator<Item = Result<Box<UserId>>> + 'a {
+    ) -> Box<dyn Iterator<Item = Result<OwnedUserId>> + 'a> {
        let mut prefix = user_or_room_id.as_bytes().to_vec();
        prefix.push(0xff);

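The return types in the hunks above change from impl Iterator to Box<dyn Iterator>. A plausible reason, consistent with the signatures dropping pub, is that these methods now live behind a trait, and trait methods could not return impl Trait on stable Rust at the time, so the concrete iterator is boxed instead. A hedged, illustrative sketch (the trait and types below are invented for illustration, not Conduit's actual definitions):

    // A plain function may return `impl Iterator`, but a trait method cannot,
    // so the trait-object version boxes the iterator, like the Box::new(...) calls in the diff.
    trait Data {
        fn all_ids<'a>(&'a self) -> Box<dyn Iterator<Item = String> + 'a>;
    }

    struct MemoryStore {
        ids: Vec<String>,
    }

    impl Data for MemoryStore {
        fn all_ids<'a>(&'a self) -> Box<dyn Iterator<Item = String> + 'a> {
            // Box::new erases the concrete iterator type behind the trait object.
            Box::new(self.ids.iter().cloned())
        }
    }

    fn main() {
        let store = MemoryStore { ids: vec!["a".into(), "b".into()] };
        let collected: Vec<_> = store.all_ids().collect();
        assert_eq!(collected, vec!["a".to_string(), "b".to_string()]);
    }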
@@ -718,6 +612,7 @@ impl Users {

        let to = to.unwrap_or(u64::MAX);

+        Box::new(
            self.keychangeid_userid
                .iter_from(&start, false)
                .take_while(move |(k, _)| {
@@ -736,23 +631,29 @@ impl Users {
                })
                .map(|(_, bytes)| {
                    UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| {
-                        Error::bad_database("User ID in devicekeychangeid_userid is invalid unicode.")
+                        Error::bad_database(
+                            "User ID in devicekeychangeid_userid is invalid unicode.",
+                        )
                    })?)
-                    .map_err(|_| Error::bad_database("User ID in devicekeychangeid_userid is invalid."))
+                    .map_err(|_| {
+                        Error::bad_database("User ID in devicekeychangeid_userid is invalid.")
+                    })
                })
+                }),
+        )
    }

-    #[tracing::instrument(skip(self, user_id, rooms, globals))]
-    pub fn mark_device_key_update(
-        &self,
-        user_id: &UserId,
-        rooms: &super::rooms::Rooms,
-        globals: &super::globals::Globals,
-    ) -> Result<()> {
-        let count = globals.next_count()?.to_be_bytes();
-        for room_id in rooms.rooms_joined(user_id).filter_map(|r| r.ok()) {
+    fn mark_device_key_update(&self, user_id: &UserId) -> Result<()> {
+        let count = services().globals.next_count()?.to_be_bytes();
+        for room_id in services()
+            .rooms
+            .state_cache
+            .rooms_joined(user_id)
+            .filter_map(|r| r.ok())
+        {
            // Don't send key updates to unencrypted rooms
-            if rooms
+            if services()
+                .rooms
+                .state_accessor
                .room_state_get(&room_id, &StateEventType::RoomEncryption, "")?
                .is_none()
            {
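In the hunk above the old code received globals and rooms as explicit parameters, while the new code reaches them through a services() accessor. A rough sketch of that kind of lazily initialised global accessor follows; Conduit's real services() is more involved, this only shows the shape:

    // Sketch of a global "services" accessor; names and structure are illustrative only.
    use std::sync::OnceLock;

    struct Globals {
        counter: std::sync::atomic::AtomicU64,
    }

    impl Globals {
        fn next_count(&self) -> u64 {
            self.counter.fetch_add(1, std::sync::atomic::Ordering::SeqCst) + 1
        }
    }

    struct Services {
        globals: Globals,
    }

    static SERVICES: OnceLock<Services> = OnceLock::new();

    fn services() -> &'static Services {
        SERVICES.get_or_init(|| Services {
            globals: Globals {
                counter: std::sync::atomic::AtomicU64::new(0),
            },
        })
    }

    fn main() {
        // Callers no longer have to thread `globals` through every signature:
        let count = services().globals.next_count();
        println!("next count: {count}");
    }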
@@ -774,8 +675,7 @@ impl Users {
        Ok(())
    }

-    #[tracing::instrument(skip(self, user_id, device_id))]
-    pub fn get_device_keys(
+    fn get_device_keys(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
@@ -791,11 +691,10 @@ impl Users {
        })
    }

-    #[tracing::instrument(skip(self, user_id, allowed_signatures))]
-    pub fn get_master_key<F: Fn(&UserId) -> bool>(
+    fn get_master_key(
        &self,
        user_id: &UserId,
-        allowed_signatures: F,
+        allowed_signatures: &dyn Fn(&UserId) -> bool,
    ) -> Result<Option<Raw<CrossSigningKey>>> {
        self.userid_masterkeyid
            .get(user_id.as_bytes())?
@@ -813,11 +712,10 @@ impl Users {
        })
    }

-    #[tracing::instrument(skip(self, user_id, allowed_signatures))]
-    pub fn get_self_signing_key<F: Fn(&UserId) -> bool>(
+    fn get_self_signing_key(
        &self,
        user_id: &UserId,
-        allowed_signatures: F,
+        allowed_signatures: &dyn Fn(&UserId) -> bool,
    ) -> Result<Option<Raw<CrossSigningKey>>> {
        self.userid_selfsigningkeyid
            .get(user_id.as_bytes())?
@@ -835,8 +733,7 @@ impl Users {
        })
    }

-    #[tracing::instrument(skip(self, user_id))]
-    pub fn get_user_signing_key(&self, user_id: &UserId) -> Result<Option<Raw<CrossSigningKey>>> {
+    fn get_user_signing_key(&self, user_id: &UserId) -> Result<Option<Raw<CrossSigningKey>>> {
        self.userid_usersigningkeyid
            .get(user_id.as_bytes())?
            .map_or(Ok(None), |key| {
@@ -848,29 +745,19 @@ impl Users {
        })
    }

-    #[tracing::instrument(skip(
-        self,
-        sender,
-        target_user_id,
-        target_device_id,
-        event_type,
-        content,
-        globals
-    ))]
-    pub fn add_to_device_event(
+    fn add_to_device_event(
        &self,
        sender: &UserId,
        target_user_id: &UserId,
        target_device_id: &DeviceId,
        event_type: &str,
        content: serde_json::Value,
-        globals: &super::globals::Globals,
    ) -> Result<()> {
        let mut key = target_user_id.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(target_device_id.as_bytes());
        key.push(0xff);
-        key.extend_from_slice(&globals.next_count()?.to_be_bytes());
+        key.extend_from_slice(&services().globals.next_count()?.to_be_bytes());

        let mut json = serde_json::Map::new();
        json.insert("type".to_owned(), event_type.to_owned().into());
@@ -884,8 +771,7 @@ impl Users {
        Ok(())
    }

-    #[tracing::instrument(skip(self, user_id, device_id))]
-    pub fn get_to_device_events(
+    fn get_to_device_events(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
@@ -907,8 +793,7 @@ impl Users {
        Ok(events)
    }

-    #[tracing::instrument(skip(self, user_id, device_id, until))]
-    pub fn remove_to_device_events(
+    fn remove_to_device_events(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
@@ -929,7 +814,7 @@ impl Users {
            .map(|(key, _)| {
                Ok::<_, Error>((
                    key.clone(),
-                    utils::u64_from_bytes(&key[key.len() - mem::size_of::<u64>()..key.len()])
+                    utils::u64_from_bytes(&key[key.len() - size_of::<u64>()..key.len()])
                        .map_err(|_| Error::bad_database("ToDeviceId has invalid count bytes."))?,
                ))
            })
@@ -942,8 +827,7 @@ impl Users {
        Ok(())
    }

-    #[tracing::instrument(skip(self, user_id, device_id, device))]
-    pub fn update_device_metadata(
+    fn update_device_metadata(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
@@ -968,8 +852,7 @@ impl Users {
    }

    /// Get device metadata.
-    #[tracing::instrument(skip(self, user_id, device_id))]
-    pub fn get_device_metadata(
+    fn get_device_metadata(
        &self,
        user_id: &UserId,
        device_id: &DeviceId,
@@ -987,8 +870,7 @@ impl Users {
        })
    }

-    #[tracing::instrument(skip(self, user_id))]
-    pub fn get_devicelist_version(&self, user_id: &UserId) -> Result<Option<u64>> {
+    fn get_devicelist_version(&self, user_id: &UserId) -> Result<Option<u64>> {
        self.userid_devicelistversion
            .get(user_id.as_bytes())?
            .map_or(Ok(None), |bytes| {
@@ -998,46 +880,26 @@ impl Users {
        })
    }

-    #[tracing::instrument(skip(self, user_id))]
-    pub fn all_devices_metadata<'a>(
+    fn all_devices_metadata<'a>(
        &'a self,
        user_id: &UserId,
-    ) -> impl Iterator<Item = Result<Device>> + 'a {
+    ) -> Box<dyn Iterator<Item = Result<Device>> + 'a> {
        let mut key = user_id.as_bytes().to_vec();
        key.push(0xff);

+        Box::new(
            self.userdeviceid_metadata
                .scan_prefix(key)
                .map(|(_, bytes)| {
-                    serde_json::from_slice::<Device>(&bytes)
-                        .map_err(|_| Error::bad_database("Device in userdeviceid_metadata is invalid."))
+                    serde_json::from_slice::<Device>(&bytes).map_err(|_| {
+                        Error::bad_database("Device in userdeviceid_metadata is invalid.")
+                    })
                })
-    }
-
-    /// Deactivate account
-    #[tracing::instrument(skip(self, user_id))]
-    pub fn deactivate_account(&self, user_id: &UserId) -> Result<()> {
-        // Remove all associated devices
-        for device_id in self.all_device_ids(user_id) {
-            self.remove_device(user_id, &device_id?)?;
-        }
-
-        // Set the password to "" to indicate a deactivated account. Hashes will never result in an
-        // empty string, so the user will not be able to log in again. Systems like changing the
-        // password without logging in should check if the account is deactivated.
-        self.userid_password.insert(user_id.as_bytes(), &[])?;
-
-        // TODO: Unhook 3PID
-        Ok(())
+                }),
+        )
    }

    /// Creates a new sync filter. Returns the filter id.
-    #[tracing::instrument(skip(self))]
-    pub fn create_filter(
-        &self,
-        user_id: &UserId,
-        filter: &IncomingFilterDefinition,
-    ) -> Result<String> {
+    fn create_filter(&self, user_id: &UserId, filter: &FilterDefinition) -> Result<String> {
        let filter_id = utils::random_string(4);

        let mut key = user_id.as_bytes().to_vec();
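The removed deactivate_account relies on the convention spelled out in its comment: storing an empty password marks the account as deactivated, because a real password hash is never the empty string. A tiny sketch of that check, with illustrative names only:

    // Sketch of the "empty stored password means deactivated" convention described above.
    fn is_deactivated(stored_password_hash: &[u8]) -> bool {
        // A real hash is never empty, so an empty value can safely mark a deactivated account.
        stored_password_hash.is_empty()
    }

    fn main() {
        assert!(is_deactivated(b""));
        assert!(!is_deactivated(b"$argon2id$v=19$m=4096,t=3,p=1$c29tZXNhbHQ$abc123"));
    }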
@@ -1052,12 +914,7 @@ impl Users {
        Ok(filter_id)
    }

-    #[tracing::instrument(skip(self))]
-    pub fn get_filter(
-        &self,
-        user_id: &UserId,
-        filter_id: &str,
-    ) -> Result<Option<IncomingFilterDefinition>> {
+    fn get_filter(&self, user_id: &UserId, filter_id: &str) -> Result<Option<FilterDefinition>> {
        let mut key = user_id.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(filter_id.as_bytes());
@@ -1073,29 +930,24 @@ impl Users {
        }
    }

-    /// Ensure that a user only sees signatures from themselves and the target user
-    fn clean_signatures<F: Fn(&UserId) -> bool>(
-        cross_signing_key: &mut serde_json::Value,
-        user_id: &UserId,
-        allowed_signatures: F,
-    ) -> Result<(), Error> {
-        if let Some(signatures) = cross_signing_key
-            .get_mut("signatures")
-            .and_then(|v| v.as_object_mut())
-        {
-            // Don't allocate for the full size of the current signatures, but require
-            // at most one resize if nothing is dropped
-            let new_capacity = signatures.len() / 2;
-            for (user, signature) in
-                mem::replace(signatures, serde_json::Map::with_capacity(new_capacity))
-            {
-                let id = <&UserId>::try_from(user.as_str())
-                    .map_err(|_| Error::bad_database("Invalid user ID in database."))?;
-                if id == user_id || allowed_signatures(id) {
-                    signatures.insert(user, signature);
+    /// Will only return with Some(username) if the password was not empty and the
+    /// username could be successfully parsed.
+    /// If utils::string_from_bytes(...) returns an error that username will be skipped
+    /// and the error will be logged.
+    fn get_username_with_valid_password(username: &[u8], password: &[u8]) -> Option<String> {
+        // A valid password is not empty
+        if password.is_empty() {
+            None
+        } else {
+            match utils::string_from_bytes(username) {
+                Ok(u) => Some(u),
+                Err(e) => {
+                    warn!(
+                        "Failed to parse username while calling get_local_users(): {}",
+                        e.to_string()
+                    );
+                    None
                }
            }
        }
-
-        Ok(())
    }
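For context, the clean_signatures helper removed in the hunk above kept only the signature entries made by the target user themselves or by signers the caller is allowed to see. A hedged sketch of that filtering using retain, which is a simpler formulation than the mem::replace trick in the original and is shown only to illustrate the semantics; plain strings stand in for real Matrix user IDs:

    use std::collections::BTreeMap;

    // Keep a signature only if it was made by the target user or an allowed signer.
    fn clean_signatures(
        signatures: &mut BTreeMap<String, String>,
        user_id: &str,
        allowed: impl Fn(&str) -> bool,
    ) {
        signatures.retain(|signer, _| signer.as_str() == user_id || allowed(signer.as_str()));
    }

    fn main() {
        let mut sigs = BTreeMap::from([
            ("@alice:example.org".to_string(), "sig-a".to_string()),
            ("@eve:example.org".to_string(), "sig-e".to_string()),
        ]);
        clean_signatures(&mut sigs, "@alice:example.org", |_| false);
        assert!(sigs.contains_key("@alice:example.org"));
        assert!(!sigs.contains_key("@eve:example.org"));
    }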
@@ -1,358 +0,0 @@
-use crate::database::globals::Globals;
-use image::{imageops::FilterType, GenericImageView};
-
-use super::abstraction::Tree;
-use crate::{utils, Error, Result};
-use std::{mem, sync::Arc};
-use tokio::{
-    fs::File,
-    io::{AsyncReadExt, AsyncWriteExt},
-};
-
-pub struct FileMeta {
-    pub content_disposition: Option<String>,
-    pub content_type: Option<String>,
-    pub file: Vec<u8>,
-}
-
-pub struct Media {
-    pub(super) mediaid_file: Arc<dyn Tree>, // MediaId = MXC + WidthHeight + ContentDisposition + ContentType
-}
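As the struct comment above notes, media entries are keyed by MXC URI, thumbnail dimensions, content disposition and content type, separated by 0xff bytes, with width and height set to zero for original uploads. A small sketch of composing such a key (the helper name is illustrative, not the deleted module's API):

    // Key layout: mxc + 0xff + width/height as big-endian u32s + 0xff + disposition + 0xff + type.
    fn media_key(mxc: &str, width: u32, height: u32, disposition: &str, content_type: &str) -> Vec<u8> {
        let mut key = mxc.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(&width.to_be_bytes());
        key.extend_from_slice(&height.to_be_bytes());
        key.push(0xff);
        key.extend_from_slice(disposition.as_bytes());
        key.push(0xff);
        key.extend_from_slice(content_type.as_bytes());
        key
    }

    fn main() {
        let key = media_key("mxc://example.org/abc", 0, 0, "inline", "image/png");
        // Exactly three 0xff separators are present in the composed key.
        assert_eq!(key.iter().filter(|&&b| b == 0xff).count(), 3);
    }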
-
-impl Media {
-    /// Uploads a file.
-    pub async fn create(
-        &self,
-        mxc: String,
-        globals: &Globals,
-        content_disposition: &Option<&str>,
-        content_type: &Option<&str>,
-        file: &[u8],
-    ) -> Result<()> {
-        let mut key = mxc.as_bytes().to_vec();
-        key.push(0xff);
-        key.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail
-        key.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail
-        key.push(0xff);
-        key.extend_from_slice(
-            content_disposition
-                .as_ref()
-                .map(|f| f.as_bytes())
-                .unwrap_or_default(),
-        );
-        key.push(0xff);
-        key.extend_from_slice(
-            content_type
-                .as_ref()
-                .map(|c| c.as_bytes())
-                .unwrap_or_default(),
-        );
-
-        let path = globals.get_media_file(&key);
-        let mut f = File::create(path).await?;
-        f.write_all(file).await?;
-
-        self.mediaid_file.insert(&key, &[])?;
-        Ok(())
-    }
-
-    /// Uploads or replaces a file thumbnail.
-    #[allow(clippy::too_many_arguments)]
-    pub async fn upload_thumbnail(
-        &self,
-        mxc: String,
-        globals: &Globals,
-        content_disposition: &Option<String>,
-        content_type: &Option<String>,
-        width: u32,
-        height: u32,
-        file: &[u8],
-    ) -> Result<()> {
-        let mut key = mxc.as_bytes().to_vec();
-        key.push(0xff);
-        key.extend_from_slice(&width.to_be_bytes());
-        key.extend_from_slice(&height.to_be_bytes());
-        key.push(0xff);
-        key.extend_from_slice(
-            content_disposition
-                .as_ref()
-                .map(|f| f.as_bytes())
-                .unwrap_or_default(),
-        );
-        key.push(0xff);
-        key.extend_from_slice(
-            content_type
-                .as_ref()
-                .map(|c| c.as_bytes())
-                .unwrap_or_default(),
-        );
-
-        let path = globals.get_media_file(&key);
-        let mut f = File::create(path).await?;
-        f.write_all(file).await?;
-
-        self.mediaid_file.insert(&key, &[])?;
-
-        Ok(())
-    }
-
-    /// Downloads a file.
-    pub async fn get(&self, globals: &Globals, mxc: &str) -> Result<Option<FileMeta>> {
-        let mut prefix = mxc.as_bytes().to_vec();
-        prefix.push(0xff);
-        prefix.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail
-        prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail
-        prefix.push(0xff);
-
-        let first = self.mediaid_file.scan_prefix(prefix).next();
-        if let Some((key, _)) = first {
-            let path = globals.get_media_file(&key);
-            let mut file = Vec::new();
-            File::open(path).await?.read_to_end(&mut file).await?;
-            let mut parts = key.rsplit(|&b| b == 0xff);
-
-            let content_type = parts
-                .next()
-                .map(|bytes| {
-                    utils::string_from_bytes(bytes).map_err(|_| {
-                        Error::bad_database("Content type in mediaid_file is invalid unicode.")
-                    })
-                })
-                .transpose()?;
-
-            let content_disposition_bytes = parts
-                .next()
-                .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?;
-
-            let content_disposition = if content_disposition_bytes.is_empty() {
-                None
-            } else {
-                Some(
-                    utils::string_from_bytes(content_disposition_bytes).map_err(|_| {
-                        Error::bad_database(
-                            "Content Disposition in mediaid_file is invalid unicode.",
-                        )
-                    })?,
-                )
-            };
-
-            Ok(Some(FileMeta {
-                content_disposition,
-                content_type,
-                file,
-            }))
-        } else {
-            Ok(None)
-        }
-    }
-
-    /// Returns width, height of the thumbnail and whether it should be cropped. Returns None when
-    /// the server should send the original file.
-    pub fn thumbnail_properties(&self, width: u32, height: u32) -> Option<(u32, u32, bool)> {
-        match (width, height) {
-            (0..=32, 0..=32) => Some((32, 32, true)),
-            (0..=96, 0..=96) => Some((96, 96, true)),
-            (0..=320, 0..=240) => Some((320, 240, false)),
-            (0..=640, 0..=480) => Some((640, 480, false)),
-            (0..=800, 0..=600) => Some((800, 600, false)),
-            _ => None,
-        }
-    }
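thumbnail_properties above rounds a requested size up to one of a few fixed buckets so only a handful of thumbnail sizes are ever stored per file. A free-function restatement with the 567x567 example from the doc comment further below worked through:

    // (567, 567) falls into the (800, 600) bucket and is not cropped; None means "send the original".
    fn thumbnail_properties(width: u32, height: u32) -> Option<(u32, u32, bool)> {
        match (width, height) {
            (0..=32, 0..=32) => Some((32, 32, true)),
            (0..=96, 0..=96) => Some((96, 96, true)),
            (0..=320, 0..=240) => Some((320, 240, false)),
            (0..=640, 0..=480) => Some((640, 480, false)),
            (0..=800, 0..=600) => Some((800, 600, false)),
            _ => None, // larger requests get the original file
        }
    }

    fn main() {
        assert_eq!(thumbnail_properties(567, 567), Some((800, 600, false)));
        assert_eq!(thumbnail_properties(20, 20), Some((32, 32, true)));
        assert_eq!(thumbnail_properties(4000, 3000), None);
    }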
-
-    /// Downloads a file's thumbnail.
-    ///
-    /// Here's an example on how it works:
-    ///
-    /// - Client requests an image with width=567, height=567
-    /// - Server rounds that up to (800, 600), so it doesn't have to save too many thumbnails
-    /// - Server rounds that up again to (958, 600) to fix the aspect ratio (only for width,height>96)
-    /// - Server creates the thumbnail and sends it to the user
-    ///
-    /// For width,height <= 96 the server uses another thumbnailing algorithm which crops the image afterwards.
-    pub async fn get_thumbnail(
-        &self,
-        mxc: &str,
-        globals: &Globals,
-        width: u32,
-        height: u32,
-    ) -> Result<Option<FileMeta>> {
-        let (width, height, crop) = self
-            .thumbnail_properties(width, height)
-            .unwrap_or((0, 0, false)); // 0, 0 because that's the original file
-
-        let mut main_prefix = mxc.as_bytes().to_vec();
-        main_prefix.push(0xff);
-
-        let mut thumbnail_prefix = main_prefix.clone();
-        thumbnail_prefix.extend_from_slice(&width.to_be_bytes());
-        thumbnail_prefix.extend_from_slice(&height.to_be_bytes());
-        thumbnail_prefix.push(0xff);
-
-        let mut original_prefix = main_prefix;
-        original_prefix.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail
-        original_prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail
-        original_prefix.push(0xff);
-
-        let first_thumbnailprefix = self.mediaid_file.scan_prefix(thumbnail_prefix).next();
-        let first_originalprefix = self.mediaid_file.scan_prefix(original_prefix).next();
-        if let Some((key, _)) = first_thumbnailprefix {
-            // Using saved thumbnail
-            let path = globals.get_media_file(&key);
-            let mut file = Vec::new();
-            File::open(path).await?.read_to_end(&mut file).await?;
-            let mut parts = key.rsplit(|&b| b == 0xff);
-
-            let content_type = parts
-                .next()
-                .map(|bytes| {
-                    utils::string_from_bytes(bytes).map_err(|_| {
-                        Error::bad_database("Content type in mediaid_file is invalid unicode.")
-                    })
-                })
-                .transpose()?;
-
-            let content_disposition_bytes = parts
-                .next()
-                .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?;
-
-            let content_disposition = if content_disposition_bytes.is_empty() {
-                None
-            } else {
-                Some(
-                    utils::string_from_bytes(content_disposition_bytes).map_err(|_| {
-                        Error::bad_database("Content Disposition in db is invalid.")
-                    })?,
-                )
-            };
-
-            Ok(Some(FileMeta {
-                content_disposition,
-                content_type,
-                file: file.to_vec(),
-            }))
-        } else if let Some((key, _)) = first_originalprefix {
-            // Generate a thumbnail
-            let path = globals.get_media_file(&key);
-            let mut file = Vec::new();
-            File::open(path).await?.read_to_end(&mut file).await?;
-
-            let mut parts = key.rsplit(|&b| b == 0xff);
-
-            let content_type = parts
-                .next()
-                .map(|bytes| {
-                    utils::string_from_bytes(bytes).map_err(|_| {
-                        Error::bad_database("Content type in mediaid_file is invalid unicode.")
-                    })
-                })
-                .transpose()?;
-
-            let content_disposition_bytes = parts
-                .next()
-                .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?;
-
-            let content_disposition = if content_disposition_bytes.is_empty() {
-                None
-            } else {
-                Some(
-                    utils::string_from_bytes(content_disposition_bytes).map_err(|_| {
-                        Error::bad_database(
-                            "Content Disposition in mediaid_file is invalid unicode.",
-                        )
-                    })?,
-                )
-            };
-
-            if let Ok(image) = image::load_from_memory(&file) {
-                let original_width = image.width();
-                let original_height = image.height();
-                if width > original_width || height > original_height {
-                    return Ok(Some(FileMeta {
-                        content_disposition,
-                        content_type,
-                        file: file.to_vec(),
-                    }));
-                }
-
-                let thumbnail = if crop {
-                    image.resize_to_fill(width, height, FilterType::CatmullRom)
-                } else {
-                    let (exact_width, exact_height) = {
-                        // Copied from image::dynimage::resize_dimensions
-                        let ratio = u64::from(original_width) * u64::from(height);
-                        let nratio = u64::from(width) * u64::from(original_height);
-
-                        let use_width = nratio <= ratio;
-                        let intermediate = if use_width {
-                            u64::from(original_height) * u64::from(width)
-                                / u64::from(original_width)
-                        } else {
-                            u64::from(original_width) * u64::from(height)
-                                / u64::from(original_height)
-                        };
-                        if use_width {
-                            if intermediate <= u64::from(::std::u32::MAX) {
-                                (width, intermediate as u32)
-                            } else {
-                                (
-                                    (u64::from(width) * u64::from(::std::u32::MAX) / intermediate)
-                                        as u32,
-                                    ::std::u32::MAX,
-                                )
-                            }
-                        } else if intermediate <= u64::from(::std::u32::MAX) {
-                            (intermediate as u32, height)
-                        } else {
-                            (
-                                ::std::u32::MAX,
-                                (u64::from(height) * u64::from(::std::u32::MAX) / intermediate)
-                                    as u32,
-                            )
-                        }
-                    };
-
-                    image.thumbnail_exact(exact_width, exact_height)
-                };
-
-                let mut thumbnail_bytes = Vec::new();
-                thumbnail.write_to(&mut thumbnail_bytes, image::ImageOutputFormat::Png)?;
-
-                // Save thumbnail in database so we don't have to generate it again next time
-                let mut thumbnail_key = key.to_vec();
-                let width_index = thumbnail_key
-                    .iter()
-                    .position(|&b| b == 0xff)
-                    .ok_or_else(|| Error::bad_database("Media in db is invalid."))?
-                    + 1;
-                let mut widthheight = width.to_be_bytes().to_vec();
-                widthheight.extend_from_slice(&height.to_be_bytes());
-
-                thumbnail_key.splice(
-                    width_index..width_index + 2 * mem::size_of::<u32>(),
-                    widthheight,
-                );
-
-                let path = globals.get_media_file(&thumbnail_key);
-                let mut f = File::create(path).await?;
-                f.write_all(&thumbnail_bytes).await?;
-
-                self.mediaid_file.insert(&thumbnail_key, &[])?;
-
-                Ok(Some(FileMeta {
-                    content_disposition,
-                    content_type,
-                    file: thumbnail_bytes.to_vec(),
-                }))
-            } else {
-                // Couldn't parse file to generate thumbnail, send original
-                Ok(Some(FileMeta {
-                    content_disposition,
-                    content_type,
-                    file: file.to_vec(),
-                }))
-            }
-        } else {
-            Ok(None)
-        }
-    }
-}
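The resize_dimensions-style block in the deleted code above picks the largest size that fits the requested box while preserving the original aspect ratio, using u64 intermediates to avoid overflow. A hedged sketch with a concrete example; the u32::MAX clamping from the original is omitted for brevity and the sample numbers are illustrative only:

    // Fit (orig_w x orig_h) into (req_w x req_h) while keeping the aspect ratio.
    fn fit_dimensions(orig_w: u32, orig_h: u32, req_w: u32, req_h: u32) -> (u32, u32) {
        let ratio = u64::from(orig_w) * u64::from(req_h);
        let nratio = u64::from(req_w) * u64::from(orig_h);
        if nratio <= ratio {
            // Width is the limiting dimension.
            let h = u64::from(orig_h) * u64::from(req_w) / u64::from(orig_w);
            (req_w, h as u32)
        } else {
            // Height is the limiting dimension.
            let w = u64::from(orig_w) * u64::from(req_h) / u64::from(orig_h);
            (w as u32, req_h)
        }
    }

    fn main() {
        // A 1920x1080 original asked to fit in 800x600 keeps its 16:9 ratio: 800x450.
        assert_eq!(fit_dimensions(1920, 1080, 800, 600), (800, 450));
    }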