mirror of
https://github.com/GothenburgBitFactory/taskchampion-sync-server.git
synced 2026-04-05 17:20:35 +00:00
Compare commits
80 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
a08cda96ce
|
|||
| a862f02682 | |||
| 2e69bea5ea | |||
| c60e95bbf0 | |||
| 9cf07e9d80 | |||
| c3470f4756 | |||
| 789106b517 | |||
| 72aeebdda7 | |||
| eed6421a4e | |||
| 5d013073cb | |||
| 67524a0a91 | |||
| d206729d5e | |||
| bf19b76577 | |||
| 505dd3f442 | |||
| e10f3e6cfb | |||
| 213be852b8 | |||
| b57dd24d9e | |||
| a9cf67c8e2 | |||
| daf6855f14 | |||
| dbc9a6909b | |||
| 1ad9e344c7 | |||
| 3820a8deea | |||
| 2de70ac336 | |||
| c2b4c94fb5 | |||
| 0a317cd86d | |||
| 1b80398365 | |||
| 5a2bd4cde7 | |||
| e5b35210af | |||
| 3bbfcb9f88 | |||
| 8752531e2c | |||
| a94be2649e | |||
| 624efa8b0d | |||
| ae9adf1572 | |||
| 547621950f | |||
| ab6df362bf | |||
| 820aaf363c | |||
| 535db07153 | |||
| 4bc4856ff1 | |||
| c445ac475a | |||
| 6e8c72b543 | |||
| 609660b01f | |||
| 8b1f7e2b30 | |||
| 309abce339 | |||
| 816c9a3c80 | |||
| b858febbca | |||
| 48b3aebee2 | |||
| 60436a5524 | |||
| 25911b44a6 | |||
| c539e604d9 | |||
| 57bbac8fea | |||
| 093446e5fb | |||
| 87d1d026b3 | |||
| 67576fe382 | |||
| 7559364017 | |||
| 61b9293287 | |||
| 0a71cce2d1 | |||
| 3a794341ce | |||
| 4de5c9a345 | |||
| d049618a59 | |||
| cd874bc56a | |||
| 240d1b4df5 | |||
| 953411bff8 | |||
| 91763641c6 | |||
| 721957d7c7 | |||
| 35a4eefda3 | |||
| ad01f28a40 | |||
| 29a4214117 | |||
| b9cdae975b | |||
| 271e5eaf3d | |||
| 67b441081d | |||
| 5abb89c421 | |||
| cd15b2377b | |||
| ceed460707 | |||
| 8a7df6d9d5 | |||
| 92206f2488 | |||
| db8fbb3919 | |||
| ba69f98195 | |||
| cae0bb3fd8 | |||
| 7bec7ce25d | |||
| 4b55423595 |
@ -4,4 +4,6 @@
|
||||
!core/
|
||||
!server/
|
||||
!sqlite/
|
||||
!docker-entrypoint.sh
|
||||
!postgres/
|
||||
!entrypoint-*
|
||||
!Dockerfile*
|
||||
|
||||
3
.env
3
.env
@ -1,3 +0,0 @@
|
||||
# Versions must be major.minor
|
||||
ALPINE_VERSION=3.19
|
||||
RUST_VERSION=1.78
|
||||
18
.github/workflows/add-to-project.yml
vendored
Normal file
18
.github/workflows/add-to-project.yml
vendored
Normal file
@ -0,0 +1,18 @@
|
||||
# This adds all new issues to the Taskwarrior project, for better tracking.
|
||||
# It uses a PAT that belongs to @taskwarrior.
|
||||
name: Add issues to Taskwarrior Project
|
||||
|
||||
on:
|
||||
issues:
|
||||
types:
|
||||
- opened
|
||||
|
||||
jobs:
|
||||
add-to-project:
|
||||
name: Add issue to project
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/add-to-project@v1.0.2
|
||||
with:
|
||||
project-url: https://github.com/orgs/GothenburgBitFactory/projects/4
|
||||
github-token: ${{ secrets.ADD_TO_PROJECT_PAT }}
|
||||
44
.github/workflows/checks.yml
vendored
44
.github/workflows/checks.yml
vendored
@ -13,16 +13,16 @@ jobs:
|
||||
name: "Check & Clippy"
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
- name: Cache cargo registry
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: ~/.cargo/registry
|
||||
key: ${{ runner.os }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }}
|
||||
|
||||
- name: Cache cargo build
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: target
|
||||
key: ${{ runner.os }}-cargo-build-target-${{ hashFiles('**/Cargo.lock') }}
|
||||
@ -48,10 +48,10 @@ jobs:
|
||||
name: "Rustdoc"
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
- name: Cache cargo registry
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: ~/.cargo/registry
|
||||
key: ${{ runner.os }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }}
|
||||
@ -66,7 +66,13 @@ jobs:
|
||||
uses: actions-rs/cargo@v1.0.3
|
||||
with:
|
||||
command: rustdoc
|
||||
args: -p taskchampion-sync-server --all-features -- -Z unstable-options --check -Dwarnings
|
||||
args: -p taskchampion-sync-server --bin taskchampion-sync-server --all-features -- -Z unstable-options --check -Dwarnings
|
||||
|
||||
- name: taskchampion-sync-server-postgres
|
||||
uses: actions-rs/cargo@v1.0.3
|
||||
with:
|
||||
command: rustdoc
|
||||
args: -p taskchampion-sync-server --bin taskchampion-sync-server-postgres --all-features -- -Z unstable-options --check -Dwarnings
|
||||
|
||||
- name: taskchampion-sync-server-core
|
||||
uses: actions-rs/cargo@v1.0.3
|
||||
@ -80,11 +86,17 @@ jobs:
|
||||
command: rustdoc
|
||||
args: -p taskchampion-sync-server-storage-sqlite --all-features -- -Z unstable-options --check -Dwarnings
|
||||
|
||||
- name: taskchampion-sync-server-storage-postgres
|
||||
uses: actions-rs/cargo@v1.0.3
|
||||
with:
|
||||
command: rustdoc
|
||||
args: -p taskchampion-sync-server-storage-postgres --all-features -- -Z unstable-options --check -Dwarnings
|
||||
|
||||
fmt:
|
||||
runs-on: ubuntu-latest
|
||||
name: "Formatting"
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
- uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
@ -102,8 +114,24 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
name: "Cargo Semver Checks"
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v6
|
||||
- uses: obi1kenobi/cargo-semver-checks-action@v2
|
||||
with:
|
||||
# exclude the binary package from semver checks, since it is not published as a crate.
|
||||
exclude: taskchampion-sync-server
|
||||
|
||||
mdbook:
|
||||
runs-on: ubuntu-latest
|
||||
name: "mdBook Documentation"
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
- name: Setup mdBook
|
||||
uses: peaceiris/actions-mdbook@v2
|
||||
with:
|
||||
# if this changes, change it in .github/workflows/publish-docs.yml as well
|
||||
mdbook-version: '0.4.48'
|
||||
|
||||
- run: mdbook test docs
|
||||
- run: mdbook build docs
|
||||
|
||||
49
.github/workflows/docker.yml
vendored
49
.github/workflows/docker.yml
vendored
@ -6,15 +6,41 @@ on:
|
||||
- '*'
|
||||
|
||||
jobs:
|
||||
docker:
|
||||
sqlite:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
- name: Load .env file
|
||||
uses: xom9ikk/dotenv@v2
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
- name: Login to ghcr.io
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.repository_owner }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Docker meta
|
||||
id: meta-sqlite
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: |
|
||||
ghcr.io/gothenburgbitfactory/taskchampion-sync-server
|
||||
tags: |
|
||||
type=ref,event=branch
|
||||
type=semver,pattern={{version}}
|
||||
type=semver,pattern={{major}}.{{minor}}
|
||||
type=match,pattern=\d.\d.\d,value=latest
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
file: "./Dockerfile-sqlite"
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: ${{ steps.meta-sqlite.outputs.tags }}
|
||||
labels: ${{ steps.meta-sqlite.outputs.labels }}
|
||||
postgres:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
- name: Login to ghcr.io
|
||||
@ -24,11 +50,11 @@ jobs:
|
||||
username: ${{ github.repository_owner }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Docker meta
|
||||
id: meta
|
||||
id: meta-postgres
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: |
|
||||
ghcr.io/${{ github.repository }}
|
||||
ghcr.io/gothenburgbitfactory/taskchampion-sync-server-postgres
|
||||
tags: |
|
||||
type=ref,event=branch
|
||||
type=semver,pattern={{version}}
|
||||
@ -37,11 +63,8 @@ jobs:
|
||||
- name: Build and push
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
file: "./Dockerfile-postgres"
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: true
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
build-args: |
|
||||
ALPINE_VERSION=${{ env.ALPINE_VERSION }}
|
||||
RUST_VERSION=${{ env.RUST_VERSION }}
|
||||
tags: ${{ steps.meta-postgres.outputs.tags }}
|
||||
labels: ${{ steps.meta-postgres.outputs.labels }}
|
||||
|
||||
30
.github/workflows/publish-docs.yml
vendored
Normal file
30
.github/workflows/publish-docs.yml
vendored
Normal file
@ -0,0 +1,30 @@
|
||||
name: docs
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- '*'
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
jobs:
|
||||
mdbook-deploy:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
- name: Setup mdBook
|
||||
uses: peaceiris/actions-mdbook@v2
|
||||
with:
|
||||
# if this changes, change it in .github/workflows/checks.yml as well
|
||||
mdbook-version: '0.4.48'
|
||||
|
||||
- run: mdbook build docs
|
||||
|
||||
- name: Deploy
|
||||
uses: peaceiris/actions-gh-pages@v4
|
||||
with:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
publish_dir: ./docs/book
|
||||
32
.github/workflows/rust-tests.yml
vendored
32
.github/workflows/rust-tests.yml
vendored
@ -10,27 +10,45 @@ on:
|
||||
jobs:
|
||||
test:
|
||||
strategy:
|
||||
# A simple matrix for now, but if we introduce an MSRV it can be added here.
|
||||
matrix:
|
||||
postgres:
|
||||
- "17"
|
||||
rust:
|
||||
# MSRV
|
||||
- "1.81.0"
|
||||
- "1.85.0"
|
||||
- "stable"
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
name: "rust ${{ matrix.rust }}"
|
||||
name: "rust ${{ matrix.rust }} / postgres ${{ matrix.postgres }}"
|
||||
|
||||
services:
|
||||
# Service container for PostgreSQL
|
||||
postgres:
|
||||
image: "postgres:${{ matrix.postgres }}"
|
||||
env:
|
||||
POSTGRES_DB: test_db
|
||||
POSTGRES_USER: test_user
|
||||
POSTGRES_PASSWORD: test_password
|
||||
ports:
|
||||
- 5432:5432
|
||||
# Set health checks to ensure Postgres is ready before the job starts
|
||||
options: >-
|
||||
--health-cmd pg_isready
|
||||
--health-interval 10s
|
||||
--health-timeout 5s
|
||||
--health-retries 5
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
- name: Cache cargo registry
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: ~/.cargo/registry
|
||||
key: ${{ runner.os }}-${{ matrix.rust }}-cargo-registry-${{ hashFiles('**/Cargo.lock') }}
|
||||
|
||||
- name: Cache cargo build
|
||||
uses: actions/cache@v4
|
||||
uses: actions/cache@v5
|
||||
with:
|
||||
path: target
|
||||
key: ${{ runner.os }}-${{ matrix.rust }}-cargo-build-target-${{ hashFiles('**/Cargo.lock') }}
|
||||
@ -41,4 +59,6 @@ jobs:
|
||||
override: true
|
||||
|
||||
- name: test
|
||||
env:
|
||||
TEST_DB_URL: postgresql://test_user:test_password@localhost:5432/test_db
|
||||
run: cargo test
|
||||
|
||||
4
.github/workflows/security.yml
vendored
4
.github/workflows/security.yml
vendored
@ -2,7 +2,7 @@ name: security
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 0 * * *'
|
||||
- cron: '33 0 * * THU'
|
||||
push:
|
||||
paths:
|
||||
- '**/Cargo.toml'
|
||||
@ -14,7 +14,7 @@ jobs:
|
||||
permissions: write-all
|
||||
name: "Audit Rust Dependencies"
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v6
|
||||
- uses: rustsec/audit-check@v2.0.0
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
1396
Cargo.lock
generated
1396
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
19
Cargo.toml
19
Cargo.toml
@ -4,12 +4,14 @@ members = [
|
||||
"core",
|
||||
"server",
|
||||
"sqlite",
|
||||
"postgres",
|
||||
]
|
||||
rust-version = "1.81.0" # MSRV
|
||||
rust-version = "1.85.0" # MSRV
|
||||
|
||||
[workspace.dependencies]
|
||||
uuid = { version = "^1.15.1", features = ["serde", "v4"] }
|
||||
actix-web = "^4.9.0"
|
||||
async-trait = "0.1.88"
|
||||
uuid = { version = "^1.19.0", features = ["serde", "v4"] }
|
||||
actix-web = "^4.11.0"
|
||||
anyhow = "1.0"
|
||||
thiserror = "2.0"
|
||||
futures = "^0.3.25"
|
||||
@ -17,10 +19,17 @@ serde_json = "^1.0"
|
||||
serde = { version = "^1.0.147", features = ["derive"] }
|
||||
clap = { version = "^4.5.6", features = ["string", "env"] }
|
||||
log = "^0.4.17"
|
||||
env_logger = "^0.11.5"
|
||||
rusqlite = { version = "0.32", features = ["bundled"] }
|
||||
env_logger = "^0.11.7"
|
||||
rusqlite = { version = "0.37", features = ["bundled"] }
|
||||
chrono = { version = "^0.4.38", features = ["serde"] }
|
||||
actix-rt = "2"
|
||||
tempfile = "3"
|
||||
pretty_assertions = "1"
|
||||
temp-env = "0.3"
|
||||
tokio = { version = "1.48", features = ["rt", "macros"] }
|
||||
tokio-postgres = { version = "0.7.13", features = ["with-uuid-1"] }
|
||||
bb8 = "0.9.0"
|
||||
bb8-postgres = { version = "0.9.0", features = ["with-uuid-1"] }
|
||||
openssl = { version = "0.10.73", default-features = false, features = ["vendored"] }
|
||||
native-tls = { version = "0.2.14", default-features = false, features = ["vendored"] }
|
||||
postgres-native-tls = "0.5.1"
|
||||
|
||||
26
Dockerfile-postgres
Normal file
26
Dockerfile-postgres
Normal file
@ -0,0 +1,26 @@
|
||||
# Versions must be major.minor
|
||||
# Default versions are as below
|
||||
ARG RUST_VERSION=1.85
|
||||
ARG ALPINE_VERSION=3.20
|
||||
|
||||
FROM docker.io/rust:${RUST_VERSION}-alpine${ALPINE_VERSION} AS builder
|
||||
# perl and make are required to build openssl.
|
||||
RUN apk -U add libc-dev perl make
|
||||
COPY Cargo.lock Cargo.toml /data/
|
||||
COPY core /data/core/
|
||||
COPY server /data/server/
|
||||
COPY postgres /data/postgres/
|
||||
COPY sqlite /data/sqlite/
|
||||
RUN cd /data && \
|
||||
cargo build -p taskchampion-sync-server --release --no-default-features --features postgres --bin taskchampion-sync-server-postgres
|
||||
|
||||
FROM docker.io/alpine:${ALPINE_VERSION}
|
||||
COPY --from=builder /data/target/release/taskchampion-sync-server-postgres /bin
|
||||
RUN apk add --no-cache su-exec && \
|
||||
adduser -u 1092 -S -D -H -h /var/lib/taskchampion-sync-server -s /sbin/nologin -G users \
|
||||
-g taskchampion taskchampion && \
|
||||
install -d -m1755 -o1092 -g1092 "/var/lib/taskchampion-sync-server"
|
||||
EXPOSE 8080
|
||||
COPY entrypoint-postgres.sh /bin/entrypoint.sh
|
||||
ENTRYPOINT [ "/bin/entrypoint.sh" ]
|
||||
CMD [ "/bin/taskchampion-sync-server-postgres" ]
|
||||
@ -1,16 +1,17 @@
|
||||
# Versions must be major.minor
|
||||
# Default versions are as below
|
||||
ARG RUST_VERSION=1.78
|
||||
ARG ALPINE_VERSION=3.19
|
||||
ARG RUST_VERSION=1.85
|
||||
ARG ALPINE_VERSION=3.20
|
||||
|
||||
FROM docker.io/rust:${RUST_VERSION}-alpine${ALPINE_VERSION} AS builder
|
||||
RUN apk -U add libc-dev
|
||||
COPY Cargo.lock Cargo.toml /data/
|
||||
COPY core /data/core/
|
||||
COPY server /data/server/
|
||||
COPY postgres /data/postgres/
|
||||
COPY sqlite /data/sqlite/
|
||||
RUN apk -U add libc-dev && \
|
||||
cd /data && \
|
||||
cargo build --release
|
||||
RUN cd /data && \
|
||||
cargo build --release --bin taskchampion-sync-server
|
||||
|
||||
FROM docker.io/alpine:${ALPINE_VERSION}
|
||||
COPY --from=builder /data/target/release/taskchampion-sync-server /bin
|
||||
@ -19,7 +20,7 @@ RUN apk add --no-cache su-exec && \
|
||||
-g taskchampion taskchampion && \
|
||||
install -d -m1755 -o1092 -g1092 "/var/lib/taskchampion-sync-server"
|
||||
EXPOSE 8080
|
||||
VOLUME /var/lib/task-champion-sync-server/data
|
||||
COPY docker-entrypoint.sh /bin
|
||||
ENTRYPOINT [ "/bin/docker-entrypoint.sh" ]
|
||||
VOLUME /var/lib/taskchampion-sync-server/data
|
||||
COPY entrypoint-sqlite.sh /bin/entrypoint.sh
|
||||
ENTRYPOINT [ "/bin/entrypoint.sh" ]
|
||||
CMD [ "/bin/taskchampion-sync-server" ]
|
||||
123
README.md
123
README.md
@ -9,94 +9,21 @@ and other applications embedding TaskChampion can sync.
|
||||
|
||||
## Status
|
||||
|
||||
This repository was spun off from Taskwarrior itself after the 3.0.0
|
||||
release. It is still under development and currently best described as
|
||||
a reference implementation of the Taskchampion sync protocol.
|
||||
This project provides both pre-built images for common use-cases and Rust
|
||||
libraries that can be used to build more sophisticated applications. See [the documentation][documentation]
|
||||
for more on how to use this project.
|
||||
|
||||
It is comprised of three crates:
|
||||
[documentation]: https://gothenburgbitfactory.org/taskchampion-sync-server
|
||||
|
||||
## Repository Guide
|
||||
|
||||
The repository is comprised of four crates:
|
||||
|
||||
- `taskchampion-sync-server-core` implements the core of the protocol
|
||||
- `taskchmpaion-sync-server-sqlite` implements an SQLite backend for the core
|
||||
- `taskchampion-sync-server-storage-sqlite` implements an SQLite backend for the core
|
||||
- `taskchampion-sync-server-storage-postgres` implements a Postgres backend for the core
|
||||
- `taskchampion-sync-server` implements a simple HTTP server for the protocol
|
||||
|
||||
## Running the Server
|
||||
|
||||
The server is a simple binary that serves HTTP requests on a TCP port. The
|
||||
server does not implement TLS; for public deployments, the recommendation is to
|
||||
use a reverse proxy such as Nginx, haproxy, or Apache httpd.
|
||||
|
||||
### Using Docker-Compose
|
||||
|
||||
Every release of the server generates a Docker image in
|
||||
`ghcr.io/gothenburgbitfactory/taskchampion-sync-server`. The tags include
|
||||
`latest` for the latest release, and both minor and patch versions, e.g., `0.5`
|
||||
and `0.5.1`.
|
||||
|
||||
The
|
||||
[`docker-compose.yml`](https://raw.githubusercontent.com/GothenburgBitFactory/taskchampion-sync-server/refs/tags/v0.6.1/docker-compose.yml)
|
||||
file in this repository is sufficient to run taskchampion-sync-server,
|
||||
including setting up TLS certificates using Lets Encrypt, thanks to
|
||||
[Caddy](https://caddyserver.com/).
|
||||
|
||||
You will need a server with ports 80 and 443 open to the Internet and with a
|
||||
fixed, publicly-resolvable hostname. These ports must be available both to your
|
||||
Taskwarrior clients and to the Lets Encrypt servers.
|
||||
|
||||
On that server, download `docker-compose.yml` from the link above (it is pinned
|
||||
to the latest release) into the current directory. Then run
|
||||
|
||||
```sh
|
||||
TASKCHAMPION_SYNC_SERVER_HOSTNAME=taskwarrior.example.com \
|
||||
TASKCHAMPION_SYNC_SERVER_CLIENT_ID=your-client-id \
|
||||
docker compose up
|
||||
```
|
||||
|
||||
The `TASKCHAMPION_SYNC_SERVER_CLIENT_ID` limits the server to the given client
|
||||
ID; omit it to allow all client IDs.
|
||||
|
||||
It can take a few minutes to obtain the certificate; the caddy container will
|
||||
log a message "certificate obtained successfully" when this is complete, or
|
||||
error messages if the process fails. Once this process is complete, configure
|
||||
your `.taskrc`'s to point to the server:
|
||||
|
||||
```
|
||||
sync.server.url=https://taskwarrior.example.com
|
||||
sync.server.client_id=your-client-id
|
||||
sync.encryption_secret=your-encryption-secret
|
||||
```
|
||||
|
||||
The docker-compose images store data in a docker volume named
|
||||
`taskchampion-sync-server_data`. This volume contains all of the task data, as
|
||||
well as the TLS certificate information. It will persist over restarts, in a
|
||||
typical Docker installation. The docker containers will start automatically on
|
||||
system startup. See the docker-compose documentation for more information.
|
||||
|
||||
### Running the Binary
|
||||
|
||||
The server is configured with command-line options. See
|
||||
`taskchampion-sync-server --help` for full details.
|
||||
|
||||
The `--listen` option specifies the interface and port the server listens on.
|
||||
It must contain an IP-Address or a DNS name and a port number. This option is
|
||||
mandatory, but can be repeated to specify multiple interfaces or ports. This
|
||||
value can be specified in environment variable `LISTEN`, as a comma-separated
|
||||
list of values.
|
||||
|
||||
The `--data-dir` option specifies where the server should store its data. This
|
||||
value can be specified in the environment variable `DATA_DIR`.
|
||||
|
||||
By default, the server allows all client IDs. To limit the accepted client IDs,
|
||||
specify them in the environment variable `CLIENT_ID`, as a comma-separated list
|
||||
of UUIDs. Client IDs can be specified with `--allow-client-id`, but this should
|
||||
not be used on shared systems, as command line arguments are visible to all
|
||||
users on the system.
|
||||
|
||||
The server only logs errors by default. To add additional logging output, set
|
||||
environment variable `RUST_LOG` to `info` to get a log message for every
|
||||
request, or to `debug` to get more verbose debugging output.
|
||||
|
||||
## Building
|
||||
|
||||
### Building From Source
|
||||
|
||||
#### Installing Rust
|
||||
@ -120,7 +47,7 @@ rustup override set stable
|
||||
|
||||
[rustup]: https://rustup.rs/
|
||||
|
||||
#### Installing TaskChampion Sync-Server
|
||||
#### Building TaskChampion Sync-Server
|
||||
|
||||
To build TaskChampion Sync-Server binary simply execute the following
|
||||
commands.
|
||||
@ -133,15 +60,31 @@ cargo build --release
|
||||
After build the binary is located in
|
||||
`target/release/taskchampion-sync-server`.
|
||||
|
||||
### Building the Container
|
||||
#### Building the Postgres Backend
|
||||
|
||||
To build the container execute the following commands.
|
||||
The storage backend is controlled by Cargo features `postres` and `sqlite`.
|
||||
By default, only the `sqlite` feature is enabled.
|
||||
To enable building the Postgres backend, add `--features postgres`.
|
||||
The Postgres binary is located in
|
||||
`target/release/taskchampion-sync-server-postgres`.
|
||||
|
||||
### Building the Docker Images
|
||||
|
||||
To build the images, execute the following commands.
|
||||
|
||||
SQLite:
|
||||
```sh
|
||||
docker build \
|
||||
-t taskchampion-sync-server \
|
||||
-f Dockerfile-sqlite
|
||||
```
|
||||
|
||||
Postgres:
|
||||
```sh
|
||||
source .env
|
||||
docker build \
|
||||
--build-arg RUST_VERSION=${RUST_VERSION} \
|
||||
--build-arg ALPINE_VERSION=${ALPINE_VERSION} \
|
||||
-t taskchampion-sync-server .
|
||||
-t taskchampion-sync-server-postgres \
|
||||
-f Dockerfile-postgres
|
||||
```
|
||||
|
||||
Now to run it, simply exec.
|
||||
@ -152,7 +95,7 @@ docker run -t -d \
|
||||
taskchampion-sync-server
|
||||
```
|
||||
|
||||
This start TaskChampion Sync-Server and publish the port to host. Please
|
||||
This starts TaskChampion Sync-Server and publishes port 8080 to the host. Please
|
||||
note that this is a basic run, all data will be destroyed after stop and
|
||||
delete container. You may also set `DATA_DIR`, `CLIENT_ID`, or `LISTEN` with `-e`, e.g.,
|
||||
|
||||
|
||||
@ -4,7 +4,7 @@
|
||||
1. Run `cargo test`
|
||||
1. Run `cargo clean && cargo clippy`
|
||||
1. Remove the `-pre` from `version` in all `*/Cargo.toml`, and from the `version = ..` in any references between packages.
|
||||
1. Update the link to `docker-compose.yml` in `README.md` to refer to the new version.
|
||||
1. Update the link to `docker-compose.yml` in `docs/src/usage/docker-compose.md` to refer to the new version.
|
||||
1. Update the docker image in `docker-compose.yml` to refer to the new version.
|
||||
1. Run `cargo semver-checks` (https://crates.io/crates/cargo-semver-checks)
|
||||
1. Run `cargo build --release`
|
||||
@ -12,8 +12,7 @@
|
||||
1. Run `git tag vX.Y.Z`
|
||||
1. Run `git push upstream`
|
||||
1. Run `git push upstream --tag vX.Y.Z`
|
||||
1. Run `cargo publish -p taskchampion-sync-server-core`
|
||||
1. Run `cargo publish -p taskchampion-sync-server-storage-sqlite` (and add any other new published packages here)
|
||||
1. Run `cargo publish` to publish all packages in the workspace
|
||||
1. Bump the patch version in `*/Cargo.toml` and add the `-pre` suffix. This allows `cargo-semver-checks` to check for changes not accounted for in the version delta.
|
||||
1. Run `cargo build --release` again to update `Cargo.lock`
|
||||
1. Commit that change with comment "Bump to -pre version".
|
||||
|
||||
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "taskchampion-sync-server-core"
|
||||
version = "0.6.1"
|
||||
version = "0.7.2-pre"
|
||||
authors = ["Dustin J. Mitchell <dustin@mozilla.com>"]
|
||||
edition = "2021"
|
||||
description = "Core of sync protocol for TaskChampion"
|
||||
@ -10,6 +10,7 @@ license = "MIT"
|
||||
|
||||
[dependencies]
|
||||
uuid.workspace = true
|
||||
async-trait.workspace = true
|
||||
anyhow.workspace = true
|
||||
thiserror.workspace = true
|
||||
log.workspace = true
|
||||
@ -18,3 +19,4 @@ chrono.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
pretty_assertions.workspace = true
|
||||
tokio.workspace = true
|
||||
|
||||
@ -44,8 +44,9 @@ struct InnerTxn<'a> {
|
||||
committed: bool,
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl Storage for InMemoryStorage {
|
||||
fn txn(&self, client_id: Uuid) -> anyhow::Result<Box<dyn StorageTxn + '_>> {
|
||||
async fn txn(&self, client_id: Uuid) -> anyhow::Result<Box<dyn StorageTxn + '_>> {
|
||||
Ok(Box::new(InnerTxn {
|
||||
client_id,
|
||||
guard: self.0.lock().expect("poisoned lock"),
|
||||
@ -55,12 +56,13 @@ impl Storage for InMemoryStorage {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait(?Send)]
|
||||
impl StorageTxn for InnerTxn<'_> {
|
||||
fn get_client(&mut self) -> anyhow::Result<Option<Client>> {
|
||||
async fn get_client(&mut self) -> anyhow::Result<Option<Client>> {
|
||||
Ok(self.guard.clients.get(&self.client_id).cloned())
|
||||
}
|
||||
|
||||
fn new_client(&mut self, latest_version_id: Uuid) -> anyhow::Result<()> {
|
||||
async fn new_client(&mut self, latest_version_id: Uuid) -> anyhow::Result<()> {
|
||||
if self.guard.clients.contains_key(&self.client_id) {
|
||||
return Err(anyhow::anyhow!("Client {} already exists", self.client_id));
|
||||
}
|
||||
@ -75,7 +77,7 @@ impl StorageTxn for InnerTxn<'_> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn set_snapshot(&mut self, snapshot: Snapshot, data: Vec<u8>) -> anyhow::Result<()> {
|
||||
async fn set_snapshot(&mut self, snapshot: Snapshot, data: Vec<u8>) -> anyhow::Result<()> {
|
||||
let client = self
|
||||
.guard
|
||||
.clients
|
||||
@ -87,7 +89,7 @@ impl StorageTxn for InnerTxn<'_> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_snapshot_data(&mut self, version_id: Uuid) -> anyhow::Result<Option<Vec<u8>>> {
|
||||
async fn get_snapshot_data(&mut self, version_id: Uuid) -> anyhow::Result<Option<Vec<u8>>> {
|
||||
// sanity check
|
||||
let client = self.guard.clients.get(&self.client_id);
|
||||
let client = client.ok_or_else(|| anyhow::anyhow!("no such client"))?;
|
||||
@ -97,7 +99,7 @@ impl StorageTxn for InnerTxn<'_> {
|
||||
Ok(self.guard.snapshots.get(&self.client_id).cloned())
|
||||
}
|
||||
|
||||
fn get_version_by_parent(
|
||||
async fn get_version_by_parent(
|
||||
&mut self,
|
||||
parent_version_id: Uuid,
|
||||
) -> anyhow::Result<Option<Version>> {
|
||||
@ -116,7 +118,7 @@ impl StorageTxn for InnerTxn<'_> {
|
||||
}
|
||||
}
|
||||
|
||||
fn get_version(&mut self, version_id: Uuid) -> anyhow::Result<Option<Version>> {
|
||||
async fn get_version(&mut self, version_id: Uuid) -> anyhow::Result<Option<Version>> {
|
||||
Ok(self
|
||||
.guard
|
||||
.versions
|
||||
@ -124,7 +126,7 @@ impl StorageTxn for InnerTxn<'_> {
|
||||
.cloned())
|
||||
}
|
||||
|
||||
fn add_version(
|
||||
async fn add_version(
|
||||
&mut self,
|
||||
version_id: Uuid,
|
||||
parent_version_id: Uuid,
|
||||
@ -174,7 +176,7 @@ impl StorageTxn for InnerTxn<'_> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn commit(&mut self) -> anyhow::Result<()> {
|
||||
async fn commit(&mut self) -> anyhow::Result<()> {
|
||||
self.committed = true;
|
||||
Ok(())
|
||||
}
|
||||
@ -193,32 +195,33 @@ mod test {
|
||||
use super::*;
|
||||
use chrono::Utc;
|
||||
|
||||
#[test]
|
||||
fn test_get_client_empty() -> anyhow::Result<()> {
|
||||
#[tokio::test]
|
||||
async fn test_get_client_empty() -> anyhow::Result<()> {
|
||||
let storage = InMemoryStorage::new();
|
||||
let mut txn = storage.txn(Uuid::new_v4())?;
|
||||
let maybe_client = txn.get_client()?;
|
||||
let mut txn = storage.txn(Uuid::new_v4()).await?;
|
||||
let maybe_client = txn.get_client().await?;
|
||||
assert!(maybe_client.is_none());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_client_storage() -> anyhow::Result<()> {
|
||||
#[tokio::test]
|
||||
async fn test_client_storage() -> anyhow::Result<()> {
|
||||
let storage = InMemoryStorage::new();
|
||||
let client_id = Uuid::new_v4();
|
||||
let mut txn = storage.txn(client_id)?;
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
|
||||
let latest_version_id = Uuid::new_v4();
|
||||
txn.new_client(latest_version_id)?;
|
||||
txn.new_client(latest_version_id).await?;
|
||||
|
||||
let client = txn.get_client()?.unwrap();
|
||||
let client = txn.get_client().await?.unwrap();
|
||||
assert_eq!(client.latest_version_id, latest_version_id);
|
||||
assert!(client.snapshot.is_none());
|
||||
|
||||
let latest_version_id = Uuid::new_v4();
|
||||
txn.add_version(latest_version_id, Uuid::new_v4(), vec![1, 1])?;
|
||||
txn.add_version(latest_version_id, Uuid::new_v4(), vec![1, 1])
|
||||
.await?;
|
||||
|
||||
let client = txn.get_client()?.unwrap();
|
||||
let client = txn.get_client().await?.unwrap();
|
||||
assert_eq!(client.latest_version_id, latest_version_id);
|
||||
assert!(client.snapshot.is_none());
|
||||
|
||||
@ -227,38 +230,39 @@ mod test {
|
||||
timestamp: Utc::now(),
|
||||
versions_since: 4,
|
||||
};
|
||||
txn.set_snapshot(snap.clone(), vec![1, 2, 3])?;
|
||||
txn.set_snapshot(snap.clone(), vec![1, 2, 3]).await?;
|
||||
|
||||
let client = txn.get_client()?.unwrap();
|
||||
let client = txn.get_client().await?.unwrap();
|
||||
assert_eq!(client.latest_version_id, latest_version_id);
|
||||
assert_eq!(client.snapshot.unwrap(), snap);
|
||||
|
||||
txn.commit()?;
|
||||
txn.commit().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_gvbp_empty() -> anyhow::Result<()> {
|
||||
#[tokio::test]
|
||||
async fn test_gvbp_empty() -> anyhow::Result<()> {
|
||||
let storage = InMemoryStorage::new();
|
||||
let client_id = Uuid::new_v4();
|
||||
let mut txn = storage.txn(client_id)?;
|
||||
let maybe_version = txn.get_version_by_parent(Uuid::new_v4())?;
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
let maybe_version = txn.get_version_by_parent(Uuid::new_v4()).await?;
|
||||
assert!(maybe_version.is_none());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_add_version_and_get_version() -> anyhow::Result<()> {
|
||||
#[tokio::test]
|
||||
async fn test_add_version_and_get_version() -> anyhow::Result<()> {
|
||||
let storage = InMemoryStorage::new();
|
||||
let client_id = Uuid::new_v4();
|
||||
let mut txn = storage.txn(client_id)?;
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
|
||||
let version_id = Uuid::new_v4();
|
||||
let parent_version_id = Uuid::new_v4();
|
||||
let history_segment = b"abc".to_vec();
|
||||
|
||||
txn.new_client(parent_version_id)?;
|
||||
txn.add_version(version_id, parent_version_id, history_segment.clone())?;
|
||||
txn.new_client(parent_version_id).await?;
|
||||
txn.add_version(version_id, parent_version_id, history_segment.clone())
|
||||
.await?;
|
||||
|
||||
let expected = Version {
|
||||
version_id,
|
||||
@ -266,74 +270,76 @@ mod test {
|
||||
history_segment,
|
||||
};
|
||||
|
||||
let version = txn.get_version_by_parent(parent_version_id)?.unwrap();
|
||||
let version = txn.get_version_by_parent(parent_version_id).await?.unwrap();
|
||||
assert_eq!(version, expected);
|
||||
|
||||
let version = txn.get_version(version_id)?.unwrap();
|
||||
let version = txn.get_version(version_id).await?.unwrap();
|
||||
assert_eq!(version, expected);
|
||||
|
||||
txn.commit()?;
|
||||
txn.commit().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_add_version_exists() -> anyhow::Result<()> {
|
||||
#[tokio::test]
|
||||
async fn test_add_version_exists() -> anyhow::Result<()> {
|
||||
let storage = InMemoryStorage::new();
|
||||
let client_id = Uuid::new_v4();
|
||||
let mut txn = storage.txn(client_id)?;
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
|
||||
let version_id = Uuid::new_v4();
|
||||
let parent_version_id = Uuid::new_v4();
|
||||
let history_segment = b"abc".to_vec();
|
||||
|
||||
txn.new_client(parent_version_id)?;
|
||||
txn.add_version(version_id, parent_version_id, history_segment.clone())?;
|
||||
txn.new_client(parent_version_id).await?;
|
||||
txn.add_version(version_id, parent_version_id, history_segment.clone())
|
||||
.await?;
|
||||
assert!(txn
|
||||
.add_version(version_id, parent_version_id, history_segment.clone())
|
||||
.await
|
||||
.is_err());
|
||||
txn.commit()?;
|
||||
txn.commit().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_snapshots() -> anyhow::Result<()> {
|
||||
#[tokio::test]
|
||||
async fn test_snapshots() -> anyhow::Result<()> {
|
||||
let storage = InMemoryStorage::new();
|
||||
let client_id = Uuid::new_v4();
|
||||
let mut txn = storage.txn(client_id)?;
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
|
||||
txn.new_client(Uuid::new_v4())?;
|
||||
assert!(txn.get_client()?.unwrap().snapshot.is_none());
|
||||
txn.new_client(Uuid::new_v4()).await?;
|
||||
assert!(txn.get_client().await?.unwrap().snapshot.is_none());
|
||||
|
||||
let snap = Snapshot {
|
||||
version_id: Uuid::new_v4(),
|
||||
timestamp: Utc::now(),
|
||||
versions_since: 3,
|
||||
};
|
||||
txn.set_snapshot(snap.clone(), vec![9, 8, 9])?;
|
||||
txn.set_snapshot(snap.clone(), vec![9, 8, 9]).await?;
|
||||
|
||||
assert_eq!(
|
||||
txn.get_snapshot_data(snap.version_id)?.unwrap(),
|
||||
txn.get_snapshot_data(snap.version_id).await?.unwrap(),
|
||||
vec![9, 8, 9]
|
||||
);
|
||||
assert_eq!(txn.get_client()?.unwrap().snapshot, Some(snap));
|
||||
assert_eq!(txn.get_client().await?.unwrap().snapshot, Some(snap));
|
||||
|
||||
let snap2 = Snapshot {
|
||||
version_id: Uuid::new_v4(),
|
||||
timestamp: Utc::now(),
|
||||
versions_since: 10,
|
||||
};
|
||||
txn.set_snapshot(snap2.clone(), vec![0, 2, 4, 6])?;
|
||||
txn.set_snapshot(snap2.clone(), vec![0, 2, 4, 6]).await?;
|
||||
|
||||
assert_eq!(
|
||||
txn.get_snapshot_data(snap2.version_id)?.unwrap(),
|
||||
txn.get_snapshot_data(snap2.version_id).await?.unwrap(),
|
||||
vec![0, 2, 4, 6]
|
||||
);
|
||||
assert_eq!(txn.get_client()?.unwrap().snapshot, Some(snap2));
|
||||
assert_eq!(txn.get_client().await?.unwrap().snapshot, Some(snap2));
|
||||
|
||||
// check that mismatched version is detected
|
||||
assert!(txn.get_snapshot_data(Uuid::new_v4()).is_err());
|
||||
assert!(txn.get_snapshot_data(Uuid::new_v4()).await.is_err());
|
||||
|
||||
txn.commit()?;
|
||||
txn.commit().await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@ -106,17 +106,17 @@ impl Server {
|
||||
}
|
||||
|
||||
/// Implementation of the GetChildVersion protocol transaction.
|
||||
pub fn get_child_version(
|
||||
pub async fn get_child_version(
|
||||
&self,
|
||||
client_id: ClientId,
|
||||
parent_version_id: VersionId,
|
||||
) -> Result<GetVersionResult, ServerError> {
|
||||
let mut txn = self.storage.txn(client_id)?;
|
||||
let client = txn.get_client()?.ok_or(ServerError::NoSuchClient)?;
|
||||
let mut txn = self.txn(client_id).await?;
|
||||
let client = txn.get_client().await?.ok_or(ServerError::NoSuchClient)?;
|
||||
|
||||
// If a version with parentVersionId equal to the requested parentVersionId exists, it is
|
||||
// returned.
|
||||
if let Some(version) = txn.get_version_by_parent(parent_version_id)? {
|
||||
if let Some(version) = txn.get_version_by_parent(parent_version_id).await? {
|
||||
return Ok(GetVersionResult::Success {
|
||||
version_id: version.version_id,
|
||||
parent_version_id: version.parent_version_id,
|
||||
@ -142,7 +142,7 @@ impl Server {
|
||||
}
|
||||
|
||||
/// Implementation of the AddVersion protocol transaction
|
||||
pub fn add_version(
|
||||
pub async fn add_version(
|
||||
&self,
|
||||
client_id: ClientId,
|
||||
parent_version_id: VersionId,
|
||||
@ -150,8 +150,8 @@ impl Server {
|
||||
) -> Result<(AddVersionResult, SnapshotUrgency), ServerError> {
|
||||
log::debug!("add_version(client_id: {client_id}, parent_version_id: {parent_version_id})");
|
||||
|
||||
let mut txn = self.storage.txn(client_id)?;
|
||||
let client = txn.get_client()?.ok_or(ServerError::NoSuchClient)?;
|
||||
let mut txn = self.txn(client_id).await?;
|
||||
let client = txn.get_client().await?.ok_or(ServerError::NoSuchClient)?;
|
||||
|
||||
// check if this version is acceptable, under the protection of the transaction
|
||||
if client.latest_version_id != NIL_VERSION_ID
|
||||
@ -169,8 +169,9 @@ impl Server {
|
||||
log::debug!("add_version request accepted: new version_id: {version_id}");
|
||||
|
||||
// update the DB
|
||||
txn.add_version(version_id, parent_version_id, history_segment)?;
|
||||
txn.commit()?;
|
||||
txn.add_version(version_id, parent_version_id, history_segment)
|
||||
.await?;
|
||||
txn.commit().await?;
|
||||
|
||||
// calculate the urgency
|
||||
let time_urgency = match client.snapshot {
|
||||
@ -194,7 +195,7 @@ impl Server {
|
||||
}
|
||||
|
||||
/// Implementation of the AddSnapshot protocol transaction
|
||||
pub fn add_snapshot(
|
||||
pub async fn add_snapshot(
|
||||
&self,
|
||||
client_id: ClientId,
|
||||
version_id: VersionId,
|
||||
@ -202,8 +203,8 @@ impl Server {
|
||||
) -> Result<(), ServerError> {
|
||||
log::debug!("add_snapshot(client_id: {client_id}, version_id: {version_id})");
|
||||
|
||||
let mut txn = self.storage.txn(client_id)?;
|
||||
let client = txn.get_client()?.ok_or(ServerError::NoSuchClient)?;
|
||||
let mut txn = self.txn(client_id).await?;
|
||||
let client = txn.get_client().await?.ok_or(ServerError::NoSuchClient)?;
|
||||
|
||||
// NOTE: if the snapshot is rejected, this function logs about it and returns
|
||||
// Ok(()), as there's no reason to report an errot to the client / user.
|
||||
@ -239,7 +240,7 @@ impl Server {
|
||||
}
|
||||
|
||||
// get the parent version ID
|
||||
if let Some(parent) = txn.get_version(vid)? {
|
||||
if let Some(parent) = txn.get_version(vid).await? {
|
||||
vid = parent.parent_version_id;
|
||||
} else {
|
||||
// this version does not exist; "this should not happen" but if it does,
|
||||
@ -257,21 +258,23 @@ impl Server {
|
||||
versions_since: 0,
|
||||
},
|
||||
data,
|
||||
)?;
|
||||
txn.commit()?;
|
||||
)
|
||||
.await?;
|
||||
txn.commit().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Implementation of the GetSnapshot protocol transaction
|
||||
pub fn get_snapshot(
|
||||
pub async fn get_snapshot(
|
||||
&self,
|
||||
client_id: ClientId,
|
||||
) -> Result<Option<(Uuid, Vec<u8>)>, ServerError> {
|
||||
let mut txn = self.storage.txn(client_id)?;
|
||||
let client = txn.get_client()?.ok_or(ServerError::NoSuchClient)?;
|
||||
let mut txn = self.txn(client_id).await?;
|
||||
let client = txn.get_client().await?.ok_or(ServerError::NoSuchClient)?;
|
||||
|
||||
Ok(if let Some(snap) = client.snapshot {
|
||||
txn.get_snapshot_data(snap.version_id)?
|
||||
txn.get_snapshot_data(snap.version_id)
|
||||
.await?
|
||||
.map(|data| (snap.version_id, data))
|
||||
} else {
|
||||
None
|
||||
@ -279,8 +282,8 @@ impl Server {
|
||||
}
|
||||
|
||||
/// Convenience method to get a transaction for the embedded storage.
|
||||
pub fn txn(&self, client_id: Uuid) -> Result<Box<dyn StorageTxn + '_>, ServerError> {
|
||||
Ok(self.storage.txn(client_id)?)
|
||||
pub async fn txn(&self, client_id: Uuid) -> Result<Box<dyn StorageTxn + '_>, ServerError> {
|
||||
Ok(self.storage.txn(client_id).await?)
|
||||
}
|
||||
}
|
||||
|
||||
@ -288,68 +291,70 @@ impl Server {
|
||||
mod test {
|
||||
use super::*;
|
||||
use crate::inmemory::InMemoryStorage;
|
||||
use crate::storage::{Snapshot, Storage, StorageTxn};
|
||||
use crate::storage::{Snapshot, Storage};
|
||||
use chrono::{Duration, TimeZone, Utc};
|
||||
use pretty_assertions::assert_eq;
|
||||
|
||||
fn setup<INIT, RES>(init: INIT) -> anyhow::Result<(Server, RES)>
|
||||
where
|
||||
INIT: FnOnce(&mut dyn StorageTxn, Uuid) -> anyhow::Result<RES>,
|
||||
{
|
||||
/// Set up for a test, returning storage and a client_id.
|
||||
fn setup() -> (InMemoryStorage, Uuid) {
|
||||
let _ = env_logger::builder().is_test(true).try_init();
|
||||
let storage = InMemoryStorage::new();
|
||||
let client_id = Uuid::new_v4();
|
||||
let res;
|
||||
{
|
||||
let mut txn = storage.txn(client_id)?;
|
||||
res = init(txn.as_mut(), client_id)?;
|
||||
txn.commit()?;
|
||||
}
|
||||
Ok((Server::new(ServerConfig::default(), storage), res))
|
||||
(storage, client_id)
|
||||
}
|
||||
|
||||
/// Utility setup function for add_version tests
|
||||
fn av_setup(
|
||||
/// Convert storage into a Server.
|
||||
fn into_server(storage: InMemoryStorage) -> Server {
|
||||
Server::new(ServerConfig::default(), storage)
|
||||
}
|
||||
|
||||
/// Add versions to the DB for the given client.
|
||||
async fn add_versions(
|
||||
storage: &InMemoryStorage,
|
||||
client_id: Uuid,
|
||||
num_versions: u32,
|
||||
snapshot_version: Option<u32>,
|
||||
snapshot_days_ago: Option<i64>,
|
||||
) -> anyhow::Result<(Server, Uuid, Vec<Uuid>)> {
|
||||
let (server, (client_id, versions)) = setup(|txn, client_id| {
|
||||
let mut versions = vec![];
|
||||
) -> anyhow::Result<Vec<Uuid>> {
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
let mut versions = vec![];
|
||||
|
||||
let mut version_id = Uuid::nil();
|
||||
txn.new_client(Uuid::nil())?;
|
||||
debug_assert!(num_versions < u8::MAX.into());
|
||||
for vnum in 0..num_versions {
|
||||
let parent_version_id = version_id;
|
||||
version_id = Uuid::new_v4();
|
||||
versions.push(version_id);
|
||||
txn.add_version(
|
||||
version_id,
|
||||
parent_version_id,
|
||||
// Generate some unique data for this version.
|
||||
vec![0, 0, vnum as u8],
|
||||
)?;
|
||||
if Some(vnum) == snapshot_version {
|
||||
txn.set_snapshot(
|
||||
Snapshot {
|
||||
version_id,
|
||||
versions_since: 0,
|
||||
timestamp: Utc::now() - Duration::days(snapshot_days_ago.unwrap_or(0)),
|
||||
},
|
||||
// Generate some unique data for this snapshot.
|
||||
vec![vnum as u8],
|
||||
)?;
|
||||
}
|
||||
let mut version_id = Uuid::nil();
|
||||
txn.new_client(Uuid::nil()).await?;
|
||||
assert!(
|
||||
num_versions < u8::MAX.into(),
|
||||
"we cast the version number to u8"
|
||||
);
|
||||
for vnum in 0..num_versions {
|
||||
let parent_version_id = version_id;
|
||||
version_id = Uuid::new_v4();
|
||||
versions.push(version_id);
|
||||
txn.add_version(
|
||||
version_id,
|
||||
parent_version_id,
|
||||
// Generate some unique data for this version.
|
||||
vec![0, 0, vnum as u8],
|
||||
)
|
||||
.await?;
|
||||
if Some(vnum) == snapshot_version {
|
||||
txn.set_snapshot(
|
||||
Snapshot {
|
||||
version_id,
|
||||
versions_since: 0,
|
||||
timestamp: Utc::now() - Duration::days(snapshot_days_ago.unwrap_or(0)),
|
||||
},
|
||||
// Generate some unique data for this snapshot.
|
||||
vec![vnum as u8],
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
Ok((client_id, versions))
|
||||
})?;
|
||||
Ok((server, client_id, versions))
|
||||
}
|
||||
txn.commit().await?;
|
||||
Ok(versions)
|
||||
}
|
||||
|
||||
/// Utility function to check the results of an add_version call
|
||||
fn av_success_check(
|
||||
async fn av_success_check(
|
||||
server: &Server,
|
||||
client_id: Uuid,
|
||||
existing_versions: &[Uuid],
|
||||
@ -364,17 +369,17 @@ mod test {
|
||||
}
|
||||
|
||||
// verify that the storage was updated
|
||||
let mut txn = server.txn(client_id)?;
|
||||
let client = txn.get_client()?.unwrap();
|
||||
let mut txn = server.txn(client_id).await?;
|
||||
let client = txn.get_client().await?.unwrap();
|
||||
assert_eq!(client.latest_version_id, new_version_id);
|
||||
|
||||
let parent_version_id = existing_versions.last().cloned().unwrap_or_else(Uuid::nil);
|
||||
let version = txn.get_version(new_version_id)?.unwrap();
|
||||
let version = txn.get_version(new_version_id).await?.unwrap();
|
||||
assert_eq!(version.version_id, new_version_id);
|
||||
assert_eq!(version.parent_version_id, parent_version_id);
|
||||
assert_eq!(version.history_segment, expected_history);
|
||||
} else {
|
||||
panic!("did not get Ok from add_version: {:?}", add_version_result);
|
||||
panic!("did not get Ok from add_version: {add_version_result:?}");
|
||||
}
|
||||
|
||||
assert_eq!(snapshot_urgency, expected_urgency);
|
||||
@ -426,89 +431,108 @@ mod test {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn get_child_version_not_found_initial_nil() -> anyhow::Result<()> {
|
||||
let (server, client_id) = setup(|txn, client_id| {
|
||||
txn.new_client(NIL_VERSION_ID)?;
|
||||
#[tokio::test]
|
||||
async fn get_child_version_not_found_initial_nil() -> anyhow::Result<()> {
|
||||
let (storage, client_id) = setup();
|
||||
{
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
txn.new_client(NIL_VERSION_ID).await?;
|
||||
txn.commit().await?;
|
||||
}
|
||||
|
||||
let server = into_server(storage);
|
||||
|
||||
Ok(client_id)
|
||||
})?;
|
||||
// when no latest version exists, the first version is NotFound
|
||||
assert_eq!(
|
||||
server.get_child_version(client_id, NIL_VERSION_ID)?,
|
||||
server.get_child_version(client_id, NIL_VERSION_ID).await?,
|
||||
GetVersionResult::NotFound
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn get_child_version_not_found_initial_continuing() -> anyhow::Result<()> {
|
||||
let (server, client_id) = setup(|txn, client_id| {
|
||||
txn.new_client(NIL_VERSION_ID)?;
|
||||
#[tokio::test]
|
||||
async fn get_child_version_not_found_initial_continuing() -> anyhow::Result<()> {
|
||||
let (storage, client_id) = setup();
|
||||
{
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
txn.new_client(NIL_VERSION_ID).await?;
|
||||
txn.commit().await?;
|
||||
}
|
||||
|
||||
Ok(client_id)
|
||||
})?;
|
||||
let server = into_server(storage);
|
||||
|
||||
// when no latest version exists, _any_ child version is NOT_FOUND. This allows syncs to
|
||||
// start to a new server even if the client already has been uploading to another service.
|
||||
assert_eq!(
|
||||
server.get_child_version(client_id, Uuid::new_v4(),)?,
|
||||
server.get_child_version(client_id, Uuid::new_v4(),).await?,
|
||||
GetVersionResult::NotFound
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn get_child_version_not_found_up_to_date() -> anyhow::Result<()> {
|
||||
let (server, (client_id, parent_version_id)) = setup(|txn, client_id| {
|
||||
#[tokio::test]
|
||||
async fn get_child_version_not_found_up_to_date() -> anyhow::Result<()> {
|
||||
let (storage, client_id) = setup();
|
||||
let parent_version_id = Uuid::new_v4();
|
||||
{
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
// add a parent version, but not the requested child version
|
||||
let parent_version_id = Uuid::new_v4();
|
||||
txn.new_client(parent_version_id)?;
|
||||
txn.add_version(parent_version_id, NIL_VERSION_ID, vec![])?;
|
||||
|
||||
Ok((client_id, parent_version_id))
|
||||
})?;
|
||||
txn.new_client(parent_version_id).await?;
|
||||
txn.add_version(parent_version_id, NIL_VERSION_ID, vec![])
|
||||
.await?;
|
||||
txn.commit().await?;
|
||||
}
|
||||
|
||||
let server = into_server(storage);
|
||||
assert_eq!(
|
||||
server.get_child_version(client_id, parent_version_id)?,
|
||||
server
|
||||
.get_child_version(client_id, parent_version_id)
|
||||
.await?,
|
||||
GetVersionResult::NotFound
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn get_child_version_gone_not_latest() -> anyhow::Result<()> {
|
||||
let (server, client_id) = setup(|txn, client_id| {
|
||||
#[tokio::test]
|
||||
async fn get_child_version_gone_not_latest() -> anyhow::Result<()> {
|
||||
let (storage, client_id) = setup();
|
||||
let parent_version_id = Uuid::new_v4();
|
||||
{
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
// Add a parent version, but not the requested parent version
|
||||
let parent_version_id = Uuid::new_v4();
|
||||
txn.new_client(parent_version_id)?;
|
||||
txn.add_version(parent_version_id, NIL_VERSION_ID, vec![])?;
|
||||
|
||||
Ok(client_id)
|
||||
})?;
|
||||
txn.new_client(parent_version_id).await?;
|
||||
txn.add_version(parent_version_id, NIL_VERSION_ID, vec![])
|
||||
.await?;
|
||||
txn.commit().await?;
|
||||
}
|
||||
|
||||
let server = into_server(storage);
|
||||
assert_eq!(
|
||||
server.get_child_version(client_id, Uuid::new_v4(),)?,
|
||||
server.get_child_version(client_id, Uuid::new_v4(),).await?,
|
||||
GetVersionResult::Gone
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn get_child_version_found() -> anyhow::Result<()> {
|
||||
let (server, (client_id, version_id, parent_version_id, history_segment)) =
|
||||
setup(|txn, client_id| {
|
||||
let version_id = Uuid::new_v4();
|
||||
let parent_version_id = Uuid::new_v4();
|
||||
let history_segment = b"abcd".to_vec();
|
||||
#[tokio::test]
|
||||
async fn get_child_version_found() -> anyhow::Result<()> {
|
||||
let (storage, client_id) = setup();
|
||||
let version_id = Uuid::new_v4();
|
||||
let parent_version_id = Uuid::new_v4();
|
||||
let history_segment = b"abcd".to_vec();
|
||||
{
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
txn.new_client(version_id).await?;
|
||||
txn.add_version(version_id, parent_version_id, history_segment.clone())
|
||||
.await?;
|
||||
txn.commit().await?;
|
||||
}
|
||||
|
||||
txn.new_client(version_id)?;
|
||||
txn.add_version(version_id, parent_version_id, history_segment.clone())?;
|
||||
|
||||
Ok((client_id, version_id, parent_version_id, history_segment))
|
||||
})?;
|
||||
let server = into_server(storage);
|
||||
assert_eq!(
|
||||
server.get_child_version(client_id, parent_version_id)?,
|
||||
server
|
||||
.get_child_version(client_id, parent_version_id)
|
||||
.await?,
|
||||
GetVersionResult::Success {
|
||||
version_id,
|
||||
parent_version_id,
|
||||
@ -518,29 +542,41 @@ mod test {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn add_version_conflict() -> anyhow::Result<()> {
|
||||
let (server, client_id, versions) = av_setup(3, None, None)?;
|
||||
#[tokio::test]
|
||||
async fn add_version_conflict() -> anyhow::Result<()> {
|
||||
let (storage, client_id) = setup();
|
||||
let versions = add_versions(&storage, client_id, 3, None, None).await?;
|
||||
|
||||
// try to add a child of a version other than the latest
|
||||
let server = into_server(storage);
|
||||
assert_eq!(
|
||||
server.add_version(client_id, versions[1], vec![3, 6, 9])?.0,
|
||||
server
|
||||
.add_version(client_id, versions[1], vec![3, 6, 9])
|
||||
.await?
|
||||
.0,
|
||||
AddVersionResult::ExpectedParentVersion(versions[2])
|
||||
);
|
||||
|
||||
// verify that the storage wasn't updated
|
||||
let mut txn = server.txn(client_id)?;
|
||||
assert_eq!(txn.get_client()?.unwrap().latest_version_id, versions[2]);
|
||||
assert_eq!(txn.get_version_by_parent(versions[2])?, None);
|
||||
let mut txn = server.txn(client_id).await?;
|
||||
assert_eq!(
|
||||
txn.get_client().await?.unwrap().latest_version_id,
|
||||
versions[2]
|
||||
);
|
||||
assert_eq!(txn.get_version_by_parent(versions[2]).await?, None);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn add_version_with_existing_history() -> anyhow::Result<()> {
|
||||
let (server, client_id, versions) = av_setup(1, None, None)?;
|
||||
#[tokio::test]
|
||||
async fn add_version_with_existing_history() -> anyhow::Result<()> {
|
||||
let (storage, client_id) = setup();
|
||||
let versions = add_versions(&storage, client_id, 1, None, None).await?;
|
||||
|
||||
let result = server.add_version(client_id, versions[0], vec![3, 6, 9])?;
|
||||
let server = into_server(storage);
|
||||
let result = server
|
||||
.add_version(client_id, versions[0], vec![3, 6, 9])
|
||||
.await?;
|
||||
|
||||
av_success_check(
|
||||
&server,
|
||||
@ -550,17 +586,22 @@ mod test {
|
||||
vec![3, 6, 9],
|
||||
// urgency=high because there are no snapshots yet
|
||||
SnapshotUrgency::High,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn add_version_with_no_history() -> anyhow::Result<()> {
|
||||
let (server, client_id, versions) = av_setup(0, None, None)?;
|
||||
#[tokio::test]
|
||||
async fn add_version_with_no_history() -> anyhow::Result<()> {
|
||||
let (storage, client_id) = setup();
|
||||
let versions = add_versions(&storage, client_id, 0, None, None).await?;
|
||||
|
||||
let server = into_server(storage);
|
||||
let parent_version_id = Uuid::nil();
|
||||
let result = server.add_version(client_id, parent_version_id, vec![3, 6, 9])?;
|
||||
let result = server
|
||||
.add_version(client_id, parent_version_id, vec![3, 6, 9])
|
||||
.await?;
|
||||
|
||||
av_success_check(
|
||||
&server,
|
||||
@ -570,16 +611,21 @@ mod test {
|
||||
vec![3, 6, 9],
|
||||
// urgency=high because there are no snapshots yet
|
||||
SnapshotUrgency::High,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn add_version_success_recent_snapshot() -> anyhow::Result<()> {
|
||||
let (server, client_id, versions) = av_setup(1, Some(0), None)?;
|
||||
#[tokio::test]
|
||||
async fn add_version_success_recent_snapshot() -> anyhow::Result<()> {
|
||||
let (storage, client_id) = setup();
|
||||
let versions = add_versions(&storage, client_id, 1, Some(0), None).await?;
|
||||
|
||||
let result = server.add_version(client_id, versions[0], vec![1, 2, 3])?;
|
||||
let server = into_server(storage);
|
||||
let result = server
|
||||
.add_version(client_id, versions[0], vec![1, 2, 3])
|
||||
.await?;
|
||||
|
||||
av_success_check(
|
||||
&server,
|
||||
@ -589,17 +635,22 @@ mod test {
|
||||
vec![1, 2, 3],
|
||||
// no snapshot request since the previous version has a snapshot
|
||||
SnapshotUrgency::None,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn add_version_success_aged_snapshot() -> anyhow::Result<()> {
|
||||
#[tokio::test]
|
||||
async fn add_version_success_aged_snapshot() -> anyhow::Result<()> {
|
||||
// one snapshot, but it was 50 days ago
|
||||
let (server, client_id, versions) = av_setup(1, Some(0), Some(50))?;
|
||||
let (storage, client_id) = setup();
|
||||
let versions = add_versions(&storage, client_id, 1, Some(0), Some(50)).await?;
|
||||
|
||||
let result = server.add_version(client_id, versions[0], vec![1, 2, 3])?;
|
||||
let server = into_server(storage);
|
||||
let result = server
|
||||
.add_version(client_id, versions[0], vec![1, 2, 3])
|
||||
.await?;
|
||||
|
||||
av_success_check(
|
||||
&server,
|
||||
@ -609,18 +660,24 @@ mod test {
|
||||
vec![1, 2, 3],
|
||||
// urgency=high due to days since the snapshot
|
||||
SnapshotUrgency::High,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn add_version_success_snapshot_many_versions_ago() -> anyhow::Result<()> {
|
||||
#[tokio::test]
|
||||
async fn add_version_success_snapshot_many_versions_ago() -> anyhow::Result<()> {
|
||||
// one snapshot, but it was 50 versions ago
|
||||
let (mut server, client_id, versions) = av_setup(50, Some(0), None)?;
|
||||
let (storage, client_id) = setup();
|
||||
let versions = add_versions(&storage, client_id, 50, Some(0), None).await?;
|
||||
|
||||
let mut server = into_server(storage);
|
||||
server.config.snapshot_versions = 30;
|
||||
|
||||
let result = server.add_version(client_id, versions[49], vec![1, 2, 3])?;
|
||||
let result = server
|
||||
.add_version(client_id, versions[49], vec![1, 2, 3])
|
||||
.await?;
|
||||
|
||||
av_success_check(
|
||||
&server,
|
||||
@ -630,136 +687,165 @@ mod test {
|
||||
vec![1, 2, 3],
|
||||
// urgency=high due to number of versions since the snapshot
|
||||
SnapshotUrgency::High,
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn add_snapshot_success_latest() -> anyhow::Result<()> {
|
||||
let (server, (client_id, version_id)) = setup(|txn, client_id| {
|
||||
let version_id = Uuid::new_v4();
|
||||
#[tokio::test]
|
||||
async fn add_snapshot_success_latest() -> anyhow::Result<()> {
|
||||
let (storage, client_id) = setup();
|
||||
let version_id = Uuid::new_v4();
|
||||
|
||||
{
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
// set up a task DB with one version in it
|
||||
txn.new_client(version_id)?;
|
||||
txn.add_version(version_id, NIL_VERSION_ID, vec![])?;
|
||||
txn.new_client(version_id).await?;
|
||||
txn.add_version(version_id, NIL_VERSION_ID, vec![]).await?;
|
||||
|
||||
// add a snapshot for that version
|
||||
Ok((client_id, version_id))
|
||||
})?;
|
||||
server.add_snapshot(client_id, version_id, vec![1, 2, 3])?;
|
||||
txn.commit().await?;
|
||||
}
|
||||
|
||||
let server = into_server(storage);
|
||||
server
|
||||
.add_snapshot(client_id, version_id, vec![1, 2, 3])
|
||||
.await?;
|
||||
|
||||
// verify the snapshot
|
||||
let mut txn = server.txn(client_id)?;
|
||||
let client = txn.get_client()?.unwrap();
|
||||
let mut txn = server.txn(client_id).await?;
|
||||
let client = txn.get_client().await?.unwrap();
|
||||
let snapshot = client.snapshot.unwrap();
|
||||
assert_eq!(snapshot.version_id, version_id);
|
||||
assert_eq!(snapshot.versions_since, 0);
|
||||
assert_eq!(
|
||||
txn.get_snapshot_data(version_id).unwrap(),
|
||||
txn.get_snapshot_data(version_id).await.unwrap(),
|
||||
Some(vec![1, 2, 3])
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn add_snapshot_success_older() -> anyhow::Result<()> {
|
||||
let (server, (client_id, version_id_1)) = setup(|txn, client_id| {
|
||||
let version_id_1 = Uuid::new_v4();
|
||||
let version_id_2 = Uuid::new_v4();
|
||||
#[tokio::test]
|
||||
async fn add_snapshot_success_older() -> anyhow::Result<()> {
|
||||
let (storage, client_id) = setup();
|
||||
let version_id_1 = Uuid::new_v4();
|
||||
let version_id_2 = Uuid::new_v4();
|
||||
|
||||
{
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
// set up a task DB with two versions in it
|
||||
txn.new_client(version_id_2)?;
|
||||
txn.add_version(version_id_1, NIL_VERSION_ID, vec![])?;
|
||||
txn.add_version(version_id_2, version_id_1, vec![])?;
|
||||
txn.new_client(version_id_2).await?;
|
||||
txn.add_version(version_id_1, NIL_VERSION_ID, vec![])
|
||||
.await?;
|
||||
txn.add_version(version_id_2, version_id_1, vec![]).await?;
|
||||
|
||||
txn.commit().await?;
|
||||
}
|
||||
|
||||
Ok((client_id, version_id_1))
|
||||
})?;
|
||||
// add a snapshot for version 1
|
||||
server.add_snapshot(client_id, version_id_1, vec![1, 2, 3])?;
|
||||
let server = into_server(storage);
|
||||
server
|
||||
.add_snapshot(client_id, version_id_1, vec![1, 2, 3])
|
||||
.await?;
|
||||
|
||||
// verify the snapshot
|
||||
let mut txn = server.txn(client_id)?;
|
||||
let client = txn.get_client()?.unwrap();
|
||||
let mut txn = server.txn(client_id).await?;
|
||||
let client = txn.get_client().await?.unwrap();
|
||||
let snapshot = client.snapshot.unwrap();
|
||||
assert_eq!(snapshot.version_id, version_id_1);
|
||||
assert_eq!(snapshot.versions_since, 0);
|
||||
assert_eq!(
|
||||
txn.get_snapshot_data(version_id_1).unwrap(),
|
||||
txn.get_snapshot_data(version_id_1).await.unwrap(),
|
||||
Some(vec![1, 2, 3])
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn add_snapshot_fails_no_such() -> anyhow::Result<()> {
|
||||
let (server, client_id) = setup(|txn, client_id| {
|
||||
let version_id_1 = Uuid::new_v4();
|
||||
let version_id_2 = Uuid::new_v4();
|
||||
#[tokio::test]
|
||||
async fn add_snapshot_fails_no_such() -> anyhow::Result<()> {
|
||||
let (storage, client_id) = setup();
|
||||
let version_id_1 = Uuid::new_v4();
|
||||
let version_id_2 = Uuid::new_v4();
|
||||
|
||||
{
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
// set up a task DB with two versions in it
|
||||
txn.new_client(version_id_2)?;
|
||||
txn.add_version(version_id_1, NIL_VERSION_ID, vec![])?;
|
||||
txn.add_version(version_id_2, version_id_1, vec![])?;
|
||||
txn.new_client(version_id_2).await?;
|
||||
txn.add_version(version_id_1, NIL_VERSION_ID, vec![])
|
||||
.await?;
|
||||
txn.add_version(version_id_2, version_id_1, vec![]).await?;
|
||||
|
||||
// add a snapshot for unknown version
|
||||
Ok(client_id)
|
||||
})?;
|
||||
txn.commit().await?;
|
||||
}
|
||||
|
||||
// add a snapshot for unknown version
|
||||
let server = into_server(storage);
|
||||
let version_id_unk = Uuid::new_v4();
|
||||
server.add_snapshot(client_id, version_id_unk, vec![1, 2, 3])?;
|
||||
server
|
||||
.add_snapshot(client_id, version_id_unk, vec![1, 2, 3])
|
||||
.await?;
|
||||
|
||||
// verify the snapshot does not exist
|
||||
let mut txn = server.txn(client_id)?;
|
||||
let client = txn.get_client()?.unwrap();
|
||||
let mut txn = server.txn(client_id).await?;
|
||||
let client = txn.get_client().await?.unwrap();
|
||||
assert!(client.snapshot.is_none());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn add_snapshot_fails_too_old() -> anyhow::Result<()> {
|
||||
let (server, (client_id, version_ids)) = setup(|txn, client_id| {
|
||||
let mut version_id = Uuid::new_v4();
|
||||
let mut parent_version_id = Uuid::nil();
|
||||
let mut version_ids = vec![];
|
||||
#[tokio::test]
|
||||
async fn add_snapshot_fails_too_old() -> anyhow::Result<()> {
|
||||
let (storage, client_id) = setup();
|
||||
let mut version_id = Uuid::new_v4();
|
||||
let mut parent_version_id = Uuid::nil();
|
||||
let mut version_ids = vec![];
|
||||
|
||||
{
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
// set up a task DB with 10 versions in it (oldest to newest)
|
||||
txn.new_client(Uuid::nil())?;
|
||||
txn.new_client(Uuid::nil()).await?;
|
||||
for _ in 0..10 {
|
||||
txn.add_version(version_id, parent_version_id, vec![])?;
|
||||
txn.add_version(version_id, parent_version_id, vec![])
|
||||
.await?;
|
||||
version_ids.push(version_id);
|
||||
parent_version_id = version_id;
|
||||
version_id = Uuid::new_v4();
|
||||
}
|
||||
|
||||
// add a snapshot for the earliest of those
|
||||
Ok((client_id, version_ids))
|
||||
})?;
|
||||
server.add_snapshot(client_id, version_ids[0], vec![1, 2, 3])?;
|
||||
txn.commit().await?;
|
||||
}
|
||||
|
||||
// add a snapshot for the earliest of those
|
||||
let server = into_server(storage);
|
||||
server
|
||||
.add_snapshot(client_id, version_ids[0], vec![1, 2, 3])
|
||||
.await?;
|
||||
|
||||
// verify the snapshot does not exist
|
||||
let mut txn = server.txn(client_id)?;
|
||||
let client = txn.get_client()?.unwrap();
|
||||
let mut txn = server.txn(client_id).await?;
|
||||
let client = txn.get_client().await?.unwrap();
|
||||
assert!(client.snapshot.is_none());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn add_snapshot_fails_newer_exists() -> anyhow::Result<()> {
|
||||
let (server, (client_id, version_ids)) = setup(|txn, client_id| {
|
||||
let mut version_id = Uuid::new_v4();
|
||||
let mut parent_version_id = Uuid::nil();
|
||||
let mut version_ids = vec![];
|
||||
#[tokio::test]
|
||||
async fn add_snapshot_fails_newer_exists() -> anyhow::Result<()> {
|
||||
let (storage, client_id) = setup();
|
||||
let mut version_id = Uuid::new_v4();
|
||||
let mut parent_version_id = Uuid::nil();
|
||||
let mut version_ids = vec![];
|
||||
|
||||
{
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
// set up a task DB with 5 versions in it (oldest to newest) and a snapshot of the
|
||||
// middle one
|
||||
txn.new_client(Uuid::nil())?;
|
||||
txn.new_client(Uuid::nil()).await?;
|
||||
for _ in 0..5 {
|
||||
txn.add_version(version_id, parent_version_id, vec![])?;
|
||||
txn.add_version(version_id, parent_version_id, vec![])
|
||||
.await?;
|
||||
version_ids.push(version_id);
|
||||
parent_version_id = version_id;
|
||||
version_id = Uuid::new_v4();
|
||||
@ -771,55 +857,64 @@ mod test {
|
||||
timestamp: Utc.with_ymd_and_hms(2001, 9, 9, 1, 46, 40).unwrap(),
|
||||
},
|
||||
vec![1, 2, 3],
|
||||
)?;
|
||||
)
|
||||
.await?;
|
||||
|
||||
// add a snapshot for the earliest of those
|
||||
Ok((client_id, version_ids))
|
||||
})?;
|
||||
txn.commit().await?;
|
||||
}
|
||||
|
||||
server.add_snapshot(client_id, version_ids[0], vec![9, 9, 9])?;
|
||||
// add a snapshot for the earliest of those
|
||||
let server = into_server(storage);
|
||||
server
|
||||
.add_snapshot(client_id, version_ids[0], vec![9, 9, 9])
|
||||
.await?;
|
||||
|
||||
// verify the snapshot was not replaced
|
||||
let mut txn = server.txn(client_id)?;
|
||||
let client = txn.get_client()?.unwrap();
|
||||
let mut txn = server.txn(client_id).await?;
|
||||
let client = txn.get_client().await?.unwrap();
|
||||
let snapshot = client.snapshot.unwrap();
|
||||
assert_eq!(snapshot.version_id, version_ids[2]);
|
||||
assert_eq!(snapshot.versions_since, 2);
|
||||
assert_eq!(
|
||||
txn.get_snapshot_data(version_ids[2]).unwrap(),
|
||||
txn.get_snapshot_data(version_ids[2]).await.unwrap(),
|
||||
Some(vec![1, 2, 3])
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn add_snapshot_fails_nil_version() -> anyhow::Result<()> {
|
||||
let (server, client_id) = setup(|txn, client_id| {
|
||||
#[tokio::test]
|
||||
async fn add_snapshot_fails_nil_version() -> anyhow::Result<()> {
|
||||
let (storage, client_id) = setup();
|
||||
{
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
// just set up the client
|
||||
txn.new_client(NIL_VERSION_ID)?;
|
||||
txn.new_client(NIL_VERSION_ID).await?;
|
||||
txn.commit().await?;
|
||||
}
|
||||
|
||||
// add a snapshot for the nil version
|
||||
Ok(client_id)
|
||||
})?;
|
||||
|
||||
server.add_snapshot(client_id, NIL_VERSION_ID, vec![9, 9, 9])?;
|
||||
let server = into_server(storage);
|
||||
server
|
||||
.add_snapshot(client_id, NIL_VERSION_ID, vec![9, 9, 9])
|
||||
.await?;
|
||||
|
||||
// verify the snapshot does not exist
|
||||
let mut txn = server.txn(client_id)?;
|
||||
let client = txn.get_client()?.unwrap();
|
||||
let mut txn = server.txn(client_id).await?;
|
||||
let client = txn.get_client().await?.unwrap();
|
||||
assert!(client.snapshot.is_none());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn get_snapshot_found() -> anyhow::Result<()> {
|
||||
let (server, (client_id, data, snapshot_version_id)) = setup(|txn, client_id| {
|
||||
let data = vec![1, 2, 3];
|
||||
let snapshot_version_id = Uuid::new_v4();
|
||||
#[tokio::test]
|
||||
async fn get_snapshot_found() -> anyhow::Result<()> {
|
||||
let (storage, client_id) = setup();
|
||||
let data = vec![1, 2, 3];
|
||||
let snapshot_version_id = Uuid::new_v4();
|
||||
|
||||
txn.new_client(snapshot_version_id)?;
|
||||
{
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
txn.new_client(snapshot_version_id).await?;
|
||||
txn.set_snapshot(
|
||||
Snapshot {
|
||||
version_id: snapshot_version_id,
|
||||
@ -827,25 +922,31 @@ mod test {
|
||||
timestamp: Utc.with_ymd_and_hms(2001, 9, 9, 1, 46, 40).unwrap(),
|
||||
},
|
||||
data.clone(),
|
||||
)?;
|
||||
Ok((client_id, data, snapshot_version_id))
|
||||
})?;
|
||||
)
|
||||
.await?;
|
||||
txn.commit().await?;
|
||||
}
|
||||
|
||||
let server = into_server(storage);
|
||||
assert_eq!(
|
||||
server.get_snapshot(client_id)?,
|
||||
server.get_snapshot(client_id).await?,
|
||||
Some((snapshot_version_id, data))
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn get_snapshot_not_found() -> anyhow::Result<()> {
|
||||
let (server, client_id) = setup(|txn, client_id| {
|
||||
txn.new_client(NIL_VERSION_ID)?;
|
||||
Ok(client_id)
|
||||
})?;
|
||||
#[tokio::test]
|
||||
async fn get_snapshot_not_found() -> anyhow::Result<()> {
|
||||
let (storage, client_id) = setup();
|
||||
{
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
txn.new_client(NIL_VERSION_ID).await?;
|
||||
txn.commit().await?;
|
||||
}
|
||||
|
||||
assert_eq!(server.get_snapshot(client_id)?, None);
|
||||
let server = into_server(storage);
|
||||
assert_eq!(server.get_snapshot(client_id).await?, None);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -37,39 +37,46 @@ pub struct Version {
|
||||
///
|
||||
/// Transactions must be sequentially consistent. That is, the results of transactions performed
|
||||
/// in storage must be as if each were executed sequentially in some order. In particular,
|
||||
/// un-committed changes must not be read by another transaction.
|
||||
/// un-committed changes must not be read by another transaction, but committed changes must
|
||||
/// be visible to subequent transations. Together, this guarantees that `add_version` reliably
|
||||
/// constructs a linear sequence of versions.
|
||||
///
|
||||
/// Transactions with different client IDs cannot share any data, so it is safe to handle them
|
||||
/// concurrently.
|
||||
///
|
||||
/// Changes in a transaction that is dropped without calling `commit` must not appear in any other
|
||||
/// transaction.
|
||||
#[async_trait::async_trait(?Send)]
|
||||
pub trait StorageTxn {
|
||||
/// Get information about the client for this transaction
|
||||
fn get_client(&mut self) -> anyhow::Result<Option<Client>>;
|
||||
async fn get_client(&mut self) -> anyhow::Result<Option<Client>>;
|
||||
|
||||
/// Create the client for this transaction, with the given latest_version_id. The client must
|
||||
/// not already exist.
|
||||
fn new_client(&mut self, latest_version_id: Uuid) -> anyhow::Result<()>;
|
||||
async fn new_client(&mut self, latest_version_id: Uuid) -> anyhow::Result<()>;
|
||||
|
||||
/// Set the client's most recent snapshot.
|
||||
fn set_snapshot(&mut self, snapshot: Snapshot, data: Vec<u8>) -> anyhow::Result<()>;
|
||||
async fn set_snapshot(&mut self, snapshot: Snapshot, data: Vec<u8>) -> anyhow::Result<()>;
|
||||
|
||||
/// Get the data for the most recent snapshot. The version_id
|
||||
/// is used to verify that the snapshot is for the correct version.
|
||||
fn get_snapshot_data(&mut self, version_id: Uuid) -> anyhow::Result<Option<Vec<u8>>>;
|
||||
async fn get_snapshot_data(&mut self, version_id: Uuid) -> anyhow::Result<Option<Vec<u8>>>;
|
||||
|
||||
/// Get a version, indexed by parent version id
|
||||
fn get_version_by_parent(&mut self, parent_version_id: Uuid)
|
||||
-> anyhow::Result<Option<Version>>;
|
||||
async fn get_version_by_parent(
|
||||
&mut self,
|
||||
parent_version_id: Uuid,
|
||||
) -> anyhow::Result<Option<Version>>;
|
||||
|
||||
/// Get a version, indexed by its own version id
|
||||
fn get_version(&mut self, version_id: Uuid) -> anyhow::Result<Option<Version>>;
|
||||
async fn get_version(&mut self, version_id: Uuid) -> anyhow::Result<Option<Version>>;
|
||||
|
||||
/// Add a version (that must not already exist), and
|
||||
/// - update latest_version_id
|
||||
/// - update latest_version_id from parent_version_id to version_id
|
||||
/// - increment snapshot.versions_since
|
||||
fn add_version(
|
||||
/// Fails if the existing `latest_version_id` is not equal to `parent_version_id`. Check
|
||||
/// this by calling `get_client` earlier in the same transaction.
|
||||
async fn add_version(
|
||||
&mut self,
|
||||
version_id: Uuid,
|
||||
parent_version_id: Uuid,
|
||||
@ -78,12 +85,13 @@ pub trait StorageTxn {
|
||||
|
||||
/// Commit any changes made in the transaction. It is an error to call this more than
|
||||
/// once. It is safe to skip this call for read-only operations.
|
||||
fn commit(&mut self) -> anyhow::Result<()>;
|
||||
async fn commit(&mut self) -> anyhow::Result<()>;
|
||||
}
|
||||
|
||||
/// A trait for objects able to act as storage. Most of the interesting behavior is in the
|
||||
/// [`crate::storage::StorageTxn`] trait.
|
||||
#[async_trait::async_trait]
|
||||
pub trait Storage: Send + Sync {
|
||||
/// Begin a transaction for the given client ID.
|
||||
fn txn(&self, client_id: Uuid) -> anyhow::Result<Box<dyn StorageTxn + '_>>;
|
||||
async fn txn(&self, client_id: Uuid) -> anyhow::Result<Box<dyn StorageTxn + '_>>;
|
||||
}
|
||||
|
||||
@ -43,7 +43,7 @@ services:
|
||||
condition: service_completed_successfully
|
||||
|
||||
tss:
|
||||
image: ghcr.io/gothenburgbitfactory/taskchampion-sync-server:0.6.1
|
||||
image: ghcr.io/gothenburgbitfactory/taskchampion-sync-server:0.7.1
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- "RUST_LOG=info"
|
||||
|
||||
2
docs/.gitignore
vendored
Normal file
2
docs/.gitignore
vendored
Normal file
@ -0,0 +1,2 @@
|
||||
book
|
||||
tmp
|
||||
3
docs/README.md
Normal file
3
docs/README.md
Normal file
@ -0,0 +1,3 @@
|
||||
This is an [mdbook](https://rust-lang.github.io/mdBook/index.html) book.
|
||||
Minor modifications can be made without installing the mdbook tool, as the content is simple Markdown.
|
||||
Changes are verified on pull requests.
|
||||
9
docs/book.toml
Normal file
9
docs/book.toml
Normal file
@ -0,0 +1,9 @@
|
||||
[book]
|
||||
authors = ["Dustin J. Mitchell"]
|
||||
language = "en"
|
||||
multilingual = false
|
||||
src = "src"
|
||||
title = "TaskChampion Sync Server"
|
||||
|
||||
[output.html]
|
||||
default-theme = "ayu"
|
||||
11
docs/src/SUMMARY.md
Normal file
11
docs/src/SUMMARY.md
Normal file
@ -0,0 +1,11 @@
|
||||
# Summary
|
||||
|
||||
- [Introduction](./introduction.md)
|
||||
- [Usage](./usage.md)
|
||||
- [Docker Compose](./usage/docker-compose.md)
|
||||
- [Docker Images](./usage/docker-images.md)
|
||||
- [Binaries](./usage/binaries.md)
|
||||
- [Integration](./integration.md)
|
||||
- [Pre-built Images](./integration/pre-built.md)
|
||||
- [Rust Crates](./integration/crates.md)
|
||||
- [Sync Protocol Implementation](./integration/protocol-impl.md)
|
||||
16
docs/src/integration.md
Normal file
16
docs/src/integration.md
Normal file
@ -0,0 +1,16 @@
|
||||
# Integration
|
||||
|
||||
Taskchampion-sync-server can be integrated into larger applications, such as
|
||||
web-based hosting services.
|
||||
|
||||
- Most deployments can simply use the pre-built Docker images to implement the
|
||||
sync protocol, handling other aspects of the application in separate
|
||||
containers. See [Pre-built Images](./integration/pre-built.md).
|
||||
|
||||
- More complex deployments may wish to modify or extend the operation of the
|
||||
server. These can use the Rust crates to build precisely the desired
|
||||
functionality. See [Rust Crates](./integration/crates.md).
|
||||
|
||||
- If desired, an integration may completely re-implement the [sync
|
||||
protocol](https://gothenburgbitfactory.org/taskchampion/sync.html). See [Sync
|
||||
Protocol Implementation](./integration/protocol-impl.md).
|
||||
17
docs/src/integration/crates.md
Normal file
17
docs/src/integration/crates.md
Normal file
@ -0,0 +1,17 @@
|
||||
# Rust Crates
|
||||
|
||||
This project publishes several Rust crates on `crates.io`:
|
||||
|
||||
- [`taskchampion-sync-server-core`](https://docs.rs/taskchampion-sync-server-core)
|
||||
implements the core of the protocol
|
||||
- [`taskchampion-sync-server-storage-sqlite`](https://docs.rs/taskchampion-sync-server-storage-sqlite)
|
||||
implements an SQLite backend for the core
|
||||
- [`taskchampion-sync-server-storage-postgres`](https://docs.rs/taskchampion-sync-server-storage-postgres)
|
||||
implements a Postgres backend for the core
|
||||
|
||||
If you are building an integration with, for example, a custom storage system,
|
||||
it may be helpful to use the `core` crate and provide a custom implementation
|
||||
of its `Storage` trait.
|
||||
|
||||
We suggest that any generally useful extensions, such as additional storage
|
||||
backends, be published as open-source packages.
|
||||
40
docs/src/integration/pre-built.md
Normal file
40
docs/src/integration/pre-built.md
Normal file
@ -0,0 +1,40 @@
|
||||
# Pre-built Images
|
||||
|
||||
The pre-built Postgres Docker image described in [Docker
|
||||
Images](../usage/docker-images.md) may be adequate for a production deployment.
|
||||
The image is stateless and can be easily scaled horizontally to increase
|
||||
capacity.
|
||||
|
||||
## Database Schema
|
||||
|
||||
The schema defined in
|
||||
[`postgres/schema.sql`](https://github.com/GothenburgBitFactory/taskchampion-sync-server/blob/main/postgres/schema.sql)
|
||||
must be applied to the database before the container will function.
|
||||
|
||||
The schema is stable, and any changes to the schema will be made in a major
|
||||
version with migration instructions provided.
|
||||
|
||||
An integration may:
|
||||
|
||||
- Add additional tables to the database
|
||||
- Add additional columns to the `clients` table. If those columns do not have
|
||||
default values, ensure the server is configured with `CREATE_CLIENTS=false` as
|
||||
described below.
|
||||
- Insert rows into the `clients` table, using default values for all columns
|
||||
except `client_id` and any application-specific columns.
|
||||
- Delete rows from the `clients` table. Note that this table is configured to
|
||||
automatically delete all data associated with a client when the client's row is
|
||||
deleted.
|
||||
|
||||
## Managing Clients
|
||||
|
||||
By default, taskchampion-sync-server creates a new, empty client when it
|
||||
receives a connection from an unrecognized client ID. Setting
|
||||
`CREATE_CLIENTS=false` disables this functionality, and is recommended in
|
||||
production deployments to avoid abuse.
|
||||
|
||||
In this configuration, it is the responsibility of the integration to create
|
||||
new client rows when desired, using a statement like `INSERT into clients
|
||||
(client_id) values ($1)` with the new client ID as a parameter. Similarly,
|
||||
clients may be deleted, along with all stored task data, using a statement like
|
||||
`DELETE from clients where client_id = $1`.
|
||||
10
docs/src/integration/protocol-impl.md
Normal file
10
docs/src/integration/protocol-impl.md
Normal file
@ -0,0 +1,10 @@
|
||||
# Sync Protocol Implementation
|
||||
|
||||
The [sync protocol](https://gothenburgbitfactory.org/taskchampion/sync.html) is
|
||||
an open specification, and can be re-implemented from that specification as
|
||||
desired. This specification is not battle-tested, so refer to
|
||||
taskchampion-sync-server's implementation to resolve any ambiguities, and
|
||||
please create pull requests to resolve the ambiguity in the specification.
|
||||
|
||||
We suggest that new implementations be published as open-source packages where
|
||||
possible.
|
||||
30
docs/src/introduction.md
Normal file
30
docs/src/introduction.md
Normal file
@ -0,0 +1,30 @@
|
||||
# Introduction
|
||||
|
||||
Taskchampion-sync-server is an implementation of the TaskChampion [sync
|
||||
protocol][sync-protocol] server. It supports synchronizing Taskwarrior tasks
|
||||
between multiple systems.
|
||||
|
||||
The project provides both pre-built images for common use-cases (see
|
||||
[usage](./usage.md)) and Rust libraries that can be used to build more
|
||||
sophisticated applications ([integration](./integration.md)).
|
||||
|
||||
It also serves as a reference implementation: where the
|
||||
[specification][sync-protocol] is ambiguous, this implementation's
|
||||
interpretation is favored in resolving the ambiguity. Other implementations of
|
||||
the protocol should interoperate with this implementation.
|
||||
|
||||
## Sync Overview
|
||||
|
||||
The server identifies each user with a client ID. For example, when
|
||||
syncing Taskwarrior tasks between a desktop computer and a laptop, both systems
|
||||
would use the same client ID to indicate that they share the same user's task data.
|
||||
|
||||
Task data is encrypted, and the server does not have access to the encryption
|
||||
secret. The server sees only encrypted data and cannot read or modify tasks in
|
||||
any way.
|
||||
|
||||
To perform a sync, a replica first downloads and decrypts any changes that have
|
||||
been sent to the server since its last sync. It then gathers any local changes,
|
||||
encrypts them, and uploads them to the server.
|
||||
|
||||
[sync-protocol]: https://gothenburgbitfactory.org/taskchampion/sync.html
|
||||
22
docs/src/usage.md
Normal file
22
docs/src/usage.md
Normal file
@ -0,0 +1,22 @@
|
||||
# Usage
|
||||
|
||||
This repository is flexible and can be used in a number of ways, to suit your
|
||||
needs.
|
||||
|
||||
- If you only need a place to sync your tasks, using cloud storage may be
|
||||
cheaper and easier than running taskchampion-sync-server. See
|
||||
[task-sync(5)](http://taskwarrior.org/docs/man/task-sync.5/) for details on
|
||||
cloud storage.
|
||||
|
||||
- If you have a publicly accessible server, such as a VPS, you can use `docker
|
||||
compose` to run taskchampion-sync-server as pre-built docker images. See
|
||||
[Docker Compose](./usage/docker-compose.md).
|
||||
|
||||
- If you would like more control, such as to deploy taskchampion-sync-server
|
||||
within an orchestration environment such as Kubernetes, you can deploy the
|
||||
docker images directly. See [Docker Images](./usage/docker-images.md).
|
||||
|
||||
- For even more control, or to avoid the overhead of container images, you can
|
||||
build and run the taskchampion-sync-server binary directly. See
|
||||
[Binaries](./usage/binaries.md).
|
||||
|
||||
71
docs/src/usage/binaries.md
Normal file
71
docs/src/usage/binaries.md
Normal file
@ -0,0 +1,71 @@
|
||||
# Binaries
|
||||
|
||||
Taskchampion-sync-server is a single binary that serves HTTP requests on a TCP
|
||||
port. The server does not implement TLS; for public deployments, the
|
||||
recommendation is to use a reverse proxy such as Nginx, haproxy, or Apache
|
||||
httpd.
|
||||
|
||||
One binary is provided for each storage backend:
|
||||
|
||||
- `taskchampion-sync-server` (SQLite)
|
||||
- `taskchampion-sync-server-postgres` (Postgres)
|
||||
|
||||
### Building the Binary
|
||||
|
||||
This is a standard Rust project, and can be built with `cargo build --release`.
|
||||
|
||||
By default, only the SQLite binary is built. To also build the Postgres binary,
|
||||
use
|
||||
```none
|
||||
cargo build --release --features postgres
|
||||
```
|
||||
|
||||
To disable building the SQLite binary and build only the Postgres binary, use
|
||||
|
||||
```none
|
||||
cargo build --release --no-default-features --features postgres
|
||||
```
|
||||
|
||||
### Running the Binary
|
||||
|
||||
The server is configured with command-line options or environment variables.
|
||||
See the `--help` output for full details.
|
||||
|
||||
For the SQLite binary, the `--data-dir` option or `DATA_DIR` environment
|
||||
variable specifies where the server should store its data.
|
||||
|
||||
For the Postgres binary, the `--connection` option or `CONNECTION` environment
|
||||
variable specifies the connection information, in the form of a [LibPQ-style
|
||||
connection
|
||||
URI](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING-URIS).
|
||||
Note that unlike LibPQ, the Rust client only supports `sslmode` values
|
||||
`disable`, `prefer`, and `require`, and will always validate CA hostnames and
|
||||
certificates when using TLS.
|
||||
|
||||
The remaining options are common to all binaries.
|
||||
|
||||
The `--listen` option specifies the interface and port the server listens on.
|
||||
It must contain an IP-Address or a DNS name and a port number. This option is
|
||||
mandatory, but can be repeated to specify multiple interfaces or ports. This
|
||||
value can be specified in environment variable `LISTEN`, as a comma-separated
|
||||
list of values.
|
||||
|
||||
By default, the server will allow all clients and create them in the database
|
||||
on first contact. There are two ways to limit the clients the server will
|
||||
interact with:
|
||||
|
||||
- To limit the accepted client IDs, specify them in the environment variable
|
||||
`CLIENT_ID`, as a comma-separated list of UUIDs. Client IDs can be specified
|
||||
with `--allow-client-id`, but this should not be used on shared systems, as
|
||||
command line arguments are visible to all users on the system. This convenient
|
||||
option is suitable for personal and small-scale deployments.
|
||||
|
||||
- To disable the automatic creation of clients, use the `--no-create-clients`
|
||||
flag or the `CREATE_CLIENTS=false` environment variable. You are now
|
||||
responsible for creating clients in the database manually, so this option is
|
||||
more suitable for large scale deployments. See [Integration](../integration.md)
|
||||
for more information on such deployments.
|
||||
|
||||
The server only logs errors by default. To add additional logging output, set
|
||||
environment variable `RUST_LOG` to `info` to get a log message for every
|
||||
request, or to `debug` to get more verbose debugging output.
|
||||
43
docs/src/usage/docker-compose.md
Normal file
43
docs/src/usage/docker-compose.md
Normal file
@ -0,0 +1,43 @@
|
||||
# Docker Compose
|
||||
|
||||
The
|
||||
[`docker-compose.yml`](https://raw.githubusercontent.com/GothenburgBitFactory/taskchampion-sync-server/refs/tags/v0.7.1/docker-compose.yml)
|
||||
file in this repository is sufficient to run taskchampion-sync-server,
|
||||
including setting up TLS certificates using Lets Encrypt, thanks to
|
||||
[Caddy](https://caddyserver.com/). This setup uses the SQLite backend, which is
|
||||
adequate for one or a few clients.
|
||||
|
||||
You will need a server with ports 80 and 443 open to the Internet and with a
|
||||
fixed, publicly-resolvable hostname. These ports must be available both to your
|
||||
Taskwarrior clients and to the Lets Encrypt servers.
|
||||
|
||||
On that server, download `docker-compose.yml` from the link above (it is pinned
|
||||
to the latest release) into the current directory. Then run
|
||||
|
||||
```sh
|
||||
TASKCHAMPION_SYNC_SERVER_HOSTNAME=taskwarrior.example.com \
|
||||
TASKCHAMPION_SYNC_SERVER_CLIENT_ID=your-client-id \
|
||||
docker compose up
|
||||
```
|
||||
|
||||
The `TASKCHAMPION_SYNC_SERVER_CLIENT_ID` limits the server to the given client
|
||||
ID; omit it to allow all client IDs. You may specify multiple client IDs
|
||||
separated by commas.
|
||||
|
||||
It can take a few minutes to obtain the certificate; the caddy container will
|
||||
log a message "certificate obtained successfully" when this is complete, or
|
||||
error messages if the process fails. Once this process is complete, configure
|
||||
your `.taskrc`'s to point to the server:
|
||||
|
||||
```none
|
||||
sync.server.url=https://taskwarrior.example.com
|
||||
sync.server.client_id=your-client-id
|
||||
sync.encryption_secret=your-encryption-secret
|
||||
```
|
||||
|
||||
The docker-compose images store data in a docker volume named
|
||||
`taskchampion-sync-server_data`. This volume contains all of the task data, as
|
||||
well as the TLS certificate information. It will persist over restarts, in a
|
||||
typical Docker installation. The docker containers will start automatically
|
||||
when the Docker dameon starts. See the docker-compose documentation for more
|
||||
information.
|
||||
57
docs/src/usage/docker-images.md
Normal file
57
docs/src/usage/docker-images.md
Normal file
@ -0,0 +1,57 @@
|
||||
# Docker Images
|
||||
|
||||
Every release of the server generates Docker images. One image is produced for
|
||||
each storage backend:
|
||||
- `ghcr.io/gothenburgbitfactory/taskchampion-sync-server` (SQLite)
|
||||
- `ghcr.io/gothenburgbitfactory/taskchampion-sync-server-postgres` (Postgres)
|
||||
|
||||
The image tags include `latest` for the latest release, and both minor and
|
||||
patch versions, e.g., `0.5` and `0.5.1`.
|
||||
|
||||
## Running the Image
|
||||
|
||||
At startup, each image applies some default values and runs the relevant binary
|
||||
directly. Configuration is typically by environment variables, all of which are
|
||||
documented in the `--help` output of the binaries. These include
|
||||
|
||||
- `RUST_LOG` - log level, one of `trace`, `debug`, `info`, `warn` and `error`.
|
||||
- `DATA_DIR` (SQLite only; default `/var/lib/taskchampion-sync-server/data`) -
|
||||
directory for the synced data.
|
||||
- `CONNECTION` (Postgres only) - Postgres connection information, in the form
|
||||
of a [LibPQ-style connection
|
||||
URI](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING-URIS).
|
||||
- `LISTEN` (default `0.0.0.0:8080`) - address and port on which to listen for
|
||||
HTTP requests.
|
||||
- `CLIENT_ID` - comma-separated list of client IDs that will be allowed, or
|
||||
empty to allow all clients.
|
||||
- `CREATE_CLIENTS` (default `true`) - if true, automatically create clients on
|
||||
first sync. If this is set to false, it is up to you to initialize clients in
|
||||
the DB.
|
||||
|
||||
### Example
|
||||
|
||||
```shell
|
||||
docker run -d \
|
||||
--name=taskchampion-sync-server \
|
||||
-p 8080:8080 \
|
||||
-e RUST_LOG=debug \
|
||||
-v /data/taskchampion-sync-server:/var/lib/taskchampion-sync-server/data \
|
||||
taskchampion-sync-server
|
||||
```
|
||||
|
||||
### Image-Specific Setup
|
||||
|
||||
The SQLite image is configured with `VOLUME
|
||||
/var/lib/taskchampion-sync-server/data`, persisting the task data in an
|
||||
anonymous Docker volume. It is recommended to put this on a named volume, or
|
||||
persistent storage in an environment like Kubernetes, so that it is not
|
||||
accidentally deleted.
|
||||
|
||||
The Postgres image does not automatically create its database schema. See the
|
||||
[integration section](../integration/pre-built.md) for more detail. This
|
||||
implementation is tested with Postgres version 17 but should work with any
|
||||
recent version.
|
||||
|
||||
Note that the Docker images do not implement TLS. The expectation is that
|
||||
another component, such as a Kubernetes ingress, will terminate the TLS
|
||||
connection and proxy HTTP traffic to the taskchampion-sync-server container.
|
||||
30
entrypoint-postgres.sh
Executable file
30
entrypoint-postgres.sh
Executable file
@ -0,0 +1,30 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
echo "starting entrypoint script..."
|
||||
if [ "$1" = "/bin/taskchampion-sync-server-postgres" ]; then
|
||||
: ${PUID:-1092}
|
||||
: ${PGID:-1092}
|
||||
|
||||
: ${DATA_DIR:=/var/lib/taskchampion-sync-server/data}
|
||||
export DATA_DIR
|
||||
echo "setting up data directory ${DATA_DIR}"
|
||||
mkdir -p "${DATA_DIR}"
|
||||
chown -R "${PUID}:${PGID}" "${DATA_DIR}"
|
||||
chmod -R 700 "${DATA_DIR}"
|
||||
|
||||
: ${LISTEN:=0.0.0.0:8080}
|
||||
export LISTEN
|
||||
echo "Listen set to ${LISTEN}"
|
||||
|
||||
if [ -n "${CLIENT_ID}" ]; then
|
||||
export CLIENT_ID
|
||||
echo "Limiting to client ID ${CLIENT_ID}"
|
||||
else
|
||||
unset CLIENT_ID
|
||||
fi
|
||||
|
||||
echo "Running server as user ${PUID} (group ${PGID})"
|
||||
exec su-exec "${PUID}":"${PGID}" "$@"
|
||||
else
|
||||
eval "${@}"
|
||||
fi
|
||||
@ -2,11 +2,14 @@
|
||||
set -e
|
||||
echo "starting entrypoint script..."
|
||||
if [ "$1" = "/bin/taskchampion-sync-server" ]; then
|
||||
: ${DATA_DIR:=/var/lib/taskchampion-sync-server}
|
||||
: ${PUID:-1092}
|
||||
: ${PGID:-1092}
|
||||
|
||||
: ${DATA_DIR:=/var/lib/taskchampion-sync-server/data}
|
||||
export DATA_DIR
|
||||
echo "setting up data directory ${DATA_DIR}"
|
||||
mkdir -p "${DATA_DIR}"
|
||||
chown -R taskchampion:users "${DATA_DIR}"
|
||||
chown -R ${PUID}:${PGID} "${DATA_DIR}"
|
||||
chmod -R 700 "${DATA_DIR}"
|
||||
|
||||
: ${LISTEN:=0.0.0.0:8080}
|
||||
@ -20,10 +23,8 @@ if [ "$1" = "/bin/taskchampion-sync-server" ]; then
|
||||
unset CLIENT_ID
|
||||
fi
|
||||
|
||||
if [ "$(id -u)" = "0" ]; then
|
||||
echo "Running server as user 'taskchampion'"
|
||||
exec su-exec taskchampion "$@"
|
||||
fi
|
||||
echo "Running server as user ${PUID} (group ${PGID})"
|
||||
exec su-exec "${PUID}":"${PGID}" "$@"
|
||||
else
|
||||
eval "${@}"
|
||||
fi
|
||||
30
postgres/Cargo.toml
Normal file
30
postgres/Cargo.toml
Normal file
@ -0,0 +1,30 @@
|
||||
[package]
|
||||
name = "taskchampion-sync-server-storage-postgres"
|
||||
version = "0.7.2-pre"
|
||||
authors = ["Dustin J. Mitchell <dustin@v.igoro.us>"]
|
||||
edition = "2021"
|
||||
description = "Postgres backend for TaskChampion-sync-server"
|
||||
homepage = "https://github.com/GothenburgBitFactory/taskchampion"
|
||||
repository = "https://github.com/GothenburgBitFactory/taskchampion-sync-server"
|
||||
license = "MIT"
|
||||
|
||||
[dependencies]
|
||||
anyhow.workspace = true
|
||||
async-trait.workspace = true
|
||||
bb8-postgres.workspace = true
|
||||
bb8.workspace = true
|
||||
chrono.workspace = true
|
||||
env_logger.workspace = true
|
||||
log.workspace = true
|
||||
taskchampion-sync-server-core = { path = "../core", version = "0.7.2-pre" }
|
||||
thiserror.workspace = true
|
||||
tokio-postgres.workspace = true
|
||||
tokio.workspace = true
|
||||
uuid.workspace = true
|
||||
openssl.workspace = true
|
||||
native-tls.workspace = true
|
||||
postgres-native-tls.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile.workspace = true
|
||||
pretty_assertions.workspace = true
|
||||
4
postgres/README.md
Normal file
4
postgres/README.md
Normal file
@ -0,0 +1,4 @@
|
||||
# taskchampion-sync-server-storage-postgres
|
||||
|
||||
This crate implements a Postgres storage backend for the
|
||||
`taskchampion-sync-server-core`.
|
||||
17
postgres/schema.sql
Normal file
17
postgres/schema.sql
Normal file
@ -0,0 +1,17 @@
|
||||
CREATE TABLE clients (
|
||||
client_id UUID PRIMARY KEY,
|
||||
latest_version_id UUID default '00000000-0000-0000-0000-000000000000',
|
||||
snapshot_version_id UUID,
|
||||
versions_since_snapshot INTEGER,
|
||||
snapshot_timestamp BIGINT,
|
||||
snapshot BYTEA);
|
||||
|
||||
CREATE TABLE versions (
|
||||
client_id UUID NOT NULL,
|
||||
FOREIGN KEY(client_id) REFERENCES clients (client_id) ON DELETE CASCADE,
|
||||
version_id UUID NOT NULL,
|
||||
parent_version_id UUID,
|
||||
history_segment BYTEA,
|
||||
CONSTRAINT versions_pkey PRIMARY KEY (client_id, version_id)
|
||||
);
|
||||
CREATE INDEX versions_by_parent ON versions (parent_version_id);
|
||||
715
postgres/src/lib.rs
Normal file
715
postgres/src/lib.rs
Normal file
@ -0,0 +1,715 @@
|
||||
//! This crate implements a Postgres storage backend for the TaskChampion sync server.
|
||||
//!
|
||||
//! Use the [`PostgresStorage`] type as an implementation of the [`Storage`] trait.
|
||||
//!
|
||||
//! This implementation is tested with Postgres version 17 but should work with any recent version.
|
||||
//!
|
||||
//! ## Schema Setup
|
||||
//!
|
||||
//! The database identified by the connection string must already exist and be set up with the
|
||||
//! following schema (also available in `postgres/schema.sql` in the repository):
|
||||
//!
|
||||
//! ```sql
|
||||
#![doc=include_str!("../schema.sql")]
|
||||
//! ```
|
||||
//!
|
||||
//! ## Integration with External Applications
|
||||
//!
|
||||
//! The schema is stable, and any changes to the schema will be made in a major version with
|
||||
//! migration instructions provided.
|
||||
//!
|
||||
//! An external application may:
|
||||
//! - Add additional tables to the database
|
||||
//! - Add additional columns to the `clients` table. If those columns do not have default
|
||||
//! values, calls to `Txn::new_client` will fail. It is possible to configure
|
||||
//! `taskchampion-sync-server` to never call this method.
|
||||
//! - Insert rows into the `clients` table, using default values for all columns except
|
||||
//! `client_id` and application-specific columns.
|
||||
//! - Delete rows from the `clients` table, noting that any associated task data
|
||||
//! is also deleted.
|
||||
|
||||
use anyhow::Context;
|
||||
use bb8::PooledConnection;
|
||||
use bb8_postgres::PostgresConnectionManager;
|
||||
use chrono::{TimeZone, Utc};
|
||||
use postgres_native_tls::MakeTlsConnector;
|
||||
use taskchampion_sync_server_core::{Client, Snapshot, Storage, StorageTxn, Version};
|
||||
use uuid::Uuid;
|
||||
|
||||
#[cfg(test)]
|
||||
mod testing;
|
||||
|
||||
/// An `ErrorSink` implementation that logs errors to the Rust log.
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub struct LogErrorSink;
|
||||
|
||||
impl LogErrorSink {
|
||||
fn new() -> Box<Self> {
|
||||
Box::new(Self)
|
||||
}
|
||||
}
|
||||
|
||||
impl bb8::ErrorSink<tokio_postgres::Error> for LogErrorSink {
|
||||
fn sink(&self, e: tokio_postgres::Error) {
|
||||
log::error!("Postgres connection error: {e}");
|
||||
}
|
||||
|
||||
fn boxed_clone(&self) -> Box<dyn bb8::ErrorSink<tokio_postgres::Error>> {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// A storage backend which uses Postgres.
|
||||
pub struct PostgresStorage {
|
||||
pool: bb8::Pool<PostgresConnectionManager<MakeTlsConnector>>,
|
||||
}
|
||||
|
||||
impl PostgresStorage {
|
||||
pub async fn new(connection_string: impl ToString) -> anyhow::Result<Self> {
|
||||
let connector = native_tls::TlsConnector::new()?;
|
||||
let connector = postgres_native_tls::MakeTlsConnector::new(connector);
|
||||
let manager = PostgresConnectionManager::new_from_stringlike(connection_string, connector)?;
|
||||
let pool = bb8::Pool::builder()
|
||||
.error_sink(LogErrorSink::new())
|
||||
.build(manager)
|
||||
.await?;
|
||||
Ok(Self { pool })
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
impl Storage for PostgresStorage {
    /// Begin a transaction scoped to the given client.
    ///
    /// Takes a connection from the pool and starts a SERIALIZABLE transaction
    /// on it; the returned `Txn` owns the connection until `commit` (or drop).
    async fn txn(&self, client_id: Uuid) -> anyhow::Result<Box<dyn StorageTxn + '_>> {
        // `get_owned` ties the connection's lifetime to the pool itself
        // ('static), not to this borrow of `self`, so the Txn can store it.
        let db_client = self.pool.get_owned().await?;

        db_client
            .execute("BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE", &[])
            .await?;

        // NOTE(review): if the Txn is dropped without calling `commit`, the
        // connection goes back to the pool with this transaction still open --
        // confirm that the pool/manager resets connection state in that case.
        Ok(Box::new(Txn {
            client_id,
            db_client: Some(db_client),
        }))
    }
}
|
||||
|
||||
struct Txn {
|
||||
client_id: Uuid,
|
||||
/// The DB client or, if `commit` has been called, None. This ensures queries aren't executed
|
||||
/// after commit, and also frees connections back to the pool as quickly as possible.
|
||||
db_client: Option<PooledConnection<'static, PostgresConnectionManager<MakeTlsConnector>>>,
|
||||
}
|
||||
|
||||
impl Txn {
|
||||
/// Get the db_client, or panic if it is gone (after commit).
|
||||
fn db_client(&self) -> &tokio_postgres::Client {
|
||||
let Some(db_client) = &self.db_client else {
|
||||
panic!("Cannot use a postgres Txn after commit");
|
||||
};
|
||||
db_client
|
||||
}
|
||||
|
||||
/// Implementation for queries from the versions table
|
||||
async fn get_version_impl(
|
||||
&mut self,
|
||||
query: &'static str,
|
||||
client_id: Uuid,
|
||||
version_id_arg: Uuid,
|
||||
) -> anyhow::Result<Option<Version>> {
|
||||
Ok(self
|
||||
.db_client()
|
||||
.query_opt(query, &[&version_id_arg, &client_id])
|
||||
.await
|
||||
.context("error getting version")?
|
||||
.map(|r| Version {
|
||||
version_id: r.get(0),
|
||||
parent_version_id: r.get(1),
|
||||
history_segment: r.get("history_segment"),
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait(?Send)]
impl StorageTxn for Txn {
    /// Fetch this client's record, if any.
    ///
    /// Snapshot metadata is returned only when all three snapshot columns
    /// (timestamp, versions-since, version-id) are non-NULL.
    async fn get_client(&mut self) -> anyhow::Result<Option<Client>> {
        Ok(self
            .db_client()
            .query_opt(
                "SELECT
                    latest_version_id,
                    snapshot_timestamp,
                    versions_since_snapshot,
                    snapshot_version_id
                FROM clients
                WHERE client_id = $1
                LIMIT 1",
                &[&self.client_id],
            )
            .await
            .context("error getting client")?
            .map(|r| {
                let latest_version_id: Uuid = r.get(0);
                let snapshot_timestamp: Option<i64> = r.get(1);
                let versions_since_snapshot: Option<i32> = r.get(2);
                let snapshot_version_id: Option<Uuid> = r.get(3);

                // if all of the relevant fields are non-NULL, return a snapshot
                let snapshot = match (
                    snapshot_timestamp,
                    versions_since_snapshot,
                    snapshot_version_id,
                ) {
                    (Some(ts), Some(vs), Some(v)) => Some(Snapshot {
                        version_id: v,
                        // Timestamps are stored as integer seconds since the epoch.
                        timestamp: Utc.timestamp_opt(ts, 0).unwrap(),
                        versions_since: vs as u32,
                    }),
                    _ => None,
                };
                Client {
                    latest_version_id,
                    snapshot,
                }
            }))
    }

    /// Create a new client row with the given latest version.
    ///
    /// Remaining columns take their defaults; per the crate docs, this fails if
    /// an external application added non-defaulted columns to `clients`.
    async fn new_client(&mut self, latest_version_id: Uuid) -> anyhow::Result<()> {
        self.db_client()
            .execute(
                "INSERT INTO clients (client_id, latest_version_id) VALUES ($1, $2)",
                &[&self.client_id, &latest_version_id],
            )
            .await
            .context("error creating/updating client")?;
        Ok(())
    }

    /// Store a snapshot (metadata and payload) for this client, replacing any
    /// existing snapshot.
    async fn set_snapshot(&mut self, snapshot: Snapshot, data: Vec<u8>) -> anyhow::Result<()> {
        // Store the timestamp as integer seconds since the epoch.
        let timestamp = snapshot.timestamp.timestamp();
        self.db_client()
            .execute(
                "UPDATE clients
                SET snapshot_version_id = $1,
                    versions_since_snapshot = $2,
                    snapshot_timestamp = $3,
                    snapshot = $4
                WHERE client_id = $5",
                &[
                    &snapshot.version_id,
                    // Stored as i32; the Snapshot type carries u32.
                    &(snapshot.versions_since as i32),
                    &timestamp,
                    &data,
                    &self.client_id,
                ],
            )
            .await
            .context("error setting snapshot")?;
        Ok(())
    }

    /// Fetch the snapshot payload, but only if the stored snapshot matches the
    /// requested `version_id`; otherwise None.
    async fn get_snapshot_data(&mut self, version_id: Uuid) -> anyhow::Result<Option<Vec<u8>>> {
        Ok(self
            .db_client()
            .query_opt(
                "SELECT snapshot
                FROM clients
                WHERE client_id = $1 and snapshot_version_id = $2
                LIMIT 1",
                &[&self.client_id, &version_id],
            )
            .await
            .context("error getting snapshot data")?
            .map(|r| r.get(0)))
    }

    /// Look up the version whose parent is `parent_version_id`, if any.
    async fn get_version_by_parent(
        &mut self,
        parent_version_id: Uuid,
    ) -> anyhow::Result<Option<Version>> {
        // get_version_impl binds $1 to the final argument, $2 to client_id.
        self.get_version_impl(
            "SELECT version_id, parent_version_id, history_segment
            FROM versions
            WHERE parent_version_id = $1 AND client_id = $2",
            self.client_id,
            parent_version_id,
        )
        .await
    }

    /// Look up a version by its own id, if any.
    async fn get_version(&mut self, version_id: Uuid) -> anyhow::Result<Option<Version>> {
        self.get_version_impl(
            "SELECT version_id, parent_version_id, history_segment
            FROM versions
            WHERE version_id = $1 AND client_id = $2",
            self.client_id,
            version_id,
        )
        .await
    }

    /// Insert a new version and advance the client's `latest_version_id`.
    ///
    /// Fails if `parent_version_id` does not match the client's current
    /// `latest_version_id` -- except that a client whose `latest_version_id`
    /// is the nil UUID (no versions yet) accepts any parent.
    async fn add_version(
        &mut self,
        version_id: Uuid,
        parent_version_id: Uuid,
        history_segment: Vec<u8>,
    ) -> anyhow::Result<()> {
        self.db_client()
            .execute(
                "INSERT INTO versions (version_id, client_id, parent_version_id, history_segment)
                VALUES ($1, $2, $3, $4)",
                &[
                    &version_id,
                    &self.client_id,
                    &parent_version_id,
                    &history_segment,
                ],
            )
            .await
            .context("error inserting new version")?;
        // Conditionally advance latest_version_id: the WHERE clause only
        // matches when the current latest is the parent, or is the nil UUID.
        // NOTE: if versions_since_snapshot is NULL, SQL NULL arithmetic leaves
        // it NULL here.
        let rows_modified = self
            .db_client()
            .execute(
                "UPDATE clients
                SET latest_version_id = $1,
                    versions_since_snapshot = versions_since_snapshot + 1
                WHERE client_id = $2 and (latest_version_id = $3 or latest_version_id = $4)",
                &[
                    &version_id,
                    &self.client_id,
                    &parent_version_id,
                    &Uuid::nil(),
                ],
            )
            .await
            .context("error updating latest_version_id")?;

        // If no rows were modified, this operation failed.
        if rows_modified == 0 {
            anyhow::bail!("clients.latest_version_id does not match parent_version_id");
        }
        Ok(())
    }

    /// Commit the transaction and release the connection back to the pool.
    ///
    /// After this, any further use of the Txn panics (see `db_client`).
    async fn commit(&mut self) -> anyhow::Result<()> {
        self.db_client().execute("COMMIT", &[]).await?;
        // Dropping the pooled connection returns it to the pool.
        self.db_client = None;
        Ok(())
    }
}
|
||||
|
||||
#[cfg(test)]
mod test {
    use super::*;
    use crate::testing::with_db;

    /// Insert a client row directly, with all defaults, returning its id.
    async fn make_client(db_client: &tokio_postgres::Client) -> anyhow::Result<Uuid> {
        let client_id = Uuid::new_v4();
        db_client
            .execute("insert into clients (client_id) values ($1)", &[&client_id])
            .await?;
        Ok(client_id)
    }

    /// Insert a version row directly, returning its new version id.
    async fn make_version(
        db_client: &tokio_postgres::Client,
        client_id: Uuid,
        parent_version_id: Uuid,
        history_segment: &[u8],
    ) -> anyhow::Result<Uuid> {
        let version_id = Uuid::new_v4();
        db_client
            .execute(
                "insert into versions
                (version_id, client_id, parent_version_id, history_segment)
                values ($1, $2, $3, $4)",
                &[
                    &version_id,
                    &client_id,
                    &parent_version_id,
                    &history_segment,
                ],
            )
            .await?;
        Ok(version_id)
    }

    /// Directly set a client's latest_version_id.
    async fn set_client_latest_version_id(
        db_client: &tokio_postgres::Client,
        client_id: Uuid,
        latest_version_id: Uuid,
    ) -> anyhow::Result<()> {
        db_client
            .execute(
                "update clients set latest_version_id = $1 where client_id = $2",
                &[&latest_version_id, &client_id],
            )
            .await?;
        Ok(())
    }

    /// Directly set all of a client's snapshot columns.
    async fn set_client_snapshot(
        db_client: &tokio_postgres::Client,
        client_id: Uuid,
        snapshot_version_id: Uuid,
        versions_since_snapshot: u32,
        snapshot_timestamp: i64,
        snapshot: &[u8],
    ) -> anyhow::Result<()> {
        db_client
            .execute(
                "
                update clients
                set snapshot_version_id = $1,
                    versions_since_snapshot = $2,
                    snapshot_timestamp = $3,
                    snapshot = $4
                where client_id = $5",
                &[
                    &snapshot_version_id,
                    &(versions_since_snapshot as i32),
                    &snapshot_timestamp,
                    &snapshot,
                    &client_id,
                ],
            )
            .await?;
        Ok(())
    }

    /// An unknown client id yields `None` from get_client.
    #[tokio::test]
    async fn test_get_client_none() -> anyhow::Result<()> {
        with_db(async |connection_string, _db_client| {
            let storage = PostgresStorage::new(connection_string).await?;
            let client_id = Uuid::new_v4();
            let mut txn = storage.txn(client_id).await?;
            assert_eq!(txn.get_client().await?, None);
            Ok(())
        })
        .await
    }

    /// A freshly-created client has the nil latest version and no snapshot.
    #[tokio::test]
    async fn test_get_client_exists_empty() -> anyhow::Result<()> {
        with_db(async |connection_string, db_client| {
            let storage = PostgresStorage::new(connection_string).await?;
            let client_id = make_client(&db_client).await?;
            let mut txn = storage.txn(client_id).await?;
            assert_eq!(
                txn.get_client().await?,
                Some(Client {
                    latest_version_id: Uuid::nil(),
                    snapshot: None
                })
            );
            Ok(())
        })
        .await
    }

    /// get_client reflects a directly-set latest_version_id.
    #[tokio::test]
    async fn test_get_client_exists_latest() -> anyhow::Result<()> {
        with_db(async |connection_string, db_client| {
            let storage = PostgresStorage::new(connection_string).await?;
            let client_id = make_client(&db_client).await?;
            let latest_version_id = Uuid::new_v4();
            set_client_latest_version_id(&db_client, client_id, latest_version_id).await?;
            let mut txn = storage.txn(client_id).await?;
            assert_eq!(
                txn.get_client().await?,
                Some(Client {
                    latest_version_id,
                    snapshot: None
                })
            );
            Ok(())
        })
        .await
    }

    /// get_client returns snapshot metadata when all snapshot columns are set.
    #[tokio::test]
    async fn test_get_client_exists_with_snapshot() -> anyhow::Result<()> {
        with_db(async |connection_string, db_client| {
            let storage = PostgresStorage::new(connection_string).await?;
            let client_id = make_client(&db_client).await?;
            let snapshot_version_id = Uuid::new_v4();
            let versions_since_snapshot = 10;
            let snapshot_timestamp = 10000000;
            let snapshot = b"abcd";
            set_client_snapshot(
                &db_client,
                client_id,
                snapshot_version_id,
                versions_since_snapshot,
                snapshot_timestamp,
                snapshot,
            )
            .await?;
            let mut txn = storage.txn(client_id).await?;
            assert_eq!(
                txn.get_client().await?,
                Some(Client {
                    latest_version_id: Uuid::nil(),
                    snapshot: Some(Snapshot {
                        version_id: snapshot_version_id,
                        timestamp: Utc.timestamp_opt(snapshot_timestamp, 0).unwrap(),
                        versions_since: versions_since_snapshot,
                    })
                })
            );
            Ok(())
        })
        .await
    }

    /// new_client's insert is invisible to other transactions until commit.
    #[tokio::test]
    async fn test_new_client() -> anyhow::Result<()> {
        with_db(async |connection_string, _db_client| {
            let storage = PostgresStorage::new(connection_string).await?;
            let client_id = Uuid::new_v4();
            let latest_version_id = Uuid::new_v4();

            let mut txn1 = storage.txn(client_id).await?;
            txn1.new_client(latest_version_id).await?;

            // Client is not visible yet as txn1 is not committed.
            let mut txn2 = storage.txn(client_id).await?;
            assert_eq!(txn2.get_client().await?, None);

            txn1.commit().await?;

            // Client is now visible.
            let mut txn2 = storage.txn(client_id).await?;
            assert_eq!(
                txn2.get_client().await?,
                Some(Client {
                    latest_version_id,
                    snapshot: None
                })
            );

            Ok(())
        })
        .await
    }

    /// set_snapshot persists both metadata (via get_client) and payload
    /// (verified by reading the raw column).
    #[tokio::test]
    async fn test_set_snapshot() -> anyhow::Result<()> {
        with_db(async |connection_string, db_client| {
            let storage = PostgresStorage::new(connection_string).await?;
            let client_id = make_client(&db_client).await?;
            let mut txn = storage.txn(client_id).await?;
            let snapshot_version_id = Uuid::new_v4();
            let versions_since_snapshot = 10;
            let snapshot_timestamp = 10000000;
            let snapshot = b"abcd";

            txn.set_snapshot(
                Snapshot {
                    version_id: snapshot_version_id,
                    timestamp: Utc.timestamp_opt(snapshot_timestamp, 0).unwrap(),
                    versions_since: versions_since_snapshot,
                },
                snapshot.to_vec(),
            )
            .await?;
            txn.commit().await?;

            txn = storage.txn(client_id).await?;
            assert_eq!(
                txn.get_client().await?,
                Some(Client {
                    latest_version_id: Uuid::nil(),
                    snapshot: Some(Snapshot {
                        version_id: snapshot_version_id,
                        timestamp: Utc.timestamp_opt(snapshot_timestamp, 0).unwrap(),
                        versions_since: versions_since_snapshot,
                    })
                })
            );

            let row = db_client
                .query_one(
                    "select snapshot from clients where client_id = $1",
                    &[&client_id],
                )
                .await?;
            assert_eq!(row.get::<_, &[u8]>(0), b"abcd");

            Ok(())
        })
        .await
    }

    /// A client with no snapshot yields no snapshot data.
    #[tokio::test]
    async fn test_get_snapshot_none() -> anyhow::Result<()> {
        with_db(async |connection_string, db_client| {
            let storage = PostgresStorage::new(connection_string).await?;
            let client_id = make_client(&db_client).await?;
            let mut txn = storage.txn(client_id).await?;
            assert_eq!(txn.get_snapshot_data(Uuid::new_v4()).await?, None);

            Ok(())
        })
        .await
    }

    /// Snapshot data is only returned for the matching snapshot version id.
    #[tokio::test]
    async fn test_get_snapshot_mismatched_version() -> anyhow::Result<()> {
        with_db(async |connection_string, db_client| {
            let storage = PostgresStorage::new(connection_string).await?;
            let client_id = make_client(&db_client).await?;
            let mut txn = storage.txn(client_id).await?;

            let snapshot_version_id = Uuid::new_v4();
            let versions_since_snapshot = 10;
            let snapshot_timestamp = 10000000;
            let snapshot = b"abcd";
            txn.set_snapshot(
                Snapshot {
                    version_id: snapshot_version_id,
                    timestamp: Utc.timestamp_opt(snapshot_timestamp, 0).unwrap(),
                    versions_since: versions_since_snapshot,
                },
                snapshot.to_vec(),
            )
            .await?;

            // Query with a different (random) version id; no data expected.
            assert_eq!(txn.get_snapshot_data(Uuid::new_v4()).await?, None);

            Ok(())
        })
        .await
    }

    /// Versions can be found by parent id and by version id, and lookups for
    /// unknown ids return None.
    #[tokio::test]
    async fn test_get_version() -> anyhow::Result<()> {
        with_db(async |connection_string, db_client| {
            let storage = PostgresStorage::new(connection_string).await?;
            let client_id = make_client(&db_client).await?;
            let parent_version_id = Uuid::new_v4();
            let version_id = make_version(&db_client, client_id, parent_version_id, b"v1").await?;

            let mut txn = storage.txn(client_id).await?;

            // Different parent doesn't exist.
            assert_eq!(txn.get_version_by_parent(Uuid::new_v4()).await?, None);

            // Different version doesn't exist.
            assert_eq!(txn.get_version(Uuid::new_v4()).await?, None);

            let version = Version {
                version_id,
                parent_version_id,
                history_segment: b"v1".to_vec(),
            };

            // Version found by parent.
            assert_eq!(
                txn.get_version_by_parent(parent_version_id).await?,
                Some(version.clone())
            );

            // Version found by ID.
            assert_eq!(txn.get_version(version_id).await?, Some(version));

            Ok(())
        })
        .await
    }

    /// add_version with a nil parent succeeds for a fresh client and the new
    /// version is immediately readable within the transaction.
    #[tokio::test]
    async fn test_add_version() -> anyhow::Result<()> {
        with_db(async |connection_string, db_client| {
            let storage = PostgresStorage::new(connection_string).await?;
            let client_id = make_client(&db_client).await?;
            let mut txn = storage.txn(client_id).await?;
            let version_id = Uuid::new_v4();
            txn.add_version(version_id, Uuid::nil(), b"v1".to_vec())
                .await?;
            assert_eq!(
                txn.get_version(version_id).await?,
                Some(Version {
                    version_id,
                    parent_version_id: Uuid::nil(),
                    history_segment: b"v1".to_vec()
                })
            );
            Ok(())
        })
        .await
    }

    #[tokio::test]
    /// When an add_version call specifies an incorrect `parent_version_id`, it fails. This is
    /// typically avoided by calling `get_client` beforehand, which (due to repeatable reads)
    /// allows the caller to check the `latest_version_id` before calling `add_version`.
    async fn test_add_version_mismatch() -> anyhow::Result<()> {
        with_db(async |connection_string, db_client| {
            let storage = PostgresStorage::new(connection_string).await?;
            let client_id = make_client(&db_client).await?;
            let latest_version_id = Uuid::new_v4();
            set_client_latest_version_id(&db_client, client_id, latest_version_id).await?;

            let mut txn = storage.txn(client_id).await?;
            let version_id = Uuid::new_v4();
            let parent_version_id = Uuid::new_v4(); // != latest_version_id
            let res = txn
                .add_version(version_id, parent_version_id, b"v1".to_vec())
                .await;
            assert!(res.is_err());
            Ok(())
        })
        .await
    }

    #[tokio::test]
    /// Adding versions to two different clients can proceed concurrently.
    async fn test_add_version_no_conflict_different_clients() -> anyhow::Result<()> {
        with_db(async |connection_string, db_client| {
            let storage = PostgresStorage::new(connection_string).await?;

            // Clients 1 and 2 do not interfere with each other; if these are the same client, then
            // this will deadlock as one transaction waits for the other. If the postgres storage
            // implementation serialized _all_ transactions across clients, that would limit its
            // scalability.
            //
            // So the assertion here is "does not deadlock".

            let client_id1 = make_client(&db_client).await?;
            let mut txn1 = storage.txn(client_id1).await?;
            let version_id1 = Uuid::new_v4();
            txn1.add_version(version_id1, Uuid::nil(), b"v1".to_vec())
                .await?;

            let client_id2 = make_client(&db_client).await?;
            let mut txn2 = storage.txn(client_id2).await?;
            let version_id2 = Uuid::new_v4();
            txn2.add_version(version_id2, Uuid::nil(), b"v2".to_vec())
                .await?;

            txn1.commit().await?;
            txn2.commit().await?;

            Ok(())
        })
        .await
    }

    #[tokio::test]
    /// When an add_version call specifies a `parent_version_id` that does not exist in the
    /// DB, but no other versions exist, the call succeeds.
    async fn test_add_version_no_history() -> anyhow::Result<()> {
        with_db(async |connection_string, db_client| {
            let storage = PostgresStorage::new(connection_string).await?;
            let client_id = make_client(&db_client).await?;

            let mut txn = storage.txn(client_id).await?;
            let version_id = Uuid::new_v4();
            let parent_version_id = Uuid::new_v4();
            txn.add_version(version_id, parent_version_id, b"v1".to_vec())
                .await?;
            Ok(())
        })
        .await
    }
}
|
||||
76
postgres/src/testing.rs
Normal file
76
postgres/src/testing.rs
Normal file
@ -0,0 +1,76 @@
|
||||
use std::{future::Future, sync::LazyLock};
use tokio::{sync::Mutex, task};
use tokio_postgres::NoTls;

// An async mutex used to ensure exclusive access to the database.
static DB_LOCK: LazyLock<Mutex<()>> = std::sync::LazyLock::new(|| Mutex::new(()));

/// Call the given function with a DB client, pointing to an initialized DB.
///
/// This serializes use of the database so that two tests are not simultaneously
/// modifying it.
///
/// The function's future need not be `Send`.
pub(crate) async fn with_db<F, FUT>(f: F) -> anyhow::Result<()>
where
    F: FnOnce(String, tokio_postgres::Client) -> FUT,
    FUT: Future<Output = anyhow::Result<()>> + 'static,
{
    // Initialize logging once; ignore the error if another test already did.
    let _ = env_logger::builder().is_test(true).try_init();

    let Ok(connection_string) = std::env::var("TEST_DB_URL") else {
        // If this is run in a GitHub action, then we really don't want to skip the tests.
        if std::env::var("GITHUB_ACTIONS").is_ok() {
            panic!("TEST_DB_URL must be set in GitHub actions");
        }
        // Skip the test.
        return Ok(());
    };

    // Serialize use of the DB.
    let _db_guard = DB_LOCK.lock().await;

    let local_set = task::LocalSet::new();
    local_set
        .run_until(async move {
            // tokio_postgres splits into a client and a background connection
            // task; the connection must be polled for the client to work.
            let (client, connection) = tokio_postgres::connect(&connection_string, NoTls).await?;
            let conn_join_handle = tokio::spawn(async move {
                if let Err(e) = connection.await {
                    log::warn!("connection error: {e}");
                }
            });

            // Set up the DB: start from an empty `public` schema, then apply
            // the crate's schema.
            client
                .execute("drop schema if exists public cascade", &[])
                .await?;
            client.execute("create schema public", &[]).await?;
            client.simple_query(include_str!("../schema.sql")).await?;

            // Run the test in its own task, so that we can handle all failure cases. This task must be
            // local because the future typically uses `StorageTxn` which is not `Send`.
            let test_join_handle = tokio::task::spawn_local(f(connection_string.clone(), client));

            // Wait for the test task to complete.
            let test_res = test_join_handle.await?;

            // The client was moved into the test future and has been dropped by
            // now, so the connection task can finish.
            conn_join_handle.await?;

            // Clean up the DB.

            // A fresh client/connection pair, since the previous one is gone.
            let (client, connection) = tokio_postgres::connect(&connection_string, NoTls).await?;
            let conn_join_handle = tokio::spawn(async move {
                if let Err(e) = connection.await {
                    log::warn!("connection error: {e}");
                }
            });
            client
                .execute("drop schema if exists public cascade", &[])
                .await?;
            // Drop the client explicitly so the connection task can complete.
            drop(client);
            conn_join_handle.await?;

            // Report the test's own result only after cleanup succeeded.
            test_res
        })
        .await
}
|
||||
@ -1,13 +1,29 @@
|
||||
[package]
|
||||
name = "taskchampion-sync-server"
|
||||
version = "0.6.1"
|
||||
version = "0.7.2-pre"
|
||||
authors = ["Dustin J. Mitchell <dustin@mozilla.com>"]
|
||||
edition = "2021"
|
||||
publish = false
|
||||
|
||||
[features]
|
||||
# By default, only build the SQLite backend.
|
||||
default = ["sqlite"]
|
||||
sqlite = ["dep:taskchampion-sync-server-storage-sqlite"]
|
||||
postgres = ["dep:taskchampion-sync-server-storage-postgres"]
|
||||
|
||||
[[bin]]
|
||||
# The simple binary name is the SQLite build.
|
||||
name = "taskchampion-sync-server"
|
||||
required-features = ["sqlite"]
|
||||
|
||||
[[bin]]
|
||||
name = "taskchampion-sync-server-postgres"
|
||||
required-features = ["postgres"]
|
||||
|
||||
[dependencies]
|
||||
taskchampion-sync-server-core = { path = "../core" }
|
||||
taskchampion-sync-server-storage-sqlite = { path = "../sqlite" }
|
||||
taskchampion-sync-server-storage-sqlite = { path = "../sqlite", optional = true }
|
||||
taskchampion-sync-server-storage-postgres = { path = "../postgres", optional = true }
|
||||
uuid.workspace = true
|
||||
actix-web.workspace = true
|
||||
anyhow.workspace = true
|
||||
|
||||
@ -49,17 +49,20 @@ pub(crate) async fn service(
|
||||
server_state
|
||||
.server
|
||||
.add_snapshot(client_id, version_id, body.to_vec())
|
||||
.await
|
||||
.map_err(server_error_to_actix)?;
|
||||
Ok(HttpResponse::Ok().body(""))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use crate::api::CLIENT_ID_HEADER;
|
||||
use crate::WebServer;
|
||||
use crate::{
|
||||
api::CLIENT_ID_HEADER,
|
||||
web::{WebConfig, WebServer},
|
||||
};
|
||||
use actix_web::{http::StatusCode, test, App};
|
||||
use pretty_assertions::assert_eq;
|
||||
use taskchampion_sync_server_core::{InMemoryStorage, Storage, NIL_VERSION_ID};
|
||||
use taskchampion_sync_server_core::{InMemoryStorage, ServerConfig, Storage, NIL_VERSION_ID};
|
||||
use uuid::Uuid;
|
||||
|
||||
#[actix_rt::test]
|
||||
@ -70,17 +73,17 @@ mod test {
|
||||
|
||||
// set up the storage contents..
|
||||
{
|
||||
let mut txn = storage.txn(client_id).unwrap();
|
||||
txn.new_client(version_id).unwrap();
|
||||
txn.add_version(version_id, NIL_VERSION_ID, vec![])?;
|
||||
txn.commit()?;
|
||||
let mut txn = storage.txn(client_id).await.unwrap();
|
||||
txn.new_client(version_id).await.unwrap();
|
||||
txn.add_version(version_id, NIL_VERSION_ID, vec![]).await?;
|
||||
txn.commit().await?;
|
||||
}
|
||||
|
||||
let server = WebServer::new(Default::default(), None, storage);
|
||||
let server = WebServer::new(ServerConfig::default(), WebConfig::default(), storage);
|
||||
let app = App::new().configure(|sc| server.config(sc));
|
||||
let app = test::init_service(app).await;
|
||||
|
||||
let uri = format!("/v1/client/add-snapshot/{}", version_id);
|
||||
let uri = format!("/v1/client/add-snapshot/{version_id}");
|
||||
let req = test::TestRequest::post()
|
||||
.uri(&uri)
|
||||
.insert_header(("Content-Type", "application/vnd.taskchampion.snapshot"))
|
||||
@ -114,17 +117,17 @@ mod test {
|
||||
|
||||
// set up the storage contents..
|
||||
{
|
||||
let mut txn = storage.txn(client_id).unwrap();
|
||||
txn.new_client(NIL_VERSION_ID).unwrap();
|
||||
txn.commit().unwrap();
|
||||
let mut txn = storage.txn(client_id).await.unwrap();
|
||||
txn.new_client(NIL_VERSION_ID).await.unwrap();
|
||||
txn.commit().await.unwrap();
|
||||
}
|
||||
|
||||
let server = WebServer::new(Default::default(), None, storage);
|
||||
let server = WebServer::new(ServerConfig::default(), WebConfig::default(), storage);
|
||||
let app = App::new().configure(|sc| server.config(sc));
|
||||
let app = test::init_service(app).await;
|
||||
|
||||
// add a snapshot for a nonexistent version
|
||||
let uri = format!("/v1/client/add-snapshot/{}", version_id);
|
||||
let uri = format!("/v1/client/add-snapshot/{version_id}");
|
||||
let req = test::TestRequest::post()
|
||||
.uri(&uri)
|
||||
.append_header(("Content-Type", "application/vnd.taskchampion.snapshot"))
|
||||
@ -149,11 +152,11 @@ mod test {
|
||||
let client_id = Uuid::new_v4();
|
||||
let version_id = Uuid::new_v4();
|
||||
let storage = InMemoryStorage::new();
|
||||
let server = WebServer::new(Default::default(), None, storage);
|
||||
let server = WebServer::new(ServerConfig::default(), WebConfig::default(), storage);
|
||||
let app = App::new().configure(|sc| server.config(sc));
|
||||
let app = test::init_service(app).await;
|
||||
|
||||
let uri = format!("/v1/client/add-snapshot/{}", version_id);
|
||||
let uri = format!("/v1/client/add-snapshot/{version_id}");
|
||||
let req = test::TestRequest::post()
|
||||
.uri(&uri)
|
||||
.append_header(("Content-Type", "not/correct"))
|
||||
@ -169,11 +172,11 @@ mod test {
|
||||
let client_id = Uuid::new_v4();
|
||||
let version_id = Uuid::new_v4();
|
||||
let storage = InMemoryStorage::new();
|
||||
let server = WebServer::new(Default::default(), None, storage);
|
||||
let server = WebServer::new(ServerConfig::default(), WebConfig::default(), storage);
|
||||
let app = App::new().configure(|sc| server.config(sc));
|
||||
let app = test::init_service(app).await;
|
||||
|
||||
let uri = format!("/v1/client/add-snapshot/{}", version_id);
|
||||
let uri = format!("/v1/client/add-snapshot/{version_id}");
|
||||
let req = test::TestRequest::post()
|
||||
.uri(&uri)
|
||||
.append_header((
|
||||
|
||||
@ -60,6 +60,7 @@ pub(crate) async fn service(
|
||||
return match server_state
|
||||
.server
|
||||
.add_version(client_id, parent_version_id, body.to_vec())
|
||||
.await
|
||||
{
|
||||
Ok((AddVersionResult::Ok(version_id), snap_urgency)) => {
|
||||
let mut rb = HttpResponse::Ok();
|
||||
@ -80,14 +81,17 @@ pub(crate) async fn service(
|
||||
rb.append_header((PARENT_VERSION_ID_HEADER, parent_version_id.to_string()));
|
||||
Ok(rb.finish())
|
||||
}
|
||||
Err(ServerError::NoSuchClient) => {
|
||||
Err(ServerError::NoSuchClient) if server_state.web_config.create_clients => {
|
||||
// Create a new client and repeat the `add_version` call.
|
||||
let mut txn = server_state
|
||||
.server
|
||||
.txn(client_id)
|
||||
.await
|
||||
.map_err(server_error_to_actix)?;
|
||||
txn.new_client(NIL_VERSION_ID).map_err(failure_to_ise)?;
|
||||
txn.commit().map_err(failure_to_ise)?;
|
||||
txn.new_client(NIL_VERSION_ID)
|
||||
.await
|
||||
.map_err(failure_to_ise)?;
|
||||
txn.commit().await.map_err(failure_to_ise)?;
|
||||
continue;
|
||||
}
|
||||
Err(e) => Err(server_error_to_actix(e)),
|
||||
@ -97,11 +101,13 @@ pub(crate) async fn service(
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use crate::api::CLIENT_ID_HEADER;
|
||||
use crate::WebServer;
|
||||
use crate::{
|
||||
api::CLIENT_ID_HEADER,
|
||||
web::{WebConfig, WebServer},
|
||||
};
|
||||
use actix_web::{http::StatusCode, test, App};
|
||||
use pretty_assertions::assert_eq;
|
||||
use taskchampion_sync_server_core::{InMemoryStorage, Storage};
|
||||
use taskchampion_sync_server_core::{InMemoryStorage, ServerConfig, Storage};
|
||||
use uuid::Uuid;
|
||||
|
||||
#[actix_rt::test]
|
||||
@ -113,16 +119,16 @@ mod test {
|
||||
|
||||
// set up the storage contents..
|
||||
{
|
||||
let mut txn = storage.txn(client_id).unwrap();
|
||||
txn.new_client(Uuid::nil()).unwrap();
|
||||
txn.commit().unwrap();
|
||||
let mut txn = storage.txn(client_id).await.unwrap();
|
||||
txn.new_client(Uuid::nil()).await.unwrap();
|
||||
txn.commit().await.unwrap();
|
||||
}
|
||||
|
||||
let server = WebServer::new(Default::default(), None, storage);
|
||||
let server = WebServer::new(ServerConfig::default(), WebConfig::default(), storage);
|
||||
let app = App::new().configure(|sc| server.config(sc));
|
||||
let app = test::init_service(app).await;
|
||||
|
||||
let uri = format!("/v1/client/add-version/{}", parent_version_id);
|
||||
let uri = format!("/v1/client/add-version/{parent_version_id}");
|
||||
let req = test::TestRequest::post()
|
||||
.uri(&uri)
|
||||
.append_header((
|
||||
@ -152,11 +158,15 @@ mod test {
|
||||
let client_id = Uuid::new_v4();
|
||||
let version_id = Uuid::new_v4();
|
||||
let parent_version_id = Uuid::new_v4();
|
||||
let server = WebServer::new(Default::default(), None, InMemoryStorage::new());
|
||||
let server = WebServer::new(
|
||||
ServerConfig::default(),
|
||||
WebConfig::default(),
|
||||
InMemoryStorage::new(),
|
||||
);
|
||||
let app = App::new().configure(|sc| server.config(sc));
|
||||
let app = test::init_service(app).await;
|
||||
|
||||
let uri = format!("/v1/client/add-version/{}", parent_version_id);
|
||||
let uri = format!("/v1/client/add-version/{parent_version_id}");
|
||||
let req = test::TestRequest::post()
|
||||
.uri(&uri)
|
||||
.append_header((
|
||||
@ -183,13 +193,43 @@ mod test {
|
||||
|
||||
// Check that the client really was created
|
||||
{
|
||||
let mut txn = server.server_state.server.txn(client_id).unwrap();
|
||||
let client = txn.get_client().unwrap().unwrap();
|
||||
let mut txn = server.server_state.server.txn(client_id).await.unwrap();
|
||||
let client = txn.get_client().await.unwrap().unwrap();
|
||||
assert_eq!(client.latest_version_id, new_version_id);
|
||||
assert_eq!(client.snapshot, None);
|
||||
}
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_auto_add_client_disabled() {
|
||||
let client_id = Uuid::new_v4();
|
||||
let parent_version_id = Uuid::new_v4();
|
||||
let server = WebServer::new(
|
||||
ServerConfig::default(),
|
||||
WebConfig {
|
||||
create_clients: false,
|
||||
..WebConfig::default()
|
||||
},
|
||||
InMemoryStorage::new(),
|
||||
);
|
||||
let app = App::new().configure(|sc| server.config(sc));
|
||||
let app = test::init_service(app).await;
|
||||
|
||||
let uri = format!("/v1/client/add-version/{parent_version_id}");
|
||||
let req = test::TestRequest::post()
|
||||
.uri(&uri)
|
||||
.append_header((
|
||||
"Content-Type",
|
||||
"application/vnd.taskchampion.history-segment",
|
||||
))
|
||||
.append_header((CLIENT_ID_HEADER, client_id.to_string()))
|
||||
.set_payload(b"abcd".to_vec())
|
||||
.to_request();
|
||||
let resp = test::call_service(&app, req).await;
|
||||
// Client is not added, and returns 404.
|
||||
assert_eq!(resp.status(), StatusCode::NOT_FOUND);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_conflict() {
|
||||
let client_id = Uuid::new_v4();
|
||||
@ -199,16 +239,16 @@ mod test {
|
||||
|
||||
// set up the storage contents..
|
||||
{
|
||||
let mut txn = storage.txn(client_id).unwrap();
|
||||
txn.new_client(version_id).unwrap();
|
||||
txn.commit().unwrap();
|
||||
let mut txn = storage.txn(client_id).await.unwrap();
|
||||
txn.new_client(version_id).await.unwrap();
|
||||
txn.commit().await.unwrap();
|
||||
}
|
||||
|
||||
let server = WebServer::new(Default::default(), None, storage);
|
||||
let server = WebServer::new(ServerConfig::default(), WebConfig::default(), storage);
|
||||
let app = App::new().configure(|sc| server.config(sc));
|
||||
let app = test::init_service(app).await;
|
||||
|
||||
let uri = format!("/v1/client/add-version/{}", parent_version_id);
|
||||
let uri = format!("/v1/client/add-version/{parent_version_id}");
|
||||
let req = test::TestRequest::post()
|
||||
.uri(&uri)
|
||||
.append_header((
|
||||
@ -232,11 +272,11 @@ mod test {
|
||||
let client_id = Uuid::new_v4();
|
||||
let parent_version_id = Uuid::new_v4();
|
||||
let storage = InMemoryStorage::new();
|
||||
let server = WebServer::new(Default::default(), None, storage);
|
||||
let server = WebServer::new(ServerConfig::default(), WebConfig::default(), storage);
|
||||
let app = App::new().configure(|sc| server.config(sc));
|
||||
let app = test::init_service(app).await;
|
||||
|
||||
let uri = format!("/v1/client/add-version/{}", parent_version_id);
|
||||
let uri = format!("/v1/client/add-version/{parent_version_id}");
|
||||
let req = test::TestRequest::post()
|
||||
.uri(&uri)
|
||||
.append_header(("Content-Type", "not/correct"))
|
||||
@ -252,11 +292,11 @@ mod test {
|
||||
let client_id = Uuid::new_v4();
|
||||
let parent_version_id = Uuid::new_v4();
|
||||
let storage = InMemoryStorage::new();
|
||||
let server = WebServer::new(Default::default(), None, storage);
|
||||
let server = WebServer::new(ServerConfig::default(), WebConfig::default(), storage);
|
||||
let app = App::new().configure(|sc| server.config(sc));
|
||||
let app = test::init_service(app).await;
|
||||
|
||||
let uri = format!("/v1/client/add-version/{}", parent_version_id);
|
||||
let uri = format!("/v1/client/add-version/{parent_version_id}");
|
||||
let req = test::TestRequest::post()
|
||||
.uri(&uri)
|
||||
.append_header((
|
||||
|
||||
@ -26,6 +26,7 @@ pub(crate) async fn service(
|
||||
match server_state
|
||||
.server
|
||||
.get_child_version(client_id, parent_version_id)
|
||||
.await
|
||||
{
|
||||
Ok(GetVersionResult::Success {
|
||||
version_id,
|
||||
@ -48,11 +49,13 @@ pub(crate) async fn service(
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use crate::api::CLIENT_ID_HEADER;
|
||||
use crate::WebServer;
|
||||
use crate::{
|
||||
api::CLIENT_ID_HEADER,
|
||||
web::{WebConfig, WebServer},
|
||||
};
|
||||
use actix_web::{http::StatusCode, test, App};
|
||||
use pretty_assertions::assert_eq;
|
||||
use taskchampion_sync_server_core::{InMemoryStorage, Storage, NIL_VERSION_ID};
|
||||
use taskchampion_sync_server_core::{InMemoryStorage, ServerConfig, Storage, NIL_VERSION_ID};
|
||||
use uuid::Uuid;
|
||||
|
||||
#[actix_rt::test]
|
||||
@ -64,18 +67,19 @@ mod test {
|
||||
|
||||
// set up the storage contents..
|
||||
{
|
||||
let mut txn = storage.txn(client_id).unwrap();
|
||||
txn.new_client(Uuid::new_v4()).unwrap();
|
||||
let mut txn = storage.txn(client_id).await.unwrap();
|
||||
txn.new_client(Uuid::new_v4()).await.unwrap();
|
||||
txn.add_version(version_id, parent_version_id, b"abcd".to_vec())
|
||||
.await
|
||||
.unwrap();
|
||||
txn.commit().unwrap();
|
||||
txn.commit().await.unwrap();
|
||||
}
|
||||
|
||||
let server = WebServer::new(Default::default(), None, storage);
|
||||
let server = WebServer::new(ServerConfig::default(), WebConfig::default(), storage);
|
||||
let app = App::new().configure(|sc| server.config(sc));
|
||||
let app = test::init_service(app).await;
|
||||
|
||||
let uri = format!("/v1/client/get-child-version/{}", parent_version_id);
|
||||
let uri = format!("/v1/client/get-child-version/{parent_version_id}");
|
||||
let req = test::TestRequest::get()
|
||||
.uri(&uri)
|
||||
.append_header((CLIENT_ID_HEADER, client_id.to_string()))
|
||||
@ -105,11 +109,11 @@ mod test {
|
||||
let client_id = Uuid::new_v4();
|
||||
let parent_version_id = Uuid::new_v4();
|
||||
let storage = InMemoryStorage::new();
|
||||
let server = WebServer::new(Default::default(), None, storage);
|
||||
let server = WebServer::new(ServerConfig::default(), WebConfig::default(), storage);
|
||||
let app = App::new().configure(|sc| server.config(sc));
|
||||
let app = test::init_service(app).await;
|
||||
|
||||
let uri = format!("/v1/client/get-child-version/{}", parent_version_id);
|
||||
let uri = format!("/v1/client/get-child-version/{parent_version_id}");
|
||||
let req = test::TestRequest::get()
|
||||
.uri(&uri)
|
||||
.append_header((CLIENT_ID_HEADER, client_id.to_string()))
|
||||
@ -128,18 +132,19 @@ mod test {
|
||||
|
||||
// create the client and a single version.
|
||||
{
|
||||
let mut txn = storage.txn(client_id).unwrap();
|
||||
txn.new_client(Uuid::new_v4()).unwrap();
|
||||
let mut txn = storage.txn(client_id).await.unwrap();
|
||||
txn.new_client(Uuid::new_v4()).await.unwrap();
|
||||
txn.add_version(test_version_id, NIL_VERSION_ID, b"vers".to_vec())
|
||||
.await
|
||||
.unwrap();
|
||||
txn.commit().unwrap();
|
||||
txn.commit().await.unwrap();
|
||||
}
|
||||
let server = WebServer::new(Default::default(), None, storage);
|
||||
let server = WebServer::new(ServerConfig::default(), WebConfig::default(), storage);
|
||||
let app = App::new().configure(|sc| server.config(sc));
|
||||
let app = test::init_service(app).await;
|
||||
|
||||
// the child of the nil version is the added version
|
||||
let uri = format!("/v1/client/get-child-version/{}", NIL_VERSION_ID);
|
||||
let uri = format!("/v1/client/get-child-version/{NIL_VERSION_ID}");
|
||||
let req = test::TestRequest::get()
|
||||
.uri(&uri)
|
||||
.append_header((CLIENT_ID_HEADER, client_id.to_string()))
|
||||
@ -168,7 +173,7 @@ mod test {
|
||||
|
||||
// The child of the latest version is NOT_FOUND. The tests in crate::server test more
|
||||
// corner cases.
|
||||
let uri = format!("/v1/client/get-child-version/{}", test_version_id);
|
||||
let uri = format!("/v1/client/get-child-version/{test_version_id}");
|
||||
let req = test::TestRequest::get()
|
||||
.uri(&uri)
|
||||
.append_header((CLIENT_ID_HEADER, client_id.to_string()))
|
||||
|
||||
@ -20,6 +20,7 @@ pub(crate) async fn service(
|
||||
if let Some((version_id, data)) = server_state
|
||||
.server
|
||||
.get_snapshot(client_id)
|
||||
.await
|
||||
.map_err(server_error_to_actix)?
|
||||
{
|
||||
Ok(HttpResponse::Ok()
|
||||
@ -33,12 +34,14 @@ pub(crate) async fn service(
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use crate::api::CLIENT_ID_HEADER;
|
||||
use crate::WebServer;
|
||||
use crate::{
|
||||
api::CLIENT_ID_HEADER,
|
||||
web::{WebConfig, WebServer},
|
||||
};
|
||||
use actix_web::{http::StatusCode, test, App};
|
||||
use chrono::{TimeZone, Utc};
|
||||
use pretty_assertions::assert_eq;
|
||||
use taskchampion_sync_server_core::{InMemoryStorage, Snapshot, Storage};
|
||||
use taskchampion_sync_server_core::{InMemoryStorage, ServerConfig, Snapshot, Storage};
|
||||
use uuid::Uuid;
|
||||
|
||||
#[actix_rt::test]
|
||||
@ -48,12 +51,12 @@ mod test {
|
||||
|
||||
// set up the storage contents..
|
||||
{
|
||||
let mut txn = storage.txn(client_id).unwrap();
|
||||
txn.new_client(Uuid::new_v4()).unwrap();
|
||||
txn.commit().unwrap();
|
||||
let mut txn = storage.txn(client_id).await.unwrap();
|
||||
txn.new_client(Uuid::new_v4()).await.unwrap();
|
||||
txn.commit().await.unwrap();
|
||||
}
|
||||
|
||||
let server = WebServer::new(Default::default(), None, storage);
|
||||
let server = WebServer::new(ServerConfig::default(), WebConfig::default(), storage);
|
||||
let app = App::new().configure(|sc| server.config(sc));
|
||||
let app = test::init_service(app).await;
|
||||
|
||||
@ -75,8 +78,8 @@ mod test {
|
||||
|
||||
// set up the storage contents..
|
||||
{
|
||||
let mut txn = storage.txn(client_id).unwrap();
|
||||
txn.new_client(Uuid::new_v4()).unwrap();
|
||||
let mut txn = storage.txn(client_id).await.unwrap();
|
||||
txn.new_client(Uuid::new_v4()).await.unwrap();
|
||||
txn.set_snapshot(
|
||||
Snapshot {
|
||||
version_id,
|
||||
@ -85,11 +88,12 @@ mod test {
|
||||
},
|
||||
snapshot_data.clone(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
txn.commit().unwrap();
|
||||
txn.commit().await.unwrap();
|
||||
}
|
||||
|
||||
let server = WebServer::new(Default::default(), None, storage);
|
||||
let server = WebServer::new(ServerConfig::default(), WebConfig::default(), storage);
|
||||
let app = App::new().configure(|sc| server.config(sc));
|
||||
let app = test::init_service(app).await;
|
||||
|
||||
|
||||
@ -1,8 +1,7 @@
|
||||
use std::collections::HashSet;
|
||||
|
||||
use actix_web::{error, web, HttpRequest, Result, Scope};
|
||||
use taskchampion_sync_server_core::{ClientId, Server, ServerError};
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::web::WebConfig;
|
||||
|
||||
mod add_snapshot;
|
||||
mod add_version;
|
||||
@ -31,7 +30,7 @@ pub(crate) const SNAPSHOT_REQUEST_HEADER: &str = "X-Snapshot-Request";
|
||||
/// The type containing a reference to the persistent state for the server
|
||||
pub(crate) struct ServerState {
|
||||
pub(crate) server: Server,
|
||||
pub(crate) client_id_allowlist: Option<HashSet<Uuid>>,
|
||||
pub(crate) web_config: WebConfig,
|
||||
}
|
||||
|
||||
impl ServerState {
|
||||
@ -43,7 +42,7 @@ impl ServerState {
|
||||
if let Some(client_id_hdr) = req.headers().get(CLIENT_ID_HEADER) {
|
||||
let client_id = client_id_hdr.to_str().map_err(|_| badrequest())?;
|
||||
let client_id = ClientId::parse_str(client_id).map_err(|_| badrequest())?;
|
||||
if let Some(allow_list) = &self.client_id_allowlist {
|
||||
if let Some(allow_list) = &self.web_config.client_id_allowlist {
|
||||
if !allow_list.contains(&client_id) {
|
||||
return Err(error::ErrorForbidden("unknown x-client-id"));
|
||||
}
|
||||
@ -80,13 +79,18 @@ fn server_error_to_actix(err: ServerError) -> actix_web::Error {
|
||||
mod test {
|
||||
use super::*;
|
||||
use taskchampion_sync_server_core::InMemoryStorage;
|
||||
use uuid::Uuid;
|
||||
|
||||
#[test]
|
||||
fn client_id_header_allow_all() {
|
||||
let client_id = Uuid::new_v4();
|
||||
let state = ServerState {
|
||||
server: Server::new(Default::default(), InMemoryStorage::new()),
|
||||
client_id_allowlist: None,
|
||||
web_config: WebConfig {
|
||||
client_id_allowlist: None,
|
||||
create_clients: true,
|
||||
..WebConfig::default()
|
||||
},
|
||||
};
|
||||
let req = actix_web::test::TestRequest::default()
|
||||
.insert_header((CLIENT_ID_HEADER, client_id.to_string()))
|
||||
@ -100,7 +104,11 @@ mod test {
|
||||
let client_id_disallowed = Uuid::new_v4();
|
||||
let state = ServerState {
|
||||
server: Server::new(Default::default(), InMemoryStorage::new()),
|
||||
client_id_allowlist: Some([client_id_ok].into()),
|
||||
web_config: WebConfig {
|
||||
client_id_allowlist: Some([client_id_ok].into()),
|
||||
create_clients: true,
|
||||
..WebConfig::default()
|
||||
},
|
||||
};
|
||||
let req = actix_web::test::TestRequest::default()
|
||||
.insert_header((CLIENT_ID_HEADER, client_id_ok.to_string()))
|
||||
|
||||
299
server/src/args.rs
Normal file
299
server/src/args.rs
Normal file
@ -0,0 +1,299 @@
|
||||
use crate::web::WebConfig;
|
||||
use clap::{arg, builder::ValueParser, value_parser, ArgAction, ArgMatches, Command};
|
||||
use taskchampion_sync_server_core::ServerConfig;
|
||||
use uuid::Uuid;
|
||||
|
||||
pub fn command() -> Command {
|
||||
let defaults = ServerConfig::default();
|
||||
let default_snapshot_versions = defaults.snapshot_versions.to_string();
|
||||
let default_snapshot_days = defaults.snapshot_days.to_string();
|
||||
Command::new("taskchampion-sync-server")
|
||||
.version(env!("CARGO_PKG_VERSION"))
|
||||
.about("Server for TaskChampion")
|
||||
.arg(
|
||||
arg!(-l --listen <ADDRESS>)
|
||||
.help("Address and Port on which to listen on. Can be an IP Address or a DNS name followed by a colon and a port e.g. localhost:8080")
|
||||
.value_delimiter(',')
|
||||
.value_parser(ValueParser::string())
|
||||
.env("LISTEN")
|
||||
.action(ArgAction::Append)
|
||||
.required(true),
|
||||
)
|
||||
.arg(
|
||||
arg!(-C --"allow-client-id" <CLIENT_ID> "Client IDs to allow (can be repeated; if not specified, all clients are allowed)")
|
||||
.value_delimiter(',')
|
||||
.value_parser(value_parser!(Uuid))
|
||||
.env("CLIENT_ID")
|
||||
.action(ArgAction::Append)
|
||||
.required(false),
|
||||
)
|
||||
.arg(
|
||||
arg!("create-clients": --"no-create-clients" "If a client does not exist in the database, do not create it")
|
||||
.env("CREATE_CLIENTS")
|
||||
.default_value("true")
|
||||
.action(ArgAction::SetFalse)
|
||||
.required(false),
|
||||
)
|
||||
.arg(
|
||||
arg!(--"snapshot-versions" <NUM> "Target number of versions between snapshots")
|
||||
.value_parser(value_parser!(u32))
|
||||
.env("SNAPSHOT_VERSIONS")
|
||||
.default_value(default_snapshot_versions),
|
||||
)
|
||||
.arg(
|
||||
arg!(--"snapshot-days" <NUM> "Target number of days between snapshots")
|
||||
.value_parser(value_parser!(i64))
|
||||
.env("SNAPSHOT_DAYS")
|
||||
.default_value(default_snapshot_days),
|
||||
)
|
||||
}
|
||||
|
||||
/// Create a ServerConfig from these args.
|
||||
pub fn server_config_from_matches(matches: &ArgMatches) -> ServerConfig {
|
||||
ServerConfig {
|
||||
snapshot_versions: *matches.get_one("snapshot-versions").unwrap(),
|
||||
snapshot_days: *matches.get_one("snapshot-days").unwrap(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a WebConfig from these args.
|
||||
pub fn web_config_from_matches(matches: &ArgMatches) -> WebConfig {
|
||||
WebConfig {
|
||||
client_id_allowlist: matches
|
||||
.get_many("allow-client-id")
|
||||
.map(|ids| ids.copied().collect()),
|
||||
create_clients: matches.get_one("create-clients").copied().unwrap_or(true),
|
||||
listen_addresses: matches
|
||||
.get_many::<String>("listen")
|
||||
.unwrap()
|
||||
.cloned()
|
||||
.collect(),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
#![allow(clippy::bool_assert_comparison)]
|
||||
|
||||
use super::*;
|
||||
use crate::web::WebServer;
|
||||
use actix_web::{self, App};
|
||||
use clap::ArgMatches;
|
||||
use taskchampion_sync_server_core::InMemoryStorage;
|
||||
use temp_env::{with_var, with_var_unset, with_vars, with_vars_unset};
|
||||
|
||||
/// Get the list of allowed client IDs, sorted.
|
||||
fn allowed(matches: ArgMatches) -> Option<Vec<Uuid>> {
|
||||
web_config_from_matches(&matches)
|
||||
.client_id_allowlist
|
||||
.map(|ids| ids.into_iter().collect::<Vec<_>>())
|
||||
.map(|mut ids| {
|
||||
ids.sort();
|
||||
ids
|
||||
})
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn command_listen_two() {
|
||||
with_var_unset("LISTEN", || {
|
||||
let matches = command().get_matches_from([
|
||||
"tss",
|
||||
"--listen",
|
||||
"localhost:8080",
|
||||
"--listen",
|
||||
"otherhost:9090",
|
||||
]);
|
||||
assert_eq!(
|
||||
web_config_from_matches(&matches).listen_addresses,
|
||||
vec!["localhost:8080".to_string(), "otherhost:9090".to_string()]
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn command_listen_two_env() {
|
||||
with_var("LISTEN", Some("localhost:8080,otherhost:9090"), || {
|
||||
let matches = command().get_matches_from(["tss"]);
|
||||
assert_eq!(
|
||||
web_config_from_matches(&matches).listen_addresses,
|
||||
vec!["localhost:8080".to_string(), "otherhost:9090".to_string()]
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn command_allowed_client_ids_none() {
|
||||
with_var_unset("CLIENT_ID", || {
|
||||
let matches = command().get_matches_from(["tss", "--listen", "localhost:8080"]);
|
||||
assert_eq!(allowed(matches), None);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn command_allowed_client_ids_one() {
|
||||
with_var_unset("CLIENT_ID", || {
|
||||
let matches = command().get_matches_from([
|
||||
"tss",
|
||||
"--listen",
|
||||
"localhost:8080",
|
||||
"-C",
|
||||
"711d5cf3-0cf0-4eb8-9eca-6f7f220638c0",
|
||||
]);
|
||||
assert_eq!(
|
||||
allowed(matches),
|
||||
Some(vec![Uuid::parse_str(
|
||||
"711d5cf3-0cf0-4eb8-9eca-6f7f220638c0"
|
||||
)
|
||||
.unwrap()])
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn command_allowed_client_ids_one_env() {
|
||||
with_var(
|
||||
"CLIENT_ID",
|
||||
Some("711d5cf3-0cf0-4eb8-9eca-6f7f220638c0"),
|
||||
|| {
|
||||
let matches = command().get_matches_from(["tss", "--listen", "localhost:8080"]);
|
||||
assert_eq!(
|
||||
allowed(matches),
|
||||
Some(vec![Uuid::parse_str(
|
||||
"711d5cf3-0cf0-4eb8-9eca-6f7f220638c0"
|
||||
)
|
||||
.unwrap()])
|
||||
);
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn command_allowed_client_ids_two() {
|
||||
with_var_unset("CLIENT_ID", || {
|
||||
let matches = command().get_matches_from([
|
||||
"tss",
|
||||
"--listen",
|
||||
"localhost:8080",
|
||||
"-C",
|
||||
"711d5cf3-0cf0-4eb8-9eca-6f7f220638c0",
|
||||
"-C",
|
||||
"bbaf4b61-344a-4a39-a19e-8caa0669b353",
|
||||
]);
|
||||
assert_eq!(
|
||||
allowed(matches),
|
||||
Some(vec![
|
||||
Uuid::parse_str("711d5cf3-0cf0-4eb8-9eca-6f7f220638c0").unwrap(),
|
||||
Uuid::parse_str("bbaf4b61-344a-4a39-a19e-8caa0669b353").unwrap()
|
||||
])
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn command_allowed_client_ids_two_env() {
|
||||
with_var(
|
||||
"CLIENT_ID",
|
||||
Some("711d5cf3-0cf0-4eb8-9eca-6f7f220638c0,bbaf4b61-344a-4a39-a19e-8caa0669b353"),
|
||||
|| {
|
||||
let matches = command().get_matches_from(["tss", "--listen", "localhost:8080"]);
|
||||
assert_eq!(
|
||||
allowed(matches),
|
||||
Some(vec![
|
||||
Uuid::parse_str("711d5cf3-0cf0-4eb8-9eca-6f7f220638c0").unwrap(),
|
||||
Uuid::parse_str("bbaf4b61-344a-4a39-a19e-8caa0669b353").unwrap()
|
||||
])
|
||||
);
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn command_snapshot() {
|
||||
with_vars_unset(["SNAPSHOT_DAYS", "SNAPSHOT_VERSIONS"], || {
|
||||
let matches = command().get_matches_from([
|
||||
"tss",
|
||||
"--listen",
|
||||
"localhost:8080",
|
||||
"--snapshot-days",
|
||||
"13",
|
||||
"--snapshot-versions",
|
||||
"20",
|
||||
]);
|
||||
let server_config = server_config_from_matches(&matches);
|
||||
assert_eq!(server_config.snapshot_days, 13i64);
|
||||
assert_eq!(server_config.snapshot_versions, 20u32);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn command_snapshot_env() {
|
||||
with_vars(
|
||||
[
|
||||
("SNAPSHOT_DAYS", Some("13")),
|
||||
("SNAPSHOT_VERSIONS", Some("20")),
|
||||
],
|
||||
|| {
|
||||
let matches = command().get_matches_from(["tss", "--listen", "localhost:8080"]);
|
||||
let server_config = server_config_from_matches(&matches);
|
||||
assert_eq!(server_config.snapshot_days, 13i64);
|
||||
assert_eq!(server_config.snapshot_versions, 20u32);
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn command_create_clients_default() {
|
||||
with_var_unset("CREATE_CLIENTS", || {
|
||||
let matches = command().get_matches_from(["tss", "--listen", "localhost:8080"]);
|
||||
let server_config = web_config_from_matches(&matches);
|
||||
assert_eq!(server_config.create_clients, true);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn command_create_clients_cmdline() {
|
||||
with_var_unset("CREATE_CLIENTS", || {
|
||||
let matches = command().get_matches_from([
|
||||
"tss",
|
||||
"--listen",
|
||||
"localhost:8080",
|
||||
"--no-create-clients",
|
||||
]);
|
||||
let server_config = web_config_from_matches(&matches);
|
||||
assert_eq!(server_config.create_clients, false);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn command_create_clients_env_true() {
|
||||
with_vars([("CREATE_CLIENTS", Some("true"))], || {
|
||||
let matches = command().get_matches_from(["tss", "--listen", "localhost:8080"]);
|
||||
let server_config = web_config_from_matches(&matches);
|
||||
assert_eq!(server_config.create_clients, true);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn command_create_clients_env_false() {
|
||||
with_vars([("CREATE_CLIENTS", Some("false"))], || {
|
||||
let matches = command().get_matches_from(["tss", "--listen", "localhost:8080"]);
|
||||
let server_config = web_config_from_matches(&matches);
|
||||
assert_eq!(server_config.create_clients, false);
|
||||
});
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_index_get() {
|
||||
let server = WebServer::new(
|
||||
ServerConfig::default(),
|
||||
WebConfig::default(),
|
||||
InMemoryStorage::new(),
|
||||
);
|
||||
let app = App::new().configure(|sc| server.config(sc));
|
||||
let app = actix_web::test::init_service(app).await;
|
||||
|
||||
let req = actix_web::test::TestRequest::get().uri("/").to_request();
|
||||
let resp = actix_web::test::call_service(&app, req).await;
|
||||
assert!(resp.status().is_success());
|
||||
}
|
||||
}
|
||||
66
server/src/bin/taskchampion-sync-server-postgres.rs
Normal file
66
server/src/bin/taskchampion-sync-server-postgres.rs
Normal file
@ -0,0 +1,66 @@
|
||||
#![deny(clippy::all)]
|
||||
|
||||
use clap::{arg, builder::ValueParser, ArgMatches, Command};
|
||||
use std::ffi::OsString;
|
||||
use taskchampion_sync_server::{args, web};
|
||||
use taskchampion_sync_server_storage_postgres::PostgresStorage;
|
||||
|
||||
fn command() -> Command {
|
||||
args::command().arg(
|
||||
arg!(-c --"connection" <DIR> "LibPQ-style connection URI")
|
||||
.value_parser(ValueParser::os_string())
|
||||
.help("See https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING-URIS")
|
||||
.required(true)
|
||||
.env("CONNECTION")
|
||||
)
|
||||
}
|
||||
|
||||
fn connection_from_matches(matches: &ArgMatches) -> String {
|
||||
matches
|
||||
.get_one::<OsString>("connection")
|
||||
.unwrap()
|
||||
.to_str()
|
||||
.expect("--connection must be valid UTF-8")
|
||||
.to_string()
|
||||
}
|
||||
|
||||
#[actix_web::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
env_logger::init();
|
||||
let matches = command().get_matches();
|
||||
let server_config = args::server_config_from_matches(&matches);
|
||||
let web_config = args::web_config_from_matches(&matches);
|
||||
let connection = connection_from_matches(&matches);
|
||||
let storage = PostgresStorage::new(connection).await?;
|
||||
|
||||
let server = web::WebServer::new(server_config, web_config, storage);
|
||||
server.run().await
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use temp_env::{with_var, with_var_unset};
|
||||
|
||||
#[test]
|
||||
fn command_connection() {
|
||||
with_var_unset("CONNECTION", || {
|
||||
let matches = command().get_matches_from([
|
||||
"tss",
|
||||
"--connection",
|
||||
"postgresql:/foo/bar",
|
||||
"--listen",
|
||||
"localhost:8080",
|
||||
]);
|
||||
assert_eq!(connection_from_matches(&matches), "postgresql:/foo/bar");
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn command_connection_env() {
|
||||
with_var("CONNECTION", Some("postgresql:/foo/bar"), || {
|
||||
let matches = command().get_matches_from(["tss", "--listen", "localhost:8080"]);
|
||||
assert_eq!(connection_from_matches(&matches), "postgresql:/foo/bar");
|
||||
});
|
||||
}
|
||||
}
|
||||
@ -1,257 +1,40 @@
|
||||
#![deny(clippy::all)]
|
||||
|
||||
use actix_web::{
|
||||
dev::ServiceResponse,
|
||||
http::StatusCode,
|
||||
middleware::{ErrorHandlerResponse, ErrorHandlers, Logger},
|
||||
App, HttpServer,
|
||||
};
|
||||
use clap::{arg, builder::ValueParser, value_parser, ArgAction, Command};
|
||||
use std::{collections::HashSet, ffi::OsString};
|
||||
use taskchampion_sync_server::WebServer;
|
||||
use taskchampion_sync_server_core::ServerConfig;
|
||||
use clap::{arg, builder::ValueParser, ArgMatches, Command};
|
||||
use std::ffi::OsString;
|
||||
use taskchampion_sync_server::{args, web};
|
||||
use taskchampion_sync_server_storage_sqlite::SqliteStorage;
|
||||
use uuid::Uuid;
|
||||
|
||||
fn command() -> Command {
|
||||
let defaults = ServerConfig::default();
|
||||
let default_snapshot_versions = defaults.snapshot_versions.to_string();
|
||||
let default_snapshot_days = defaults.snapshot_days.to_string();
|
||||
Command::new("taskchampion-sync-server")
|
||||
.version(env!("CARGO_PKG_VERSION"))
|
||||
.about("Server for TaskChampion")
|
||||
.arg(
|
||||
arg!(-l --listen <ADDRESS>)
|
||||
.help("Address and Port on which to listen on. Can be an IP Address or a DNS name followed by a colon and a port e.g. localhost:8080")
|
||||
.value_delimiter(',')
|
||||
.value_parser(ValueParser::string())
|
||||
.env("LISTEN")
|
||||
.action(ArgAction::Append)
|
||||
.required(true),
|
||||
)
|
||||
.arg(
|
||||
arg!(-d --"data-dir" <DIR> "Directory in which to store data")
|
||||
.value_parser(ValueParser::os_string())
|
||||
.env("DATA_DIR")
|
||||
.default_value("/var/lib/taskchampion-sync-server"),
|
||||
)
|
||||
.arg(
|
||||
arg!(-C --"allow-client-id" <CLIENT_ID> "Client IDs to allow (can be repeated; if not specified, all clients are allowed)")
|
||||
.value_delimiter(',')
|
||||
.value_parser(value_parser!(Uuid))
|
||||
.env("CLIENT_ID")
|
||||
.action(ArgAction::Append)
|
||||
.required(false),
|
||||
)
|
||||
.arg(
|
||||
arg!(--"snapshot-versions" <NUM> "Target number of versions between snapshots")
|
||||
.value_parser(value_parser!(u32))
|
||||
.env("SNAPSHOT_VERSIONS")
|
||||
.default_value(default_snapshot_versions),
|
||||
)
|
||||
.arg(
|
||||
arg!(--"snapshot-days" <NUM> "Target number of days between snapshots")
|
||||
.value_parser(value_parser!(i64))
|
||||
.env("SNAPSHOT_DAYS")
|
||||
.default_value(default_snapshot_days),
|
||||
)
|
||||
args::command().arg(
|
||||
arg!(-d --"data-dir" <DIR> "Directory in which to store data")
|
||||
.value_parser(ValueParser::os_string())
|
||||
.env("DATA_DIR")
|
||||
.default_value("/var/lib/taskchampion-sync-server"),
|
||||
)
|
||||
}
|
||||
|
||||
fn print_error<B>(res: ServiceResponse<B>) -> actix_web::Result<ErrorHandlerResponse<B>> {
|
||||
if let Some(err) = res.response().error() {
|
||||
log::error!("Internal Server Error caused by:\n{:?}", err);
|
||||
}
|
||||
Ok(ErrorHandlerResponse::Response(res.map_into_left_body()))
|
||||
}
|
||||
|
||||
struct ServerArgs {
|
||||
data_dir: OsString,
|
||||
snapshot_versions: u32,
|
||||
snapshot_days: i64,
|
||||
client_id_allowlist: Option<HashSet<Uuid>>,
|
||||
listen_addresses: Vec<String>,
|
||||
}
|
||||
|
||||
impl ServerArgs {
|
||||
fn new(matches: clap::ArgMatches) -> Self {
|
||||
Self {
|
||||
data_dir: matches.get_one::<OsString>("data-dir").unwrap().clone(),
|
||||
snapshot_versions: *matches.get_one("snapshot-versions").unwrap(),
|
||||
snapshot_days: *matches.get_one("snapshot-days").unwrap(),
|
||||
client_id_allowlist: matches
|
||||
.get_many("allow-client-id")
|
||||
.map(|ids| ids.copied().collect()),
|
||||
listen_addresses: matches
|
||||
.get_many::<String>("listen")
|
||||
.unwrap()
|
||||
.cloned()
|
||||
.collect(),
|
||||
}
|
||||
}
|
||||
fn data_dir_from_matches(matches: &ArgMatches) -> OsString {
|
||||
matches.get_one::<OsString>("data-dir").unwrap().clone()
|
||||
}
|
||||
|
||||
#[actix_web::main]
|
||||
async fn main() -> anyhow::Result<()> {
|
||||
env_logger::init();
|
||||
let matches = command().get_matches();
|
||||
let server_config = args::server_config_from_matches(&matches);
|
||||
let web_config = args::web_config_from_matches(&matches);
|
||||
let data_dir = data_dir_from_matches(&matches);
|
||||
let storage = SqliteStorage::new(data_dir)?;
|
||||
|
||||
let server_args = ServerArgs::new(matches);
|
||||
let config = ServerConfig {
|
||||
snapshot_days: server_args.snapshot_days,
|
||||
snapshot_versions: server_args.snapshot_versions,
|
||||
};
|
||||
let server = WebServer::new(
|
||||
config,
|
||||
server_args.client_id_allowlist,
|
||||
SqliteStorage::new(server_args.data_dir)?,
|
||||
);
|
||||
|
||||
let mut http_server = HttpServer::new(move || {
|
||||
App::new()
|
||||
.wrap(ErrorHandlers::new().handler(StatusCode::INTERNAL_SERVER_ERROR, print_error))
|
||||
.wrap(Logger::default())
|
||||
.configure(|cfg| server.config(cfg))
|
||||
});
|
||||
for listen_address in server_args.listen_addresses {
|
||||
log::info!("Serving on {}", listen_address);
|
||||
http_server = http_server.bind(listen_address)?
|
||||
}
|
||||
http_server.run().await?;
|
||||
Ok(())
|
||||
let server = web::WebServer::new(server_config, web_config, storage);
|
||||
server.run().await
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use actix_web::{self, App};
|
||||
use clap::ArgMatches;
|
||||
use taskchampion_sync_server_core::InMemoryStorage;
|
||||
use temp_env::{with_var, with_var_unset, with_vars, with_vars_unset};
|
||||
|
||||
/// Get the list of allowed client IDs, sorted.
|
||||
fn allowed(matches: ArgMatches) -> Option<Vec<Uuid>> {
|
||||
ServerArgs::new(matches)
|
||||
.client_id_allowlist
|
||||
.map(|ids| ids.into_iter().collect::<Vec<_>>())
|
||||
.map(|mut ids| {
|
||||
ids.sort();
|
||||
ids
|
||||
})
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn command_listen_two() {
|
||||
with_var_unset("LISTEN", || {
|
||||
let matches = command().get_matches_from([
|
||||
"tss",
|
||||
"--listen",
|
||||
"localhost:8080",
|
||||
"--listen",
|
||||
"otherhost:9090",
|
||||
]);
|
||||
assert_eq!(
|
||||
ServerArgs::new(matches).listen_addresses,
|
||||
vec!["localhost:8080".to_string(), "otherhost:9090".to_string()]
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn command_listen_two_env() {
|
||||
with_var("LISTEN", Some("localhost:8080,otherhost:9090"), || {
|
||||
let matches = command().get_matches_from(["tss"]);
|
||||
assert_eq!(
|
||||
ServerArgs::new(matches).listen_addresses,
|
||||
vec!["localhost:8080".to_string(), "otherhost:9090".to_string()]
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn command_allowed_client_ids_none() {
|
||||
with_var_unset("CLIENT_ID", || {
|
||||
let matches = command().get_matches_from(["tss", "--listen", "localhost:8080"]);
|
||||
assert_eq!(allowed(matches), None);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn command_allowed_client_ids_one() {
|
||||
with_var_unset("CLIENT_ID", || {
|
||||
let matches = command().get_matches_from([
|
||||
"tss",
|
||||
"--listen",
|
||||
"localhost:8080",
|
||||
"-C",
|
||||
"711d5cf3-0cf0-4eb8-9eca-6f7f220638c0",
|
||||
]);
|
||||
assert_eq!(
|
||||
allowed(matches),
|
||||
Some(vec![Uuid::parse_str(
|
||||
"711d5cf3-0cf0-4eb8-9eca-6f7f220638c0"
|
||||
)
|
||||
.unwrap()])
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn command_allowed_client_ids_one_env() {
|
||||
with_var(
|
||||
"CLIENT_ID",
|
||||
Some("711d5cf3-0cf0-4eb8-9eca-6f7f220638c0"),
|
||||
|| {
|
||||
let matches = command().get_matches_from(["tss", "--listen", "localhost:8080"]);
|
||||
assert_eq!(
|
||||
allowed(matches),
|
||||
Some(vec![Uuid::parse_str(
|
||||
"711d5cf3-0cf0-4eb8-9eca-6f7f220638c0"
|
||||
)
|
||||
.unwrap()])
|
||||
);
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn command_allowed_client_ids_two() {
|
||||
with_var_unset("CLIENT_ID", || {
|
||||
let matches = command().get_matches_from([
|
||||
"tss",
|
||||
"--listen",
|
||||
"localhost:8080",
|
||||
"-C",
|
||||
"711d5cf3-0cf0-4eb8-9eca-6f7f220638c0",
|
||||
"-C",
|
||||
"bbaf4b61-344a-4a39-a19e-8caa0669b353",
|
||||
]);
|
||||
assert_eq!(
|
||||
allowed(matches),
|
||||
Some(vec![
|
||||
Uuid::parse_str("711d5cf3-0cf0-4eb8-9eca-6f7f220638c0").unwrap(),
|
||||
Uuid::parse_str("bbaf4b61-344a-4a39-a19e-8caa0669b353").unwrap()
|
||||
])
|
||||
);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn command_allowed_client_ids_two_env() {
|
||||
with_var(
|
||||
"CLIENT_ID",
|
||||
Some("711d5cf3-0cf0-4eb8-9eca-6f7f220638c0,bbaf4b61-344a-4a39-a19e-8caa0669b353"),
|
||||
|| {
|
||||
let matches = command().get_matches_from(["tss", "--listen", "localhost:8080"]);
|
||||
assert_eq!(
|
||||
allowed(matches),
|
||||
Some(vec![
|
||||
Uuid::parse_str("711d5cf3-0cf0-4eb8-9eca-6f7f220638c0").unwrap(),
|
||||
Uuid::parse_str("bbaf4b61-344a-4a39-a19e-8caa0669b353").unwrap()
|
||||
])
|
||||
);
|
||||
},
|
||||
);
|
||||
}
|
||||
use temp_env::{with_var, with_var_unset};
|
||||
|
||||
#[test]
|
||||
fn command_data_dir() {
|
||||
@ -263,7 +46,7 @@ mod test {
|
||||
"--listen",
|
||||
"localhost:8080",
|
||||
]);
|
||||
assert_eq!(ServerArgs::new(matches).data_dir, "/foo/bar");
|
||||
assert_eq!(data_dir_from_matches(&matches), "/foo/bar");
|
||||
});
|
||||
}
|
||||
|
||||
@ -271,52 +54,7 @@ mod test {
|
||||
fn command_data_dir_env() {
|
||||
with_var("DATA_DIR", Some("/foo/bar"), || {
|
||||
let matches = command().get_matches_from(["tss", "--listen", "localhost:8080"]);
|
||||
assert_eq!(ServerArgs::new(matches).data_dir, "/foo/bar");
|
||||
assert_eq!(data_dir_from_matches(&matches), "/foo/bar");
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn command_snapshot() {
|
||||
with_vars_unset(["SNAPSHOT_DAYS", "SNAPSHOT_VERSIONS"], || {
|
||||
let matches = command().get_matches_from([
|
||||
"tss",
|
||||
"--listen",
|
||||
"localhost:8080",
|
||||
"--snapshot-days",
|
||||
"13",
|
||||
"--snapshot-versions",
|
||||
"20",
|
||||
]);
|
||||
let server_args = ServerArgs::new(matches);
|
||||
assert_eq!(server_args.snapshot_days, 13i64);
|
||||
assert_eq!(server_args.snapshot_versions, 20u32);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn command_snapshot_env() {
|
||||
with_vars(
|
||||
[
|
||||
("SNAPSHOT_DAYS", Some("13")),
|
||||
("SNAPSHOT_VERSIONS", Some("20")),
|
||||
],
|
||||
|| {
|
||||
let matches = command().get_matches_from(["tss", "--listen", "localhost:8080"]);
|
||||
let server_args = ServerArgs::new(matches);
|
||||
assert_eq!(server_args.snapshot_days, 13i64);
|
||||
assert_eq!(server_args.snapshot_versions, 20u32);
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_index_get() {
|
||||
let server = WebServer::new(Default::default(), None, InMemoryStorage::new());
|
||||
let app = App::new().configure(|sc| server.config(sc));
|
||||
let app = actix_web::test::init_service(app).await;
|
||||
|
||||
let req = actix_web::test::TestRequest::get().uri("/").to_request();
|
||||
let resp = actix_web::test::call_service(&app, req).await;
|
||||
assert!(resp.status().is_success());
|
||||
}
|
||||
}
|
||||
|
||||
@ -1,72 +1,5 @@
|
||||
#![deny(clippy::all)]
|
||||
|
||||
mod api;
|
||||
|
||||
use actix_web::{get, middleware, web, Responder};
|
||||
use api::{api_scope, ServerState};
|
||||
use std::{collections::HashSet, sync::Arc};
|
||||
use taskchampion_sync_server_core::{Server, ServerConfig, Storage};
|
||||
use uuid::Uuid;
|
||||
|
||||
#[get("/")]
|
||||
async fn index() -> impl Responder {
|
||||
format!("TaskChampion sync server v{}", env!("CARGO_PKG_VERSION"))
|
||||
}
|
||||
|
||||
/// A Server represents a sync server.
|
||||
#[derive(Clone)]
|
||||
pub struct WebServer {
|
||||
server_state: Arc<ServerState>,
|
||||
}
|
||||
|
||||
impl WebServer {
|
||||
/// Create a new sync server with the given storage implementation.
|
||||
pub fn new<ST: Storage + 'static>(
|
||||
config: ServerConfig,
|
||||
client_id_allowlist: Option<HashSet<Uuid>>,
|
||||
storage: ST,
|
||||
) -> Self {
|
||||
Self {
|
||||
server_state: Arc::new(ServerState {
|
||||
server: Server::new(config, storage),
|
||||
client_id_allowlist,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
/// Get an Actix-web service for this server.
|
||||
pub fn config(&self, cfg: &mut web::ServiceConfig) {
|
||||
cfg.service(
|
||||
web::scope("")
|
||||
.app_data(web::Data::new(self.server_state.clone()))
|
||||
.wrap(
|
||||
middleware::DefaultHeaders::new().add(("Cache-Control", "no-store, max-age=0")),
|
||||
)
|
||||
.service(index)
|
||||
.service(api_scope()),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use actix_web::{test, App};
|
||||
use pretty_assertions::assert_eq;
|
||||
use taskchampion_sync_server_core::InMemoryStorage;
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_cache_control() {
|
||||
let server = WebServer::new(Default::default(), None, InMemoryStorage::new());
|
||||
let app = App::new().configure(|sc| server.config(sc));
|
||||
let app = test::init_service(app).await;
|
||||
|
||||
let req = test::TestRequest::get().uri("/").to_request();
|
||||
let resp = test::call_service(&app, req).await;
|
||||
assert!(resp.status().is_success());
|
||||
assert_eq!(
|
||||
resp.headers().get("Cache-Control").unwrap(),
|
||||
&"no-store, max-age=0".to_string()
|
||||
)
|
||||
}
|
||||
}
|
||||
pub mod api;
|
||||
pub mod args;
|
||||
pub mod web;
|
||||
|
||||
118
server/src/web.rs
Normal file
118
server/src/web.rs
Normal file
@ -0,0 +1,118 @@
|
||||
use crate::api::{api_scope, ServerState};
|
||||
use actix_web::{
|
||||
dev::ServiceResponse,
|
||||
get,
|
||||
http::StatusCode,
|
||||
middleware,
|
||||
middleware::{ErrorHandlerResponse, ErrorHandlers, Logger},
|
||||
web, App, HttpServer, Responder,
|
||||
};
|
||||
use std::{collections::HashSet, sync::Arc};
|
||||
use taskchampion_sync_server_core::{Server, ServerConfig, Storage};
|
||||
use uuid::Uuid;
|
||||
|
||||
fn print_error<B>(res: ServiceResponse<B>) -> actix_web::Result<ErrorHandlerResponse<B>> {
|
||||
if let Some(err) = res.response().error() {
|
||||
log::error!("Internal Server Error caused by:\n{err:?}");
|
||||
}
|
||||
Ok(ErrorHandlerResponse::Response(res.map_into_left_body()))
|
||||
}
|
||||
|
||||
/// Configuration for WebServer (as distinct from [`ServerConfig`]).
|
||||
pub struct WebConfig {
|
||||
pub client_id_allowlist: Option<HashSet<Uuid>>,
|
||||
pub create_clients: bool,
|
||||
pub listen_addresses: Vec<String>,
|
||||
}
|
||||
|
||||
impl Default for WebConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
client_id_allowlist: Default::default(),
|
||||
create_clients: true,
|
||||
listen_addresses: vec![],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[get("/")]
|
||||
async fn index() -> impl Responder {
|
||||
format!("TaskChampion sync server v{}", env!("CARGO_PKG_VERSION"))
|
||||
}
|
||||
|
||||
/// A Server represents a sync server.
|
||||
#[derive(Clone)]
|
||||
pub struct WebServer {
|
||||
pub(crate) server_state: Arc<ServerState>,
|
||||
}
|
||||
|
||||
impl WebServer {
|
||||
/// Create a new sync server with the given storage implementation.
|
||||
pub fn new<ST: Storage + 'static>(
|
||||
config: ServerConfig,
|
||||
web_config: WebConfig,
|
||||
storage: ST,
|
||||
) -> Self {
|
||||
Self {
|
||||
server_state: Arc::new(ServerState {
|
||||
server: Server::new(config, storage),
|
||||
web_config,
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn config(&self, cfg: &mut web::ServiceConfig) {
|
||||
cfg.service(
|
||||
web::scope("")
|
||||
.app_data(web::Data::new(self.server_state.clone()))
|
||||
.wrap(
|
||||
middleware::DefaultHeaders::new().add(("Cache-Control", "no-store, max-age=0")),
|
||||
)
|
||||
.service(index)
|
||||
.service(api_scope()),
|
||||
);
|
||||
}
|
||||
|
||||
pub async fn run(self) -> anyhow::Result<()> {
|
||||
let listen_addresses = self.server_state.web_config.listen_addresses.clone();
|
||||
let mut http_server = HttpServer::new(move || {
|
||||
App::new()
|
||||
.wrap(ErrorHandlers::new().handler(StatusCode::INTERNAL_SERVER_ERROR, print_error))
|
||||
.wrap(Logger::default())
|
||||
.configure(|cfg| self.config(cfg))
|
||||
});
|
||||
for listen_address in listen_addresses {
|
||||
log::info!("Serving on {listen_address}");
|
||||
http_server = http_server.bind(listen_address)?
|
||||
}
|
||||
http_server.run().await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
use super::*;
|
||||
use actix_web::{test, App};
|
||||
use pretty_assertions::assert_eq;
|
||||
use taskchampion_sync_server_core::InMemoryStorage;
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_cache_control() {
|
||||
let server = WebServer::new(
|
||||
ServerConfig::default(),
|
||||
WebConfig::default(),
|
||||
InMemoryStorage::new(),
|
||||
);
|
||||
let app = App::new().configure(|sc| server.config(sc));
|
||||
let app = test::init_service(app).await;
|
||||
|
||||
let req = test::TestRequest::get().uri("/").to_request();
|
||||
let resp = test::call_service(&app, req).await;
|
||||
assert!(resp.status().is_success());
|
||||
assert_eq!(
|
||||
resp.headers().get("Cache-Control").unwrap(),
|
||||
&"no-store, max-age=0".to_string()
|
||||
)
|
||||
}
|
||||
}
|
||||
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "taskchampion-sync-server-storage-sqlite"
|
||||
version = "0.6.1"
|
||||
version = "0.7.2-pre"
|
||||
authors = ["Dustin J. Mitchell <dustin@mozilla.com>"]
|
||||
edition = "2021"
|
||||
description = "SQLite backend for TaskChampion-sync-server"
|
||||
@ -9,7 +9,8 @@ repository = "https://github.com/GothenburgBitFactory/taskchampion-sync-server"
|
||||
license = "MIT"
|
||||
|
||||
[dependencies]
|
||||
taskchampion-sync-server-core = { path = "../core", version = "0.6.1" }
|
||||
taskchampion-sync-server-core = { path = "../core", version = "0.7.2-pre" }
|
||||
async-trait.workspace = true
|
||||
uuid.workspace = true
|
||||
anyhow.workspace = true
|
||||
thiserror.workspace = true
|
||||
@ -19,3 +20,4 @@ chrono.workspace = true
|
||||
[dev-dependencies]
|
||||
tempfile.workspace = true
|
||||
pretty_assertions.workspace = true
|
||||
tokio.workspace = true
|
||||
|
||||
@ -1,4 +1,11 @@
|
||||
//! Tihs crate implements a SQLite storage backend for the TaskChampion sync server.
|
||||
//! This crate implements a SQLite storage backend for the TaskChampion sync server.
|
||||
//!
|
||||
//! Use the [`SqliteStorage`] type as an implementation of the [`Storage`] trait.
|
||||
//!
|
||||
//! This crate is intended for small deployments of a sync server, supporting one or a small number
|
||||
//! of users. The schema for the database is considered an implementation detail. For more robust
|
||||
//! database support, consider `taskchampion-sync-server-storage-postgres`.
|
||||
|
||||
use anyhow::Context;
|
||||
use chrono::{TimeZone, Utc};
|
||||
use rusqlite::types::{FromSql, ToSql};
|
||||
@ -43,7 +50,7 @@ impl SqliteStorage {
|
||||
/// Create a new instance using a database at the given directory.
|
||||
///
|
||||
/// The database will be stored in a file named `taskchampion-sync-server.sqlite3` in the given
|
||||
/// directory.
|
||||
/// directory. The database will be created if it does not exist.
|
||||
pub fn new<P: AsRef<Path>>(directory: P) -> anyhow::Result<SqliteStorage> {
|
||||
std::fs::create_dir_all(&directory)
|
||||
.with_context(|| format!("Failed to create `{}`.", directory.as_ref().display()))?;
|
||||
@ -77,8 +84,9 @@ impl SqliteStorage {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl Storage for SqliteStorage {
|
||||
fn txn(&self, client_id: Uuid) -> anyhow::Result<Box<dyn StorageTxn + '_>> {
|
||||
async fn txn(&self, client_id: Uuid) -> anyhow::Result<Box<dyn StorageTxn + '_>> {
|
||||
let con = self.new_connection()?;
|
||||
// Begin the transaction on this new connection. An IMMEDIATE connection is in
|
||||
// write (exclusive) mode from the start.
|
||||
@ -126,8 +134,9 @@ impl Txn {
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait(?Send)]
|
||||
impl StorageTxn for Txn {
|
||||
fn get_client(&mut self) -> anyhow::Result<Option<Client>> {
|
||||
async fn get_client(&mut self) -> anyhow::Result<Option<Client>> {
|
||||
let result: Option<Client> = self
|
||||
.con
|
||||
.query_row(
|
||||
@ -171,17 +180,17 @@ impl StorageTxn for Txn {
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
fn new_client(&mut self, latest_version_id: Uuid) -> anyhow::Result<()> {
|
||||
async fn new_client(&mut self, latest_version_id: Uuid) -> anyhow::Result<()> {
|
||||
self.con
|
||||
.execute(
|
||||
"INSERT OR REPLACE INTO clients (client_id, latest_version_id) VALUES (?, ?)",
|
||||
"INSERT INTO clients (client_id, latest_version_id) VALUES (?, ?)",
|
||||
params![&StoredUuid(self.client_id), &StoredUuid(latest_version_id)],
|
||||
)
|
||||
.context("Error creating/updating client")?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn set_snapshot(&mut self, snapshot: Snapshot, data: Vec<u8>) -> anyhow::Result<()> {
|
||||
async fn set_snapshot(&mut self, snapshot: Snapshot, data: Vec<u8>) -> anyhow::Result<()> {
|
||||
self.con
|
||||
.execute(
|
||||
"UPDATE clients
|
||||
@ -203,7 +212,7 @@ impl StorageTxn for Txn {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn get_snapshot_data(&mut self, version_id: Uuid) -> anyhow::Result<Option<Vec<u8>>> {
|
||||
async fn get_snapshot_data(&mut self, version_id: Uuid) -> anyhow::Result<Option<Vec<u8>>> {
|
||||
let r = self
|
||||
.con
|
||||
.query_row(
|
||||
@ -227,9 +236,8 @@ impl StorageTxn for Txn {
|
||||
.transpose()
|
||||
}
|
||||
|
||||
fn get_version_by_parent(
|
||||
async fn get_version_by_parent(
|
||||
&mut self,
|
||||
|
||||
parent_version_id: Uuid,
|
||||
) -> anyhow::Result<Option<Version>> {
|
||||
self.get_version_impl(
|
||||
@ -238,16 +246,15 @@ impl StorageTxn for Txn {
|
||||
parent_version_id)
|
||||
}
|
||||
|
||||
fn get_version(&mut self, version_id: Uuid) -> anyhow::Result<Option<Version>> {
|
||||
async fn get_version(&mut self, version_id: Uuid) -> anyhow::Result<Option<Version>> {
|
||||
self.get_version_impl(
|
||||
"SELECT version_id, parent_version_id, history_segment FROM versions WHERE version_id = ? AND client_id = ?",
|
||||
self.client_id,
|
||||
version_id)
|
||||
}
|
||||
|
||||
fn add_version(
|
||||
async fn add_version(
|
||||
&mut self,
|
||||
|
||||
version_id: Uuid,
|
||||
parent_version_id: Uuid,
|
||||
history_segment: Vec<u8>,
|
||||
@ -262,21 +269,31 @@ impl StorageTxn for Txn {
|
||||
]
|
||||
)
|
||||
.context("Error adding version")?;
|
||||
self.con
|
||||
let rows_changed = self
|
||||
.con
|
||||
.execute(
|
||||
"UPDATE clients
|
||||
SET
|
||||
latest_version_id = ?,
|
||||
versions_since_snapshot = versions_since_snapshot + 1
|
||||
WHERE client_id = ?",
|
||||
params![StoredUuid(version_id), StoredUuid(self.client_id),],
|
||||
WHERE client_id = ? and (latest_version_id = ? or latest_version_id = ?)",
|
||||
params![
|
||||
StoredUuid(version_id),
|
||||
StoredUuid(self.client_id),
|
||||
StoredUuid(parent_version_id),
|
||||
StoredUuid(Uuid::nil())
|
||||
],
|
||||
)
|
||||
.context("Error updating client for new version")?;
|
||||
|
||||
if rows_changed == 0 {
|
||||
anyhow::bail!("clients.latest_version_id does not match parent_version_id");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn commit(&mut self) -> anyhow::Result<()> {
|
||||
async fn commit(&mut self) -> anyhow::Result<()> {
|
||||
self.con.execute("COMMIT", [])?;
|
||||
Ok(())
|
||||
}
|
||||
@ -289,48 +306,49 @@ mod test {
|
||||
use pretty_assertions::assert_eq;
|
||||
use tempfile::TempDir;
|
||||
|
||||
#[test]
|
||||
fn test_emtpy_dir() -> anyhow::Result<()> {
|
||||
#[tokio::test]
|
||||
async fn test_emtpy_dir() -> anyhow::Result<()> {
|
||||
let tmp_dir = TempDir::new()?;
|
||||
let non_existant = tmp_dir.path().join("subdir");
|
||||
let storage = SqliteStorage::new(non_existant)?;
|
||||
let client_id = Uuid::new_v4();
|
||||
let mut txn = storage.txn(client_id)?;
|
||||
let maybe_client = txn.get_client()?;
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
let maybe_client = txn.get_client().await?;
|
||||
assert!(maybe_client.is_none());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_get_client_empty() -> anyhow::Result<()> {
|
||||
#[tokio::test]
|
||||
async fn test_get_client_empty() -> anyhow::Result<()> {
|
||||
let tmp_dir = TempDir::new()?;
|
||||
let storage = SqliteStorage::new(tmp_dir.path())?;
|
||||
let client_id = Uuid::new_v4();
|
||||
let mut txn = storage.txn(client_id)?;
|
||||
let maybe_client = txn.get_client()?;
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
let maybe_client = txn.get_client().await?;
|
||||
assert!(maybe_client.is_none());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_client_storage() -> anyhow::Result<()> {
|
||||
#[tokio::test]
|
||||
async fn test_client_storage() -> anyhow::Result<()> {
|
||||
let tmp_dir = TempDir::new()?;
|
||||
let storage = SqliteStorage::new(tmp_dir.path())?;
|
||||
let client_id = Uuid::new_v4();
|
||||
let mut txn = storage.txn(client_id)?;
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
|
||||
let latest_version_id = Uuid::new_v4();
|
||||
txn.new_client(latest_version_id)?;
|
||||
txn.new_client(latest_version_id).await?;
|
||||
|
||||
let client = txn.get_client()?.unwrap();
|
||||
let client = txn.get_client().await?.unwrap();
|
||||
assert_eq!(client.latest_version_id, latest_version_id);
|
||||
assert!(client.snapshot.is_none());
|
||||
|
||||
let latest_version_id = Uuid::new_v4();
|
||||
txn.add_version(latest_version_id, Uuid::new_v4(), vec![1, 1])?;
|
||||
let new_version_id = Uuid::new_v4();
|
||||
txn.add_version(new_version_id, latest_version_id, vec![1, 1])
|
||||
.await?;
|
||||
|
||||
let client = txn.get_client()?.unwrap();
|
||||
assert_eq!(client.latest_version_id, latest_version_id);
|
||||
let client = txn.get_client().await?.unwrap();
|
||||
assert_eq!(client.latest_version_id, new_version_id);
|
||||
assert!(client.snapshot.is_none());
|
||||
|
||||
let snap = Snapshot {
|
||||
@ -338,37 +356,40 @@ mod test {
|
||||
timestamp: "2014-11-28T12:00:09Z".parse::<DateTime<Utc>>().unwrap(),
|
||||
versions_since: 4,
|
||||
};
|
||||
txn.set_snapshot(snap.clone(), vec![1, 2, 3])?;
|
||||
txn.set_snapshot(snap.clone(), vec![1, 2, 3]).await?;
|
||||
|
||||
let client = txn.get_client()?.unwrap();
|
||||
assert_eq!(client.latest_version_id, latest_version_id);
|
||||
let client = txn.get_client().await?.unwrap();
|
||||
assert_eq!(client.latest_version_id, new_version_id);
|
||||
assert_eq!(client.snapshot.unwrap(), snap);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_gvbp_empty() -> anyhow::Result<()> {
|
||||
#[tokio::test]
|
||||
async fn test_gvbp_empty() -> anyhow::Result<()> {
|
||||
let tmp_dir = TempDir::new()?;
|
||||
let storage = SqliteStorage::new(tmp_dir.path())?;
|
||||
let client_id = Uuid::new_v4();
|
||||
let mut txn = storage.txn(client_id)?;
|
||||
let maybe_version = txn.get_version_by_parent(Uuid::new_v4())?;
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
let maybe_version = txn.get_version_by_parent(Uuid::new_v4()).await?;
|
||||
assert!(maybe_version.is_none());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_add_version_and_get_version() -> anyhow::Result<()> {
|
||||
#[tokio::test]
|
||||
async fn test_add_version_and_get_version() -> anyhow::Result<()> {
|
||||
let tmp_dir = TempDir::new()?;
|
||||
let storage = SqliteStorage::new(tmp_dir.path())?;
|
||||
let client_id = Uuid::new_v4();
|
||||
let mut txn = storage.txn(client_id)?;
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
|
||||
let parent_version_id = Uuid::new_v4();
|
||||
txn.new_client(parent_version_id).await?;
|
||||
|
||||
let version_id = Uuid::new_v4();
|
||||
let parent_version_id = Uuid::new_v4();
|
||||
let history_segment = b"abc".to_vec();
|
||||
txn.add_version(version_id, parent_version_id, history_segment.clone())?;
|
||||
txn.add_version(version_id, parent_version_id, history_segment.clone())
|
||||
.await?;
|
||||
|
||||
let expected = Version {
|
||||
version_id,
|
||||
@ -376,71 +397,114 @@ mod test {
|
||||
history_segment,
|
||||
};
|
||||
|
||||
let version = txn.get_version_by_parent(parent_version_id)?.unwrap();
|
||||
let version = txn.get_version_by_parent(parent_version_id).await?.unwrap();
|
||||
assert_eq!(version, expected);
|
||||
|
||||
let version = txn.get_version(version_id)?.unwrap();
|
||||
let version = txn.get_version(version_id).await?.unwrap();
|
||||
assert_eq!(version, expected);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_add_version_exists() -> anyhow::Result<()> {
|
||||
#[tokio::test]
|
||||
async fn test_add_version_exists() -> anyhow::Result<()> {
|
||||
let tmp_dir = TempDir::new()?;
|
||||
let storage = SqliteStorage::new(tmp_dir.path())?;
|
||||
let client_id = Uuid::new_v4();
|
||||
let mut txn = storage.txn(client_id)?;
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
|
||||
let parent_version_id = Uuid::new_v4();
|
||||
txn.new_client(parent_version_id).await?;
|
||||
|
||||
let version_id = Uuid::new_v4();
|
||||
let parent_version_id = Uuid::new_v4();
|
||||
let history_segment = b"abc".to_vec();
|
||||
txn.add_version(version_id, parent_version_id, history_segment.clone())?;
|
||||
txn.add_version(version_id, parent_version_id, history_segment.clone())
|
||||
.await?;
|
||||
// Fails because the version already exists.
|
||||
assert!(txn
|
||||
.add_version(version_id, parent_version_id, history_segment.clone())
|
||||
.await
|
||||
.is_err());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_snapshots() -> anyhow::Result<()> {
|
||||
#[tokio::test]
|
||||
async fn test_add_version_mismatch() -> anyhow::Result<()> {
|
||||
let tmp_dir = TempDir::new()?;
|
||||
let storage = SqliteStorage::new(tmp_dir.path())?;
|
||||
let client_id = Uuid::new_v4();
|
||||
let mut txn = storage.txn(client_id)?;
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
|
||||
txn.new_client(Uuid::new_v4())?;
|
||||
assert!(txn.get_client()?.unwrap().snapshot.is_none());
|
||||
let latest_version_id = Uuid::new_v4();
|
||||
txn.new_client(latest_version_id).await?;
|
||||
|
||||
let version_id = Uuid::new_v4();
|
||||
let parent_version_id = Uuid::new_v4(); // != latest_version_id
|
||||
let history_segment = b"abc".to_vec();
|
||||
// Fails because the latest_version_id is not parent_version_id.
|
||||
assert!(txn
|
||||
.add_version(version_id, parent_version_id, history_segment.clone())
|
||||
.await
|
||||
.is_err());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_snapshots() -> anyhow::Result<()> {
|
||||
let tmp_dir = TempDir::new()?;
|
||||
let storage = SqliteStorage::new(tmp_dir.path())?;
|
||||
let client_id = Uuid::new_v4();
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
|
||||
txn.new_client(Uuid::new_v4()).await?;
|
||||
assert!(txn.get_client().await?.unwrap().snapshot.is_none());
|
||||
|
||||
let snap = Snapshot {
|
||||
version_id: Uuid::new_v4(),
|
||||
timestamp: "2013-10-08T12:00:09Z".parse::<DateTime<Utc>>().unwrap(),
|
||||
versions_since: 3,
|
||||
};
|
||||
txn.set_snapshot(snap.clone(), vec![9, 8, 9])?;
|
||||
txn.set_snapshot(snap.clone(), vec![9, 8, 9]).await?;
|
||||
|
||||
assert_eq!(
|
||||
txn.get_snapshot_data(snap.version_id)?.unwrap(),
|
||||
txn.get_snapshot_data(snap.version_id).await?.unwrap(),
|
||||
vec![9, 8, 9]
|
||||
);
|
||||
assert_eq!(txn.get_client()?.unwrap().snapshot, Some(snap));
|
||||
assert_eq!(txn.get_client().await?.unwrap().snapshot, Some(snap));
|
||||
|
||||
let snap2 = Snapshot {
|
||||
version_id: Uuid::new_v4(),
|
||||
timestamp: "2014-11-28T12:00:09Z".parse::<DateTime<Utc>>().unwrap(),
|
||||
versions_since: 10,
|
||||
};
|
||||
txn.set_snapshot(snap2.clone(), vec![0, 2, 4, 6])?;
|
||||
txn.set_snapshot(snap2.clone(), vec![0, 2, 4, 6]).await?;
|
||||
|
||||
assert_eq!(
|
||||
txn.get_snapshot_data(snap2.version_id)?.unwrap(),
|
||||
txn.get_snapshot_data(snap2.version_id).await?.unwrap(),
|
||||
vec![0, 2, 4, 6]
|
||||
);
|
||||
assert_eq!(txn.get_client()?.unwrap().snapshot, Some(snap2));
|
||||
assert_eq!(txn.get_client().await?.unwrap().snapshot, Some(snap2));
|
||||
|
||||
// check that mismatched version is detected
|
||||
assert!(txn.get_snapshot_data(Uuid::new_v4()).is_err());
|
||||
assert!(txn.get_snapshot_data(Uuid::new_v4()).await.is_err());
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
/// When an add_version call specifies a `parent_version_id` that does not exist in the
|
||||
/// DB, but no other versions exist, the call succeeds.
|
||||
async fn test_add_version_no_history() -> anyhow::Result<()> {
|
||||
let tmp_dir = TempDir::new()?;
|
||||
let storage = SqliteStorage::new(tmp_dir.path())?;
|
||||
let client_id = Uuid::new_v4();
|
||||
let mut txn = storage.txn(client_id).await?;
|
||||
txn.new_client(Uuid::nil()).await?;
|
||||
|
||||
let version_id = Uuid::new_v4();
|
||||
let parent_version_id = Uuid::new_v4();
|
||||
txn.add_version(version_id, parent_version_id, b"v1".to_vec())
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
@ -2,45 +2,54 @@ use std::thread;
|
||||
use taskchampion_sync_server_core::{Storage, NIL_VERSION_ID};
|
||||
use taskchampion_sync_server_storage_sqlite::SqliteStorage;
|
||||
use tempfile::TempDir;
|
||||
use tokio::runtime;
|
||||
use uuid::Uuid;
|
||||
|
||||
/// Test that calls to `add_version` from different threads maintain sequential consistency.
|
||||
#[test]
|
||||
fn add_version_concurrency() -> anyhow::Result<()> {
|
||||
///
|
||||
/// This uses `std::thread` to ensure actual parallelism, with a different, single-threaded Tokio runtime
|
||||
/// in each thread. Asynchronous concurrency does not actually test consistency.
|
||||
#[tokio::test]
|
||||
async fn add_version_concurrency() -> anyhow::Result<()> {
|
||||
let tmp_dir = TempDir::new()?;
|
||||
let client_id = Uuid::new_v4();
|
||||
|
||||
{
|
||||
let con = SqliteStorage::new(tmp_dir.path())?;
|
||||
let mut txn = con.txn(client_id)?;
|
||||
txn.new_client(NIL_VERSION_ID)?;
|
||||
txn.commit()?;
|
||||
let mut txn = con.txn(client_id).await?;
|
||||
txn.new_client(NIL_VERSION_ID).await?;
|
||||
txn.commit().await?;
|
||||
}
|
||||
|
||||
const N: i32 = 100;
|
||||
const T: i32 = 4;
|
||||
|
||||
// Add N versions to the DB.
|
||||
let add_versions = || {
|
||||
let con = SqliteStorage::new(tmp_dir.path())?;
|
||||
let add_versions = |tmp_dir, client_id| {
|
||||
let rt = runtime::Builder::new_current_thread().build()?;
|
||||
rt.block_on(async {
|
||||
let con = SqliteStorage::new(tmp_dir)?;
|
||||
|
||||
for _ in 0..N {
|
||||
let mut txn = con.txn(client_id)?;
|
||||
let client = txn.get_client()?.unwrap();
|
||||
let version_id = Uuid::new_v4();
|
||||
let parent_version_id = client.latest_version_id;
|
||||
std::thread::yield_now(); // Make failure more likely.
|
||||
txn.add_version(version_id, parent_version_id, b"data".to_vec())?;
|
||||
txn.commit()?;
|
||||
}
|
||||
for _ in 0..N {
|
||||
let mut txn = con.txn(client_id).await?;
|
||||
let client = txn.get_client().await?.unwrap();
|
||||
let version_id = Uuid::new_v4();
|
||||
let parent_version_id = client.latest_version_id;
|
||||
std::thread::yield_now(); // Make failure more likely.
|
||||
txn.add_version(version_id, parent_version_id, b"data".to_vec())
|
||||
.await?;
|
||||
txn.commit().await?;
|
||||
}
|
||||
|
||||
Ok::<_, anyhow::Error>(())
|
||||
Ok::<_, anyhow::Error>(())
|
||||
})
|
||||
};
|
||||
|
||||
thread::scope(|s| {
|
||||
// Spawn T threads.
|
||||
for _ in 0..T {
|
||||
s.spawn(add_versions);
|
||||
let tmp_dir = tmp_dir.path();
|
||||
s.spawn(move || add_versions(tmp_dir, client_id));
|
||||
}
|
||||
});
|
||||
|
||||
@ -49,13 +58,16 @@ fn add_version_concurrency() -> anyhow::Result<()> {
|
||||
// same `parent_version_id`.
|
||||
{
|
||||
let con = SqliteStorage::new(tmp_dir.path())?;
|
||||
let mut txn = con.txn(client_id)?;
|
||||
let client = txn.get_client()?.unwrap();
|
||||
let mut txn = con.txn(client_id).await?;
|
||||
let client = txn.get_client().await?.unwrap();
|
||||
|
||||
let mut n = 0;
|
||||
let mut version_id = client.latest_version_id;
|
||||
while version_id != NIL_VERSION_ID {
|
||||
let version = txn.get_version(version_id)?.expect("version should exist");
|
||||
let version = txn
|
||||
.get_version(version_id)
|
||||
.await?
|
||||
.expect("version should exist");
|
||||
n += 1;
|
||||
version_id = version.parent_version_id;
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user