Compare commits
144 Commits
security-b
...
win-servic
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
64a07e0ed6 | ||
|
|
f245a6575d | ||
|
|
e3050bc5ff | ||
|
|
d593eedf01 | ||
|
|
2fb5bb3d84 | ||
|
|
86052c9350 | ||
|
|
e5be55e141 | ||
|
|
8f0eb7130b | ||
|
|
94fe04a6a4 | ||
|
|
976c11902c | ||
|
|
c8d2662a36 | ||
|
|
ac5fedddd1 | ||
|
|
0c2d4986a2 | ||
|
|
a3203936d2 | ||
|
|
fb1c0ec130 | ||
|
|
2a21758369 | ||
|
|
1abb5fa006 | ||
|
|
e1b1c857fa | ||
|
|
4216007af3 | ||
|
|
bbf8a8019c | ||
|
|
ac04495480 | ||
|
|
eb25d31361 | ||
|
|
056ff3470b | ||
|
|
c0b08e84cc | ||
|
|
ddd6e7910f | ||
|
|
d9b3694cab | ||
|
|
4ebe7b6fc4 | ||
|
|
8043cdf8d8 | ||
| 2148faa376 | |||
|
|
eb37ee0a0c | ||
|
|
1f07fd6a98 | ||
|
|
e135519c06 | ||
|
|
f015d345f4 | ||
|
|
51674bb39c | ||
|
|
cd07ab7a78 | ||
|
|
cfa6e068eb | ||
|
|
784261f4d8 | ||
|
|
971db0e919 | ||
|
|
e1a8553142 | ||
|
|
ec70561c93 | ||
|
|
3993d3a8cc | ||
|
|
c87456ae2f | ||
|
|
e89983de3a | ||
|
|
f56668d9f6 | ||
|
|
434738bae5 | ||
|
|
915540de32 | ||
|
|
5a5008080a | ||
|
|
3bc423f9b2 | ||
|
|
f2c33a5bf4 | ||
|
|
3e8b26418a | ||
|
|
60ce1cc110 | ||
|
|
2ff4d0961c | ||
|
|
d61dab3285 | ||
|
|
c439c9645d | ||
|
|
c2883704e6 | ||
| 47caec38a6 | |||
|
|
77c3babec7 | ||
|
|
6f03ce4d1d | ||
|
|
712f114763 | ||
|
|
c56184d30b | ||
|
|
9017ea4017 | ||
|
|
088fa6fe72 | ||
|
|
c90af9c196 | ||
|
|
a5a9bc73b0 | ||
|
|
6ed8150e48 | ||
|
|
fac312d860 | ||
|
|
549a0f5f52 | ||
|
|
4db102b3d1 | ||
|
|
c61a9e30ac | ||
|
|
27836beb75 | ||
|
|
099f76166e | ||
|
|
66026e903a | ||
|
|
3360d3c8c7 | ||
|
|
02980468db | ||
|
|
ec0e8a980c | ||
|
|
16d5b9a233 | ||
|
|
62c4bc5ade | ||
|
|
ccd657c9ec | ||
|
|
013af7e65f | ||
| 84978afd58 | |||
|
|
4cb5b303dc | ||
| 8fde3cec41 | |||
| 17ac195c5d | |||
| c1c5d14133 | |||
| 47144bdf81 | |||
| 42760bbd79 | |||
| d29bca853b | |||
| f8d27a1454 | |||
| 6030f30901 | |||
| a3c401194f | |||
|
|
6386510f52 | ||
| ec36e5c2ea | |||
|
|
ba86d18250 | ||
|
|
606a1f3774 | ||
|
|
b3a67ffc00 | ||
|
|
168290040c | ||
|
|
2b27da224e | ||
|
|
9e92b168ba | ||
|
|
bd159c35e8 | ||
|
|
b3e378b5fc | ||
|
|
b7c4f2e735 | ||
|
|
4a5dd3eea7 | ||
|
|
5af6d8dd9c | ||
|
|
5dfe390ac3 | ||
|
|
43c7b211c3 | ||
|
|
c5f9cfcaa0 | ||
|
|
67fce6f06a | ||
|
|
191b126462 | ||
|
|
cb05407bb6 | ||
| 4beb34764d | |||
|
|
4b4a8f4489 | ||
|
|
54d0fe0505 | ||
|
|
06f4d628db | ||
|
|
657f47e32f | ||
|
|
86f8feb291 | ||
|
|
6deec731e2 | ||
|
|
f5a5c62181 | ||
|
|
b8afd94b21 | ||
|
|
7b57965952 | ||
|
|
9dca7aff27 | ||
|
|
4d1f047baf | ||
|
|
925c7a211f | ||
|
|
d81120f59c | ||
|
|
e118eceb85 | ||
|
|
4a84fe9339 | ||
|
|
c6e13dc476 | ||
|
|
8f5d4cc385 | ||
|
|
2ffd60973d | ||
|
|
08af101b2e | ||
|
|
bb58868333 | ||
|
|
b05cdeec66 | ||
|
|
9ec465706a | ||
|
|
46a3c1768c | ||
|
|
6c8a67c520 | ||
|
|
bbaed3fb97 | ||
|
|
4700bc407e | ||
|
|
281fbcb31d | ||
|
|
a55221573b | ||
|
|
45acb45a05 | ||
|
|
11f1caa6da | ||
|
|
f769c9119b | ||
|
|
1145642255 | ||
|
|
9f33277a4f | ||
|
|
0a8e1dce3f |
11
.claude/memory/feedback_widget_decomposition.md
Normal file
11
.claude/memory/feedback_widget_decomposition.md
Normal file
@@ -0,0 +1,11 @@
|
||||
---
|
||||
name: Widget decomposition and provider subscriptions
|
||||
description: Prefer splitting screens into multiple focused files/widgets; each widget subscribes to its own relevant providers
|
||||
type: feedback
|
||||
---
|
||||
|
||||
Split screens into multiple smaller widgets across multiple files. Each widget should subscribe only to the providers it needs (`ref.watch` at lowest possible level), rather than having one large screen widget that watches everything and passes data down as parameters.
|
||||
|
||||
**Why:** Reduces unnecessary rebuilds; improves readability; each file has one clear responsibility.
|
||||
|
||||
**How to apply:** When building a new screen, identify which sub-widgets need their own provider subscriptions and extract them into separate files (e.g., `widgets/grant_card.dart` watches enrichment providers itself, rather than the screen doing it and passing resolved strings down).
|
||||
7
.gitignore
vendored
7
.gitignore
vendored
@@ -1 +1,6 @@
|
||||
target/
|
||||
target/
|
||||
scripts/__pycache__/
|
||||
.DS_Store
|
||||
.cargo/config.toml
|
||||
.vscode/
|
||||
docs/
|
||||
|
||||
@@ -8,7 +8,7 @@ when:
|
||||
include: ['.woodpecker/server-*.yaml', 'server/**']
|
||||
|
||||
steps:
|
||||
- name: test
|
||||
- name: audit
|
||||
image: jdxcode/mise:latest
|
||||
directory: server
|
||||
environment:
|
||||
|
||||
25
.woodpecker/server-lint.yaml
Normal file
25
.woodpecker/server-lint.yaml
Normal file
@@ -0,0 +1,25 @@
|
||||
when:
|
||||
- event: pull_request
|
||||
path:
|
||||
include: ['.woodpecker/server-*.yaml', 'server/**']
|
||||
- event: push
|
||||
branch: main
|
||||
path:
|
||||
include: ['.woodpecker/server-*.yaml', 'server/**']
|
||||
|
||||
steps:
|
||||
- name: lint
|
||||
image: jdxcode/mise:latest
|
||||
directory: server
|
||||
environment:
|
||||
CARGO_TERM_COLOR: always
|
||||
CARGO_TARGET_DIR: /usr/local/cargo/target
|
||||
CARGO_HOME: /usr/local/cargo/registry
|
||||
volumes:
|
||||
- cargo-target:/usr/local/cargo/target
|
||||
- cargo-registry:/usr/local/cargo/registry
|
||||
commands:
|
||||
- apt-get update && apt-get install -y pkg-config
|
||||
- mise install rust
|
||||
- mise install protoc
|
||||
- mise exec rust -- cargo clippy --all -- -D warnings
|
||||
@@ -8,7 +8,7 @@ when:
|
||||
include: ['.woodpecker/server-*.yaml', 'server/**']
|
||||
|
||||
steps:
|
||||
- name: test
|
||||
- name: vet
|
||||
image: jdxcode/mise:latest
|
||||
directory: server
|
||||
environment:
|
||||
|
||||
18
.woodpecker/useragent-analyze.yaml
Normal file
18
.woodpecker/useragent-analyze.yaml
Normal file
@@ -0,0 +1,18 @@
|
||||
when:
|
||||
- event: pull_request
|
||||
path:
|
||||
include: ['.woodpecker/useragent-*.yaml', 'useragent/**']
|
||||
- event: push
|
||||
branch: main
|
||||
path:
|
||||
include: ['.woodpecker/useragent-*.yaml', 'useragent/**']
|
||||
|
||||
steps:
|
||||
- name: analyze
|
||||
image: jdxcode/mise:latest
|
||||
commands:
|
||||
- mise install flutter
|
||||
- mise install protoc
|
||||
# Reruns codegen to catch protocol drift
|
||||
- mise codegen
|
||||
- cd useragent/ && flutter analyze
|
||||
128
AGENTS.md
Normal file
128
AGENTS.md
Normal file
@@ -0,0 +1,128 @@
|
||||
# AGENTS.md
|
||||
|
||||
This file provides guidance to Codex (Codex.ai/code) when working with code in this repository.
|
||||
|
||||
## Project Overview
|
||||
|
||||
Arbiter is a **permissioned signing service** for cryptocurrency wallets. It consists of:
|
||||
- **`server/`** — Rust gRPC daemon that holds encrypted keys and enforces policies
|
||||
- **`useragent/`** — Flutter desktop app (macOS/Windows) with a Rust backend via Rinf
|
||||
- **`protobufs/`** — Protocol Buffer definitions shared between server and client
|
||||
|
||||
The vault never exposes key material; it only produces signatures when requests satisfy configured policies.
|
||||
|
||||
## Toolchain Setup
|
||||
|
||||
Tools are managed via [mise](https://mise.jdx.dev/). Install all required tools:
|
||||
```sh
|
||||
mise install
|
||||
```
|
||||
|
||||
Key versions: Rust 1.93.0 (with clippy), Flutter 3.38.9-stable, protoc 29.6, diesel_cli 2.3.6 (sqlite).
|
||||
|
||||
## Server (Rust workspace at `server/`)
|
||||
|
||||
### Crates
|
||||
|
||||
| Crate | Purpose |
|
||||
|---|---|
|
||||
| `arbiter-proto` | Generated gRPC stubs + protobuf types; compiled from `protobufs/*.proto` via `tonic-prost-build` |
|
||||
| `arbiter-server` | Main daemon — actors, DB, EVM policy engine, gRPC service implementation |
|
||||
| `arbiter-useragent` | Rust client library for the user agent side of the gRPC protocol |
|
||||
| `arbiter-client` | Rust client library for SDK clients |
|
||||
|
||||
### Common Commands
|
||||
|
||||
```sh
|
||||
cd server
|
||||
|
||||
# Build
|
||||
cargo build
|
||||
|
||||
# Run the server daemon
|
||||
cargo run -p arbiter-server
|
||||
|
||||
# Run all tests (preferred over cargo test)
|
||||
cargo nextest run
|
||||
|
||||
# Run a single test
|
||||
cargo nextest run <test_name>
|
||||
|
||||
# Lint
|
||||
cargo clippy
|
||||
|
||||
# Security audit
|
||||
cargo audit
|
||||
|
||||
# Check unused dependencies
|
||||
cargo shear
|
||||
|
||||
# Run snapshot tests and update snapshots
|
||||
cargo insta review
|
||||
```
|
||||
|
||||
### Architecture
|
||||
|
||||
The server is actor-based using the **kameo** crate. All long-lived state lives in `GlobalActors`:
|
||||
|
||||
- **`Bootstrapper`** — Manages the one-time bootstrap token written to `~/.arbiter/bootstrap_token` on first run.
|
||||
- **`KeyHolder`** — Holds the encrypted root key and manages the Sealed/Unsealed vault state machine. On unseal, decrypts the root key into a `memsafe` hardened memory cell.
|
||||
- **`FlowCoordinator`** — Coordinates cross-connection flow between user agents and SDK clients.
|
||||
- **`EvmActor`** — Handles EVM transaction policy enforcement and signing.
|
||||
|
||||
Per-connection actors live under `actors/user_agent/` and `actors/client/`, each with `auth` (challenge-response authentication) and `session` (post-auth operations) sub-modules.
|
||||
|
||||
**Database:** SQLite via `diesel-async` + `bb8` connection pool. Schema managed by embedded Diesel migrations in `crates/arbiter-server/migrations/`. DB file lives at `~/.arbiter/arbiter.sqlite`. Tests use a temp-file DB via `db::create_test_pool()`.
|
||||
|
||||
**Cryptography:**
|
||||
- Authentication: ed25519 (challenge-response, nonce-tracked per peer)
|
||||
- Encryption at rest: XChaCha20-Poly1305 (versioned via `scheme` field for transparent migration on unseal)
|
||||
- Password KDF: Argon2
|
||||
- Unseal transport: X25519 ephemeral key exchange
|
||||
- TLS: self-signed certificate (aws-lc-rs backend), fingerprint distributed via `ArbiterUrl`
|
||||
|
||||
**Protocol:** gRPC with Protocol Buffers. The `ArbiterUrl` type encodes host, port, CA cert, and bootstrap token into a single shareable string (printed to console on first run).
|
||||
|
||||
### Proto Regeneration
|
||||
|
||||
When `.proto` files in `protobufs/` change, rebuild to regenerate:
|
||||
```sh
|
||||
cd server && cargo build -p arbiter-proto
|
||||
```
|
||||
|
||||
### Database Migrations
|
||||
|
||||
```sh
|
||||
# Create a new migration
|
||||
diesel migration generate <name> --migration-dir crates/arbiter-server/migrations
|
||||
|
||||
# Run migrations manually (server also runs them on startup)
|
||||
diesel migration run --migration-dir crates/arbiter-server/migrations
|
||||
```
|
||||
|
||||
## User Agent (Flutter + Rinf at `useragent/`)
|
||||
|
||||
The Flutter app uses [Rinf](https://rinf.cunarist.org) to call Rust code. The Rust logic lives in `useragent/native/hub/` as a separate crate that uses `arbiter-useragent` for the gRPC client.
|
||||
|
||||
Communication between Dart and Rust uses typed **signals** defined in `useragent/native/hub/src/signals/`. After modifying signal structs, regenerate Dart bindings:
|
||||
|
||||
```sh
|
||||
cd useragent && rinf gen
|
||||
```
|
||||
|
||||
### Common Commands
|
||||
|
||||
```sh
|
||||
cd useragent
|
||||
|
||||
# Run the app (macOS or Windows)
|
||||
flutter run
|
||||
|
||||
# Regenerate Rust↔Dart signal bindings
|
||||
rinf gen
|
||||
|
||||
# Analyze Dart code
|
||||
flutter analyze
|
||||
```
|
||||
|
||||
The Rinf Rust entry point is `useragent/native/hub/src/lib.rs`. It spawns actors defined in `useragent/native/hub/src/actors/` which handle Dart↔server communication via signals.
|
||||
@@ -3,7 +3,6 @@
|
||||
Arbiter is a permissioned signing service for cryptocurrency wallets. It runs as a background service on the user's machine with an optional client application for vault management.
|
||||
|
||||
**Core principle:** The vault NEVER exposes key material. It only produces signatures when a request satisfies the configured policies.
|
||||
|
||||
---
|
||||
|
||||
## 1. Peer Types
|
||||
|
||||
128
CLAUDE.md
Normal file
128
CLAUDE.md
Normal file
@@ -0,0 +1,128 @@
|
||||
# CLAUDE.md
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
|
||||
## Project Overview
|
||||
|
||||
Arbiter is a **permissioned signing service** for cryptocurrency wallets. It consists of:
|
||||
- **`server/`** — Rust gRPC daemon that holds encrypted keys and enforces policies
|
||||
- **`useragent/`** — Flutter desktop app (macOS/Windows) with a Rust backend via Rinf
|
||||
- **`protobufs/`** — Protocol Buffer definitions shared between server and client
|
||||
|
||||
The vault never exposes key material; it only produces signatures when requests satisfy configured policies.
|
||||
|
||||
## Toolchain Setup
|
||||
|
||||
Tools are managed via [mise](https://mise.jdx.dev/). Install all required tools:
|
||||
```sh
|
||||
mise install
|
||||
```
|
||||
|
||||
Key versions: Rust 1.93.0 (with clippy), Flutter 3.38.9-stable, protoc 29.6, diesel_cli 2.3.6 (sqlite).
|
||||
|
||||
## Server (Rust workspace at `server/`)
|
||||
|
||||
### Crates
|
||||
|
||||
| Crate | Purpose |
|
||||
|---|---|
|
||||
| `arbiter-proto` | Generated gRPC stubs + protobuf types; compiled from `protobufs/*.proto` via `tonic-prost-build` |
|
||||
| `arbiter-server` | Main daemon — actors, DB, EVM policy engine, gRPC service implementation |
|
||||
| `arbiter-useragent` | Rust client library for the user agent side of the gRPC protocol |
|
||||
| `arbiter-client` | Rust client library for SDK clients |
|
||||
|
||||
### Common Commands
|
||||
|
||||
```sh
|
||||
cd server
|
||||
|
||||
# Build
|
||||
cargo build
|
||||
|
||||
# Run the server daemon
|
||||
cargo run -p arbiter-server
|
||||
|
||||
# Run all tests (preferred over cargo test)
|
||||
cargo nextest run
|
||||
|
||||
# Run a single test
|
||||
cargo nextest run <test_name>
|
||||
|
||||
# Lint
|
||||
cargo clippy
|
||||
|
||||
# Security audit
|
||||
cargo audit
|
||||
|
||||
# Check unused dependencies
|
||||
cargo shear
|
||||
|
||||
# Run snapshot tests and update snapshots
|
||||
cargo insta review
|
||||
```
|
||||
|
||||
### Architecture
|
||||
|
||||
The server is actor-based using the **kameo** crate. All long-lived state lives in `GlobalActors`:
|
||||
|
||||
- **`Bootstrapper`** — Manages the one-time bootstrap token written to `~/.arbiter/bootstrap_token` on first run.
|
||||
- **`KeyHolder`** — Holds the encrypted root key and manages the Sealed/Unsealed vault state machine. On unseal, decrypts the root key into a `memsafe` hardened memory cell.
|
||||
- **`FlowCoordinator`** — Coordinates cross-connection flow between user agents and SDK clients.
|
||||
- **`EvmActor`** — Handles EVM transaction policy enforcement and signing.
|
||||
|
||||
Per-connection actors live under `actors/user_agent/` and `actors/client/`, each with `auth` (challenge-response authentication) and `session` (post-auth operations) sub-modules.
|
||||
|
||||
**Database:** SQLite via `diesel-async` + `bb8` connection pool. Schema managed by embedded Diesel migrations in `crates/arbiter-server/migrations/`. DB file lives at `~/.arbiter/arbiter.sqlite`. Tests use a temp-file DB via `db::create_test_pool()`.
|
||||
|
||||
**Cryptography:**
|
||||
- Authentication: ed25519 (challenge-response, nonce-tracked per peer)
|
||||
- Encryption at rest: XChaCha20-Poly1305 (versioned via `scheme` field for transparent migration on unseal)
|
||||
- Password KDF: Argon2
|
||||
- Unseal transport: X25519 ephemeral key exchange
|
||||
- TLS: self-signed certificate (aws-lc-rs backend), fingerprint distributed via `ArbiterUrl`
|
||||
|
||||
**Protocol:** gRPC with Protocol Buffers. The `ArbiterUrl` type encodes host, port, CA cert, and bootstrap token into a single shareable string (printed to console on first run).
|
||||
|
||||
### Proto Regeneration
|
||||
|
||||
When `.proto` files in `protobufs/` change, rebuild to regenerate:
|
||||
```sh
|
||||
cd server && cargo build -p arbiter-proto
|
||||
```
|
||||
|
||||
### Database Migrations
|
||||
|
||||
```sh
|
||||
# Create a new migration
|
||||
diesel migration generate <name> --migration-dir crates/arbiter-server/migrations
|
||||
|
||||
# Run migrations manually (server also runs them on startup)
|
||||
diesel migration run --migration-dir crates/arbiter-server/migrations
|
||||
```
|
||||
|
||||
## User Agent (Flutter + Rinf at `useragent/`)
|
||||
|
||||
The Flutter app uses [Rinf](https://rinf.cunarist.org) to call Rust code. The Rust logic lives in `useragent/native/hub/` as a separate crate that uses `arbiter-useragent` for the gRPC client.
|
||||
|
||||
Communication between Dart and Rust uses typed **signals** defined in `useragent/native/hub/src/signals/`. After modifying signal structs, regenerate Dart bindings:
|
||||
|
||||
```sh
|
||||
cd useragent && rinf gen
|
||||
```
|
||||
|
||||
### Common Commands
|
||||
|
||||
```sh
|
||||
cd useragent
|
||||
|
||||
# Run the app (macOS or Windows)
|
||||
flutter run
|
||||
|
||||
# Regenerate Rust↔Dart signal bindings
|
||||
rinf gen
|
||||
|
||||
# Analyze Dart code
|
||||
flutter analyze
|
||||
```
|
||||
|
||||
The Rinf Rust entry point is `useragent/native/hub/src/lib.rs`. It spawns actors defined in `useragent/native/hub/src/actors/` which handle Dart↔server communication via signals.
|
||||
@@ -4,6 +4,66 @@ This document covers concrete technology choices and dependencies. For the archi
|
||||
|
||||
---
|
||||
|
||||
## Client Connection Flow
|
||||
|
||||
### Authentication Result Semantics
|
||||
|
||||
Authentication no longer uses an implicit success-only response shape. Both `client` and `user-agent` return explicit auth status enums over the wire.
|
||||
|
||||
- **Client:** `AuthResult` may return `SUCCESS`, `INVALID_KEY`, `INVALID_SIGNATURE`, `APPROVAL_DENIED`, `NO_USER_AGENTS_ONLINE`, or `INTERNAL`
|
||||
- **User-agent:** `AuthResult` may return `SUCCESS`, `INVALID_KEY`, `INVALID_SIGNATURE`, `BOOTSTRAP_REQUIRED`, `TOKEN_INVALID`, or `INTERNAL`
|
||||
|
||||
This makes transport-level failures and actor/domain-level auth failures distinct:
|
||||
|
||||
- **Transport/protocol failures** are surfaced as stream/status errors
|
||||
- **Authentication failures** are surfaced as successful protocol responses carrying an explicit auth status
|
||||
|
||||
Clients are expected to handle these status codes directly and present the concrete failure reason to the user.
|
||||
|
||||
### New Client Approval
|
||||
|
||||
When a client whose public key is not yet in the database connects, all connected user agents are asked to approve the connection. The first agent to respond determines the outcome; remaining requests are cancelled via a watch channel.
|
||||
|
||||
```mermaid
|
||||
flowchart TD
|
||||
A([Client connects]) --> B[Receive AuthChallengeRequest]
|
||||
B --> C{pubkey in DB?}
|
||||
|
||||
C -- yes --> D[Read nonce\nIncrement nonce in DB]
|
||||
D --> G
|
||||
|
||||
C -- no --> E[Ask all UserAgents:\nClientConnectionRequest]
|
||||
E --> F{First response}
|
||||
F -- denied --> Z([Reject connection])
|
||||
F -- approved --> F2[Cancel remaining\nUserAgent requests]
|
||||
F2 --> F3[INSERT client\nnonce = 1]
|
||||
F3 --> G[Send AuthChallenge\nwith nonce]
|
||||
|
||||
G --> H[Receive AuthChallengeSolution]
|
||||
H --> I{Signature valid?}
|
||||
I -- no --> Z
|
||||
I -- yes --> J([Session started])
|
||||
```
|
||||
|
||||
### Known Issue: Concurrent Registration Race (TOCTOU)
|
||||
|
||||
Two connections presenting the same previously-unknown public key can race through the approval flow simultaneously:
|
||||
|
||||
1. Both check the DB → neither is registered.
|
||||
2. Both request approval from user agents → both receive approval.
|
||||
3. Both `INSERT` the client record → the second insert silently overwrites the first, resetting the nonce.
|
||||
|
||||
This means the first connection's nonce is invalidated by the second, causing its challenge verification to fail. A fix requires either serialising new-client registration (e.g. an in-memory lock keyed on pubkey) or replacing the separate check + insert with an `INSERT OR IGNORE` / upsert guarded by a unique constraint on `public_key`.
|
||||
|
||||
### Nonce Semantics
|
||||
|
||||
The `program_client.nonce` column stores the **next usable nonce** — i.e. it is always one ahead of the nonce last issued in a challenge.
|
||||
|
||||
- **New client:** inserted with `nonce = 1`; the first challenge is issued with `nonce = 0`.
|
||||
- **Existing client:** the current DB value is read and used as the challenge nonce, then immediately incremented within the same exclusive transaction, preventing replay.
|
||||
|
||||
---
|
||||
|
||||
## Cryptography
|
||||
|
||||
### Authentication
|
||||
@@ -22,9 +82,97 @@ This document covers concrete technology choices and dependencies. For the archi
|
||||
## Communication
|
||||
|
||||
- **Protocol:** gRPC with Protocol Buffers
|
||||
- **Request/response matching:** multiplexed over a single bidirectional stream using per-connection request IDs
|
||||
- **Server identity distribution:** `ServerInfo` protobuf struct containing the TLS public key fingerprint
|
||||
- **Future consideration:** grpc-web lacks bidirectional stream support, so a browser-based wallet may require protojson over WebSocket
|
||||
|
||||
### Request Multiplexing
|
||||
|
||||
Both `client` and `user-agent` connections support multiple in-flight requests over one gRPC bidi stream.
|
||||
|
||||
- Every request carries a monotonically increasing request ID
|
||||
- Every normal response echoes the request ID it corresponds to
|
||||
- Out-of-band server messages omit the response ID entirely
|
||||
- The server rejects already-seen request IDs at the transport adapter boundary before business logic sees the message
|
||||
|
||||
This keeps request correlation entirely in transport/client connection code while leaving actor and domain handlers unaware of request IDs.
|
||||
|
||||
---
|
||||
|
||||
## EVM Policy Engine
|
||||
|
||||
### Overview
|
||||
|
||||
The EVM engine classifies incoming transactions, enforces grant constraints, and records executions. It is the sole path through which a wallet key is used for signing.
|
||||
|
||||
The central abstraction is the `Policy` trait. Each implementation handles one semantic transaction category and owns its own database tables for grant storage and transaction logging.
|
||||
|
||||
### Transaction Evaluation Flow
|
||||
|
||||
`Engine::evaluate_transaction` runs the following steps in order:
|
||||
|
||||
1. **Classify** — Each registered policy's `analyze(context)` inspects the transaction fields (`chain`, `to`, `value`, `calldata`). The first one returning `Some(meaning)` wins. If none match, the transaction is rejected as `UnsupportedTransactionType`.
|
||||
2. **Find grant** — `Policy::try_find_grant` queries for a non-revoked grant covering this wallet, client, chain, and target address.
|
||||
3. **Check shared constraints** — `check_shared_constraints` runs in the engine before any policy-specific logic. It enforces the validity window, gas fee caps, and transaction count rate limit (see below).
|
||||
4. **Evaluate** — `Policy::evaluate` checks the decoded meaning against the grant's policy-specific constraints and returns any violations.
|
||||
5. **Record** — If `RunKind::Execution` and there are no violations, the engine writes to `evm_transaction_log` and calls `Policy::record_transaction` for any policy-specific logging (e.g., token transfer volume).
|
||||
|
||||
### Policy Trait
|
||||
|
||||
| Method | Purpose |
|
||||
|---|---|
|
||||
| `analyze` | Pure — classifies a transaction into a typed `Meaning`, or `None` if this policy doesn't apply |
|
||||
| `evaluate` | Checks the `Meaning` against a `Grant`; returns a list of `EvalViolation`s |
|
||||
| `create_grant` | Inserts policy-specific rows; returns the specific grant ID |
|
||||
| `try_find_grant` | Finds a matching non-revoked grant for the given `EvalContext` |
|
||||
| `find_all_grants` | Returns all non-revoked grants (used for listing) |
|
||||
| `record_transaction` | Persists policy-specific data after execution |
|
||||
|
||||
`analyze` and `evaluate` are intentionally separate: classification is pure and cheap, while evaluation may involve DB queries (e.g., fetching past transfer volume).
|
||||
|
||||
### Registered Policies
|
||||
|
||||
**EtherTransfer** — plain ETH transfers (empty calldata)
|
||||
|
||||
- Grant requires: allowlist of recipient addresses + one volumetric rate limit (max ETH over a time window)
|
||||
- Violations: recipient not in allowlist, cumulative ETH volume exceeded
|
||||
|
||||
**TokenTransfer** — ERC-20 `transfer(address,uint256)` calls
|
||||
|
||||
- Recognised by ABI-decoding the `transfer(address,uint256)` selector against a static registry of known token contracts (`arbiter_tokens_registry`)
|
||||
- Grant requires: token contract address, optional recipient restriction, zero or more volumetric rate limits
|
||||
- Violations: recipient mismatch, any volumetric limit exceeded
|
||||
|
||||
### Grant Model
|
||||
|
||||
Every grant has two layers:
|
||||
|
||||
- **Shared (`evm_basic_grant`)** — wallet, chain, validity period, gas fee caps, transaction count rate limit. One row per grant regardless of type.
|
||||
- **Specific** — policy-owned tables (`evm_ether_transfer_grant`, `evm_token_transfer_grant`, etc.) holding type-specific configuration.
|
||||
|
||||
`find_all_grants` uses a `#[diesel::auto_type]` base join between the specific and shared tables, then batch-loads related rows (targets, volume limits) in two additional queries to avoid N+1.
|
||||
|
||||
The engine exposes `list_all_grants` which collects across all policy types into `Vec<Grant<SpecificGrant>>` via a blanket `From<Grant<S>> for Grant<SpecificGrant>` conversion.
|
||||
|
||||
### Shared Constraints (enforced by the engine)
|
||||
|
||||
These are checked centrally in `check_shared_constraints` before policy evaluation:
|
||||
|
||||
| Constraint | Fields | Behaviour |
|
||||
|---|---|---|
|
||||
| Validity window | `valid_from`, `valid_until` | Emits `InvalidTime` if current time is outside the range |
|
||||
| Gas fee cap | `max_gas_fee_per_gas`, `max_priority_fee_per_gas` | Emits `GasLimitExceeded` if either cap is breached |
|
||||
| Tx count rate limit | `rate_limit` (`count` + `window`) | Counts rows in `evm_transaction_log` within the window; emits `RateLimitExceeded` if at or above the limit |
|
||||
|
||||
---
|
||||
|
||||
### Known Limitations
|
||||
|
||||
- **Only EIP-1559 transactions are supported.** Legacy and EIP-2930 types are rejected outright.
|
||||
- **No opaque-calldata (unknown contract) grant type.** The architecture describes a category for unrecognised contracts, but no policy implements it yet. Any transaction that is not a plain ETH transfer or a known ERC-20 transfer is unconditionally rejected.
|
||||
- **Token registry is static.** Tokens are recognised only if they appear in the hard-coded `arbiter_tokens_registry` crate. There is no mechanism to register additional contracts at runtime.
|
||||
- **Nonce management is not implemented.** The architecture lists nonce deduplication as a core responsibility, but no nonce tracking or enforcement exists yet.
|
||||
|
||||
---
|
||||
|
||||
## Memory Protection
|
||||
|
||||
190
LICENSE
Normal file
190
LICENSE
Normal file
@@ -0,0 +1,190 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
Copyright 2026 MarketTakers
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
13
README.md
Normal file
13
README.md
Normal file
@@ -0,0 +1,13 @@
|
||||
# Arbiter
|
||||
> Policy-first multi-client wallet daemon, allowing permissioned transactions across blockchains
|
||||
|
||||
## Security warning
|
||||
Arbiter can't meaningfully protect against host compromise. Potential attack flow:
|
||||
- Attacker steals TLS keys from database
|
||||
- Pretends to be the server; just accepts the user agent's challenge solutions
|
||||
- Pretends to be in a sealed state and performs DH with the client
|
||||
- Steals user password and derives seal key
|
||||
|
||||
While this attack is highly targeted, it's still possible.
|
||||
|
||||
> This software is experimental. Do not use with funds you cannot afford to lose.
|
||||
31
app/.dart_tool/extension_discovery/README.md
Normal file
31
app/.dart_tool/extension_discovery/README.md
Normal file
@@ -0,0 +1,31 @@
|
||||
Extension Discovery Cache
|
||||
=========================
|
||||
|
||||
This folder is used by `package:extension_discovery` to cache lists of
|
||||
packages that contain extensions for other packages.
|
||||
|
||||
DO NOT USE THIS FOLDER
|
||||
----------------------
|
||||
|
||||
* Do not read (or rely on) the contents of this folder.
|
||||
* Do not write to this folder.
|
||||
|
||||
If you're interested in the lists of extensions stored in this folder use the
|
||||
API offered by package `extension_discovery` to get this information.
|
||||
|
||||
If this package doesn't work for your use-case, then don't try to read the
|
||||
contents of this folder. It may change, and will not remain stable.
|
||||
|
||||
Use package `extension_discovery`
|
||||
---------------------------------
|
||||
|
||||
Use it if you want to access information from this folder.
|
||||
|
||||
Feel free to delete this folder
|
||||
-------------------------------
|
||||
|
||||
Files in this folder act as a cache, and the cache is discarded if the files
|
||||
are older than the modification time of `.dart_tool/package_config.json`.
|
||||
|
||||
Hence, it should never be necessary to clear this cache manually, if you find a
|
||||
need to do so, please file a bug.
|
||||
1
app/.dart_tool/extension_discovery/vs_code.json
Normal file
1
app/.dart_tool/extension_discovery/vs_code.json
Normal file
@@ -0,0 +1 @@
|
||||
{"version":2,"entries":[{"package":"app","rootUri":"../","packageUri":"lib/"}]}
|
||||
178
app/.dart_tool/package_config.json
Normal file
178
app/.dart_tool/package_config.json
Normal file
@@ -0,0 +1,178 @@
|
||||
{
|
||||
"configVersion": 2,
|
||||
"packages": [
|
||||
{
|
||||
"name": "async",
|
||||
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/async-2.13.0",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.4"
|
||||
},
|
||||
{
|
||||
"name": "boolean_selector",
|
||||
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/boolean_selector-2.1.2",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.1"
|
||||
},
|
||||
{
|
||||
"name": "characters",
|
||||
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/characters-1.4.0",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.4"
|
||||
},
|
||||
{
|
||||
"name": "clock",
|
||||
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/clock-1.1.2",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.4"
|
||||
},
|
||||
{
|
||||
"name": "collection",
|
||||
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/collection-1.19.1",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.4"
|
||||
},
|
||||
{
|
||||
"name": "cupertino_icons",
|
||||
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/cupertino_icons-1.0.8",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.1"
|
||||
},
|
||||
{
|
||||
"name": "fake_async",
|
||||
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/fake_async-1.3.3",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.3"
|
||||
},
|
||||
{
|
||||
"name": "flutter",
|
||||
"rootUri": "file:///Users/kaska/.local/share/mise/installs/flutter/3.38.9-stable/packages/flutter",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.8"
|
||||
},
|
||||
{
|
||||
"name": "flutter_lints",
|
||||
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/flutter_lints-6.0.0",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.8"
|
||||
},
|
||||
{
|
||||
"name": "flutter_test",
|
||||
"rootUri": "file:///Users/kaska/.local/share/mise/installs/flutter/3.38.9-stable/packages/flutter_test",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.8"
|
||||
},
|
||||
{
|
||||
"name": "leak_tracker",
|
||||
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/leak_tracker-11.0.2",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.2"
|
||||
},
|
||||
{
|
||||
"name": "leak_tracker_flutter_testing",
|
||||
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/leak_tracker_flutter_testing-3.0.10",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.2"
|
||||
},
|
||||
{
|
||||
"name": "leak_tracker_testing",
|
||||
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/leak_tracker_testing-3.0.2",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.2"
|
||||
},
|
||||
{
|
||||
"name": "lints",
|
||||
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/lints-6.1.0",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.8"
|
||||
},
|
||||
{
|
||||
"name": "matcher",
|
||||
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/matcher-0.12.17",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.4"
|
||||
},
|
||||
{
|
||||
"name": "material_color_utilities",
|
||||
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/material_color_utilities-0.11.1",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "2.17"
|
||||
},
|
||||
{
|
||||
"name": "meta",
|
||||
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/meta-1.17.0",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.5"
|
||||
},
|
||||
{
|
||||
"name": "path",
|
||||
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/path-1.9.1",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.4"
|
||||
},
|
||||
{
|
||||
"name": "sky_engine",
|
||||
"rootUri": "file:///Users/kaska/.local/share/mise/installs/flutter/3.38.9-stable/bin/cache/pkg/sky_engine",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.8"
|
||||
},
|
||||
{
|
||||
"name": "source_span",
|
||||
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/source_span-1.10.2",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.1"
|
||||
},
|
||||
{
|
||||
"name": "stack_trace",
|
||||
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/stack_trace-1.12.1",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.4"
|
||||
},
|
||||
{
|
||||
"name": "stream_channel",
|
||||
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/stream_channel-2.1.4",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.3"
|
||||
},
|
||||
{
|
||||
"name": "string_scanner",
|
||||
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/string_scanner-1.4.1",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.1"
|
||||
},
|
||||
{
|
||||
"name": "term_glyph",
|
||||
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/term_glyph-1.2.2",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.1"
|
||||
},
|
||||
{
|
||||
"name": "test_api",
|
||||
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/test_api-0.7.7",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.5"
|
||||
},
|
||||
{
|
||||
"name": "vector_math",
|
||||
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/vector_math-2.2.0",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.1"
|
||||
},
|
||||
{
|
||||
"name": "vm_service",
|
||||
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/vm_service-15.0.2",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.5"
|
||||
},
|
||||
{
|
||||
"name": "app",
|
||||
"rootUri": "../",
|
||||
"packageUri": "lib/",
|
||||
"languageVersion": "3.10"
|
||||
}
|
||||
],
|
||||
"generator": "pub",
|
||||
"generatorVersion": "3.10.8",
|
||||
"flutterRoot": "file:///Users/kaska/.local/share/mise/installs/flutter/3.38.9-stable",
|
||||
"flutterVersion": "3.38.9",
|
||||
"pubCache": "file:///Users/kaska/.pub-cache"
|
||||
}
|
||||
230
app/.dart_tool/package_graph.json
Normal file
230
app/.dart_tool/package_graph.json
Normal file
@@ -0,0 +1,230 @@
|
||||
{
|
||||
"roots": [
|
||||
"app"
|
||||
],
|
||||
"packages": [
|
||||
{
|
||||
"name": "app",
|
||||
"version": "1.0.0+1",
|
||||
"dependencies": [
|
||||
"cupertino_icons",
|
||||
"flutter"
|
||||
],
|
||||
"devDependencies": [
|
||||
"flutter_lints",
|
||||
"flutter_test"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "flutter_lints",
|
||||
"version": "6.0.0",
|
||||
"dependencies": [
|
||||
"lints"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "flutter_test",
|
||||
"version": "0.0.0",
|
||||
"dependencies": [
|
||||
"clock",
|
||||
"collection",
|
||||
"fake_async",
|
||||
"flutter",
|
||||
"leak_tracker_flutter_testing",
|
||||
"matcher",
|
||||
"meta",
|
||||
"path",
|
||||
"stack_trace",
|
||||
"stream_channel",
|
||||
"test_api",
|
||||
"vector_math"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "cupertino_icons",
|
||||
"version": "1.0.8",
|
||||
"dependencies": []
|
||||
},
|
||||
{
|
||||
"name": "flutter",
|
||||
"version": "0.0.0",
|
||||
"dependencies": [
|
||||
"characters",
|
||||
"collection",
|
||||
"material_color_utilities",
|
||||
"meta",
|
||||
"sky_engine",
|
||||
"vector_math"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "lints",
|
||||
"version": "6.1.0",
|
||||
"dependencies": []
|
||||
},
|
||||
{
|
||||
"name": "stream_channel",
|
||||
"version": "2.1.4",
|
||||
"dependencies": [
|
||||
"async"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "meta",
|
||||
"version": "1.17.0",
|
||||
"dependencies": []
|
||||
},
|
||||
{
|
||||
"name": "collection",
|
||||
"version": "1.19.1",
|
||||
"dependencies": []
|
||||
},
|
||||
{
|
||||
"name": "leak_tracker_flutter_testing",
|
||||
"version": "3.0.10",
|
||||
"dependencies": [
|
||||
"flutter",
|
||||
"leak_tracker",
|
||||
"leak_tracker_testing",
|
||||
"matcher",
|
||||
"meta"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "vector_math",
|
||||
"version": "2.2.0",
|
||||
"dependencies": []
|
||||
},
|
||||
{
|
||||
"name": "stack_trace",
|
||||
"version": "1.12.1",
|
||||
"dependencies": [
|
||||
"path"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "clock",
|
||||
"version": "1.1.2",
|
||||
"dependencies": []
|
||||
},
|
||||
{
|
||||
"name": "fake_async",
|
||||
"version": "1.3.3",
|
||||
"dependencies": [
|
||||
"clock",
|
||||
"collection"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "path",
|
||||
"version": "1.9.1",
|
||||
"dependencies": []
|
||||
},
|
||||
{
|
||||
"name": "matcher",
|
||||
"version": "0.12.17",
|
||||
"dependencies": [
|
||||
"async",
|
||||
"meta",
|
||||
"stack_trace",
|
||||
"term_glyph",
|
||||
"test_api"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "test_api",
|
||||
"version": "0.7.7",
|
||||
"dependencies": [
|
||||
"async",
|
||||
"boolean_selector",
|
||||
"collection",
|
||||
"meta",
|
||||
"source_span",
|
||||
"stack_trace",
|
||||
"stream_channel",
|
||||
"string_scanner",
|
||||
"term_glyph"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "sky_engine",
|
||||
"version": "0.0.0",
|
||||
"dependencies": []
|
||||
},
|
||||
{
|
||||
"name": "material_color_utilities",
|
||||
"version": "0.11.1",
|
||||
"dependencies": [
|
||||
"collection"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "characters",
|
||||
"version": "1.4.0",
|
||||
"dependencies": []
|
||||
},
|
||||
{
|
||||
"name": "async",
|
||||
"version": "2.13.0",
|
||||
"dependencies": [
|
||||
"collection",
|
||||
"meta"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "leak_tracker_testing",
|
||||
"version": "3.0.2",
|
||||
"dependencies": [
|
||||
"leak_tracker",
|
||||
"matcher",
|
||||
"meta"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "leak_tracker",
|
||||
"version": "11.0.2",
|
||||
"dependencies": [
|
||||
"clock",
|
||||
"collection",
|
||||
"meta",
|
||||
"path",
|
||||
"vm_service"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "term_glyph",
|
||||
"version": "1.2.2",
|
||||
"dependencies": []
|
||||
},
|
||||
{
|
||||
"name": "string_scanner",
|
||||
"version": "1.4.1",
|
||||
"dependencies": [
|
||||
"source_span"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "source_span",
|
||||
"version": "1.10.2",
|
||||
"dependencies": [
|
||||
"collection",
|
||||
"path",
|
||||
"term_glyph"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "boolean_selector",
|
||||
"version": "2.1.2",
|
||||
"dependencies": [
|
||||
"source_span",
|
||||
"string_scanner"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "vm_service",
|
||||
"version": "15.0.2",
|
||||
"dependencies": []
|
||||
}
|
||||
],
|
||||
"configVersion": 1
|
||||
}
|
||||
1
app/.dart_tool/version
Normal file
1
app/.dart_tool/version
Normal file
@@ -0,0 +1 @@
|
||||
3.38.9
|
||||
@@ -1,122 +0,0 @@
|
||||
import 'package:flutter/material.dart';
|
||||
|
||||
void main() {
|
||||
runApp(const MyApp());
|
||||
}
|
||||
|
||||
class MyApp extends StatelessWidget {
|
||||
const MyApp({super.key});
|
||||
|
||||
// This widget is the root of your application.
|
||||
@override
|
||||
Widget build(BuildContext context) {
|
||||
return MaterialApp(
|
||||
title: 'Flutter Demo',
|
||||
theme: ThemeData(
|
||||
// This is the theme of your application.
|
||||
//
|
||||
// TRY THIS: Try running your application with "flutter run". You'll see
|
||||
// the application has a purple toolbar. Then, without quitting the app,
|
||||
// try changing the seedColor in the colorScheme below to Colors.green
|
||||
// and then invoke "hot reload" (save your changes or press the "hot
|
||||
// reload" button in a Flutter-supported IDE, or press "r" if you used
|
||||
// the command line to start the app).
|
||||
//
|
||||
// Notice that the counter didn't reset back to zero; the application
|
||||
// state is not lost during the reload. To reset the state, use hot
|
||||
// restart instead.
|
||||
//
|
||||
// This works for code too, not just values: Most code changes can be
|
||||
// tested with just a hot reload.
|
||||
colorScheme: .fromSeed(seedColor: Colors.deepPurple),
|
||||
),
|
||||
home: const MyHomePage(title: 'Flutter Demo Home Page'),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
class MyHomePage extends StatefulWidget {
|
||||
const MyHomePage({super.key, required this.title});
|
||||
|
||||
// This widget is the home page of your application. It is stateful, meaning
|
||||
// that it has a State object (defined below) that contains fields that affect
|
||||
// how it looks.
|
||||
|
||||
// This class is the configuration for the state. It holds the values (in this
|
||||
// case the title) provided by the parent (in this case the App widget) and
|
||||
// used by the build method of the State. Fields in a Widget subclass are
|
||||
// always marked "final".
|
||||
|
||||
final String title;
|
||||
|
||||
@override
|
||||
State<MyHomePage> createState() => _MyHomePageState();
|
||||
}
|
||||
|
||||
class _MyHomePageState extends State<MyHomePage> {
|
||||
int _counter = 0;
|
||||
|
||||
void _incrementCounter() {
|
||||
setState(() {
|
||||
// This call to setState tells the Flutter framework that something has
|
||||
// changed in this State, which causes it to rerun the build method below
|
||||
// so that the display can reflect the updated values. If we changed
|
||||
// _counter without calling setState(), then the build method would not be
|
||||
// called again, and so nothing would appear to happen.
|
||||
_counter++;
|
||||
});
|
||||
}
|
||||
|
||||
@override
|
||||
Widget build(BuildContext context) {
|
||||
// This method is rerun every time setState is called, for instance as done
|
||||
// by the _incrementCounter method above.
|
||||
//
|
||||
// The Flutter framework has been optimized to make rerunning build methods
|
||||
// fast, so that you can just rebuild anything that needs updating rather
|
||||
// than having to individually change instances of widgets.
|
||||
return Scaffold(
|
||||
appBar: AppBar(
|
||||
// TRY THIS: Try changing the color here to a specific color (to
|
||||
// Colors.amber, perhaps?) and trigger a hot reload to see the AppBar
|
||||
// change color while the other colors stay the same.
|
||||
backgroundColor: Theme.of(context).colorScheme.inversePrimary,
|
||||
// Here we take the value from the MyHomePage object that was created by
|
||||
// the App.build method, and use it to set our appbar title.
|
||||
title: Text(widget.title),
|
||||
),
|
||||
body: Center(
|
||||
// Center is a layout widget. It takes a single child and positions it
|
||||
// in the middle of the parent.
|
||||
child: Column(
|
||||
// Column is also a layout widget. It takes a list of children and
|
||||
// arranges them vertically. By default, it sizes itself to fit its
|
||||
// children horizontally, and tries to be as tall as its parent.
|
||||
//
|
||||
// Column has various properties to control how it sizes itself and
|
||||
// how it positions its children. Here we use mainAxisAlignment to
|
||||
// center the children vertically; the main axis here is the vertical
|
||||
// axis because Columns are vertical (the cross axis would be
|
||||
// horizontal).
|
||||
//
|
||||
// TRY THIS: Invoke "debug painting" (choose the "Toggle Debug Paint"
|
||||
// action in the IDE, or press "p" in the console), to see the
|
||||
// wireframe for each widget.
|
||||
mainAxisAlignment: .center,
|
||||
children: [
|
||||
const Text('You have pushed the button this many times:'),
|
||||
Text(
|
||||
'$_counter',
|
||||
style: Theme.of(context).textTheme.headlineMedium,
|
||||
),
|
||||
],
|
||||
),
|
||||
),
|
||||
floatingActionButton: FloatingActionButton(
|
||||
onPressed: _incrementCounter,
|
||||
tooltip: 'Increment',
|
||||
child: const Icon(Icons.add),
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
#include "ephemeral/Flutter-Generated.xcconfig"
|
||||
@@ -1 +0,0 @@
|
||||
#include "ephemeral/Flutter-Generated.xcconfig"
|
||||
@@ -1,10 +0,0 @@
|
||||
//
|
||||
// Generated file. Do not edit.
|
||||
//
|
||||
|
||||
import FlutterMacOS
|
||||
import Foundation
|
||||
|
||||
|
||||
func RegisterGeneratedPlugins(registry: FlutterPluginRegistry) {
|
||||
}
|
||||
11
app/macos/Flutter/ephemeral/Flutter-Generated.xcconfig
Normal file
11
app/macos/Flutter/ephemeral/Flutter-Generated.xcconfig
Normal file
@@ -0,0 +1,11 @@
|
||||
// This is a generated file; do not edit or check into version control.
|
||||
FLUTTER_ROOT=/Users/kaska/.local/share/mise/installs/flutter/3.38.9-stable
|
||||
FLUTTER_APPLICATION_PATH=/Users/kaska/Documents/Projects/Major/arbiter/app
|
||||
COCOAPODS_PARALLEL_CODE_SIGN=true
|
||||
FLUTTER_BUILD_DIR=build
|
||||
FLUTTER_BUILD_NAME=1.0.0
|
||||
FLUTTER_BUILD_NUMBER=1
|
||||
DART_OBFUSCATION=false
|
||||
TRACK_WIDGET_CREATION=true
|
||||
TREE_SHAKE_ICONS=false
|
||||
PACKAGE_CONFIG=.dart_tool/package_config.json
|
||||
12
app/macos/Flutter/ephemeral/flutter_export_environment.sh
Executable file
12
app/macos/Flutter/ephemeral/flutter_export_environment.sh
Executable file
@@ -0,0 +1,12 @@
|
||||
#!/bin/sh
|
||||
# This is a generated file; do not edit or check into version control.
|
||||
export "FLUTTER_ROOT=/Users/kaska/.local/share/mise/installs/flutter/3.38.9-stable"
|
||||
export "FLUTTER_APPLICATION_PATH=/Users/kaska/Documents/Projects/Major/arbiter/app"
|
||||
export "COCOAPODS_PARALLEL_CODE_SIGN=true"
|
||||
export "FLUTTER_BUILD_DIR=build"
|
||||
export "FLUTTER_BUILD_NAME=1.0.0"
|
||||
export "FLUTTER_BUILD_NUMBER=1"
|
||||
export "DART_OBFUSCATION=false"
|
||||
export "TRACK_WIDGET_CREATION=true"
|
||||
export "TREE_SHAKE_ICONS=false"
|
||||
export "PACKAGE_CONFIG=.dart_tool/package_config.json"
|
||||
213
app/pubspec.lock
213
app/pubspec.lock
@@ -1,213 +0,0 @@
|
||||
# Generated by pub
|
||||
# See https://dart.dev/tools/pub/glossary#lockfile
|
||||
packages:
|
||||
async:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: async
|
||||
sha256: "758e6d74e971c3e5aceb4110bfd6698efc7f501675bcfe0c775459a8140750eb"
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "2.13.0"
|
||||
boolean_selector:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: boolean_selector
|
||||
sha256: "8aab1771e1243a5063b8b0ff68042d67334e3feab9e95b9490f9a6ebf73b42ea"
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "2.1.2"
|
||||
characters:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: characters
|
||||
sha256: f71061c654a3380576a52b451dd5532377954cf9dbd272a78fc8479606670803
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "1.4.0"
|
||||
clock:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: clock
|
||||
sha256: fddb70d9b5277016c77a80201021d40a2247104d9f4aa7bab7157b7e3f05b84b
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "1.1.2"
|
||||
collection:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: collection
|
||||
sha256: "2f5709ae4d3d59dd8f7cd309b4e023046b57d8a6c82130785d2b0e5868084e76"
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "1.19.1"
|
||||
cupertino_icons:
|
||||
dependency: "direct main"
|
||||
description:
|
||||
name: cupertino_icons
|
||||
sha256: ba631d1c7f7bef6b729a622b7b752645a2d076dba9976925b8f25725a30e1ee6
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "1.0.8"
|
||||
fake_async:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: fake_async
|
||||
sha256: "5368f224a74523e8d2e7399ea1638b37aecfca824a3cc4dfdf77bf1fa905ac44"
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "1.3.3"
|
||||
flutter:
|
||||
dependency: "direct main"
|
||||
description: flutter
|
||||
source: sdk
|
||||
version: "0.0.0"
|
||||
flutter_lints:
|
||||
dependency: "direct dev"
|
||||
description:
|
||||
name: flutter_lints
|
||||
sha256: "3105dc8492f6183fb076ccf1f351ac3d60564bff92e20bfc4af9cc1651f4e7e1"
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "6.0.0"
|
||||
flutter_test:
|
||||
dependency: "direct dev"
|
||||
description: flutter
|
||||
source: sdk
|
||||
version: "0.0.0"
|
||||
leak_tracker:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: leak_tracker
|
||||
sha256: "33e2e26bdd85a0112ec15400c8cbffea70d0f9c3407491f672a2fad47915e2de"
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "11.0.2"
|
||||
leak_tracker_flutter_testing:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: leak_tracker_flutter_testing
|
||||
sha256: "1dbc140bb5a23c75ea9c4811222756104fbcd1a27173f0c34ca01e16bea473c1"
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "3.0.10"
|
||||
leak_tracker_testing:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: leak_tracker_testing
|
||||
sha256: "8d5a2d49f4a66b49744b23b018848400d23e54caf9463f4eb20df3eb8acb2eb1"
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "3.0.2"
|
||||
lints:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: lints
|
||||
sha256: "12f842a479589fea194fe5c5a3095abc7be0c1f2ddfa9a0e76aed1dbd26a87df"
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "6.1.0"
|
||||
matcher:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: matcher
|
||||
sha256: dc58c723c3c24bf8d3e2d3ad3f2f9d7bd9cf43ec6feaa64181775e60190153f2
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "0.12.17"
|
||||
material_color_utilities:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: material_color_utilities
|
||||
sha256: f7142bb1154231d7ea5f96bc7bde4bda2a0945d2806bb11670e30b850d56bdec
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "0.11.1"
|
||||
meta:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: meta
|
||||
sha256: "23f08335362185a5ea2ad3a4e597f1375e78bce8a040df5c600c8d3552ef2394"
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "1.17.0"
|
||||
path:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: path
|
||||
sha256: "75cca69d1490965be98c73ceaea117e8a04dd21217b37b292c9ddbec0d955bc5"
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "1.9.1"
|
||||
sky_engine:
|
||||
dependency: transitive
|
||||
description: flutter
|
||||
source: sdk
|
||||
version: "0.0.0"
|
||||
source_span:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: source_span
|
||||
sha256: "56a02f1f4cd1a2d96303c0144c93bd6d909eea6bee6bf5a0e0b685edbd4c47ab"
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "1.10.2"
|
||||
stack_trace:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: stack_trace
|
||||
sha256: "8b27215b45d22309b5cddda1aa2b19bdfec9df0e765f2de506401c071d38d1b1"
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "1.12.1"
|
||||
stream_channel:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: stream_channel
|
||||
sha256: "969e04c80b8bcdf826f8f16579c7b14d780458bd97f56d107d3950fdbeef059d"
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "2.1.4"
|
||||
string_scanner:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: string_scanner
|
||||
sha256: "921cd31725b72fe181906c6a94d987c78e3b98c2e205b397ea399d4054872b43"
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "1.4.1"
|
||||
term_glyph:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: term_glyph
|
||||
sha256: "7f554798625ea768a7518313e58f83891c7f5024f88e46e7182a4558850a4b8e"
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "1.2.2"
|
||||
test_api:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: test_api
|
||||
sha256: ab2726c1a94d3176a45960b6234466ec367179b87dd74f1611adb1f3b5fb9d55
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "0.7.7"
|
||||
vector_math:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: vector_math
|
||||
sha256: d530bd74fea330e6e364cda7a85019c434070188383e1cd8d9777ee586914c5b
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "2.2.0"
|
||||
vm_service:
|
||||
dependency: transitive
|
||||
description:
|
||||
name: vm_service
|
||||
sha256: "45caa6c5917fa127b5dbcfbd1fa60b14e583afdc08bfc96dda38886ca252eb60"
|
||||
url: "https://pub.dev"
|
||||
source: hosted
|
||||
version: "15.0.2"
|
||||
sdks:
|
||||
dart: ">=3.10.8 <4.0.0"
|
||||
flutter: ">=3.18.0-18.0.pre.54"
|
||||
@@ -1,89 +0,0 @@
|
||||
name: app
|
||||
description: "A new Flutter project."
|
||||
# The following line prevents the package from being accidentally published to
|
||||
# pub.dev using `flutter pub publish`. This is preferred for private packages.
|
||||
publish_to: 'none' # Remove this line if you wish to publish to pub.dev
|
||||
|
||||
# The following defines the version and build number for your application.
|
||||
# A version number is three numbers separated by dots, like 1.2.43
|
||||
# followed by an optional build number separated by a +.
|
||||
# Both the version and the builder number may be overridden in flutter
|
||||
# build by specifying --build-name and --build-number, respectively.
|
||||
# In Android, build-name is used as versionName while build-number used as versionCode.
|
||||
# Read more about Android versioning at https://developer.android.com/studio/publish/versioning
|
||||
# In iOS, build-name is used as CFBundleShortVersionString while build-number is used as CFBundleVersion.
|
||||
# Read more about iOS versioning at
|
||||
# https://developer.apple.com/library/archive/documentation/General/Reference/InfoPlistKeyReference/Articles/CoreFoundationKeys.html
|
||||
# In Windows, build-name is used as the major, minor, and patch parts
|
||||
# of the product and file versions while build-number is used as the build suffix.
|
||||
version: 1.0.0+1
|
||||
|
||||
environment:
|
||||
sdk: ^3.10.8
|
||||
|
||||
# Dependencies specify other packages that your package needs in order to work.
|
||||
# To automatically upgrade your package dependencies to the latest versions
|
||||
# consider running `flutter pub upgrade --major-versions`. Alternatively,
|
||||
# dependencies can be manually updated by changing the version numbers below to
|
||||
# the latest version available on pub.dev. To see which dependencies have newer
|
||||
# versions available, run `flutter pub outdated`.
|
||||
dependencies:
|
||||
flutter:
|
||||
sdk: flutter
|
||||
|
||||
# The following adds the Cupertino Icons font to your application.
|
||||
# Use with the CupertinoIcons class for iOS style icons.
|
||||
cupertino_icons: ^1.0.8
|
||||
|
||||
dev_dependencies:
|
||||
flutter_test:
|
||||
sdk: flutter
|
||||
|
||||
# The "flutter_lints" package below contains a set of recommended lints to
|
||||
# encourage good coding practices. The lint set provided by the package is
|
||||
# activated in the `analysis_options.yaml` file located at the root of your
|
||||
# package. See that file for information about deactivating specific lint
|
||||
# rules and activating additional ones.
|
||||
flutter_lints: ^6.0.0
|
||||
|
||||
# For information on the generic Dart part of this file, see the
|
||||
# following page: https://dart.dev/tools/pub/pubspec
|
||||
|
||||
# The following section is specific to Flutter packages.
|
||||
flutter:
|
||||
|
||||
# The following line ensures that the Material Icons font is
|
||||
# included with your application, so that you can use the icons in
|
||||
# the material Icons class.
|
||||
uses-material-design: true
|
||||
|
||||
# To add assets to your application, add an assets section, like this:
|
||||
# assets:
|
||||
# - images/a_dot_burr.jpeg
|
||||
# - images/a_dot_ham.jpeg
|
||||
|
||||
# An image asset can refer to one or more resolution-specific "variants", see
|
||||
# https://flutter.dev/to/resolution-aware-images
|
||||
|
||||
# For details regarding adding assets from package dependencies, see
|
||||
# https://flutter.dev/to/asset-from-package
|
||||
|
||||
# To add custom fonts to your application, add a fonts section here,
|
||||
# in this "flutter" section. Each entry in this list should have a
|
||||
# "family" key with the font family name, and a "fonts" key with a
|
||||
# list giving the asset and other descriptors for the font. For
|
||||
# example:
|
||||
# fonts:
|
||||
# - family: Schyler
|
||||
# fonts:
|
||||
# - asset: fonts/Schyler-Regular.ttf
|
||||
# - asset: fonts/Schyler-Italic.ttf
|
||||
# style: italic
|
||||
# - family: Trajan Pro
|
||||
# fonts:
|
||||
# - asset: fonts/TrajanPro.ttf
|
||||
# - asset: fonts/TrajanPro_Bold.ttf
|
||||
# weight: 700
|
||||
#
|
||||
# For details regarding fonts from package dependencies,
|
||||
# see https://flutter.dev/to/font-from-package
|
||||
@@ -1,11 +0,0 @@
|
||||
//
|
||||
// Generated file. Do not edit.
|
||||
//
|
||||
|
||||
// clang-format off
|
||||
|
||||
#include "generated_plugin_registrant.h"
|
||||
|
||||
|
||||
void RegisterPlugins(flutter::PluginRegistry* registry) {
|
||||
}
|
||||
1308
docs/superpowers/plans/2026-03-28-grant-creation-refactor.md
Normal file
1308
docs/superpowers/plans/2026-03-28-grant-creation-refactor.md
Normal file
File diff suppressed because it is too large
Load Diff
821
docs/superpowers/plans/2026-03-28-grant-grid-view.md
Normal file
821
docs/superpowers/plans/2026-03-28-grant-grid-view.md
Normal file
@@ -0,0 +1,821 @@
|
||||
# Grant Grid View Implementation Plan
|
||||
|
||||
> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking.
|
||||
|
||||
**Goal:** Add an "EVM Grants" dashboard tab that displays all grants as enriched cards (type, chain, wallet address, client name) with per-card revoke support.
|
||||
|
||||
**Architecture:** A new `walletAccessListProvider` fetches wallet accesses with their DB row IDs. The screen (`grants.dart`) watches only `evmGrantsProvider` for top-level state. Each `GrantCard` widget (its own file) watches enrichment providers (`walletAccessListProvider`, `evmProvider`, `sdkClientsProvider`) and the revoke mutation directly — keeping rebuilds scoped to the card. The screen is registered as a dashboard tab in `AdaptiveScaffold`.
|
||||
|
||||
**Tech Stack:** Flutter, Riverpod (`riverpod_annotation` + `build_runner` codegen), `sizer` (adaptive sizing), `auto_route`, Protocol Buffers (Dart), `Palette` design tokens.
|
||||
|
||||
---
|
||||
|
||||
## File Map
|
||||
|
||||
| File | Action | Responsibility |
|
||||
|---|---|---|
|
||||
| `useragent/lib/theme/palette.dart` | Modify | Add `Palette.token` (indigo accent for token-transfer cards) |
|
||||
| `useragent/lib/features/connection/evm/wallet_access.dart` | Modify | Add `listAllWalletAccesses()` function |
|
||||
| `useragent/lib/providers/sdk_clients/wallet_access_list.dart` | Create | `WalletAccessListProvider` — fetches full wallet access list with IDs |
|
||||
| `useragent/lib/screens/dashboard/evm/grants/widgets/grant_card.dart` | Create | `GrantCard` widget — watches enrichment providers + revoke mutation; one card per grant |
|
||||
| `useragent/lib/screens/dashboard/evm/grants/grants.dart` | Create | `EvmGrantsScreen` — watches `evmGrantsProvider`; handles loading/error/empty/data states; renders `GrantCard` list |
|
||||
| `useragent/lib/router.dart` | Modify | Register `EvmGrantsRoute` in dashboard children |
|
||||
| `useragent/lib/screens/dashboard.dart` | Modify | Add Grants entry to `routes` list and `NavigationDestination` list |
|
||||
|
||||
---
|
||||
|
||||
## Task 1: Add `Palette.token`
|
||||
|
||||
**Files:**
|
||||
- Modify: `useragent/lib/theme/palette.dart`
|
||||
|
||||
- [ ] **Step 1: Add the color**
|
||||
|
||||
Replace the contents of `useragent/lib/theme/palette.dart` with:
|
||||
|
||||
```dart
|
||||
import 'package:flutter/material.dart';
|
||||
|
||||
class Palette {
|
||||
static const ink = Color(0xFF15263C);
|
||||
static const coral = Color(0xFFE26254);
|
||||
static const cream = Color(0xFFFFFAF4);
|
||||
static const line = Color(0x1A15263C);
|
||||
static const token = Color(0xFF5C6BC0);
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Verify**
|
||||
|
||||
```sh
|
||||
cd useragent && flutter analyze lib/theme/palette.dart
|
||||
```
|
||||
|
||||
Expected: no issues.
|
||||
|
||||
- [ ] **Step 3: Commit**
|
||||
|
||||
```sh
|
||||
jj describe -m "feat(theme): add Palette.token for token-transfer grant cards"
|
||||
jj new
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Task 2: Add `listAllWalletAccesses` feature function
|
||||
|
||||
**Files:**
|
||||
- Modify: `useragent/lib/features/connection/evm/wallet_access.dart`
|
||||
|
||||
`readClientWalletAccess` (existing) filters the list to one client's wallet IDs and returns `Set<int>`. This new function returns the complete unfiltered list with row IDs so the grant cards can resolve wallet_access_id → wallet + client.
|
||||
|
||||
- [ ] **Step 1: Append function**
|
||||
|
||||
Add at the bottom of `useragent/lib/features/connection/evm/wallet_access.dart`:
|
||||
|
||||
```dart
|
||||
Future<List<SdkClientWalletAccess>> listAllWalletAccesses(
|
||||
Connection connection,
|
||||
) async {
|
||||
final response = await connection.ask(
|
||||
UserAgentRequest(listWalletAccess: Empty()),
|
||||
);
|
||||
if (!response.hasListWalletAccessResponse()) {
|
||||
throw Exception(
|
||||
'Expected list wallet access response, got ${response.whichPayload()}',
|
||||
);
|
||||
}
|
||||
return response.listWalletAccessResponse.accesses.toList(growable: false);
|
||||
}
|
||||
```
|
||||
|
||||
Each returned `SdkClientWalletAccess` has:
|
||||
- `.id` — the `evm_wallet_access` row ID (same value as `wallet_access_id` in a `GrantEntry`)
|
||||
- `.access.walletId` — the EVM wallet DB ID
|
||||
- `.access.sdkClientId` — the SDK client DB ID
|
||||
|
||||
- [ ] **Step 2: Verify**
|
||||
|
||||
```sh
|
||||
cd useragent && flutter analyze lib/features/connection/evm/wallet_access.dart
|
||||
```
|
||||
|
||||
Expected: no issues.
|
||||
|
||||
- [ ] **Step 3: Commit**
|
||||
|
||||
```sh
|
||||
jj describe -m "feat(evm): add listAllWalletAccesses feature function"
|
||||
jj new
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Task 3: Create `WalletAccessListProvider`
|
||||
|
||||
**Files:**
|
||||
- Create: `useragent/lib/providers/sdk_clients/wallet_access_list.dart`
|
||||
- Generated: `useragent/lib/providers/sdk_clients/wallet_access_list.g.dart`
|
||||
|
||||
Mirrors the structure of `EvmGrants` in `providers/evm/evm_grants.dart` — class-based `@riverpod` with a `refresh()` method.
|
||||
|
||||
- [ ] **Step 1: Write the provider**
|
||||
|
||||
Create `useragent/lib/providers/sdk_clients/wallet_access_list.dart`:
|
||||
|
||||
```dart
|
||||
import 'package:arbiter/features/connection/evm/wallet_access.dart';
|
||||
import 'package:arbiter/proto/user_agent.pb.dart';
|
||||
import 'package:arbiter/providers/connection/connection_manager.dart';
|
||||
import 'package:mtcore/markettakers.dart';
|
||||
import 'package:riverpod_annotation/riverpod_annotation.dart';
|
||||
|
||||
part 'wallet_access_list.g.dart';
|
||||
|
||||
@riverpod
|
||||
class WalletAccessList extends _$WalletAccessList {
|
||||
@override
|
||||
Future<List<SdkClientWalletAccess>?> build() async {
|
||||
final connection = await ref.watch(connectionManagerProvider.future);
|
||||
if (connection == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
try {
|
||||
return await listAllWalletAccesses(connection);
|
||||
} catch (e, st) {
|
||||
talker.handle(e, st);
|
||||
rethrow;
|
||||
}
|
||||
}
|
||||
|
||||
Future<void> refresh() async {
|
||||
final connection = await ref.read(connectionManagerProvider.future);
|
||||
if (connection == null) {
|
||||
state = const AsyncData(null);
|
||||
return;
|
||||
}
|
||||
|
||||
state = const AsyncLoading();
|
||||
state = await AsyncValue.guard(() => listAllWalletAccesses(connection));
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Run code generation**
|
||||
|
||||
```sh
|
||||
cd useragent && dart run build_runner build --delete-conflicting-outputs
|
||||
```
|
||||
|
||||
Expected: `useragent/lib/providers/sdk_clients/wallet_access_list.g.dart` created. No errors.
|
||||
|
||||
- [ ] **Step 3: Verify**
|
||||
|
||||
```sh
|
||||
cd useragent && flutter analyze lib/providers/sdk_clients/
|
||||
```
|
||||
|
||||
Expected: no issues.
|
||||
|
||||
- [ ] **Step 4: Commit**
|
||||
|
||||
```sh
|
||||
jj describe -m "feat(providers): add WalletAccessListProvider"
|
||||
jj new
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Task 4: Create `GrantCard` widget
|
||||
|
||||
**Files:**
|
||||
- Create: `useragent/lib/screens/dashboard/evm/grants/widgets/grant_card.dart`
|
||||
|
||||
This widget owns all per-card logic: enrichment lookups, revoke action, and rebuild scope. The screen only passes it a `GrantEntry` — the card fetches everything else itself.
|
||||
|
||||
**Key types:**
|
||||
- `GrantEntry` (from `proto/evm.pb.dart`): `.id`, `.shared.walletAccessId`, `.shared.chainId`, `.specific.whichGrant()`
|
||||
- `SpecificGrant_Grant.etherTransfer` / `.tokenTransfer` — enum values for the oneof
|
||||
- `SdkClientWalletAccess` (from `proto/user_agent.pb.dart`): `.id`, `.access.walletId`, `.access.sdkClientId`
|
||||
- `WalletEntry` (from `proto/evm.pb.dart`): `.id`, `.address` (List<int>)
|
||||
- `SdkClientEntry` (from `proto/user_agent.pb.dart`): `.id`, `.info.name`
|
||||
- `revokeEvmGrantMutation` — `Mutation<void>` (global; all revoke buttons disable together while any revoke is in flight)
|
||||
- `executeRevokeEvmGrant(ref, grantId: int)` — `Future<void>`
|
||||
|
||||
- [ ] **Step 1: Write the widget**
|
||||
|
||||
Create `useragent/lib/screens/dashboard/evm/grants/widgets/grant_card.dart`:
|
||||
|
||||
```dart
|
||||
import 'package:arbiter/proto/evm.pb.dart';
|
||||
import 'package:arbiter/proto/user_agent.pb.dart';
|
||||
import 'package:arbiter/providers/evm/evm.dart';
|
||||
import 'package:arbiter/providers/evm/evm_grants.dart';
|
||||
import 'package:arbiter/providers/sdk_clients/list.dart';
|
||||
import 'package:arbiter/providers/sdk_clients/wallet_access_list.dart';
|
||||
import 'package:arbiter/theme/palette.dart';
|
||||
import 'package:flutter/material.dart';
|
||||
import 'package:hooks_riverpod/experimental/mutation.dart';
|
||||
import 'package:hooks_riverpod/hooks_riverpod.dart';
|
||||
import 'package:sizer/sizer.dart';
|
||||
|
||||
String _shortAddress(List<int> bytes) {
|
||||
final hex = bytes.map((b) => b.toRadixString(16).padLeft(2, '0')).join();
|
||||
return '0x${hex.substring(0, 6)}...${hex.substring(hex.length - 4)}';
|
||||
}
|
||||
|
||||
String _formatError(Object error) {
|
||||
final message = error.toString();
|
||||
if (message.startsWith('Exception: ')) {
|
||||
return message.substring('Exception: '.length);
|
||||
}
|
||||
return message;
|
||||
}
|
||||
|
||||
class GrantCard extends ConsumerWidget {
|
||||
const GrantCard({super.key, required this.grant});
|
||||
|
||||
final GrantEntry grant;
|
||||
|
||||
@override
|
||||
Widget build(BuildContext context, WidgetRef ref) {
|
||||
// Enrichment lookups — each watch scopes rebuilds to this card only
|
||||
final walletAccesses =
|
||||
ref.watch(walletAccessListProvider).asData?.value ?? const [];
|
||||
final wallets = ref.watch(evmProvider).asData?.value ?? const [];
|
||||
final clients = ref.watch(sdkClientsProvider).asData?.value ?? const [];
|
||||
final revoking = ref.watch(revokeEvmGrantMutation) is MutationPending;
|
||||
|
||||
final isEther =
|
||||
grant.specific.whichGrant() == SpecificGrant_Grant.etherTransfer;
|
||||
final accent = isEther ? Palette.coral : Palette.token;
|
||||
final typeLabel = isEther ? 'Ether' : 'Token';
|
||||
final theme = Theme.of(context);
|
||||
final muted = Palette.ink.withValues(alpha: 0.62);
|
||||
|
||||
// Resolve wallet_access_id → wallet address + client name
|
||||
final accessById = <int, SdkClientWalletAccess>{
|
||||
for (final a in walletAccesses) a.id: a,
|
||||
};
|
||||
final walletById = <int, WalletEntry>{
|
||||
for (final w in wallets) w.id: w,
|
||||
};
|
||||
final clientNameById = <int, String>{
|
||||
for (final c in clients) c.id: c.info.name,
|
||||
};
|
||||
|
||||
final accessId = grant.shared.walletAccessId;
|
||||
final access = accessById[accessId];
|
||||
final wallet = access != null ? walletById[access.access.walletId] : null;
|
||||
|
||||
final walletLabel = wallet != null
|
||||
? _shortAddress(wallet.address)
|
||||
: 'Access #$accessId';
|
||||
|
||||
final clientLabel = () {
|
||||
if (access == null) return '';
|
||||
final name = clientNameById[access.access.sdkClientId] ?? '';
|
||||
return name.isEmpty ? 'Client #${access.access.sdkClientId}' : name;
|
||||
}();
|
||||
|
||||
void showError(String message) {
|
||||
if (!context.mounted) return;
|
||||
ScaffoldMessenger.of(context).showSnackBar(
|
||||
SnackBar(content: Text(message), behavior: SnackBarBehavior.floating),
|
||||
);
|
||||
}
|
||||
|
||||
Future<void> revoke() async {
|
||||
try {
|
||||
await executeRevokeEvmGrant(ref, grantId: grant.id);
|
||||
} catch (e) {
|
||||
showError(_formatError(e));
|
||||
}
|
||||
}
|
||||
|
||||
return Container(
|
||||
decoration: BoxDecoration(
|
||||
borderRadius: BorderRadius.circular(24),
|
||||
color: Palette.cream.withValues(alpha: 0.92),
|
||||
border: Border.all(color: Palette.line),
|
||||
),
|
||||
child: IntrinsicHeight(
|
||||
child: Row(
|
||||
crossAxisAlignment: CrossAxisAlignment.stretch,
|
||||
children: [
|
||||
// Accent strip
|
||||
Container(
|
||||
width: 0.8.w,
|
||||
decoration: BoxDecoration(
|
||||
color: accent,
|
||||
borderRadius: const BorderRadius.horizontal(
|
||||
left: Radius.circular(24),
|
||||
),
|
||||
),
|
||||
),
|
||||
// Card body
|
||||
Expanded(
|
||||
child: Padding(
|
||||
padding: EdgeInsets.symmetric(
|
||||
horizontal: 1.6.w,
|
||||
vertical: 1.4.h,
|
||||
),
|
||||
child: Column(
|
||||
crossAxisAlignment: CrossAxisAlignment.start,
|
||||
children: [
|
||||
// Row 1: type badge · chain · spacer · revoke button
|
||||
Row(
|
||||
children: [
|
||||
Container(
|
||||
padding: EdgeInsets.symmetric(
|
||||
horizontal: 1.w,
|
||||
vertical: 0.4.h,
|
||||
),
|
||||
decoration: BoxDecoration(
|
||||
color: accent.withValues(alpha: 0.15),
|
||||
borderRadius: BorderRadius.circular(8),
|
||||
),
|
||||
child: Text(
|
||||
typeLabel,
|
||||
style: theme.textTheme.labelSmall?.copyWith(
|
||||
color: accent,
|
||||
fontWeight: FontWeight.w800,
|
||||
),
|
||||
),
|
||||
),
|
||||
SizedBox(width: 1.w),
|
||||
Container(
|
||||
padding: EdgeInsets.symmetric(
|
||||
horizontal: 1.w,
|
||||
vertical: 0.4.h,
|
||||
),
|
||||
decoration: BoxDecoration(
|
||||
color: Palette.ink.withValues(alpha: 0.06),
|
||||
borderRadius: BorderRadius.circular(8),
|
||||
),
|
||||
child: Text(
|
||||
'Chain ${grant.shared.chainId}',
|
||||
style: theme.textTheme.labelSmall?.copyWith(
|
||||
color: muted,
|
||||
fontWeight: FontWeight.w700,
|
||||
),
|
||||
),
|
||||
),
|
||||
const Spacer(),
|
||||
if (revoking)
|
||||
SizedBox(
|
||||
width: 1.8.h,
|
||||
height: 1.8.h,
|
||||
child: CircularProgressIndicator(
|
||||
strokeWidth: 2,
|
||||
color: Palette.coral,
|
||||
),
|
||||
)
|
||||
else
|
||||
OutlinedButton.icon(
|
||||
onPressed: revoke,
|
||||
style: OutlinedButton.styleFrom(
|
||||
foregroundColor: Palette.coral,
|
||||
side: BorderSide(
|
||||
color: Palette.coral.withValues(alpha: 0.4),
|
||||
),
|
||||
padding: EdgeInsets.symmetric(
|
||||
horizontal: 1.w,
|
||||
vertical: 0.6.h,
|
||||
),
|
||||
shape: RoundedRectangleBorder(
|
||||
borderRadius: BorderRadius.circular(10),
|
||||
),
|
||||
),
|
||||
icon: const Icon(Icons.block_rounded, size: 16),
|
||||
label: const Text('Revoke'),
|
||||
),
|
||||
],
|
||||
),
|
||||
SizedBox(height: 0.8.h),
|
||||
// Row 2: wallet address · client name
|
||||
Row(
|
||||
children: [
|
||||
Text(
|
||||
walletLabel,
|
||||
style: theme.textTheme.bodySmall?.copyWith(
|
||||
color: Palette.ink,
|
||||
fontFamily: 'monospace',
|
||||
),
|
||||
),
|
||||
Padding(
|
||||
padding: EdgeInsets.symmetric(horizontal: 0.8.w),
|
||||
child: Text(
|
||||
'·',
|
||||
style: theme.textTheme.bodySmall
|
||||
?.copyWith(color: muted),
|
||||
),
|
||||
),
|
||||
Expanded(
|
||||
child: Text(
|
||||
clientLabel,
|
||||
maxLines: 1,
|
||||
overflow: TextOverflow.ellipsis,
|
||||
style: theme.textTheme.bodySmall
|
||||
?.copyWith(color: muted),
|
||||
),
|
||||
),
|
||||
],
|
||||
),
|
||||
],
|
||||
),
|
||||
),
|
||||
),
|
||||
],
|
||||
),
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Verify**
|
||||
|
||||
```sh
|
||||
cd useragent && flutter analyze lib/screens/dashboard/evm/grants/widgets/grant_card.dart
|
||||
```
|
||||
|
||||
Expected: no issues.
|
||||
|
||||
- [ ] **Step 3: Commit**
|
||||
|
||||
```sh
|
||||
jj describe -m "feat(grants): add GrantCard widget with self-contained enrichment"
|
||||
jj new
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Task 5: Create `EvmGrantsScreen`
|
||||
|
||||
**Files:**
|
||||
- Create: `useragent/lib/screens/dashboard/evm/grants/grants.dart`
|
||||
|
||||
The screen watches only `evmGrantsProvider` for top-level state (loading / error / no connection / empty / data). When there is data it renders a list of `GrantCard` widgets — each card manages its own enrichment subscriptions.
|
||||
|
||||
- [ ] **Step 1: Write the screen**
|
||||
|
||||
Create `useragent/lib/screens/dashboard/evm/grants/grants.dart`:
|
||||
|
||||
```dart
|
||||
import 'package:arbiter/proto/evm.pb.dart';
|
||||
import 'package:arbiter/providers/evm/evm_grants.dart';
|
||||
import 'package:arbiter/providers/sdk_clients/wallet_access_list.dart';
|
||||
import 'package:arbiter/router.gr.dart';
|
||||
import 'package:arbiter/screens/dashboard/evm/grants/widgets/grant_card.dart';
|
||||
import 'package:arbiter/theme/palette.dart';
|
||||
import 'package:arbiter/widgets/page_header.dart';
|
||||
import 'package:auto_route/auto_route.dart';
|
||||
import 'package:flutter/material.dart';
|
||||
import 'package:hooks_riverpod/hooks_riverpod.dart';
|
||||
import 'package:sizer/sizer.dart';
|
||||
|
||||
String _formatError(Object error) {
|
||||
final message = error.toString();
|
||||
if (message.startsWith('Exception: ')) {
|
||||
return message.substring('Exception: '.length);
|
||||
}
|
||||
return message;
|
||||
}
|
||||
|
||||
// ─── State panel ──────────────────────────────────────────────────────────────
|
||||
|
||||
class _StatePanel extends StatelessWidget {
|
||||
const _StatePanel({
|
||||
required this.icon,
|
||||
required this.title,
|
||||
required this.body,
|
||||
this.actionLabel,
|
||||
this.onAction,
|
||||
this.busy = false,
|
||||
});
|
||||
|
||||
final IconData icon;
|
||||
final String title;
|
||||
final String body;
|
||||
final String? actionLabel;
|
||||
final Future<void> Function()? onAction;
|
||||
final bool busy;
|
||||
|
||||
@override
|
||||
Widget build(BuildContext context) {
|
||||
final theme = Theme.of(context);
|
||||
|
||||
return Container(
|
||||
decoration: BoxDecoration(
|
||||
borderRadius: BorderRadius.circular(24),
|
||||
color: Palette.cream.withValues(alpha: 0.92),
|
||||
border: Border.all(color: Palette.line),
|
||||
),
|
||||
child: Padding(
|
||||
padding: EdgeInsets.all(2.8.h),
|
||||
child: Column(
|
||||
crossAxisAlignment: CrossAxisAlignment.start,
|
||||
children: [
|
||||
if (busy)
|
||||
SizedBox(
|
||||
width: 2.8.h,
|
||||
height: 2.8.h,
|
||||
child: const CircularProgressIndicator(strokeWidth: 2.5),
|
||||
)
|
||||
else
|
||||
Icon(icon, size: 34, color: Palette.coral),
|
||||
SizedBox(height: 1.8.h),
|
||||
Text(
|
||||
title,
|
||||
style: theme.textTheme.headlineSmall?.copyWith(
|
||||
color: Palette.ink,
|
||||
fontWeight: FontWeight.w800,
|
||||
),
|
||||
),
|
||||
SizedBox(height: 1.h),
|
||||
Text(
|
||||
body,
|
||||
style: theme.textTheme.bodyLarge?.copyWith(
|
||||
color: Palette.ink.withValues(alpha: 0.72),
|
||||
height: 1.5,
|
||||
),
|
||||
),
|
||||
if (actionLabel != null && onAction != null) ...[
|
||||
SizedBox(height: 2.h),
|
||||
OutlinedButton.icon(
|
||||
onPressed: () => onAction!(),
|
||||
icon: const Icon(Icons.refresh),
|
||||
label: Text(actionLabel!),
|
||||
),
|
||||
],
|
||||
],
|
||||
),
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// ─── Grant list ───────────────────────────────────────────────────────────────
|
||||
|
||||
class _GrantList extends StatelessWidget {
|
||||
const _GrantList({required this.grants});
|
||||
|
||||
final List<GrantEntry> grants;
|
||||
|
||||
@override
|
||||
Widget build(BuildContext context) {
|
||||
return Column(
|
||||
children: [
|
||||
for (var i = 0; i < grants.length; i++)
|
||||
Padding(
|
||||
padding: EdgeInsets.only(
|
||||
bottom: i == grants.length - 1 ? 0 : 1.8.h,
|
||||
),
|
||||
child: GrantCard(grant: grants[i]),
|
||||
),
|
||||
],
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// ─── Screen ───────────────────────────────────────────────────────────────────
|
||||
|
||||
@RoutePage()
|
||||
class EvmGrantsScreen extends ConsumerWidget {
|
||||
const EvmGrantsScreen({super.key});
|
||||
|
||||
@override
|
||||
Widget build(BuildContext context, WidgetRef ref) {
|
||||
// Screen watches only the grant list for top-level state decisions
|
||||
final grantsAsync = ref.watch(evmGrantsProvider);
|
||||
|
||||
Future<void> refresh() async {
|
||||
await Future.wait([
|
||||
ref.read(evmGrantsProvider.notifier).refresh(),
|
||||
ref.read(walletAccessListProvider.notifier).refresh(),
|
||||
]);
|
||||
}
|
||||
|
||||
void showMessage(String message) {
|
||||
if (!context.mounted) return;
|
||||
ScaffoldMessenger.of(context).showSnackBar(
|
||||
SnackBar(content: Text(message), behavior: SnackBarBehavior.floating),
|
||||
);
|
||||
}
|
||||
|
||||
Future<void> safeRefresh() async {
|
||||
try {
|
||||
await refresh();
|
||||
} catch (e) {
|
||||
showMessage(_formatError(e));
|
||||
}
|
||||
}
|
||||
|
||||
final grantsState = grantsAsync.asData?.value;
|
||||
final grants = grantsState?.grants;
|
||||
|
||||
final content = switch (grantsAsync) {
|
||||
AsyncLoading() when grantsState == null => const _StatePanel(
|
||||
icon: Icons.hourglass_top,
|
||||
title: 'Loading grants',
|
||||
body: 'Pulling grant registry from Arbiter.',
|
||||
busy: true,
|
||||
),
|
||||
AsyncError(:final error) => _StatePanel(
|
||||
icon: Icons.sync_problem,
|
||||
title: 'Grant registry unavailable',
|
||||
body: _formatError(error),
|
||||
actionLabel: 'Retry',
|
||||
onAction: safeRefresh,
|
||||
),
|
||||
AsyncData(:final value) when value == null => _StatePanel(
|
||||
icon: Icons.portable_wifi_off,
|
||||
title: 'No active server connection',
|
||||
body: 'Reconnect to Arbiter to list EVM grants.',
|
||||
actionLabel: 'Refresh',
|
||||
onAction: safeRefresh,
|
||||
),
|
||||
_ when grants != null && grants.isEmpty => _StatePanel(
|
||||
icon: Icons.policy_outlined,
|
||||
title: 'No grants yet',
|
||||
body: 'Create a grant to allow SDK clients to sign transactions.',
|
||||
actionLabel: 'Create grant',
|
||||
onAction: () => context.router.push(const CreateEvmGrantRoute()),
|
||||
),
|
||||
_ => _GrantList(grants: grants ?? const []),
|
||||
};
|
||||
|
||||
return Scaffold(
|
||||
body: SafeArea(
|
||||
child: RefreshIndicator.adaptive(
|
||||
color: Palette.ink,
|
||||
backgroundColor: Colors.white,
|
||||
onRefresh: safeRefresh,
|
||||
child: ListView(
|
||||
physics: const BouncingScrollPhysics(
|
||||
parent: AlwaysScrollableScrollPhysics(),
|
||||
),
|
||||
padding: EdgeInsets.fromLTRB(2.4.w, 2.4.h, 2.4.w, 3.2.h),
|
||||
children: [
|
||||
PageHeader(
|
||||
title: 'EVM Grants',
|
||||
isBusy: grantsAsync.isLoading,
|
||||
actions: [
|
||||
FilledButton.icon(
|
||||
onPressed: () =>
|
||||
context.router.push(const CreateEvmGrantRoute()),
|
||||
icon: const Icon(Icons.add_rounded),
|
||||
label: const Text('Create grant'),
|
||||
),
|
||||
SizedBox(width: 1.w),
|
||||
OutlinedButton.icon(
|
||||
onPressed: safeRefresh,
|
||||
style: OutlinedButton.styleFrom(
|
||||
foregroundColor: Palette.ink,
|
||||
side: BorderSide(color: Palette.line),
|
||||
padding: EdgeInsets.symmetric(
|
||||
horizontal: 1.4.w,
|
||||
vertical: 1.2.h,
|
||||
),
|
||||
shape: RoundedRectangleBorder(
|
||||
borderRadius: BorderRadius.circular(14),
|
||||
),
|
||||
),
|
||||
icon: const Icon(Icons.refresh, size: 18),
|
||||
label: const Text('Refresh'),
|
||||
),
|
||||
],
|
||||
),
|
||||
SizedBox(height: 1.8.h),
|
||||
content,
|
||||
],
|
||||
),
|
||||
),
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Verify**
|
||||
|
||||
```sh
|
||||
cd useragent && flutter analyze lib/screens/dashboard/evm/grants/
|
||||
```
|
||||
|
||||
Expected: no issues.
|
||||
|
||||
- [ ] **Step 3: Commit**
|
||||
|
||||
```sh
|
||||
jj describe -m "feat(grants): add EvmGrantsScreen"
|
||||
jj new
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Task 6: Wire router and dashboard tab
|
||||
|
||||
**Files:**
|
||||
- Modify: `useragent/lib/router.dart`
|
||||
- Modify: `useragent/lib/screens/dashboard.dart`
|
||||
- Regenerated: `useragent/lib/router.gr.dart`
|
||||
|
||||
- [ ] **Step 1: Add route to `router.dart`**
|
||||
|
||||
Replace the contents of `useragent/lib/router.dart` with:
|
||||
|
||||
```dart
|
||||
import 'package:auto_route/auto_route.dart';
|
||||
|
||||
import 'router.gr.dart';
|
||||
|
||||
@AutoRouterConfig(generateForDir: ['lib/screens'])
|
||||
class Router extends RootStackRouter {
|
||||
@override
|
||||
List<AutoRoute> get routes => [
|
||||
AutoRoute(page: Bootstrap.page, path: '/bootstrap', initial: true),
|
||||
AutoRoute(page: ServerInfoSetupRoute.page, path: '/server-info'),
|
||||
AutoRoute(page: ServerConnectionRoute.page, path: '/server-connection'),
|
||||
AutoRoute(page: VaultSetupRoute.page, path: '/vault'),
|
||||
AutoRoute(page: ClientDetailsRoute.page, path: '/clients/:clientId'),
|
||||
AutoRoute(page: CreateEvmGrantRoute.page, path: '/evm-grants/create'),
|
||||
|
||||
AutoRoute(
|
||||
page: DashboardRouter.page,
|
||||
path: '/dashboard',
|
||||
children: [
|
||||
AutoRoute(page: EvmRoute.page, path: 'evm'),
|
||||
AutoRoute(page: ClientsRoute.page, path: 'clients'),
|
||||
AutoRoute(page: EvmGrantsRoute.page, path: 'grants'),
|
||||
AutoRoute(page: AboutRoute.page, path: 'about'),
|
||||
],
|
||||
),
|
||||
];
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Update `dashboard.dart`**
|
||||
|
||||
In `useragent/lib/screens/dashboard.dart`, replace the `routes` constant:
|
||||
|
||||
```dart
|
||||
final routes = [
|
||||
const EvmRoute(),
|
||||
const ClientsRoute(),
|
||||
const EvmGrantsRoute(),
|
||||
const AboutRoute(),
|
||||
];
|
||||
```
|
||||
|
||||
And replace the `destinations` list inside `AdaptiveScaffold`:
|
||||
|
||||
```dart
|
||||
destinations: const [
|
||||
NavigationDestination(
|
||||
icon: Icon(Icons.account_balance_wallet_outlined),
|
||||
selectedIcon: Icon(Icons.account_balance_wallet),
|
||||
label: 'Wallets',
|
||||
),
|
||||
NavigationDestination(
|
||||
icon: Icon(Icons.devices_other_outlined),
|
||||
selectedIcon: Icon(Icons.devices_other),
|
||||
label: 'Clients',
|
||||
),
|
||||
NavigationDestination(
|
||||
icon: Icon(Icons.policy_outlined),
|
||||
selectedIcon: Icon(Icons.policy),
|
||||
label: 'Grants',
|
||||
),
|
||||
NavigationDestination(
|
||||
icon: Icon(Icons.info_outline),
|
||||
selectedIcon: Icon(Icons.info),
|
||||
label: 'About',
|
||||
),
|
||||
],
|
||||
```
|
||||
|
||||
- [ ] **Step 3: Regenerate router**
|
||||
|
||||
```sh
|
||||
cd useragent && dart run build_runner build --delete-conflicting-outputs
|
||||
```
|
||||
|
||||
Expected: `lib/router.gr.dart` updated, `EvmGrantsRoute` now available, no errors.
|
||||
|
||||
- [ ] **Step 4: Full project verify**
|
||||
|
||||
```sh
|
||||
cd useragent && flutter analyze
|
||||
```
|
||||
|
||||
Expected: no issues.
|
||||
|
||||
- [ ] **Step 5: Commit**
|
||||
|
||||
```sh
|
||||
jj describe -m "feat(nav): add Grants dashboard tab"
|
||||
jj new
|
||||
```
|
||||
170
docs/superpowers/specs/2026-03-28-grant-grid-view-design.md
Normal file
170
docs/superpowers/specs/2026-03-28-grant-grid-view-design.md
Normal file
@@ -0,0 +1,170 @@
|
||||
# Grant Grid View — Design Spec
|
||||
|
||||
**Date:** 2026-03-28
|
||||
|
||||
## Overview
|
||||
|
||||
Add a "Grants" dashboard tab to the Flutter user-agent app that displays all EVM grants as a card-based grid. Each card shows a compact summary (type, chain, wallet address, client name) with a revoke action. The tab integrates into the existing `AdaptiveScaffold` navigation alongside Wallets, Clients, and About.
|
||||
|
||||
## Scope
|
||||
|
||||
- New `walletAccessListProvider` for fetching wallet access entries with their DB row IDs
|
||||
- New `EvmGrantsScreen` as a dashboard tab
|
||||
- Grant card widget with enriched display (type, chain, wallet, client)
|
||||
- Revoke action wired to existing `executeRevokeEvmGrant` mutation
|
||||
- Dashboard tab bar and router updated
|
||||
- New token-transfer accent color added to `Palette`
|
||||
|
||||
**Out of scope:** Fixing grant creation (separate task).
|
||||
|
||||
---
|
||||
|
||||
## Data Layer
|
||||
|
||||
### `walletAccessListProvider`
|
||||
|
||||
**File:** `useragent/lib/providers/sdk_clients/wallet_access_list.dart`
|
||||
|
||||
- `@riverpod` class, watches `connectionManagerProvider.future`
|
||||
- Returns `List<SdkClientWalletAccess>?` (null when not connected)
|
||||
- Each entry: `.id` (wallet_access_id), `.access.walletId`, `.access.sdkClientId`
|
||||
- Exposes a `refresh()` method following the same pattern as `EvmGrants.refresh()`
|
||||
|
||||
### Enrichment at render time (Approach A)
|
||||
|
||||
The `EvmGrantsScreen` watches four providers:
|
||||
1. `evmGrantsProvider` — the grant list
|
||||
2. `walletAccessListProvider` — to resolve wallet_access_id → (wallet_id, sdk_client_id)
|
||||
3. `evmProvider` — to resolve wallet_id → wallet address
|
||||
4. `sdkClientsProvider` — to resolve sdk_client_id → client name
|
||||
|
||||
All lookups are in-memory Maps built inside the build method; no extra model class needed.
|
||||
|
||||
Fallbacks:
|
||||
- Wallet address not found → `"Access #N"` where N is the wallet_access_id
|
||||
- Client name not found → `"Client #N"` where N is the sdk_client_id
|
||||
|
||||
---
|
||||
|
||||
## Route Structure
|
||||
|
||||
```
|
||||
/dashboard
|
||||
/evm ← existing (Wallets tab)
|
||||
/clients ← existing (Clients tab)
|
||||
/grants ← NEW (Grants tab)
|
||||
/about ← existing
|
||||
|
||||
/evm-grants/create ← existing push route (unchanged)
|
||||
```
|
||||
|
||||
### Changes to `router.dart`
|
||||
|
||||
Add inside dashboard children:
|
||||
```dart
|
||||
AutoRoute(page: EvmGrantsRoute.page, path: 'grants'),
|
||||
```
|
||||
|
||||
### Changes to `dashboard.dart`
|
||||
|
||||
Add to `routes` list:
|
||||
```dart
|
||||
const EvmGrantsRoute()
|
||||
```
|
||||
|
||||
Add `NavigationDestination`:
|
||||
```dart
|
||||
NavigationDestination(
|
||||
icon: Icon(Icons.policy_outlined),
|
||||
selectedIcon: Icon(Icons.policy),
|
||||
label: 'Grants',
|
||||
),
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Screen: `EvmGrantsScreen`
|
||||
|
||||
**File:** `useragent/lib/screens/dashboard/evm/grants/grants.dart`
|
||||
|
||||
```
|
||||
Scaffold
|
||||
└─ SafeArea
|
||||
└─ RefreshIndicator.adaptive (refreshes evmGrantsProvider + walletAccessListProvider)
|
||||
└─ ListView (BouncingScrollPhysics + AlwaysScrollableScrollPhysics)
|
||||
├─ PageHeader
|
||||
│ title: 'EVM Grants'
|
||||
│ isBusy: evmGrantsProvider.isLoading
|
||||
│ actions: [CreateGrantButton, RefreshButton]
|
||||
├─ SizedBox(height: 1.8.h)
|
||||
└─ <content>
|
||||
```
|
||||
|
||||
### State handling
|
||||
|
||||
Matches the pattern from `EvmScreen` and `ClientsScreen`:
|
||||
|
||||
| State | Display |
|
||||
|---|---|
|
||||
| Loading (no data yet) | `_StatePanel` with spinner, "Loading grants" |
|
||||
| Error | `_StatePanel` with coral icon, error message, Retry button |
|
||||
| No connection | `_StatePanel`, "No active server connection" |
|
||||
| Empty list | `_StatePanel`, "No grants yet", with Create Grant shortcut |
|
||||
| Data | Column of `_GrantCard` widgets |
|
||||
|
||||
### Header actions
|
||||
|
||||
**CreateGrantButton:** `FilledButton.icon` with `Icons.add_rounded`, pushes `CreateEvmGrantRoute()` via `context.router.push(...)`.
|
||||
|
||||
**RefreshButton:** `OutlinedButton.icon` with `Icons.refresh`, calls `ref.read(evmGrantsProvider.notifier).refresh()`.
|
||||
|
||||
---
|
||||
|
||||
## Grant Card: `_GrantCard`
|
||||
|
||||
**Layout:**
|
||||
|
||||
```
|
||||
Container (rounded 24, Palette.cream bg, Palette.line border)
|
||||
└─ IntrinsicHeight > Row
|
||||
├─ Accent strip (0.8.w wide, full height, rounded left)
|
||||
└─ Padding > Column
|
||||
├─ Row 1: TypeBadge + ChainChip + Spacer + RevokeButton
|
||||
└─ Row 2: WalletText + "·" + ClientText
|
||||
```
|
||||
|
||||
**Accent color by grant type:**
|
||||
- Ether transfer → `Palette.coral`
|
||||
- Token transfer → `Palette.token` (new entry in `Palette` — indigo, e.g. `Color(0xFF5C6BC0)`)
|
||||
|
||||
**TypeBadge:** Small pill container with accent color background at 15% opacity, accent-colored text. Label: `'Ether'` or `'Token'`.
|
||||
|
||||
**ChainChip:** Small container: `'Chain ${grant.shared.chainId}'`, muted ink color.
|
||||
|
||||
**WalletText:** Short hex address (`0xabc...def`) from wallet lookup, `bodySmall`, monospace font family.
|
||||
|
||||
**ClientText:** Client name from `sdkClientsProvider` lookup, or fallback string. `bodySmall`, muted ink.
|
||||
|
||||
**RevokeButton:**
|
||||
- `OutlinedButton` with `Icons.block_rounded` icon, label `'Revoke'`
|
||||
- `foregroundColor: Palette.coral`, `side: BorderSide(color: Palette.coral.withValues(alpha: 0.4))`
|
||||
- Disabled (replaced with `CircularProgressIndicator`) while `revokeEvmGrantMutation` is pending — note: this is a single global mutation, so all revoke buttons disable while any revoke is in flight
|
||||
- On press: calls `executeRevokeEvmGrant(ref, grantId: grant.id)`; shows `SnackBar` on error
|
||||
|
||||
---
|
||||
|
||||
## Adaptive Sizing
|
||||
|
||||
All sizing uses `sizer` units (`1.h`, `1.w`, etc.). No hardcoded pixel values.
|
||||
|
||||
---
|
||||
|
||||
## Files to Create / Modify
|
||||
|
||||
| File | Action |
|
||||
|---|---|
|
||||
| `lib/theme/palette.dart` | Modify — add `Palette.token` color |
|
||||
| `lib/providers/sdk_clients/wallet_access_list.dart` | Create |
|
||||
| `lib/screens/dashboard/evm/grants/grants.dart` | Create |
|
||||
| `lib/router.dart` | Modify — add grants route to dashboard children |
|
||||
| `lib/screens/dashboard.dart` | Modify — add tab to routes list and NavigationDestinations |
|
||||
119
mise.lock
119
mise.lock
@@ -1,35 +1,65 @@
|
||||
# @generated - this file is auto-generated by `mise lock` https://mise.jdx.dev/dev-tools/mise-lock.html
|
||||
|
||||
[[tools.ast-grep]]
|
||||
version = "0.42.0"
|
||||
backend = "aqua:ast-grep/ast-grep"
|
||||
|
||||
[tools.ast-grep."platforms.linux-arm64"]
|
||||
checksum = "sha256:5c830eae8456569e2f7212434ed9c238f58dca412d76045418ed6d394a755836"
|
||||
url = "https://github.com/ast-grep/ast-grep/releases/download/0.42.0/app-aarch64-unknown-linux-gnu.zip"
|
||||
|
||||
[tools.ast-grep."platforms.linux-arm64-musl"]
|
||||
checksum = "sha256:5c830eae8456569e2f7212434ed9c238f58dca412d76045418ed6d394a755836"
|
||||
url = "https://github.com/ast-grep/ast-grep/releases/download/0.42.0/app-aarch64-unknown-linux-gnu.zip"
|
||||
|
||||
[tools.ast-grep."platforms.linux-x64"]
|
||||
checksum = "sha256:e825a05603f0bcc4cd9076c4cc8c9abd6d008b7cd07d9aa3cc323ba4b8606651"
|
||||
url = "https://github.com/ast-grep/ast-grep/releases/download/0.42.0/app-x86_64-unknown-linux-gnu.zip"
|
||||
|
||||
[tools.ast-grep."platforms.linux-x64-musl"]
|
||||
checksum = "sha256:e825a05603f0bcc4cd9076c4cc8c9abd6d008b7cd07d9aa3cc323ba4b8606651"
|
||||
url = "https://github.com/ast-grep/ast-grep/releases/download/0.42.0/app-x86_64-unknown-linux-gnu.zip"
|
||||
|
||||
[tools.ast-grep."platforms.macos-arm64"]
|
||||
checksum = "sha256:fc300d5293b1c770a5aece03a8a193b92e71e87cec726c28096990691a582620"
|
||||
url = "https://github.com/ast-grep/ast-grep/releases/download/0.42.0/app-aarch64-apple-darwin.zip"
|
||||
|
||||
[tools.ast-grep."platforms.macos-x64"]
|
||||
checksum = "sha256:979ffe611327056f4730a1ae71b0209b3b830f58b22c6ed194cda34f55400db2"
|
||||
url = "https://github.com/ast-grep/ast-grep/releases/download/0.42.0/app-x86_64-apple-darwin.zip"
|
||||
|
||||
[tools.ast-grep."platforms.windows-x64"]
|
||||
checksum = "sha256:55836fa1b2c65dc7d61615a4d9368622a0d2371a76d28b9a165e5a3ab6ae32a4"
|
||||
url = "https://github.com/ast-grep/ast-grep/releases/download/0.42.0/app-x86_64-pc-windows-msvc.zip"
|
||||
|
||||
[[tools."cargo:cargo-audit"]]
|
||||
version = "0.22.1"
|
||||
backend = "cargo:cargo-audit"
|
||||
|
||||
[[tools."cargo:cargo-features"]]
|
||||
version = "1.0.0"
|
||||
backend = "cargo:cargo-features"
|
||||
[[tools."cargo:cargo-edit"]]
|
||||
version = "0.13.9"
|
||||
backend = "cargo:cargo-edit"
|
||||
|
||||
[[tools."cargo:cargo-features-manager"]]
|
||||
version = "0.11.1"
|
||||
backend = "cargo:cargo-features-manager"
|
||||
|
||||
[[tools."cargo:cargo-insta"]]
|
||||
version = "1.46.3"
|
||||
backend = "cargo:cargo-insta"
|
||||
|
||||
[[tools."cargo:cargo-nextest"]]
|
||||
version = "0.9.126"
|
||||
backend = "cargo:cargo-nextest"
|
||||
|
||||
[[tools."cargo:cargo-shear"]]
|
||||
version = "1.9.1"
|
||||
version = "1.11.2"
|
||||
backend = "cargo:cargo-shear"
|
||||
|
||||
[[tools."cargo:cargo-vet"]]
|
||||
version = "0.10.2"
|
||||
backend = "cargo:cargo-vet"
|
||||
|
||||
[[tools."cargo:diesel-cli"]]
|
||||
version = "2.3.6"
|
||||
backend = "cargo:diesel-cli"
|
||||
|
||||
[tools."cargo:diesel-cli".options]
|
||||
default-features = "false"
|
||||
features = "sqlite,sqlite-bundled"
|
||||
|
||||
[[tools."cargo:diesel_cli"]]
|
||||
version = "2.3.6"
|
||||
backend = "cargo:diesel_cli"
|
||||
@@ -45,11 +75,66 @@ backend = "asdf:flutter"
|
||||
[[tools.protoc]]
|
||||
version = "29.6"
|
||||
backend = "aqua:protocolbuffers/protobuf/protoc"
|
||||
"platforms.linux-arm64" = { checksum = "sha256:2594ff4fcae8cb57310d394d0961b236190ad9c5efbfdf1f597ea471d424fe79", url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-linux-aarch_64.zip"}
|
||||
"platforms.linux-x64" = { checksum = "sha256:48785a926e73ffa3f68e2f22b14e7b849620c7a1d36809ac9249a5495e280323", url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-linux-x86_64.zip"}
|
||||
"platforms.macos-arm64" = { checksum = "sha256:b9576b5fa1a1ef3fe13a8c91d9d8204b46545759bea5ae155cd6ba2ea4cdaeed", url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-osx-aarch_64.zip"}
|
||||
"platforms.macos-x64" = { checksum = "sha256:312f04713946921cc0187ef34df80241ddca1bab6f564c636885fd2cc90d3f88", url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-osx-x86_64.zip"}
|
||||
"platforms.windows-x64" = { checksum = "sha256:1ebd7c87baffb9f1c47169b640872bf5fb1e4408079c691af527be9561d8f6f7", url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-win64.zip"}
|
||||
|
||||
[tools.protoc."platforms.linux-arm64"]
|
||||
checksum = "sha256:2594ff4fcae8cb57310d394d0961b236190ad9c5efbfdf1f597ea471d424fe79"
|
||||
url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-linux-aarch_64.zip"
|
||||
|
||||
[tools.protoc."platforms.linux-arm64-musl"]
|
||||
checksum = "sha256:2594ff4fcae8cb57310d394d0961b236190ad9c5efbfdf1f597ea471d424fe79"
|
||||
url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-linux-aarch_64.zip"
|
||||
|
||||
[tools.protoc."platforms.linux-x64"]
|
||||
checksum = "sha256:48785a926e73ffa3f68e2f22b14e7b849620c7a1d36809ac9249a5495e280323"
|
||||
url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-linux-x86_64.zip"
|
||||
|
||||
[tools.protoc."platforms.linux-x64-musl"]
|
||||
checksum = "sha256:48785a926e73ffa3f68e2f22b14e7b849620c7a1d36809ac9249a5495e280323"
|
||||
url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-linux-x86_64.zip"
|
||||
|
||||
[tools.protoc."platforms.macos-arm64"]
|
||||
checksum = "sha256:b9576b5fa1a1ef3fe13a8c91d9d8204b46545759bea5ae155cd6ba2ea4cdaeed"
|
||||
url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-osx-aarch_64.zip"
|
||||
|
||||
[tools.protoc."platforms.macos-x64"]
|
||||
checksum = "sha256:312f04713946921cc0187ef34df80241ddca1bab6f564c636885fd2cc90d3f88"
|
||||
url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-osx-x86_64.zip"
|
||||
|
||||
[tools.protoc."platforms.windows-x64"]
|
||||
checksum = "sha256:1ebd7c87baffb9f1c47169b640872bf5fb1e4408079c691af527be9561d8f6f7"
|
||||
url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-win64.zip"
|
||||
|
||||
[[tools.python]]
|
||||
version = "3.14.3"
|
||||
backend = "core:python"
|
||||
|
||||
[tools.python."platforms.linux-arm64"]
|
||||
checksum = "sha256:53700338695e402a1a1fe22be4a41fbdacc70e22bb308a48eca8ed67cb7992be"
|
||||
url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260324/cpython-3.14.3+20260324-aarch64-unknown-linux-gnu-install_only_stripped.tar.gz"
|
||||
|
||||
[tools.python."platforms.linux-arm64-musl"]
|
||||
checksum = "sha256:53700338695e402a1a1fe22be4a41fbdacc70e22bb308a48eca8ed67cb7992be"
|
||||
url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260324/cpython-3.14.3+20260324-aarch64-unknown-linux-gnu-install_only_stripped.tar.gz"
|
||||
|
||||
[tools.python."platforms.linux-x64"]
|
||||
checksum = "sha256:d7a9f970914bb4c88756fe3bdcc186d4feb90e9500e54f1db47dae4dc9687e39"
|
||||
url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260324/cpython-3.14.3+20260324-x86_64-unknown-linux-gnu-install_only_stripped.tar.gz"
|
||||
|
||||
[tools.python."platforms.linux-x64-musl"]
|
||||
checksum = "sha256:d7a9f970914bb4c88756fe3bdcc186d4feb90e9500e54f1db47dae4dc9687e39"
|
||||
url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260324/cpython-3.14.3+20260324-x86_64-unknown-linux-gnu-install_only_stripped.tar.gz"
|
||||
|
||||
[tools.python."platforms.macos-arm64"]
|
||||
checksum = "sha256:c43aecde4a663aebff99b9b83da0efec506479f1c3f98331442f33d2c43501f9"
|
||||
url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260324/cpython-3.14.3+20260324-aarch64-apple-darwin-install_only_stripped.tar.gz"
|
||||
|
||||
[tools.python."platforms.macos-x64"]
|
||||
checksum = "sha256:9ab41dbc2f100a2a45d1833b9c11165f51051c558b5213eda9a9731d5948a0c0"
|
||||
url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260324/cpython-3.14.3+20260324-x86_64-apple-darwin-install_only_stripped.tar.gz"
|
||||
|
||||
[tools.python."platforms.windows-x64"]
|
||||
checksum = "sha256:bbe19034b35b0267176a7442575ae7dc6343480fd4d35598cb7700173d431e09"
|
||||
url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260324/cpython-3.14.3+20260324-x86_64-pc-windows-msvc-install_only_stripped.tar.gz"
|
||||
|
||||
[[tools.rust]]
|
||||
version = "1.93.0"
|
||||
|
||||
15
mise.toml
15
mise.toml
@@ -2,10 +2,21 @@
|
||||
"cargo:diesel_cli" = { version = "2.3.6", features = "sqlite,sqlite-bundled", default-features = false }
|
||||
"cargo:cargo-audit" = "0.22.1"
|
||||
"cargo:cargo-vet" = "0.10.2"
|
||||
|
||||
flutter = "3.38.9-stable"
|
||||
protoc = "29.6"
|
||||
rust = "1.93.1"
|
||||
"rust" = {version = "1.93.0", components = "clippy"}
|
||||
"cargo:cargo-features-manager" = "0.11.1"
|
||||
"cargo:cargo-nextest" = "0.9.126"
|
||||
"cargo:cargo-shear" = "latest"
|
||||
"cargo:cargo-insta" = "1.46.3"
|
||||
python = "3.14.3"
|
||||
ast-grep = "0.42.0"
|
||||
"cargo:cargo-edit" = "0.13.9"
|
||||
|
||||
[tasks.codegen]
|
||||
sources = ['protobufs/*.proto']
|
||||
outputs = ['useragent/lib/proto/*']
|
||||
run = '''
|
||||
dart pub global activate protoc_plugin && \
|
||||
protoc --dart_out=grpc:useragent/lib/proto --proto_path=protobufs/ protobufs/*.proto
|
||||
'''
|
||||
|
||||
@@ -2,67 +2,15 @@ syntax = "proto3";
|
||||
|
||||
package arbiter;
|
||||
|
||||
import "auth.proto";
|
||||
|
||||
message ClientRequest {
|
||||
oneof payload {
|
||||
arbiter.auth.ClientMessage auth_message = 1;
|
||||
CertRotationAck cert_rotation_ack = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message ClientResponse {
|
||||
oneof payload {
|
||||
arbiter.auth.ServerMessage auth_message = 1;
|
||||
CertRotationNotification cert_rotation_notification = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message UserAgentRequest {
|
||||
oneof payload {
|
||||
arbiter.auth.ClientMessage auth_message = 1;
|
||||
CertRotationAck cert_rotation_ack = 2;
|
||||
}
|
||||
}
|
||||
message UserAgentResponse {
|
||||
oneof payload {
|
||||
arbiter.auth.ServerMessage auth_message = 1;
|
||||
CertRotationNotification cert_rotation_notification = 2;
|
||||
}
|
||||
}
|
||||
import "client.proto";
|
||||
import "user_agent.proto";
|
||||
|
||||
message ServerInfo {
|
||||
string version = 1;
|
||||
bytes cert_public_key = 2;
|
||||
}
|
||||
|
||||
// TLS Certificate Rotation Protocol
|
||||
message CertRotationNotification {
|
||||
// New public certificate (DER-encoded)
|
||||
bytes new_cert = 1;
|
||||
|
||||
// Unix timestamp when rotation will be executed (if all ACKs received)
|
||||
int64 rotation_scheduled_at = 2;
|
||||
|
||||
// Unix timestamp deadline for ACK (7 days from now)
|
||||
int64 ack_deadline = 3;
|
||||
|
||||
// Rotation ID for tracking
|
||||
int32 rotation_id = 4;
|
||||
}
|
||||
|
||||
message CertRotationAck {
|
||||
// Rotation ID (from CertRotationNotification)
|
||||
int32 rotation_id = 1;
|
||||
|
||||
// Client public key for identification
|
||||
bytes client_public_key = 2;
|
||||
|
||||
// Confirmation that client saved the new certificate
|
||||
bool cert_saved = 3;
|
||||
}
|
||||
|
||||
service ArbiterService {
|
||||
rpc Client(stream ClientRequest) returns (stream ClientResponse);
|
||||
rpc UserAgent(stream UserAgentRequest) returns (stream UserAgentResponse);
|
||||
rpc Client(stream arbiter.client.ClientRequest) returns (stream arbiter.client.ClientResponse);
|
||||
rpc UserAgent(stream arbiter.user_agent.UserAgentRequest) returns (stream arbiter.user_agent.UserAgentResponse);
|
||||
}
|
||||
|
||||
@@ -1,35 +0,0 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package arbiter.auth;
|
||||
|
||||
import "google/protobuf/timestamp.proto";
|
||||
|
||||
message AuthChallengeRequest {
|
||||
bytes pubkey = 1;
|
||||
optional string bootstrap_token = 2;
|
||||
}
|
||||
|
||||
message AuthChallenge {
|
||||
bytes pubkey = 1;
|
||||
int32 nonce = 2;
|
||||
}
|
||||
|
||||
message AuthChallengeSolution {
|
||||
bytes signature = 1;
|
||||
}
|
||||
|
||||
message AuthOk {}
|
||||
|
||||
message ClientMessage {
|
||||
oneof payload {
|
||||
AuthChallengeRequest auth_challenge_request = 1;
|
||||
AuthChallengeSolution auth_challenge_solution = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message ServerMessage {
|
||||
oneof payload {
|
||||
AuthChallenge auth_challenge = 1;
|
||||
AuthOk auth_ok = 2;
|
||||
}
|
||||
}
|
||||
64
protobufs/client.proto
Normal file
64
protobufs/client.proto
Normal file
@@ -0,0 +1,64 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package arbiter.client;
|
||||
|
||||
import "evm.proto";
|
||||
import "google/protobuf/empty.proto";
|
||||
|
||||
message ClientInfo {
|
||||
string name = 1;
|
||||
optional string description = 2;
|
||||
optional string version = 3;
|
||||
}
|
||||
|
||||
message AuthChallengeRequest {
|
||||
bytes pubkey = 1;
|
||||
ClientInfo client_info = 2;
|
||||
}
|
||||
|
||||
message AuthChallenge {
|
||||
bytes pubkey = 1;
|
||||
int32 nonce = 2;
|
||||
}
|
||||
|
||||
message AuthChallengeSolution {
|
||||
bytes signature = 1;
|
||||
}
|
||||
|
||||
enum AuthResult {
|
||||
AUTH_RESULT_UNSPECIFIED = 0;
|
||||
AUTH_RESULT_SUCCESS = 1;
|
||||
AUTH_RESULT_INVALID_KEY = 2;
|
||||
AUTH_RESULT_INVALID_SIGNATURE = 3;
|
||||
AUTH_RESULT_APPROVAL_DENIED = 4;
|
||||
AUTH_RESULT_NO_USER_AGENTS_ONLINE = 5;
|
||||
AUTH_RESULT_INTERNAL = 6;
|
||||
}
|
||||
|
||||
enum VaultState {
|
||||
VAULT_STATE_UNSPECIFIED = 0;
|
||||
VAULT_STATE_UNBOOTSTRAPPED = 1;
|
||||
VAULT_STATE_SEALED = 2;
|
||||
VAULT_STATE_UNSEALED = 3;
|
||||
VAULT_STATE_ERROR = 4;
|
||||
}
|
||||
|
||||
message ClientRequest {
|
||||
int32 request_id = 4;
|
||||
oneof payload {
|
||||
AuthChallengeRequest auth_challenge_request = 1;
|
||||
AuthChallengeSolution auth_challenge_solution = 2;
|
||||
google.protobuf.Empty query_vault_state = 3;
|
||||
}
|
||||
}
|
||||
|
||||
message ClientResponse {
|
||||
optional int32 request_id = 7;
|
||||
oneof payload {
|
||||
AuthChallenge auth_challenge = 1;
|
||||
AuthResult auth_result = 2;
|
||||
arbiter.evm.EvmSignTransactionResponse evm_sign_transaction = 3;
|
||||
arbiter.evm.EvmAnalyzeTransactionResponse evm_analyze_transaction = 4;
|
||||
VaultState vault_state = 6;
|
||||
}
|
||||
}
|
||||
216
protobufs/evm.proto
Normal file
216
protobufs/evm.proto
Normal file
@@ -0,0 +1,216 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package arbiter.evm;
|
||||
|
||||
import "google/protobuf/empty.proto";
|
||||
import "google/protobuf/timestamp.proto";
|
||||
|
||||
enum EvmError {
|
||||
EVM_ERROR_UNSPECIFIED = 0;
|
||||
EVM_ERROR_VAULT_SEALED = 1;
|
||||
EVM_ERROR_INTERNAL = 2;
|
||||
}
|
||||
|
||||
message WalletEntry {
|
||||
int32 id = 1;
|
||||
bytes address = 2; // 20-byte Ethereum address
|
||||
}
|
||||
|
||||
message WalletList {
|
||||
repeated WalletEntry wallets = 1;
|
||||
}
|
||||
|
||||
message WalletCreateResponse {
|
||||
oneof result {
|
||||
WalletEntry wallet = 1;
|
||||
EvmError error = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message WalletListResponse {
|
||||
oneof result {
|
||||
WalletList wallets = 1;
|
||||
EvmError error = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// --- Grant types ---
|
||||
|
||||
message TransactionRateLimit {
|
||||
uint32 count = 1;
|
||||
int64 window_secs = 2;
|
||||
}
|
||||
|
||||
message VolumeRateLimit {
|
||||
bytes max_volume = 1; // U256 as big-endian bytes
|
||||
int64 window_secs = 2;
|
||||
}
|
||||
|
||||
message SharedSettings {
|
||||
int32 wallet_access_id = 1;
|
||||
uint64 chain_id = 2;
|
||||
optional google.protobuf.Timestamp valid_from = 3;
|
||||
optional google.protobuf.Timestamp valid_until = 4;
|
||||
optional bytes max_gas_fee_per_gas = 5; // U256 as big-endian bytes
|
||||
optional bytes max_priority_fee_per_gas = 6; // U256 as big-endian bytes
|
||||
optional TransactionRateLimit rate_limit = 7;
|
||||
}
|
||||
|
||||
message EtherTransferSettings {
|
||||
repeated bytes targets = 1; // list of 20-byte Ethereum addresses
|
||||
VolumeRateLimit limit = 2;
|
||||
}
|
||||
|
||||
message TokenTransferSettings {
|
||||
bytes token_contract = 1; // 20-byte Ethereum address
|
||||
optional bytes target = 2; // 20-byte Ethereum address; absent means any recipient allowed
|
||||
repeated VolumeRateLimit volume_limits = 3;
|
||||
}
|
||||
|
||||
message SpecificGrant {
|
||||
oneof grant {
|
||||
EtherTransferSettings ether_transfer = 1;
|
||||
TokenTransferSettings token_transfer = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message EtherTransferMeaning {
|
||||
bytes to = 1; // 20-byte Ethereum address
|
||||
bytes value = 2; // U256 as big-endian bytes
|
||||
}
|
||||
|
||||
message TokenInfo {
|
||||
string symbol = 1;
|
||||
bytes address = 2; // 20-byte Ethereum address
|
||||
uint64 chain_id = 3;
|
||||
}
|
||||
|
||||
// Mirror of token_transfers::Meaning
|
||||
message TokenTransferMeaning {
|
||||
TokenInfo token = 1;
|
||||
bytes to = 2; // 20-byte Ethereum address
|
||||
bytes value = 3; // U256 as big-endian bytes
|
||||
}
|
||||
|
||||
// Mirror of policies::SpecificMeaning
|
||||
message SpecificMeaning {
|
||||
oneof meaning {
|
||||
EtherTransferMeaning ether_transfer = 1;
|
||||
TokenTransferMeaning token_transfer = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// --- Eval error types ---
|
||||
message GasLimitExceededViolation {
|
||||
optional bytes max_gas_fee_per_gas = 1; // U256 as big-endian bytes
|
||||
optional bytes max_priority_fee_per_gas = 2; // U256 as big-endian bytes
|
||||
}
|
||||
|
||||
message EvalViolation {
|
||||
oneof kind {
|
||||
bytes invalid_target = 1; // 20-byte Ethereum address
|
||||
GasLimitExceededViolation gas_limit_exceeded = 2;
|
||||
google.protobuf.Empty rate_limit_exceeded = 3;
|
||||
google.protobuf.Empty volumetric_limit_exceeded = 4;
|
||||
google.protobuf.Empty invalid_time = 5;
|
||||
google.protobuf.Empty invalid_transaction_type = 6;
|
||||
}
|
||||
}
|
||||
|
||||
// Transaction was classified but no grant covers it
|
||||
message NoMatchingGrantError {
|
||||
SpecificMeaning meaning = 1;
|
||||
}
|
||||
|
||||
// Transaction was classified and a grant was found, but constraints were violated
|
||||
message PolicyViolationsError {
|
||||
SpecificMeaning meaning = 1;
|
||||
repeated EvalViolation violations = 2;
|
||||
}
|
||||
|
||||
// top-level error returned when transaction evaluation fails
|
||||
message TransactionEvalError {
|
||||
oneof kind {
|
||||
google.protobuf.Empty contract_creation_not_supported = 1;
|
||||
google.protobuf.Empty unsupported_transaction_type = 2;
|
||||
NoMatchingGrantError no_matching_grant = 3;
|
||||
PolicyViolationsError policy_violations = 4;
|
||||
}
|
||||
}
|
||||
|
||||
// --- UserAgent grant management ---
|
||||
message EvmGrantCreateRequest {
|
||||
SharedSettings shared = 1;
|
||||
SpecificGrant specific = 2;
|
||||
}
|
||||
|
||||
message EvmGrantCreateResponse {
|
||||
oneof result {
|
||||
int32 grant_id = 1;
|
||||
EvmError error = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message EvmGrantDeleteRequest {
|
||||
int32 grant_id = 1;
|
||||
}
|
||||
|
||||
message EvmGrantDeleteResponse {
|
||||
oneof result {
|
||||
google.protobuf.Empty ok = 1;
|
||||
EvmError error = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// Basic grant info returned in grant listings
|
||||
message GrantEntry {
|
||||
int32 id = 1;
|
||||
int32 wallet_access_id = 2;
|
||||
SharedSettings shared = 3;
|
||||
SpecificGrant specific = 4;
|
||||
}
|
||||
|
||||
message EvmGrantListRequest {
|
||||
optional int32 wallet_access_id = 1;
|
||||
}
|
||||
|
||||
message EvmGrantListResponse {
|
||||
oneof result {
|
||||
EvmGrantList grants = 1;
|
||||
EvmError error = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message EvmGrantList {
|
||||
repeated GrantEntry grants = 1;
|
||||
}
|
||||
|
||||
// --- Client transaction operations ---
|
||||
|
||||
message EvmSignTransactionRequest {
|
||||
bytes wallet_address = 1; // 20-byte Ethereum address
|
||||
bytes rlp_transaction = 2; // RLP-encoded EIP-1559 transaction (unsigned)
|
||||
}
|
||||
|
||||
// oneof because signing and evaluation happen atomically — a signing failure
|
||||
// is always either an eval error or an internal error, never a partial success
|
||||
message EvmSignTransactionResponse {
|
||||
oneof result {
|
||||
bytes signature = 1; // 65-byte signature: r[32] || s[32] || v[1]
|
||||
TransactionEvalError eval_error = 2;
|
||||
EvmError error = 3;
|
||||
}
|
||||
}
|
||||
|
||||
message EvmAnalyzeTransactionRequest {
|
||||
bytes wallet_address = 1; // 20-byte Ethereum address
|
||||
bytes rlp_transaction = 2; // RLP-encoded EIP-1559 transaction
|
||||
}
|
||||
|
||||
message EvmAnalyzeTransactionResponse {
|
||||
oneof result {
|
||||
SpecificMeaning meaning = 1;
|
||||
TransactionEvalError eval_error = 2;
|
||||
EvmError error = 3;
|
||||
}
|
||||
}
|
||||
@@ -1,46 +0,0 @@
|
||||
// Protocol Buffers - Google's data interchange format
|
||||
// Copyright 2008 Google Inc. All rights reserved.
|
||||
//
|
||||
// Redistribution and use in source and binary forms, with or without
|
||||
// modification, are permitted provided that the following conditions are
|
||||
// met:
|
||||
//
|
||||
// * Redistributions of source code must retain the above copyright
|
||||
// notice, this list of conditions and the following disclaimer.
|
||||
// * Redistributions in binary form must reproduce the above
|
||||
// copyright notice, this list of conditions and the following disclaimer
|
||||
// in the documentation and/or other materials provided with the
|
||||
// distribution.
|
||||
// * Neither the name of Google Inc. nor the names of its
|
||||
// contributors may be used to endorse or promote products derived from
|
||||
// this software without specific prior written permission.
|
||||
|
||||
syntax = "proto3";
|
||||
|
||||
package google.protobuf;
|
||||
|
||||
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||
option cc_enable_arenas = true;
|
||||
option go_package = "google.golang.org/protobuf/types/known/timestamppb";
|
||||
option java_package = "com.google.protobuf";
|
||||
option java_outer_classname = "TimestampProto";
|
||||
option java_multiple_files = true;
|
||||
option objc_class_prefix = "GPB";
|
||||
|
||||
// A Timestamp represents a point in time independent of any time zone or local
|
||||
// calendar, encoded as a count of seconds and fractions of seconds at
|
||||
// nanosecond resolution. The count is relative to an epoch at UTC midnight on
|
||||
// January 1, 1970, in the proleptic Gregorian calendar which extends the
|
||||
// Gregorian calendar backwards to year one.
|
||||
message Timestamp {
|
||||
// Represents seconds of UTC time since Unix epoch
|
||||
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
|
||||
// 9999-12-31T23:59:59Z inclusive.
|
||||
int64 seconds = 1;
|
||||
|
||||
// Non-negative fractions of a second at nanosecond resolution. Negative
|
||||
// second values with fractions must still have non-negative nanos values
|
||||
// that count forward in time. Must be from 0 to 999,999,999
|
||||
// inclusive.
|
||||
int32 nanos = 2;
|
||||
}
|
||||
@@ -1,14 +0,0 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package arbiter.unseal;
|
||||
|
||||
message UserAgentKeyRequest {}
|
||||
|
||||
message ServerKeyResponse {
|
||||
bytes pubkey = 1;
|
||||
}
|
||||
message UserAgentSealedKey {
|
||||
bytes sealed_key = 1;
|
||||
bytes pubkey = 2;
|
||||
bytes nonce = 3;
|
||||
}
|
||||
199
protobufs/user_agent.proto
Normal file
199
protobufs/user_agent.proto
Normal file
@@ -0,0 +1,199 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package arbiter.user_agent;
|
||||
|
||||
import "client.proto";
|
||||
import "evm.proto";
|
||||
import "google/protobuf/empty.proto";
|
||||
|
||||
enum KeyType {
|
||||
KEY_TYPE_UNSPECIFIED = 0;
|
||||
KEY_TYPE_ED25519 = 1;
|
||||
KEY_TYPE_ECDSA_SECP256K1 = 2;
|
||||
KEY_TYPE_RSA = 3;
|
||||
}
|
||||
|
||||
// --- SDK client management ---
|
||||
|
||||
enum SdkClientError {
|
||||
SDK_CLIENT_ERROR_UNSPECIFIED = 0;
|
||||
SDK_CLIENT_ERROR_ALREADY_EXISTS = 1;
|
||||
SDK_CLIENT_ERROR_NOT_FOUND = 2;
|
||||
SDK_CLIENT_ERROR_HAS_RELATED_DATA = 3; // hard-delete blocked by FK (client has grants or transaction logs)
|
||||
SDK_CLIENT_ERROR_INTERNAL = 4;
|
||||
}
|
||||
|
||||
message SdkClientRevokeRequest {
|
||||
int32 client_id = 1;
|
||||
}
|
||||
|
||||
message SdkClientEntry {
|
||||
int32 id = 1;
|
||||
bytes pubkey = 2;
|
||||
arbiter.client.ClientInfo info = 3;
|
||||
int32 created_at = 4;
|
||||
}
|
||||
|
||||
message SdkClientList {
|
||||
repeated SdkClientEntry clients = 1;
|
||||
}
|
||||
|
||||
message SdkClientRevokeResponse {
|
||||
oneof result {
|
||||
google.protobuf.Empty ok = 1;
|
||||
SdkClientError error = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message SdkClientListResponse {
|
||||
oneof result {
|
||||
SdkClientList clients = 1;
|
||||
SdkClientError error = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message AuthChallengeRequest {
|
||||
bytes pubkey = 1;
|
||||
optional string bootstrap_token = 2;
|
||||
KeyType key_type = 3;
|
||||
}
|
||||
|
||||
message AuthChallenge {
|
||||
int32 nonce = 2;
|
||||
reserved 1;
|
||||
}
|
||||
|
||||
message AuthChallengeSolution {
|
||||
bytes signature = 1;
|
||||
}
|
||||
|
||||
enum AuthResult {
|
||||
AUTH_RESULT_UNSPECIFIED = 0;
|
||||
AUTH_RESULT_SUCCESS = 1;
|
||||
AUTH_RESULT_INVALID_KEY = 2;
|
||||
AUTH_RESULT_INVALID_SIGNATURE = 3;
|
||||
AUTH_RESULT_BOOTSTRAP_REQUIRED = 4;
|
||||
AUTH_RESULT_TOKEN_INVALID = 5;
|
||||
AUTH_RESULT_INTERNAL = 6;
|
||||
}
|
||||
|
||||
message UnsealStart {
|
||||
bytes client_pubkey = 1;
|
||||
}
|
||||
|
||||
message UnsealStartResponse {
|
||||
bytes server_pubkey = 1;
|
||||
}
|
||||
message UnsealEncryptedKey {
|
||||
bytes nonce = 1;
|
||||
bytes ciphertext = 2;
|
||||
bytes associated_data = 3;
|
||||
}
|
||||
|
||||
message BootstrapEncryptedKey {
|
||||
bytes nonce = 1;
|
||||
bytes ciphertext = 2;
|
||||
bytes associated_data = 3;
|
||||
}
|
||||
|
||||
enum UnsealResult {
|
||||
UNSEAL_RESULT_UNSPECIFIED = 0;
|
||||
UNSEAL_RESULT_SUCCESS = 1;
|
||||
UNSEAL_RESULT_INVALID_KEY = 2;
|
||||
UNSEAL_RESULT_UNBOOTSTRAPPED = 3;
|
||||
}
|
||||
|
||||
enum BootstrapResult {
|
||||
BOOTSTRAP_RESULT_UNSPECIFIED = 0;
|
||||
BOOTSTRAP_RESULT_SUCCESS = 1;
|
||||
BOOTSTRAP_RESULT_ALREADY_BOOTSTRAPPED = 2;
|
||||
BOOTSTRAP_RESULT_INVALID_KEY = 3;
|
||||
}
|
||||
|
||||
enum VaultState {
|
||||
VAULT_STATE_UNSPECIFIED = 0;
|
||||
VAULT_STATE_UNBOOTSTRAPPED = 1;
|
||||
VAULT_STATE_SEALED = 2;
|
||||
VAULT_STATE_UNSEALED = 3;
|
||||
VAULT_STATE_ERROR = 4;
|
||||
}
|
||||
|
||||
message SdkClientConnectionRequest {
|
||||
bytes pubkey = 1;
|
||||
arbiter.client.ClientInfo info = 2;
|
||||
}
|
||||
|
||||
message SdkClientConnectionResponse {
|
||||
bool approved = 1;
|
||||
bytes pubkey = 2;
|
||||
}
|
||||
|
||||
message SdkClientConnectionCancel {
|
||||
bytes pubkey = 1;
|
||||
}
|
||||
|
||||
message WalletAccess {
|
||||
int32 wallet_id = 1;
|
||||
int32 sdk_client_id = 2;
|
||||
}
|
||||
|
||||
message SdkClientWalletAccess {
|
||||
int32 id = 1;
|
||||
WalletAccess access = 2;
|
||||
}
|
||||
|
||||
message SdkClientGrantWalletAccess {
|
||||
repeated WalletAccess accesses = 1;
|
||||
}
|
||||
|
||||
message SdkClientRevokeWalletAccess {
|
||||
repeated int32 accesses = 1;
|
||||
}
|
||||
|
||||
message ListWalletAccessResponse {
|
||||
repeated SdkClientWalletAccess accesses = 1;
|
||||
}
|
||||
|
||||
message UserAgentRequest {
|
||||
int32 id = 16;
|
||||
oneof payload {
|
||||
AuthChallengeRequest auth_challenge_request = 1;
|
||||
AuthChallengeSolution auth_challenge_solution = 2;
|
||||
UnsealStart unseal_start = 3;
|
||||
UnsealEncryptedKey unseal_encrypted_key = 4;
|
||||
google.protobuf.Empty query_vault_state = 5;
|
||||
google.protobuf.Empty evm_wallet_create = 6;
|
||||
google.protobuf.Empty evm_wallet_list = 7;
|
||||
arbiter.evm.EvmGrantCreateRequest evm_grant_create = 8;
|
||||
arbiter.evm.EvmGrantDeleteRequest evm_grant_delete = 9;
|
||||
arbiter.evm.EvmGrantListRequest evm_grant_list = 10;
|
||||
SdkClientConnectionResponse sdk_client_connection_response = 11;
|
||||
SdkClientRevokeRequest sdk_client_revoke = 12;
|
||||
google.protobuf.Empty sdk_client_list = 13;
|
||||
BootstrapEncryptedKey bootstrap_encrypted_key = 14;
|
||||
SdkClientGrantWalletAccess grant_wallet_access = 15;
|
||||
SdkClientRevokeWalletAccess revoke_wallet_access = 17;
|
||||
google.protobuf.Empty list_wallet_access = 18;
|
||||
}
|
||||
}
|
||||
message UserAgentResponse {
|
||||
optional int32 id = 16;
|
||||
oneof payload {
|
||||
AuthChallenge auth_challenge = 1;
|
||||
AuthResult auth_result = 2;
|
||||
UnsealStartResponse unseal_start_response = 3;
|
||||
UnsealResult unseal_result = 4;
|
||||
VaultState vault_state = 5;
|
||||
arbiter.evm.WalletCreateResponse evm_wallet_create = 6;
|
||||
arbiter.evm.WalletListResponse evm_wallet_list = 7;
|
||||
arbiter.evm.EvmGrantCreateResponse evm_grant_create = 8;
|
||||
arbiter.evm.EvmGrantDeleteResponse evm_grant_delete = 9;
|
||||
arbiter.evm.EvmGrantListResponse evm_grant_list = 10;
|
||||
SdkClientConnectionRequest sdk_client_connection_request = 11;
|
||||
SdkClientConnectionCancel sdk_client_connection_cancel = 12;
|
||||
SdkClientRevokeResponse sdk_client_revoke_response = 13;
|
||||
SdkClientListResponse sdk_client_list_response = 14;
|
||||
BootstrapResult bootstrap_result = 15;
|
||||
ListWalletAccessResponse list_wallet_access_response = 17;
|
||||
}
|
||||
}
|
||||
BIN
scripts/__pycache__/gen_erc20_registry.cpython-314.pyc
Normal file
BIN
scripts/__pycache__/gen_erc20_registry.cpython-314.pyc
Normal file
Binary file not shown.
150
scripts/gen_erc20_registry.py
Normal file
150
scripts/gen_erc20_registry.py
Normal file
@@ -0,0 +1,150 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Fetch the Uniswap default token list and emit Rust `TokenInfo` statics.
|
||||
|
||||
Usage:
|
||||
python3 gen_erc20_registry.py # fetch from IPFS
|
||||
python3 gen_erc20_registry.py tokens.json # local file
|
||||
python3 gen_erc20_registry.py tokens.json out.rs # custom output file
|
||||
"""
|
||||
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
import unicodedata
|
||||
import urllib.request
|
||||
|
||||
UNISWAP_URL = "https://ipfs.io/ipns/tokens.uniswap.org"
|
||||
|
||||
SOLANA_CHAIN_ID = 501000101
|
||||
IDENTIFIER_RE = re.compile(r"[^A-Za-z0-9]+")
|
||||
|
||||
|
||||
def load_tokens(source=None):
|
||||
if source:
|
||||
with open(source) as f:
|
||||
return json.load(f)
|
||||
req = urllib.request.Request(
|
||||
UNISWAP_URL,
|
||||
headers={"Accept": "application/json", "User-Agent": "gen_tokens/1.0"},
|
||||
)
|
||||
with urllib.request.urlopen(req, timeout=60) as resp:
|
||||
return json.loads(resp.read())
|
||||
|
||||
|
||||
def escape(s: str) -> str:
|
||||
return s.replace("\\", "\\\\").replace('"', '\\"')
|
||||
|
||||
|
||||
def to_screaming_case(name: str) -> str:
|
||||
normalized = unicodedata.normalize("NFKD", name or "")
|
||||
ascii_name = normalized.encode("ascii", "ignore").decode("ascii")
|
||||
snake = IDENTIFIER_RE.sub("_", ascii_name).strip("_").upper()
|
||||
if not snake:
|
||||
snake = "TOKEN"
|
||||
if snake[0].isdigit():
|
||||
snake = f"TOKEN_{snake}"
|
||||
return snake
|
||||
|
||||
|
||||
def static_name_for_token(token: dict, used_names: set[str]) -> str:
|
||||
base = to_screaming_case(token.get("name", ""))
|
||||
if base not in used_names:
|
||||
used_names.add(base)
|
||||
return base
|
||||
|
||||
address = token["address"]
|
||||
suffix = f"{token['chainId']}_{address[2:].upper()[-8:]}"
|
||||
candidate = f"{base}_{suffix}"
|
||||
|
||||
i = 2
|
||||
while candidate in used_names:
|
||||
candidate = f"{base}_{suffix}_{i}"
|
||||
i += 1
|
||||
|
||||
used_names.add(candidate)
|
||||
return candidate
|
||||
|
||||
|
||||
def main():
|
||||
source = sys.argv[1] if len(sys.argv) > 1 else None
|
||||
output = sys.argv[2] if len(sys.argv) > 2 else "generated_tokens.rs"
|
||||
data = load_tokens(source)
|
||||
tokens = data["tokens"]
|
||||
|
||||
# Deduplicate by (chainId, address)
|
||||
seen = set()
|
||||
unique = []
|
||||
for t in tokens:
|
||||
key = (t["chainId"], t["address"].lower())
|
||||
if key not in seen:
|
||||
seen.add(key)
|
||||
unique.append(t)
|
||||
|
||||
unique.sort(key=lambda t: (t["chainId"], t.get("symbol", "").upper()))
|
||||
evm_tokens = [t for t in unique if t["chainId"] != SOLANA_CHAIN_ID]
|
||||
|
||||
ver = data["version"]
|
||||
lines = []
|
||||
w = lines.append
|
||||
|
||||
w(
|
||||
f"// Auto-generated from Uniswap token list v{ver['major']}.{ver['minor']}.{ver['patch']}"
|
||||
)
|
||||
w(f"// {len(evm_tokens)} tokens")
|
||||
w("// DO NOT EDIT - regenerate with gen_erc20_registry.py")
|
||||
w("")
|
||||
|
||||
used_static_names = set()
|
||||
token_statics = []
|
||||
for t in evm_tokens:
|
||||
static_name = static_name_for_token(t, used_static_names)
|
||||
token_statics.append((static_name, t))
|
||||
|
||||
for static_name, t in token_statics:
|
||||
addr = t["address"]
|
||||
name = escape(t.get("name", ""))
|
||||
symbol = escape(t.get("symbol", ""))
|
||||
decimals = t.get("decimals", 18)
|
||||
logo = t.get("logoURI")
|
||||
chain = t["chainId"]
|
||||
|
||||
logo_val = f'Some("{escape(logo)}")' if logo else "None"
|
||||
|
||||
w(f"pub static {static_name}: TokenInfo = TokenInfo {{")
|
||||
w(f' name: "{name}",')
|
||||
w(f' symbol: "{symbol}",')
|
||||
w(f" decimals: {decimals},")
|
||||
w(f' contract: address!("{addr}"),')
|
||||
w(f" chain: {chain},")
|
||||
w(f" logo_uri: {logo_val},")
|
||||
w("};")
|
||||
w("")
|
||||
|
||||
w("pub static TOKENS: &[&TokenInfo] = &[")
|
||||
for static_name, _ in token_statics:
|
||||
w(f" &{static_name},")
|
||||
w("];")
|
||||
w("")
|
||||
w("pub fn get_token(")
|
||||
w(" chain_id: alloy::primitives::ChainId,")
|
||||
w(" address: alloy::primitives::Address,")
|
||||
w(") -> Option<&'static TokenInfo> {")
|
||||
w(" match (chain_id, address) {")
|
||||
for static_name, t in token_statics:
|
||||
w(
|
||||
f' ({t["chainId"]}, addr) if addr == address!("{t["address"]}") => Some(&{static_name}),'
|
||||
)
|
||||
w(" _ => None,")
|
||||
w(" }")
|
||||
w("}")
|
||||
w("")
|
||||
|
||||
with open(output, "w") as f:
|
||||
f.write("\n".join(lines))
|
||||
|
||||
print(f"Wrote {len(token_statics)} tokens to {output}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
13
server/.cargo/audit.toml
Normal file
13
server/.cargo/audit.toml
Normal file
@@ -0,0 +1,13 @@
|
||||
[advisories]
|
||||
# RUSTSEC-2023-0071: Marvin Attack timing side-channel in rsa crate.
|
||||
# No fixed version is available upstream.
|
||||
# RSA support is required for Windows Hello / KeyCredentialManager
|
||||
# (https://learn.microsoft.com/en-us/uwp/api/windows.security.credentials.keycredentialmanager.requestcreateasync),
|
||||
# which only issues RSA-2048 keys.
|
||||
# Mitigations in place:
|
||||
# - Signing uses BlindedSigningKey (PSS+SHA-256), which applies blinding to
|
||||
# protect the private key from timing recovery during signing.
|
||||
# - RSA decryption is never performed; we only verify public-key signatures.
|
||||
# - The attack requires local, high-resolution timing access against the
|
||||
# signing process, which is not exposed in our threat model.
|
||||
ignore = ["RUSTSEC-2023-0071"]
|
||||
3587
server/Cargo.lock
generated
3587
server/Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -1,25 +1,45 @@
|
||||
[workspace]
|
||||
members = [
|
||||
"crates/arbiter-client",
|
||||
"crates/arbiter-proto",
|
||||
"crates/arbiter-server",
|
||||
"crates/arbiter-useragent",
|
||||
"crates/*",
|
||||
]
|
||||
resolver = "3"
|
||||
|
||||
[workspace.lints.clippy]
|
||||
disallowed-methods = "deny"
|
||||
|
||||
|
||||
[workspace.dependencies]
|
||||
tonic = { version = "0.14.3", features = ["deflate", "gzip", "tls-connect-info", "zstd"] }
|
||||
tonic = { version = "0.14.5", features = [
|
||||
"deflate",
|
||||
"gzip",
|
||||
"tls-connect-info",
|
||||
"zstd",
|
||||
] }
|
||||
tracing = "0.1.44"
|
||||
tokio = { version = "1.49.0", features = ["full"] }
|
||||
tokio = { version = "1.50.0", features = ["full"] }
|
||||
ed25519-dalek = { version = "3.0.0-pre.6", features = ["rand_core"] }
|
||||
chrono = { version = "0.4.43", features = ["serde"] }
|
||||
chrono = { version = "0.4.44", features = ["serde"] }
|
||||
rand = "0.10.0"
|
||||
rustls = "0.23.36"
|
||||
rustls = { version = "0.23.37", features = ["aws-lc-rs"] }
|
||||
smlang = "0.8.0"
|
||||
miette = { version = "7.6.0", features = ["fancy", "serde"] }
|
||||
thiserror = "2.0.18"
|
||||
async-trait = "0.1.89"
|
||||
futures = "0.3.31"
|
||||
futures = "0.3.32"
|
||||
tokio-stream = { version = "0.1.18", features = ["full"] }
|
||||
kameo = "0.19.2"
|
||||
prost-types = { version = "0.14.3", features = ["chrono"] }
|
||||
x25519-dalek = { version = "2.0.1", features = ["getrandom"] }
|
||||
rstest = "0.26.1"
|
||||
rustls-pki-types = "1.14.0"
|
||||
alloy = "1.7.3"
|
||||
rcgen = { version = "0.14.7", features = [
|
||||
"aws_lc_rs",
|
||||
"pem",
|
||||
"x509-parser",
|
||||
"zeroize",
|
||||
], default-features = false }
|
||||
k256 = { version = "0.13.4", features = ["ecdsa", "pkcs8"] }
|
||||
rsa = { version = "0.9", features = ["sha2"] }
|
||||
sha2 = "0.10"
|
||||
spki = "0.7"
|
||||
|
||||
9
server/clippy.toml
Normal file
9
server/clippy.toml
Normal file
@@ -0,0 +1,9 @@
|
||||
disallowed-methods = [
|
||||
# RSA decryption is forbidden: the rsa crate has RUSTSEC-2023-0071 (Marvin Attack).
|
||||
# We only use RSA for Windows Hello (KeyCredentialManager) public-key verification — decryption
|
||||
# is never required and must not be introduced.
|
||||
{ path = "rsa::RsaPrivateKey::decrypt", reason = "RSA decryption is forbidden (RUSTSEC-2023-0071 Marvin Attack). Only PSS signing/verification is permitted." },
|
||||
{ path = "rsa::RsaPrivateKey::decrypt_blinded", reason = "RSA decryption is forbidden (RUSTSEC-2023-0071 Marvin Attack). Only PSS signing/verification is permitted." },
|
||||
{ path = "rsa::traits::Decryptor::decrypt", reason = "RSA decryption is forbidden (RUSTSEC-2023-0071 Marvin Attack). This blocks decrypt() on rsa::{pkcs1v15,oaep}::DecryptingKey." },
|
||||
{ path = "rsa::traits::RandomizedDecryptor::decrypt_with_rng", reason = "RSA decryption is forbidden (RUSTSEC-2023-0071 Marvin Attack). This blocks decrypt_with_rng() on rsa::{pkcs1v15,oaep}::DecryptingKey." },
|
||||
]
|
||||
BIN
server/crates/.DS_Store
vendored
Normal file
BIN
server/crates/.DS_Store
vendored
Normal file
Binary file not shown.
@@ -3,5 +3,24 @@ name = "arbiter-client"
|
||||
version = "0.1.0"
|
||||
edition = "2024"
|
||||
repository = "https://git.markettakers.org/MarketTakers/arbiter"
|
||||
license = "Apache-2.0"
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[features]
|
||||
evm = ["dep:alloy"]
|
||||
|
||||
[dependencies]
|
||||
arbiter-proto.path = "../arbiter-proto"
|
||||
alloy = { workspace = true, optional = true }
|
||||
tonic.workspace = true
|
||||
tonic.features = ["tls-aws-lc"]
|
||||
tokio.workspace = true
|
||||
tokio-stream.workspace = true
|
||||
ed25519-dalek.workspace = true
|
||||
thiserror.workspace = true
|
||||
http = "1.4.0"
|
||||
rustls-webpki = { version = "0.103.10", features = ["aws-lc-rs"] }
|
||||
async-trait.workspace = true
|
||||
rand.workspace = true
|
||||
|
||||
135
server/crates/arbiter-client/src/auth.rs
Normal file
135
server/crates/arbiter-client/src/auth.rs
Normal file
@@ -0,0 +1,135 @@
|
||||
use arbiter_proto::{
|
||||
ClientMetadata, format_challenge,
|
||||
proto::client::{
|
||||
AuthChallengeRequest, AuthChallengeSolution, AuthResult, ClientInfo as ProtoClientInfo,
|
||||
ClientRequest, client_request::Payload as ClientRequestPayload,
|
||||
client_response::Payload as ClientResponsePayload,
|
||||
},
|
||||
};
|
||||
use ed25519_dalek::Signer as _;
|
||||
|
||||
use crate::{
|
||||
storage::StorageError,
|
||||
transport::{ClientTransport, next_request_id},
|
||||
};
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum AuthError {
|
||||
#[error("Auth challenge was not returned by server")]
|
||||
MissingAuthChallenge,
|
||||
|
||||
#[error("Client approval denied by User Agent")]
|
||||
ApprovalDenied,
|
||||
|
||||
#[error("No User Agents online to approve client")]
|
||||
NoUserAgentsOnline,
|
||||
|
||||
#[error("Unexpected auth response payload")]
|
||||
UnexpectedAuthResponse,
|
||||
|
||||
#[error("Signing key storage error")]
|
||||
Storage(#[from] StorageError),
|
||||
}
|
||||
|
||||
fn map_auth_result(code: i32) -> AuthError {
|
||||
match AuthResult::try_from(code).unwrap_or(AuthResult::Unspecified) {
|
||||
AuthResult::ApprovalDenied => AuthError::ApprovalDenied,
|
||||
AuthResult::NoUserAgentsOnline => AuthError::NoUserAgentsOnline,
|
||||
AuthResult::Unspecified
|
||||
| AuthResult::Success
|
||||
| AuthResult::InvalidKey
|
||||
| AuthResult::InvalidSignature
|
||||
| AuthResult::Internal => AuthError::UnexpectedAuthResponse,
|
||||
}
|
||||
}
|
||||
|
||||
async fn send_auth_challenge_request(
|
||||
transport: &mut ClientTransport,
|
||||
metadata: ClientMetadata,
|
||||
key: &ed25519_dalek::SigningKey,
|
||||
) -> std::result::Result<(), AuthError> {
|
||||
transport
|
||||
.send(ClientRequest {
|
||||
request_id: next_request_id(),
|
||||
payload: Some(ClientRequestPayload::AuthChallengeRequest(
|
||||
AuthChallengeRequest {
|
||||
pubkey: key.verifying_key().to_bytes().to_vec(),
|
||||
client_info: Some(ProtoClientInfo {
|
||||
name: metadata.name,
|
||||
description: metadata.description,
|
||||
version: metadata.version,
|
||||
}),
|
||||
},
|
||||
)),
|
||||
})
|
||||
.await
|
||||
.map_err(|_| AuthError::UnexpectedAuthResponse)
|
||||
}
|
||||
|
||||
async fn receive_auth_challenge(
|
||||
transport: &mut ClientTransport,
|
||||
) -> std::result::Result<arbiter_proto::proto::client::AuthChallenge, AuthError> {
|
||||
let response = transport
|
||||
.recv()
|
||||
.await
|
||||
.map_err(|_| AuthError::MissingAuthChallenge)?;
|
||||
|
||||
let payload = response.payload.ok_or(AuthError::MissingAuthChallenge)?;
|
||||
match payload {
|
||||
ClientResponsePayload::AuthChallenge(challenge) => Ok(challenge),
|
||||
ClientResponsePayload::AuthResult(result) => Err(map_auth_result(result)),
|
||||
_ => Err(AuthError::UnexpectedAuthResponse),
|
||||
}
|
||||
}
|
||||
|
||||
async fn send_auth_challenge_solution(
|
||||
transport: &mut ClientTransport,
|
||||
key: &ed25519_dalek::SigningKey,
|
||||
challenge: arbiter_proto::proto::client::AuthChallenge,
|
||||
) -> std::result::Result<(), AuthError> {
|
||||
let challenge_payload = format_challenge(challenge.nonce, &challenge.pubkey);
|
||||
let signature = key.sign(&challenge_payload).to_bytes().to_vec();
|
||||
|
||||
transport
|
||||
.send(ClientRequest {
|
||||
request_id: next_request_id(),
|
||||
payload: Some(ClientRequestPayload::AuthChallengeSolution(
|
||||
AuthChallengeSolution { signature },
|
||||
)),
|
||||
})
|
||||
.await
|
||||
.map_err(|_| AuthError::UnexpectedAuthResponse)
|
||||
}
|
||||
|
||||
async fn receive_auth_confirmation(
|
||||
transport: &mut ClientTransport,
|
||||
) -> std::result::Result<(), AuthError> {
|
||||
let response = transport
|
||||
.recv()
|
||||
.await
|
||||
.map_err(|_| AuthError::UnexpectedAuthResponse)?;
|
||||
|
||||
let payload = response
|
||||
.payload
|
||||
.ok_or(AuthError::UnexpectedAuthResponse)?;
|
||||
match payload {
|
||||
ClientResponsePayload::AuthResult(result)
|
||||
if AuthResult::try_from(result).ok() == Some(AuthResult::Success) =>
|
||||
{
|
||||
Ok(())
|
||||
}
|
||||
ClientResponsePayload::AuthResult(result) => Err(map_auth_result(result)),
|
||||
_ => Err(AuthError::UnexpectedAuthResponse),
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) async fn authenticate(
|
||||
transport: &mut ClientTransport,
|
||||
metadata: ClientMetadata,
|
||||
key: &ed25519_dalek::SigningKey,
|
||||
) -> std::result::Result<(), AuthError> {
|
||||
send_auth_challenge_request(transport, metadata, key).await?;
|
||||
let challenge = receive_auth_challenge(transport).await?;
|
||||
send_auth_challenge_solution(transport, key, challenge).await?;
|
||||
receive_auth_confirmation(transport).await
|
||||
}
|
||||
48
server/crates/arbiter-client/src/bin/test_connect.rs
Normal file
48
server/crates/arbiter-client/src/bin/test_connect.rs
Normal file
@@ -0,0 +1,48 @@
|
||||
|
||||
use std::io::{self, Write};
|
||||
|
||||
use arbiter_client::ArbiterClient;
|
||||
use arbiter_proto::{ClientMetadata, url::ArbiterUrl};
|
||||
use tonic::ConnectError;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
println!("Testing connection to Arbiter server...");
|
||||
print!("Enter ArbiterUrl: ");
|
||||
let _ = io::stdout().flush();
|
||||
|
||||
let mut input = String::new();
|
||||
if let Err(err) = io::stdin().read_line(&mut input) {
|
||||
eprintln!("Failed to read input: {err}");
|
||||
return;
|
||||
}
|
||||
|
||||
let input = input.trim();
|
||||
if input.is_empty() {
|
||||
eprintln!("ArbiterUrl cannot be empty");
|
||||
return;
|
||||
}
|
||||
|
||||
|
||||
|
||||
let url = match ArbiterUrl::try_from(input) {
|
||||
Ok(url) => url,
|
||||
Err(err) => {
|
||||
eprintln!("Invalid ArbiterUrl: {err}");
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
println!("{:#?}", url);
|
||||
|
||||
let metadata = ClientMetadata {
|
||||
name: "arbiter-client test_connect".to_string(),
|
||||
description: Some("Manual connection smoke test".to_string()),
|
||||
version: Some(env!("CARGO_PKG_VERSION").to_string()),
|
||||
};
|
||||
|
||||
match ArbiterClient::connect(url, metadata).await {
|
||||
Ok(_) => println!("Connected and authenticated successfully."),
|
||||
Err(err) => eprintln!("Failed to connect: {:#?}", err),
|
||||
}
|
||||
}
|
||||
89
server/crates/arbiter-client/src/client.rs
Normal file
89
server/crates/arbiter-client/src/client.rs
Normal file
@@ -0,0 +1,89 @@
|
||||
use arbiter_proto::{ClientMetadata, proto::arbiter_service_client::ArbiterServiceClient, url::ArbiterUrl};
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::{Mutex, mpsc};
|
||||
use tokio_stream::wrappers::ReceiverStream;
|
||||
use tonic::transport::ClientTlsConfig;
|
||||
|
||||
use crate::{
|
||||
StorageError, auth::{AuthError, authenticate}, storage::{FileSigningKeyStorage, SigningKeyStorage}, transport::{BUFFER_LENGTH, ClientTransport}
|
||||
};
|
||||
|
||||
#[cfg(feature = "evm")]
|
||||
use crate::wallets::evm::ArbiterEvmWallet;
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum Error {
|
||||
#[error("gRPC error")]
|
||||
Grpc(#[from] tonic::Status),
|
||||
|
||||
#[error("Could not establish connection")]
|
||||
Connection(#[from] tonic::transport::Error),
|
||||
|
||||
#[error("Invalid server URI")]
|
||||
InvalidUri(#[from] http::uri::InvalidUri),
|
||||
|
||||
#[error("Invalid CA certificate")]
|
||||
InvalidCaCert(#[from] webpki::Error),
|
||||
|
||||
#[error("Authentication error")]
|
||||
Authentication(#[from] AuthError),
|
||||
|
||||
#[error("Storage error")]
|
||||
Storage(#[from] StorageError),
|
||||
|
||||
}
|
||||
|
||||
pub struct ArbiterClient {
|
||||
#[allow(dead_code)]
|
||||
transport: Arc<Mutex<ClientTransport>>,
|
||||
}
|
||||
|
||||
impl ArbiterClient {
|
||||
pub async fn connect(url: ArbiterUrl, metadata: ClientMetadata) -> Result<Self, Error> {
|
||||
let storage = FileSigningKeyStorage::from_default_location()?;
|
||||
Self::connect_with_storage(url, metadata, &storage).await
|
||||
}
|
||||
|
||||
pub async fn connect_with_storage<S: SigningKeyStorage>(
|
||||
url: ArbiterUrl,
|
||||
metadata: ClientMetadata,
|
||||
storage: &S,
|
||||
) -> Result<Self, Error> {
|
||||
let key = storage.load_or_create()?;
|
||||
Self::connect_with_key(url, metadata, key).await
|
||||
}
|
||||
|
||||
pub async fn connect_with_key(
|
||||
url: ArbiterUrl,
|
||||
metadata: ClientMetadata,
|
||||
key: ed25519_dalek::SigningKey,
|
||||
) -> Result<Self, Error> {
|
||||
let anchor = webpki::anchor_from_trusted_cert(&url.ca_cert)?.to_owned();
|
||||
let tls = ClientTlsConfig::new().trust_anchor(anchor);
|
||||
|
||||
let channel = tonic::transport::Channel::from_shared(format!("https://{}:{}", url.host, url.port))?
|
||||
.tls_config(tls)?
|
||||
.connect()
|
||||
.await?;
|
||||
|
||||
let mut client = ArbiterServiceClient::new(channel);
|
||||
let (tx, rx) = mpsc::channel(BUFFER_LENGTH);
|
||||
let response_stream = client.client(ReceiverStream::new(rx)).await?.into_inner();
|
||||
|
||||
let mut transport = ClientTransport {
|
||||
sender: tx,
|
||||
receiver: response_stream,
|
||||
};
|
||||
|
||||
authenticate(&mut transport, metadata, &key).await?;
|
||||
|
||||
Ok(Self {
|
||||
transport: Arc::new(Mutex::new(transport)),
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(feature = "evm")]
|
||||
pub async fn evm_wallets(&self) -> Result<Vec<ArbiterEvmWallet>, Error> {
|
||||
todo!("fetch EVM wallet list from server")
|
||||
}
|
||||
}
|
||||
@@ -1,14 +1,12 @@
|
||||
pub fn add(left: u64, right: u64) -> u64 {
|
||||
left + right
|
||||
}
|
||||
mod auth;
|
||||
mod client;
|
||||
mod storage;
|
||||
mod transport;
|
||||
pub mod wallets;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
pub use auth::AuthError;
|
||||
pub use client::{ArbiterClient, Error};
|
||||
pub use storage::{FileSigningKeyStorage, SigningKeyStorage, StorageError};
|
||||
|
||||
#[test]
|
||||
fn it_works() {
|
||||
let result = add(2, 2);
|
||||
assert_eq!(result, 4);
|
||||
}
|
||||
}
|
||||
#[cfg(feature = "evm")]
|
||||
pub use wallets::evm::ArbiterEvmWallet;
|
||||
|
||||
132
server/crates/arbiter-client/src/storage.rs
Normal file
132
server/crates/arbiter-client/src/storage.rs
Normal file
@@ -0,0 +1,132 @@
|
||||
use arbiter_proto::home_path;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum StorageError {
|
||||
#[error("I/O error")]
|
||||
Io(#[from] std::io::Error),
|
||||
|
||||
#[error("Invalid signing key length in storage: expected {expected} bytes, got {actual} bytes")]
|
||||
InvalidKeyLength { expected: usize, actual: usize },
|
||||
}
|
||||
|
||||
pub trait SigningKeyStorage {
|
||||
fn load_or_create(&self) -> std::result::Result<ed25519_dalek::SigningKey, StorageError>;
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct FileSigningKeyStorage {
|
||||
path: PathBuf,
|
||||
}
|
||||
|
||||
impl FileSigningKeyStorage {
|
||||
pub const DEFAULT_FILE_NAME: &str = "sdk_client_ed25519.key";
|
||||
|
||||
pub fn new(path: impl Into<PathBuf>) -> Self {
|
||||
Self { path: path.into() }
|
||||
}
|
||||
|
||||
pub fn from_default_location() -> std::result::Result<Self, StorageError> {
|
||||
Ok(Self::new(home_path()?.join(Self::DEFAULT_FILE_NAME)))
|
||||
}
|
||||
|
||||
fn read_key(path: &Path) -> std::result::Result<ed25519_dalek::SigningKey, StorageError> {
|
||||
let bytes = std::fs::read(path)?;
|
||||
let raw: [u8; 32] =
|
||||
bytes
|
||||
.try_into()
|
||||
.map_err(|v: Vec<u8>| StorageError::InvalidKeyLength {
|
||||
expected: 32,
|
||||
actual: v.len(),
|
||||
})?;
|
||||
Ok(ed25519_dalek::SigningKey::from_bytes(&raw))
|
||||
}
|
||||
}
|
||||
|
||||
impl SigningKeyStorage for FileSigningKeyStorage {
|
||||
fn load_or_create(&self) -> std::result::Result<ed25519_dalek::SigningKey, StorageError> {
|
||||
if let Some(parent) = self.path.parent() {
|
||||
std::fs::create_dir_all(parent)?;
|
||||
}
|
||||
|
||||
if self.path.exists() {
|
||||
return Self::read_key(&self.path);
|
||||
}
|
||||
|
||||
let key = ed25519_dalek::SigningKey::generate(&mut rand::rng());
|
||||
let raw_key = key.to_bytes();
|
||||
|
||||
// Use create_new to prevent accidental overwrite if another process creates the key first.
|
||||
match std::fs::OpenOptions::new()
|
||||
.create_new(true)
|
||||
.write(true)
|
||||
.open(&self.path)
|
||||
{
|
||||
Ok(mut file) => {
|
||||
use std::io::Write as _;
|
||||
file.write_all(&raw_key)?;
|
||||
Ok(key)
|
||||
}
|
||||
Err(err) if err.kind() == std::io::ErrorKind::AlreadyExists => {
|
||||
Self::read_key(&self.path)
|
||||
}
|
||||
Err(err) => Err(StorageError::Io(err)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::{FileSigningKeyStorage, SigningKeyStorage, StorageError};
|
||||
|
||||
fn unique_temp_key_path() -> std::path::PathBuf {
|
||||
let nanos = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.expect("clock should be after unix epoch")
|
||||
.as_nanos();
|
||||
std::env::temp_dir().join(format!(
|
||||
"arbiter-client-key-{}-{}.bin",
|
||||
std::process::id(),
|
||||
nanos
|
||||
))
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn file_storage_creates_and_reuses_key() {
|
||||
let path = unique_temp_key_path();
|
||||
let storage = FileSigningKeyStorage::new(path.clone());
|
||||
|
||||
let key_a = storage
|
||||
.load_or_create()
|
||||
.expect("first load_or_create should create key");
|
||||
let key_b = storage
|
||||
.load_or_create()
|
||||
.expect("second load_or_create should read same key");
|
||||
|
||||
assert_eq!(key_a.to_bytes(), key_b.to_bytes());
|
||||
assert!(path.exists());
|
||||
|
||||
std::fs::remove_file(path).expect("temp key file should be removable");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn file_storage_rejects_invalid_key_length() {
|
||||
let path = unique_temp_key_path();
|
||||
std::fs::write(&path, [42u8; 31]).expect("should write invalid key file");
|
||||
let storage = FileSigningKeyStorage::new(path.clone());
|
||||
|
||||
let err = storage
|
||||
.load_or_create()
|
||||
.expect_err("storage should reject non-32-byte key file");
|
||||
|
||||
match err {
|
||||
StorageError::InvalidKeyLength { expected, actual } => {
|
||||
assert_eq!(expected, 32);
|
||||
assert_eq!(actual, 31);
|
||||
}
|
||||
other => panic!("unexpected error: {other:?}"),
|
||||
}
|
||||
|
||||
std::fs::remove_file(path).expect("temp key file should be removable");
|
||||
}
|
||||
}
|
||||
48
server/crates/arbiter-client/src/transport.rs
Normal file
48
server/crates/arbiter-client/src/transport.rs
Normal file
@@ -0,0 +1,48 @@
|
||||
use arbiter_proto::proto::{
|
||||
client::{ClientRequest, ClientResponse},
|
||||
};
|
||||
use std::sync::atomic::{AtomicI32, Ordering};
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
pub(crate) const BUFFER_LENGTH: usize = 16;
|
||||
static NEXT_REQUEST_ID: AtomicI32 = AtomicI32::new(1);
|
||||
|
||||
pub(crate) fn next_request_id() -> i32 {
|
||||
NEXT_REQUEST_ID.fetch_add(1, Ordering::Relaxed)
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub(crate) enum ClientSignError {
|
||||
#[error("Transport channel closed")]
|
||||
ChannelClosed,
|
||||
|
||||
#[error("Connection closed by server")]
|
||||
ConnectionClosed,
|
||||
}
|
||||
|
||||
pub(crate) struct ClientTransport {
|
||||
pub(crate) sender: mpsc::Sender<ClientRequest>,
|
||||
pub(crate) receiver: tonic::Streaming<ClientResponse>,
|
||||
}
|
||||
|
||||
impl ClientTransport {
|
||||
pub(crate) async fn send(
|
||||
&mut self,
|
||||
request: ClientRequest,
|
||||
) -> std::result::Result<(), ClientSignError> {
|
||||
self.sender
|
||||
.send(request)
|
||||
.await
|
||||
.map_err(|_| ClientSignError::ChannelClosed)
|
||||
}
|
||||
|
||||
pub(crate) async fn recv(
|
||||
&mut self,
|
||||
) -> std::result::Result<ClientResponse, ClientSignError> {
|
||||
match self.receiver.message().await {
|
||||
Ok(Some(resp)) => Ok(resp),
|
||||
Ok(None) => Err(ClientSignError::ConnectionClosed),
|
||||
Err(_) => Err(ClientSignError::ConnectionClosed),
|
||||
}
|
||||
}
|
||||
}
|
||||
89
server/crates/arbiter-client/src/wallets/evm.rs
Normal file
89
server/crates/arbiter-client/src/wallets/evm.rs
Normal file
@@ -0,0 +1,89 @@
|
||||
use alloy::{
|
||||
consensus::SignableTransaction,
|
||||
network::TxSigner,
|
||||
primitives::{Address, B256, ChainId, Signature},
|
||||
signers::{Error, Result, Signer},
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use crate::transport::ClientTransport;
|
||||
|
||||
pub struct ArbiterEvmWallet {
|
||||
transport: Arc<Mutex<ClientTransport>>,
|
||||
address: Address,
|
||||
chain_id: Option<ChainId>,
|
||||
}
|
||||
|
||||
impl ArbiterEvmWallet {
|
||||
pub(crate) fn new(transport: Arc<Mutex<ClientTransport>>, address: Address) -> Self {
|
||||
Self {
|
||||
transport,
|
||||
address,
|
||||
chain_id: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn address(&self) -> Address {
|
||||
self.address
|
||||
}
|
||||
|
||||
pub fn with_chain_id(mut self, chain_id: ChainId) -> Self {
|
||||
self.chain_id = Some(chain_id);
|
||||
self
|
||||
}
|
||||
|
||||
fn validate_chain_id(&self, tx: &mut dyn SignableTransaction<Signature>) -> Result<()> {
|
||||
if let Some(chain_id) = self.chain_id
|
||||
&& !tx.set_chain_id_checked(chain_id)
|
||||
{
|
||||
return Err(Error::TransactionChainIdMismatch {
|
||||
signer: chain_id,
|
||||
tx: tx.chain_id().unwrap(),
|
||||
});
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Signer for ArbiterEvmWallet {
|
||||
async fn sign_hash(&self, _hash: &B256) -> Result<Signature> {
|
||||
Err(Error::other(
|
||||
"hash-only signing is not supported for ArbiterEvmWallet; use transaction signing",
|
||||
))
|
||||
}
|
||||
|
||||
fn address(&self) -> Address {
|
||||
self.address
|
||||
}
|
||||
|
||||
fn chain_id(&self) -> Option<ChainId> {
|
||||
self.chain_id
|
||||
}
|
||||
|
||||
fn set_chain_id(&mut self, chain_id: Option<ChainId>) {
|
||||
self.chain_id = chain_id;
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl TxSigner<Signature> for ArbiterEvmWallet {
|
||||
fn address(&self) -> Address {
|
||||
self.address
|
||||
}
|
||||
|
||||
async fn sign_transaction(
|
||||
&self,
|
||||
tx: &mut dyn SignableTransaction<Signature>,
|
||||
) -> Result<Signature> {
|
||||
let _transport = self.transport.lock().await;
|
||||
self.validate_chain_id(tx)?;
|
||||
|
||||
Err(Error::other(
|
||||
"transaction signing is not supported by current arbiter.client protocol",
|
||||
))
|
||||
}
|
||||
}
|
||||
2
server/crates/arbiter-client/src/wallets/mod.rs
Normal file
2
server/crates/arbiter-client/src/wallets/mod.rs
Normal file
@@ -0,0 +1,2 @@
|
||||
#[cfg(feature = "evm")]
|
||||
pub mod evm;
|
||||
@@ -3,19 +3,34 @@ name = "arbiter-proto"
|
||||
version = "0.1.0"
|
||||
edition = "2024"
|
||||
repository = "https://git.markettakers.org/MarketTakers/arbiter"
|
||||
license = "Apache-2.0"
|
||||
|
||||
[dependencies]
|
||||
tonic.workspace = true
|
||||
tokio.workspace = true
|
||||
futures.workspace = true
|
||||
hex = "0.4.3"
|
||||
tonic-prost = "0.14.3"
|
||||
tonic-prost = "0.14.5"
|
||||
prost = "0.14.3"
|
||||
kameo.workspace = true
|
||||
url = "2.5.8"
|
||||
miette.workspace = true
|
||||
thiserror.workspace = true
|
||||
rustls-pki-types.workspace = true
|
||||
base64 = "0.22.1"
|
||||
prost-types.workspace = true
|
||||
tracing.workspace = true
|
||||
async-trait.workspace = true
|
||||
tokio-stream.workspace = true
|
||||
|
||||
[build-dependencies]
|
||||
prost-build = "0.14.3"
|
||||
serde_json = "1"
|
||||
tonic-prost-build = "0.14.3"
|
||||
tonic-prost-build = "0.14.5"
|
||||
protoc-bin-vendored = "3"
|
||||
|
||||
[dev-dependencies]
|
||||
rstest.workspace = true
|
||||
rand.workspace = true
|
||||
rcgen.workspace = true
|
||||
|
||||
[package.metadata.cargo-shear]
|
||||
ignored = ["tonic-prost", "prost", "kameo"]
|
||||
|
||||
@@ -1,15 +1,21 @@
|
||||
use tonic_prost_build::configure;
|
||||
|
||||
static PROTOBUF_DIR: &str = "../../../protobufs";
|
||||
|
||||
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let proto_files = vec![
|
||||
format!("{}/arbiter.proto", PROTOBUF_DIR),
|
||||
format!("{}/auth.proto", PROTOBUF_DIR),
|
||||
];
|
||||
println!("cargo::rerun-if-changed={PROTOBUF_DIR}");
|
||||
|
||||
// Компилируем protobuf (tonic-prost-build автоматически использует prost_types для google.protobuf)
|
||||
tonic_prost_build::configure()
|
||||
configure()
|
||||
.message_attribute(".", "#[derive(::kameo::Reply)]")
|
||||
.compile_protos(&proto_files, &[PROTOBUF_DIR.to_string()])?;
|
||||
|
||||
.compile_protos(
|
||||
&[
|
||||
format!("{}/arbiter.proto", PROTOBUF_DIR),
|
||||
format!("{}/user_agent.proto", PROTOBUF_DIR),
|
||||
format!("{}/client.proto", PROTOBUF_DIR),
|
||||
format!("{}/evm.proto", PROTOBUF_DIR),
|
||||
],
|
||||
&[PROTOBUF_DIR.to_string()],
|
||||
)
|
||||
.unwrap();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -1,18 +1,57 @@
|
||||
use crate::proto::auth::AuthChallenge;
|
||||
pub mod transport;
|
||||
pub mod url;
|
||||
|
||||
use base64::{Engine, prelude::BASE64_STANDARD};
|
||||
use std::{
|
||||
path::PathBuf,
|
||||
sync::{LazyLock, RwLock},
|
||||
};
|
||||
|
||||
pub mod proto {
|
||||
tonic::include_proto!("arbiter");
|
||||
|
||||
pub mod auth {
|
||||
tonic::include_proto!("arbiter.auth");
|
||||
pub mod user_agent {
|
||||
tonic::include_proto!("arbiter.user_agent");
|
||||
}
|
||||
|
||||
pub mod client {
|
||||
tonic::include_proto!("arbiter.client");
|
||||
}
|
||||
|
||||
pub mod evm {
|
||||
tonic::include_proto!("arbiter.evm");
|
||||
}
|
||||
}
|
||||
|
||||
pub mod transport;
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct ClientMetadata {
|
||||
pub name: String,
|
||||
pub description: Option<String>,
|
||||
pub version: Option<String>,
|
||||
}
|
||||
|
||||
pub static BOOTSTRAP_TOKEN_PATH: &str = "bootstrap_token";
|
||||
pub static BOOTSTRAP_PATH: &str = "bootstrap_token";
|
||||
pub const DEFAULT_SERVER_PORT: u16 = 50051;
|
||||
static HOME_OVERRIDE: LazyLock<RwLock<Option<PathBuf>>> = LazyLock::new(|| RwLock::new(None));
|
||||
|
||||
pub fn set_home_path_override(path: Option<PathBuf>) -> Result<(), std::io::Error> {
|
||||
let mut lock = HOME_OVERRIDE
|
||||
.write()
|
||||
.map_err(|_| std::io::Error::other("home path override lock poisoned"))?;
|
||||
*lock = path;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn home_path() -> Result<std::path::PathBuf, std::io::Error> {
|
||||
if let Some(path) = HOME_OVERRIDE
|
||||
.read()
|
||||
.map_err(|_| std::io::Error::other("home path override lock poisoned"))?
|
||||
.clone()
|
||||
{
|
||||
std::fs::create_dir_all(&path)?;
|
||||
return Ok(path);
|
||||
}
|
||||
|
||||
static ARBITER_HOME: &str = ".arbiter";
|
||||
let home_dir = std::env::home_dir().ok_or(std::io::Error::new(
|
||||
std::io::ErrorKind::PermissionDenied,
|
||||
@@ -25,7 +64,7 @@ pub fn home_path() -> Result<std::path::PathBuf, std::io::Error> {
|
||||
Ok(arbiter_home)
|
||||
}
|
||||
|
||||
pub fn format_challenge(challenge: &AuthChallenge) -> Vec<u8> {
|
||||
let concat_form = format!("{}:{}", challenge.nonce, hex::encode(&challenge.pubkey));
|
||||
concat_form.into_bytes().to_vec()
|
||||
pub fn format_challenge(nonce: i32, pubkey: &[u8]) -> Vec<u8> {
|
||||
let concat_form = format!("{}:{}", nonce, BASE64_STANDARD.encode(pubkey));
|
||||
concat_form.into_bytes()
|
||||
}
|
||||
|
||||
@@ -1,46 +1,163 @@
|
||||
use futures::{Stream, StreamExt};
|
||||
use tokio::sync::mpsc::{self, error::SendError};
|
||||
use tonic::{Status, Streaming};
|
||||
//! Transport-facing abstractions shared by protocol/session code.
|
||||
//!
|
||||
//! This module defines a small set of transport traits that actors and other
|
||||
//! protocol code can depend on without knowing anything about the concrete
|
||||
//! transport underneath.
|
||||
//!
|
||||
//! The abstraction is split into:
|
||||
//! - [`Sender`] for outbound delivery
|
||||
//! - [`Receiver`] for inbound delivery
|
||||
//! - [`Bi`] as the combined duplex form (`Sender + Receiver`)
|
||||
//!
|
||||
//! This split lets code depend only on the half it actually needs. For
|
||||
//! example, some actor/session code only sends out-of-band messages, while
|
||||
//! auth/state-machine code may need full duplex access.
|
||||
//!
|
||||
//! [`Bi`] remains intentionally minimal and transport-agnostic:
|
||||
//! - [`Receiver::recv`] yields inbound messages
|
||||
//! - [`Sender::send`] accepts outbound messages
|
||||
//!
|
||||
//! Transport-specific adapters, including protobuf or gRPC bridges, live in the
|
||||
//! crates that own those boundaries rather than in `arbiter-proto`.
|
||||
//!
|
||||
//! [`Bi`] deliberately does not model request/response correlation. Some
|
||||
//! transports may carry multiplexed request/response traffic, some may emit
|
||||
//! out-of-band messages, and some may be one-message-at-a-time state machines.
|
||||
//! Correlation concerns such as request IDs, pending response maps, and
|
||||
//! out-of-band routing belong in the adapter or connection layer built on top
|
||||
//! of [`Bi`], not in this abstraction itself.
|
||||
//!
|
||||
//! # Generic Ordering Rule
|
||||
//!
|
||||
//! This module consistently uses `Inbound` first and `Outbound` second in
|
||||
//! generic parameter lists.
|
||||
//!
|
||||
//! For [`Receiver`], [`Sender`], and [`Bi`], this means:
|
||||
//! - `Receiver<Inbound>`
|
||||
//! - `Sender<Outbound>`
|
||||
//! - `Bi<Inbound, Outbound>`
|
||||
//!
|
||||
//! Concretely, for [`Bi`]:
|
||||
//! - `recv() -> Option<Inbound>`
|
||||
//! - `send(Outbound)`
|
||||
//!
|
||||
//! [`expect_message`] is a small helper for linear protocol steps: it reads one
|
||||
//! inbound message from a transport and extracts a typed value from it, failing
|
||||
//! if the channel closes or the message shape is not what the caller expected.
|
||||
//!
|
||||
//! [`DummyTransport`] is a no-op implementation useful for tests and local
|
||||
//! actor execution where no real stream exists.
|
||||
//!
|
||||
//! # Design Notes
|
||||
//!
|
||||
//! - [`Bi::send`] returns [`Error`] only for transport delivery failures, such
|
||||
//! as a closed outbound channel.
|
||||
//! - [`Bi::recv`] returns `None` when the underlying transport closes.
|
||||
//! - Message translation is intentionally out of scope for this module.
|
||||
|
||||
use std::marker::PhantomData;
|
||||
|
||||
// Abstraction for stream for sans-io capabilities
|
||||
pub trait Bi<T, U>: Stream<Item = Result<T, Status>> + Send + Sync + 'static {
|
||||
type Error;
|
||||
fn send(
|
||||
&mut self,
|
||||
item: Result<U, Status>,
|
||||
) -> impl std::future::Future<Output = Result<(), Self::Error>> + Send;
|
||||
use async_trait::async_trait;
|
||||
|
||||
/// Errors returned by transport adapters implementing [`Bi`].
|
||||
#[derive(thiserror::Error, Debug)]
|
||||
pub enum Error {
|
||||
#[error("Transport channel is closed")]
|
||||
ChannelClosed,
|
||||
#[error("Unexpected message received")]
|
||||
UnexpectedMessage,
|
||||
}
|
||||
|
||||
// Bi-directional stream abstraction for handling gRPC streaming requests and responses
|
||||
pub struct BiStream<T, U> {
|
||||
pub request_stream: Streaming<T>,
|
||||
pub response_sender: mpsc::Sender<Result<U, Status>>,
|
||||
}
|
||||
|
||||
impl<T, U> Stream for BiStream<T, U>
|
||||
/// Receives one message from `transport` and extracts a value from it using
|
||||
/// `extractor`. Returns [`Error::ChannelClosed`] if the transport closes and
|
||||
/// [`Error::UnexpectedMessage`] if `extractor` returns `None`.
|
||||
pub async fn expect_message<T, Inbound, Outbound, Target, F>(
|
||||
transport: &mut T,
|
||||
extractor: F,
|
||||
) -> Result<Target, Error>
|
||||
where
|
||||
T: Send + 'static,
|
||||
U: Send + 'static,
|
||||
T: Bi<Inbound, Outbound> + ?Sized,
|
||||
F: FnOnce(Inbound) -> Option<Target>,
|
||||
{
|
||||
type Item = Result<T, Status>;
|
||||
let msg = transport.recv().await.ok_or(Error::ChannelClosed)?;
|
||||
extractor(msg).ok_or(Error::UnexpectedMessage)
|
||||
}
|
||||
|
||||
fn poll_next(
|
||||
mut self: std::pin::Pin<&mut Self>,
|
||||
cx: &mut std::task::Context<'_>,
|
||||
) -> std::task::Poll<Option<Self::Item>> {
|
||||
self.request_stream.poll_next_unpin(cx)
|
||||
#[async_trait]
|
||||
pub trait Sender<Outbound>: Send + Sync {
|
||||
async fn send(&mut self, item: Outbound) -> Result<(), Error>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait Receiver<Inbound>: Send + Sync {
|
||||
async fn recv(&mut self) -> Option<Inbound>;
|
||||
}
|
||||
|
||||
/// Minimal bidirectional transport abstraction used by protocol code.
|
||||
///
|
||||
/// `Bi<Inbound, Outbound>` is the combined duplex form of [`Sender`] and
|
||||
/// [`Receiver`].
|
||||
///
|
||||
/// It models a channel with:
|
||||
/// - inbound items of type `Inbound` read via [`Bi::recv`]
|
||||
/// - outbound items of type `Outbound` written via [`Bi::send`]
|
||||
///
|
||||
/// It does not imply request/response sequencing, one-at-a-time exchange, or
|
||||
/// any built-in correlation mechanism between inbound and outbound items.
|
||||
pub trait Bi<Inbound, Outbound>: Sender<Outbound> + Receiver<Inbound> + Send + Sync {}
|
||||
|
||||
pub trait SplittableBi<Inbound, Outbound>: Bi<Inbound, Outbound> {
|
||||
type Sender: Sender<Outbound>;
|
||||
type Receiver: Receiver<Inbound>;
|
||||
|
||||
fn split(self) -> (Self::Sender, Self::Receiver);
|
||||
fn from_parts(sender: Self::Sender, receiver: Self::Receiver) -> Self;
|
||||
}
|
||||
|
||||
/// No-op [`Bi`] transport for tests and manual actor usage.
|
||||
///
|
||||
/// `send` drops all items and succeeds. [`Bi::recv`] never resolves and therefore
|
||||
/// does not busy-wait or spuriously close the stream.
|
||||
pub struct DummyTransport<Inbound, Outbound> {
|
||||
_marker: PhantomData<(Inbound, Outbound)>,
|
||||
}
|
||||
|
||||
impl<Inbound, Outbound> Default for DummyTransport<Inbound, Outbound> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
_marker: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T, U> Bi<T, U> for BiStream<T, U>
|
||||
#[async_trait]
|
||||
impl<Inbound, Outbound> Sender<Outbound> for DummyTransport<Inbound, Outbound>
|
||||
where
|
||||
T: Send + 'static,
|
||||
U: Send + 'static,
|
||||
Inbound: Send + Sync + 'static,
|
||||
Outbound: Send + Sync + 'static,
|
||||
{
|
||||
type Error = SendError<Result<U, Status>>;
|
||||
|
||||
async fn send(&mut self, item: Result<U, Status>) -> Result<(), Self::Error> {
|
||||
self.response_sender.send(item).await
|
||||
async fn send(&mut self, _item: Outbound) -> Result<(), Error> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<Inbound, Outbound> Receiver<Inbound> for DummyTransport<Inbound, Outbound>
|
||||
where
|
||||
Inbound: Send + Sync + 'static,
|
||||
Outbound: Send + Sync + 'static,
|
||||
{
|
||||
async fn recv(&mut self) -> Option<Inbound> {
|
||||
std::future::pending::<()>().await;
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl<Inbound, Outbound> Bi<Inbound, Outbound> for DummyTransport<Inbound, Outbound>
|
||||
where
|
||||
Inbound: Send + Sync + 'static,
|
||||
Outbound: Send + Sync + 'static,
|
||||
{
|
||||
}
|
||||
|
||||
pub mod grpc;
|
||||
|
||||
106
server/crates/arbiter-proto/src/transport/grpc.rs
Normal file
106
server/crates/arbiter-proto/src/transport/grpc.rs
Normal file
@@ -0,0 +1,106 @@
|
||||
use async_trait::async_trait;
|
||||
use futures::StreamExt;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio_stream::wrappers::ReceiverStream;
|
||||
|
||||
use super::{Bi, Receiver, Sender};
|
||||
|
||||
pub struct GrpcSender<Outbound> {
|
||||
tx: mpsc::Sender<Result<Outbound, tonic::Status>>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<Outbound> Sender<Result<Outbound, tonic::Status>> for GrpcSender<Outbound>
|
||||
where
|
||||
Outbound: Send + Sync + 'static,
|
||||
{
|
||||
async fn send(&mut self, item: Result<Outbound, tonic::Status>) -> Result<(), super::Error> {
|
||||
self.tx
|
||||
.send(item)
|
||||
.await
|
||||
.map_err(|_| super::Error::ChannelClosed)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct GrpcReceiver<Inbound> {
|
||||
rx: tonic::Streaming<Inbound>,
|
||||
}
|
||||
#[async_trait]
|
||||
impl<Inbound> Receiver<Result<Inbound, tonic::Status>> for GrpcReceiver<Inbound>
|
||||
where
|
||||
Inbound: Send + Sync + 'static,
|
||||
{
|
||||
async fn recv(&mut self) -> Option<Result<Inbound, tonic::Status>> {
|
||||
self.rx.next().await
|
||||
}
|
||||
}
|
||||
|
||||
pub struct GrpcBi<Inbound, Outbound> {
|
||||
sender: GrpcSender<Outbound>,
|
||||
receiver: GrpcReceiver<Inbound>,
|
||||
}
|
||||
|
||||
impl<Inbound, Outbound> GrpcBi<Inbound, Outbound>
|
||||
where
|
||||
Inbound: Send + Sync + 'static,
|
||||
Outbound: Send + Sync + 'static,
|
||||
{
|
||||
pub fn from_bi_stream(
|
||||
receiver: tonic::Streaming<Inbound>,
|
||||
) -> (Self, ReceiverStream<Result<Outbound, tonic::Status>>) {
|
||||
let (tx, rx) = mpsc::channel(10);
|
||||
let sender = GrpcSender { tx };
|
||||
let receiver = GrpcReceiver { rx: receiver };
|
||||
let bi = GrpcBi { sender, receiver };
|
||||
(bi, ReceiverStream::new(rx))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<Inbound, Outbound> Sender<Result<Outbound, tonic::Status>> for GrpcBi<Inbound, Outbound>
|
||||
where
|
||||
Inbound: Send + Sync + 'static,
|
||||
Outbound: Send + Sync + 'static,
|
||||
{
|
||||
async fn send(&mut self, item: Result<Outbound, tonic::Status>) -> Result<(), super::Error> {
|
||||
self.sender.send(item).await
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<Inbound, Outbound> Receiver<Result<Inbound, tonic::Status>> for GrpcBi<Inbound, Outbound>
|
||||
where
|
||||
Inbound: Send + Sync + 'static,
|
||||
Outbound: Send + Sync + 'static,
|
||||
{
|
||||
async fn recv(&mut self) -> Option<Result<Inbound, tonic::Status>> {
|
||||
self.receiver.recv().await
|
||||
}
|
||||
}
|
||||
|
||||
impl<Inbound, Outbound> Bi<Result<Inbound, tonic::Status>, Result<Outbound, tonic::Status>>
|
||||
for GrpcBi<Inbound, Outbound>
|
||||
where
|
||||
Inbound: Send + Sync + 'static,
|
||||
Outbound: Send + Sync + 'static,
|
||||
{
|
||||
}
|
||||
|
||||
impl<Inbound, Outbound>
|
||||
super::SplittableBi<Result<Inbound, tonic::Status>, Result<Outbound, tonic::Status>>
|
||||
for GrpcBi<Inbound, Outbound>
|
||||
where
|
||||
Inbound: Send + Sync + 'static,
|
||||
Outbound: Send + Sync + 'static,
|
||||
{
|
||||
type Sender = GrpcSender<Outbound>;
|
||||
type Receiver = GrpcReceiver<Inbound>;
|
||||
|
||||
fn split(self) -> (Self::Sender, Self::Receiver) {
|
||||
(self.sender, self.receiver)
|
||||
}
|
||||
|
||||
fn from_parts(sender: Self::Sender, receiver: Self::Receiver) -> Self {
|
||||
GrpcBi { sender, receiver }
|
||||
}
|
||||
}
|
||||
130
server/crates/arbiter-proto/src/url.rs
Normal file
130
server/crates/arbiter-proto/src/url.rs
Normal file
@@ -0,0 +1,130 @@
|
||||
use std::fmt::Display;
|
||||
|
||||
use base64::{Engine as _, prelude::BASE64_URL_SAFE};
|
||||
use rustls_pki_types::CertificateDer;
|
||||
|
||||
const ARBITER_URL_SCHEME: &str = "arbiter";
|
||||
const CERT_QUERY_KEY: &str = "cert";
|
||||
const BOOTSTRAP_TOKEN_QUERY_KEY: &str = "bootstrap_token";
|
||||
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ArbiterUrl {
|
||||
pub host: String,
|
||||
pub port: u16,
|
||||
pub ca_cert: CertificateDer<'static>,
|
||||
pub bootstrap_token: Option<String>,
|
||||
}
|
||||
|
||||
impl Display for ArbiterUrl {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let mut base = format!(
|
||||
"{ARBITER_URL_SCHEME}://{}:{}?{CERT_QUERY_KEY}={}",
|
||||
self.host,
|
||||
self.port,
|
||||
BASE64_URL_SAFE.encode(&self.ca_cert)
|
||||
);
|
||||
if let Some(token) = &self.bootstrap_token {
|
||||
base.push_str(&format!("&{BOOTSTRAP_TOKEN_QUERY_KEY}={}", token));
|
||||
}
|
||||
f.write_str(&base)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error, miette::Diagnostic)]
|
||||
pub enum Error {
|
||||
#[error("Invalid URL scheme, expected '{ARBITER_URL_SCHEME}://'")]
|
||||
#[diagnostic(
|
||||
code(arbiter::url::invalid_scheme),
|
||||
help("The URL must start with '{ARBITER_URL_SCHEME}://'")
|
||||
)]
|
||||
InvalidScheme,
|
||||
#[error("Missing host in URL")]
|
||||
#[diagnostic(
|
||||
code(arbiter::url::missing_host),
|
||||
help("The URL must include a host, e.g., '{ARBITER_URL_SCHEME}://127.0.0.1:<port>'")
|
||||
)]
|
||||
MissingHost,
|
||||
#[error("Missing port in URL")]
|
||||
#[diagnostic(
|
||||
code(arbiter::url::missing_port),
|
||||
help("The URL must include a port, e.g., '{ARBITER_URL_SCHEME}://127.0.0.1:1234'")
|
||||
)]
|
||||
MissingPort,
|
||||
#[error("Missing 'cert' query parameter in URL")]
|
||||
#[diagnostic(
|
||||
code(arbiter::url::missing_cert),
|
||||
help("The URL must include a 'cert' query parameter")
|
||||
)]
|
||||
MissingCert,
|
||||
#[error("Invalid base64 in 'cert' query parameter: {0}")]
|
||||
#[diagnostic(code(arbiter::url::invalid_cert_base64))]
|
||||
InvalidCertBase64(#[from] base64::DecodeError),
|
||||
}
|
||||
|
||||
impl<'a> TryFrom<&'a str> for ArbiterUrl {
|
||||
type Error = Error;
|
||||
|
||||
fn try_from(value: &'a str) -> Result<Self, Self::Error> {
|
||||
let url = url::Url::parse(value).map_err(|_| Error::InvalidScheme)?;
|
||||
|
||||
if url.scheme() != ARBITER_URL_SCHEME {
|
||||
return Err(Error::InvalidScheme);
|
||||
}
|
||||
|
||||
let host = url.host_str().ok_or(Error::MissingHost)?.to_string();
|
||||
let port = url.port().ok_or(Error::MissingPort)?;
|
||||
let cert_str = url
|
||||
.query_pairs()
|
||||
.find(|(k, _)| k == CERT_QUERY_KEY)
|
||||
.ok_or(Error::MissingCert)?
|
||||
.1;
|
||||
|
||||
let cert = BASE64_URL_SAFE.decode(cert_str.as_ref())?;
|
||||
let cert = CertificateDer::from_slice(&cert).into_owned();
|
||||
|
||||
let bootstrap_token = url
|
||||
.query_pairs()
|
||||
.find(|(k, _)| k == BOOTSTRAP_TOKEN_QUERY_KEY)
|
||||
.map(|(_, v)| v.to_string());
|
||||
|
||||
Ok(ArbiterUrl {
|
||||
host,
|
||||
port,
|
||||
ca_cert: cert,
|
||||
bootstrap_token,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use rcgen::generate_simple_self_signed;
|
||||
use rstest::rstest;
|
||||
|
||||
use super::*;
|
||||
|
||||
#[rstest]
|
||||
|
||||
fn test_parsing_correctness(
|
||||
#[values("127.0.0.1", "localhost", "192.168.1.1", "some.domain.com")] host: &str,
|
||||
|
||||
#[values(None, Some("token123".to_string()))] bootstrap_token: Option<String>,
|
||||
) {
|
||||
let cert = generate_simple_self_signed(&["Arbiter CA".into()]).unwrap();
|
||||
let cert = cert.cert.der();
|
||||
|
||||
let url = ArbiterUrl {
|
||||
host: host.to_string(),
|
||||
port: 1234,
|
||||
ca_cert: cert.clone().into_owned(),
|
||||
bootstrap_token,
|
||||
};
|
||||
let url_str = url.to_string();
|
||||
let parsed_url = ArbiterUrl::try_from(url_str.as_str()).unwrap();
|
||||
assert_eq!(url.host, parsed_url.host);
|
||||
assert_eq!(url.port, parsed_url.port);
|
||||
assert_eq!(url.ca_cert.to_vec(), parsed_url.ca_cert.to_vec());
|
||||
assert_eq!(url.bootstrap_token, parsed_url.bootstrap_token);
|
||||
}
|
||||
}
|
||||
BIN
server/crates/arbiter-server/.DS_Store
vendored
Normal file
BIN
server/crates/arbiter-server/.DS_Store
vendored
Normal file
Binary file not shown.
@@ -3,16 +3,14 @@ name = "arbiter-server"
|
||||
version = "0.1.0"
|
||||
edition = "2024"
|
||||
repository = "https://git.markettakers.org/MarketTakers/arbiter"
|
||||
license = "Apache-2.0"
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
diesel = { version = "2.3.6", features = [
|
||||
"sqlite",
|
||||
"uuid",
|
||||
"time",
|
||||
"chrono",
|
||||
"serde_json",
|
||||
] }
|
||||
diesel-async = { version = "0.7.4", features = [
|
||||
diesel = { version = "2.3.7", features = ["chrono", "returning_clauses_for_sqlite_3_35", "serde_json", "time", "uuid"] }
|
||||
diesel-async = { version = "0.8.0", features = [
|
||||
"bb8",
|
||||
"migrations",
|
||||
"sqlite",
|
||||
@@ -21,12 +19,15 @@ diesel-async = { version = "0.7.4", features = [
|
||||
ed25519-dalek.workspace = true
|
||||
arbiter-proto.path = "../arbiter-proto"
|
||||
tracing.workspace = true
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
tonic.workspace = true
|
||||
tonic.features = ["tls-aws-lc"]
|
||||
tokio.workspace = true
|
||||
rustls.workspace = true
|
||||
smlang.workspace = true
|
||||
miette.workspace = true
|
||||
thiserror.workspace = true
|
||||
fatality = "0.1.1"
|
||||
diesel_migrations = { version = "2.3.1", features = ["sqlite"] }
|
||||
async-trait.workspace = true
|
||||
secrecy = "0.10.3"
|
||||
@@ -34,19 +35,29 @@ futures.workspace = true
|
||||
tokio-stream.workspace = true
|
||||
dashmap = "6.1.0"
|
||||
rand.workspace = true
|
||||
rcgen = { version = "0.14.7", features = [
|
||||
"aws_lc_rs",
|
||||
"pem",
|
||||
"x509-parser",
|
||||
"zeroize",
|
||||
], default-features = false }
|
||||
rcgen.workspace = true
|
||||
chrono.workspace = true
|
||||
memsafe = "0.4.0"
|
||||
zeroize = { version = "1.8.2", features = ["std", "simd"] }
|
||||
argon2 = { version = "0.5", features = ["std"] }
|
||||
kameo.workspace = true
|
||||
hex = "0.4.3"
|
||||
chacha20poly1305 = "0.10.1"
|
||||
x25519-dalek.workspace = true
|
||||
chacha20poly1305 = { version = "0.10.1", features = ["std"] }
|
||||
argon2 = { version = "0.5.3", features = ["zeroize"] }
|
||||
restructed = "0.2.2"
|
||||
strum = { version = "0.28.0", features = ["derive"] }
|
||||
pem = "3.0.6"
|
||||
k256.workspace = true
|
||||
rsa.workspace = true
|
||||
sha2.workspace = true
|
||||
spki.workspace = true
|
||||
alloy.workspace = true
|
||||
prost-types.workspace = true
|
||||
arbiter-tokens-registry.path = "../arbiter-tokens-registry"
|
||||
clap = { version = "4.6", features = ["derive"] }
|
||||
|
||||
[dev-dependencies]
|
||||
insta = "1.46.3"
|
||||
test-log = { version = "0.2", default-features = false, features = ["trace"] }
|
||||
|
||||
[target.'cfg(windows)'.dependencies]
|
||||
windows-service = "0.8"
|
||||
|
||||
@@ -1,11 +0,0 @@
|
||||
-- Rollback TLS rotation tables
|
||||
|
||||
-- Удалить добавленную колонку из arbiter_settings
|
||||
ALTER TABLE arbiter_settings DROP COLUMN current_cert_id;
|
||||
|
||||
-- Удалить таблицы в обратном порядке
|
||||
DROP TABLE IF EXISTS tls_rotation_history;
|
||||
DROP TABLE IF EXISTS rotation_client_acks;
|
||||
DROP TABLE IF EXISTS tls_rotation_state;
|
||||
DROP INDEX IF EXISTS idx_tls_certificates_active;
|
||||
DROP TABLE IF EXISTS tls_certificates;
|
||||
@@ -1,57 +0,0 @@
|
||||
-- История всех сертификатов
|
||||
CREATE TABLE IF NOT EXISTS tls_certificates (
|
||||
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
|
||||
cert BLOB NOT NULL, -- DER-encoded
|
||||
cert_key BLOB NOT NULL, -- PEM-encoded
|
||||
not_before INTEGER NOT NULL, -- Unix timestamp
|
||||
not_after INTEGER NOT NULL, -- Unix timestamp
|
||||
created_at INTEGER NOT NULL DEFAULT(unixepoch('now')),
|
||||
is_active BOOLEAN NOT NULL DEFAULT 0 -- Только один active=1
|
||||
) STRICT;
|
||||
|
||||
CREATE INDEX idx_tls_certificates_active ON tls_certificates(is_active, not_after);
|
||||
|
||||
-- Tracking процесса ротации
|
||||
CREATE TABLE IF NOT EXISTS tls_rotation_state (
|
||||
id INTEGER NOT NULL PRIMARY KEY CHECK(id = 1), -- Singleton
|
||||
state TEXT NOT NULL DEFAULT('normal') CHECK(state IN ('normal', 'initiated', 'waiting_acks', 'ready')),
|
||||
new_cert_id INTEGER REFERENCES tls_certificates(id),
|
||||
initiated_at INTEGER,
|
||||
timeout_at INTEGER -- Таймаут для ожидания ACKs (initiated_at + 7 дней)
|
||||
) STRICT;
|
||||
|
||||
-- Tracking ACKs от клиентов
|
||||
CREATE TABLE IF NOT EXISTS rotation_client_acks (
|
||||
rotation_id INTEGER NOT NULL, -- Ссылка на new_cert_id
|
||||
client_key TEXT NOT NULL, -- Публичный ключ клиента (hex)
|
||||
ack_received_at INTEGER NOT NULL DEFAULT(unixepoch('now')),
|
||||
PRIMARY KEY (rotation_id, client_key)
|
||||
) STRICT;
|
||||
|
||||
-- Audit trail событий ротации
|
||||
CREATE TABLE IF NOT EXISTS tls_rotation_history (
|
||||
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
|
||||
cert_id INTEGER NOT NULL REFERENCES tls_certificates(id),
|
||||
event_type TEXT NOT NULL CHECK(event_type IN ('created', 'rotation_initiated', 'acks_complete', 'activated', 'timeout')),
|
||||
timestamp INTEGER NOT NULL DEFAULT(unixepoch('now')),
|
||||
details TEXT -- JSON с доп. информацией
|
||||
) STRICT;
|
||||
|
||||
-- Миграция существующего сертификата
|
||||
INSERT INTO tls_certificates (id, cert, cert_key, not_before, not_after, is_active, created_at)
|
||||
SELECT
|
||||
1,
|
||||
cert,
|
||||
cert_key,
|
||||
unixepoch('now') as not_before,
|
||||
unixepoch('now') + (90 * 24 * 60 * 60) as not_after, -- 90 дней
|
||||
1 as is_active,
|
||||
unixepoch('now')
|
||||
FROM arbiter_settings WHERE id = 1;
|
||||
|
||||
-- Инициализация rotation_state
|
||||
INSERT INTO tls_rotation_state (id, state) VALUES (1, 'normal');
|
||||
|
||||
-- Добавить ссылку на текущий сертификат
|
||||
ALTER TABLE arbiter_settings ADD COLUMN current_cert_id INTEGER REFERENCES tls_certificates(id);
|
||||
UPDATE arbiter_settings SET current_cert_id = 1 WHERE id = 1;
|
||||
@@ -1,31 +1,193 @@
|
||||
create table if not exists aead_encrypted (
|
||||
create table if not exists root_key_history (
|
||||
id INTEGER not null PRIMARY KEY,
|
||||
current_nonce integer not null default(1), -- if re-encrypted, this should be incremented
|
||||
-- root key stored as aead encrypted artifact, with only difference that it's decrypted by unseal key (derived from user password)
|
||||
root_key_encryption_nonce blob not null default(1), -- if re-encrypted, this should be incremented. Used for encrypting root key
|
||||
data_encryption_nonce blob not null default(1), -- nonce used for encrypting with key itself
|
||||
ciphertext blob not null,
|
||||
tag blob not null,
|
||||
schema_version integer not null default(1) -- server would need to reencrypt, because this means that we have changed algorithm
|
||||
schema_version integer not null default(1), -- server would need to reencrypt, because this means that we have changed algorithm
|
||||
salt blob not null -- for key deriviation
|
||||
) STRICT;
|
||||
|
||||
create table if not exists aead_encrypted (
|
||||
id INTEGER not null PRIMARY KEY,
|
||||
current_nonce blob not null default(1), -- if re-encrypted, this should be incremented
|
||||
ciphertext blob not null,
|
||||
tag blob not null,
|
||||
schema_version integer not null default(1), -- server would need to reencrypt, because this means that we have changed algorithm
|
||||
associated_root_key_id integer not null references root_key_history (id) on delete RESTRICT,
|
||||
created_at integer not null default(unixepoch ('now'))
|
||||
) STRICT;
|
||||
|
||||
create unique index if not exists uniq_nonce_per_root_key on aead_encrypted (
|
||||
current_nonce,
|
||||
associated_root_key_id
|
||||
);
|
||||
|
||||
create table if not exists tls_history (
|
||||
id INTEGER not null PRIMARY KEY,
|
||||
cert text not null,
|
||||
cert_key text not null, -- PEM Encoded private key
|
||||
ca_cert text not null,
|
||||
ca_key text not null, -- PEM Encoded private key
|
||||
created_at integer not null default(unixepoch ('now'))
|
||||
) STRICT;
|
||||
|
||||
-- This is a singleton
|
||||
create table if not exists arbiter_settings (
|
||||
id INTEGER not null PRIMARY KEY CHECK (id = 1), -- singleton row, id must be 1
|
||||
root_key_id integer references aead_encrypted (id) on delete RESTRICT, -- if null, means wasn't bootstrapped yet
|
||||
cert_key blob not null,
|
||||
cert blob not null
|
||||
root_key_id integer references root_key_history (id) on delete RESTRICT, -- if null, means wasn't bootstrapped yet
|
||||
tls_id integer references tls_history (id) on delete RESTRICT
|
||||
) STRICT;
|
||||
|
||||
insert into arbiter_settings (id) values (1) on conflict do nothing;
|
||||
-- ensure singleton row exists
|
||||
|
||||
create table if not exists useragent_client (
|
||||
id integer not null primary key,
|
||||
nonce integer not null default (1), -- used for auth challenge
|
||||
nonce integer not null default(1), -- used for auth challenge
|
||||
public_key blob not null,
|
||||
key_type integer not null default(1), -- 1=Ed25519, 2=ECDSA(secp256k1)
|
||||
created_at integer not null default(unixepoch ('now')),
|
||||
updated_at integer not null default(unixepoch ('now'))
|
||||
) STRICT;
|
||||
create unique index if not exists uniq_useragent_client_public_key on useragent_client (public_key, key_type);
|
||||
|
||||
create table if not exists client_metadata (
|
||||
id integer not null primary key,
|
||||
name text not null, -- human-readable name for the client
|
||||
description text, -- optional description for the client
|
||||
version text, -- client version for tracking and debugging
|
||||
created_at integer not null default(unixepoch ('now'))
|
||||
) STRICT;
|
||||
|
||||
-- created to track history of changes
|
||||
create table if not exists client_metadata_history (
|
||||
id integer not null primary key,
|
||||
metadata_id integer not null references client_metadata (id) on delete cascade,
|
||||
client_id integer not null references program_client (id) on delete cascade,
|
||||
created_at integer not null default(unixepoch ('now'))
|
||||
) STRICT;
|
||||
|
||||
create unique index if not exists uniq_metadata_binding_client on client_metadata_history (client_id);
|
||||
|
||||
create table if not exists program_client (
|
||||
id integer not null primary key,
|
||||
nonce integer not null default(1), -- used for auth challenge
|
||||
public_key blob not null,
|
||||
metadata_id integer not null references client_metadata (id) on delete cascade,
|
||||
created_at integer not null default(unixepoch ('now')),
|
||||
updated_at integer not null default(unixepoch ('now'))
|
||||
) STRICT;
|
||||
|
||||
create table if not exists program_client (
|
||||
create unique index if not exists program_client_public_key_unique
|
||||
on program_client (public_key);
|
||||
|
||||
create unique index if not exists uniq_program_client_public_key on program_client (public_key);
|
||||
|
||||
create table if not exists evm_wallet (
|
||||
id integer not null primary key,
|
||||
nonce integer not null default (1), -- used for auth challenge
|
||||
public_key blob not null,
|
||||
created_at integer not null default(unixepoch ('now')),
|
||||
updated_at integer not null default(unixepoch ('now'))
|
||||
) STRICT;
|
||||
address blob not null, -- 20-byte Ethereum address
|
||||
aead_encrypted_id integer not null references aead_encrypted (id) on delete RESTRICT,
|
||||
created_at integer not null default(unixepoch ('now'))
|
||||
) STRICT;
|
||||
|
||||
create unique index if not exists uniq_evm_wallet_address on evm_wallet (address);
|
||||
|
||||
create unique index if not exists uniq_evm_wallet_aead on evm_wallet (aead_encrypted_id);
|
||||
|
||||
create table if not exists evm_wallet_access (
|
||||
id integer not null primary key,
|
||||
wallet_id integer not null references evm_wallet (id) on delete cascade,
|
||||
client_id integer not null references program_client (id) on delete cascade,
|
||||
created_at integer not null default(unixepoch ('now'))
|
||||
) STRICT;
|
||||
|
||||
create unique index if not exists uniq_wallet_access on evm_wallet_access (wallet_id, client_id);
|
||||
|
||||
create table if not exists evm_ether_transfer_limit (
|
||||
id integer not null primary key,
|
||||
window_secs integer not null, -- window duration in seconds
|
||||
max_volume blob not null -- big-endian 32-byte U256
|
||||
) STRICT;
|
||||
|
||||
-- Shared grant properties: client scope, timeframe, fee caps, and rate limit
|
||||
create table if not exists evm_basic_grant (
|
||||
id integer not null primary key,
|
||||
wallet_access_id integer not null references evm_wallet_access (id) on delete restrict,
|
||||
chain_id integer not null, -- EIP-155 chain ID
|
||||
valid_from integer, -- unix timestamp (seconds), null = no lower bound
|
||||
valid_until integer, -- unix timestamp (seconds), null = no upper bound
|
||||
max_gas_fee_per_gas blob, -- big-endian 32-byte U256, null = unlimited
|
||||
max_priority_fee_per_gas blob, -- big-endian 32-byte U256, null = unlimited
|
||||
rate_limit_count integer, -- max transactions in window, null = unlimited
|
||||
rate_limit_window_secs integer, -- window duration in seconds, null = unlimited
|
||||
revoked_at integer, -- unix timestamp when revoked, null = still active
|
||||
created_at integer not null default(unixepoch ('now'))
|
||||
) STRICT;
|
||||
|
||||
-- Shared transaction log for all EVM grants, used for rate limit tracking and auditing
|
||||
create table if not exists evm_transaction_log (
|
||||
id integer not null primary key,
|
||||
wallet_access_id integer not null references evm_wallet_access (id) on delete restrict,
|
||||
grant_id integer not null references evm_basic_grant (id) on delete restrict,
|
||||
chain_id integer not null,
|
||||
eth_value blob not null, -- always present on any EVM tx
|
||||
signed_at integer not null default(unixepoch ('now'))
|
||||
) STRICT;
|
||||
|
||||
create index if not exists idx_evm_basic_grant_access_chain on evm_basic_grant (wallet_access_id, chain_id);
|
||||
|
||||
-- ===============================
|
||||
-- ERC20 token transfer grant
|
||||
-- ===============================
|
||||
create table if not exists evm_token_transfer_grant (
|
||||
id integer not null primary key,
|
||||
basic_grant_id integer not null unique references evm_basic_grant (id) on delete cascade,
|
||||
token_contract blob not null, -- 20-byte ERC20 contract address
|
||||
receiver blob -- 20-byte recipient address or null if every recipient allowed
|
||||
) STRICT;
|
||||
|
||||
-- Per-window volume limits for token transfer grants
|
||||
create table if not exists evm_token_transfer_volume_limit (
|
||||
id integer not null primary key,
|
||||
grant_id integer not null references evm_token_transfer_grant (id) on delete cascade,
|
||||
window_secs integer not null, -- window duration in seconds
|
||||
max_volume blob not null -- big-endian 32-byte U256
|
||||
) STRICT;
|
||||
|
||||
-- Log table for token transfer grant usage
|
||||
create table if not exists evm_token_transfer_log (
|
||||
id integer not null primary key,
|
||||
grant_id integer not null references evm_token_transfer_grant (id) on delete restrict,
|
||||
log_id integer not null references evm_transaction_log (id) on delete restrict,
|
||||
chain_id integer not null, -- EIP-155 chain ID
|
||||
token_contract blob not null, -- 20-byte ERC20 contract address
|
||||
recipient_address blob not null, -- 20-byte recipient address
|
||||
value blob not null, -- big-endian 32-byte U256
|
||||
created_at integer not null default(unixepoch ('now'))
|
||||
) STRICT;
|
||||
|
||||
create index if not exists idx_token_transfer_log_grant on evm_token_transfer_log (grant_id);
|
||||
|
||||
create index if not exists idx_token_transfer_log_log_id on evm_token_transfer_log (log_id);
|
||||
|
||||
create index if not exists idx_token_transfer_log_chain on evm_token_transfer_log (chain_id);
|
||||
|
||||
-- ===============================
|
||||
-- Ether transfer grant (uses base log)
|
||||
-- ===============================
|
||||
create table if not exists evm_ether_transfer_grant (
|
||||
id integer not null primary key,
|
||||
basic_grant_id integer not null unique references evm_basic_grant (id) on delete cascade,
|
||||
limit_id integer not null references evm_ether_transfer_limit (id) on delete restrict
|
||||
) STRICT;
|
||||
|
||||
-- Specific recipient addresses for an ether transfer grant
|
||||
create table if not exists evm_ether_transfer_grant_target (
|
||||
id integer not null primary key,
|
||||
grant_id integer not null references evm_ether_transfer_grant (id) on delete cascade,
|
||||
address blob not null -- 20-byte recipient address
|
||||
) STRICT;
|
||||
|
||||
create unique index if not exists uniq_ether_transfer_target on evm_ether_transfer_grant_target (grant_id, address);
|
||||
|
||||
@@ -1,2 +0,0 @@
|
||||
-- Remove argon2_salt column
|
||||
ALTER TABLE aead_encrypted DROP COLUMN argon2_salt;
|
||||
@@ -1,2 +0,0 @@
|
||||
-- Add argon2_salt column to store password derivation salt
|
||||
ALTER TABLE aead_encrypted ADD COLUMN argon2_salt TEXT;
|
||||
@@ -1,2 +0,0 @@
|
||||
pub mod user_agent;
|
||||
pub mod client;
|
||||
@@ -1,40 +1,32 @@
|
||||
use arbiter_proto::{BOOTSTRAP_TOKEN_PATH, home_path};
|
||||
use diesel::{ExpressionMethods, QueryDsl};
|
||||
use arbiter_proto::{BOOTSTRAP_PATH, home_path};
|
||||
use diesel::QueryDsl;
|
||||
use diesel_async::RunQueryDsl;
|
||||
use kameo::{Actor, messages};
|
||||
use memsafe::MemSafe;
|
||||
use miette::Diagnostic;
|
||||
use rand::{RngExt, distr::StandardUniform, make_rng, rngs::StdRng};
|
||||
use secrecy::SecretString;
|
||||
use rand::{RngExt, distr::Alphanumeric, make_rng, rngs::StdRng};
|
||||
use thiserror::Error;
|
||||
use tracing::info;
|
||||
use zeroize::{Zeroize, Zeroizing};
|
||||
|
||||
use crate::{
|
||||
context::{self, ServerContext},
|
||||
db::{self, DatabasePool, schema},
|
||||
};
|
||||
|
||||
use crate::db::{self, DatabasePool, schema};
|
||||
const TOKEN_LENGTH: usize = 64;
|
||||
|
||||
pub async fn generate_token() -> Result<String, std::io::Error> {
|
||||
let rng: StdRng = make_rng();
|
||||
|
||||
let token: String = rng
|
||||
.sample_iter::<char, _>(StandardUniform)
|
||||
.take(TOKEN_LENGTH)
|
||||
.fold(Default::default(), |mut accum, char| {
|
||||
let token: String = rng.sample_iter(Alphanumeric).take(TOKEN_LENGTH).fold(
|
||||
Default::default(),
|
||||
|mut accum, char| {
|
||||
accum += char.to_string().as_str();
|
||||
accum
|
||||
});
|
||||
},
|
||||
);
|
||||
|
||||
tokio::fs::write(home_path()?.join(BOOTSTRAP_TOKEN_PATH), token.as_str()).await?;
|
||||
tokio::fs::write(home_path()?.join(BOOTSTRAP_PATH), token.as_str()).await?;
|
||||
|
||||
Ok(token)
|
||||
}
|
||||
|
||||
#[derive(Error, Debug, Diagnostic)]
|
||||
pub enum BootstrapError {
|
||||
pub enum Error {
|
||||
#[error("Database error: {0}")]
|
||||
#[diagnostic(code(arbiter_server::bootstrap::database))]
|
||||
Database(#[from] db::PoolError),
|
||||
@@ -49,12 +41,12 @@ pub enum BootstrapError {
|
||||
}
|
||||
|
||||
#[derive(Actor)]
|
||||
pub struct BootstrapActor {
|
||||
pub struct Bootstrapper {
|
||||
token: Option<String>,
|
||||
}
|
||||
|
||||
impl BootstrapActor {
|
||||
pub async fn new(db: &DatabasePool) -> Result<Self, BootstrapError> {
|
||||
impl Bootstrapper {
|
||||
pub async fn new(db: &DatabasePool) -> Result<Self, Error> {
|
||||
let mut conn = db.get().await?;
|
||||
|
||||
let row_count: i64 = schema::useragent_client::table
|
||||
@@ -66,8 +58,6 @@ impl BootstrapActor {
|
||||
|
||||
let token = if row_count == 0 {
|
||||
let token = generate_token().await?;
|
||||
info!(%token, "Generated bootstrap token");
|
||||
tokio::fs::write(home_path()?.join(BOOTSTRAP_TOKEN_PATH), token.as_str()).await?;
|
||||
Some(token)
|
||||
} else {
|
||||
None
|
||||
@@ -75,15 +65,10 @@ impl BootstrapActor {
|
||||
|
||||
Ok(Self { token })
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn get_token(&self) -> Option<String> {
|
||||
self.token.clone()
|
||||
}
|
||||
}
|
||||
|
||||
#[messages]
|
||||
impl BootstrapActor {
|
||||
impl Bootstrapper {
|
||||
#[message]
|
||||
pub fn is_correct_token(&self, token: String) -> bool {
|
||||
match &self.token {
|
||||
@@ -102,3 +87,11 @@ impl BootstrapActor {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[messages]
|
||||
impl Bootstrapper {
|
||||
#[message]
|
||||
pub fn get_token(&self) -> Option<String> {
|
||||
self.token.clone()
|
||||
}
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
use arbiter_proto::{
|
||||
proto::{ClientRequest, ClientResponse},
|
||||
transport::Bi,
|
||||
};
|
||||
|
||||
use crate::ServerContext;
|
||||
|
||||
pub(crate) async fn handle_client(
|
||||
_context: ServerContext,
|
||||
_bistream: impl Bi<ClientRequest, ClientResponse>,
|
||||
) {
|
||||
}
|
||||
333
server/crates/arbiter-server/src/actors/client/auth.rs
Normal file
333
server/crates/arbiter-server/src/actors/client/auth.rs
Normal file
@@ -0,0 +1,333 @@
|
||||
use arbiter_proto::{
|
||||
ClientMetadata, format_challenge, transport::{Bi, expect_message}
|
||||
};
|
||||
use chrono::Utc;
|
||||
use diesel::{
|
||||
ExpressionMethods as _, OptionalExtension as _, QueryDsl as _, SelectableHelper as _,
|
||||
dsl::insert_into, update,
|
||||
};
|
||||
use diesel_async::RunQueryDsl as _;
|
||||
use ed25519_dalek::{Signature, VerifyingKey};
|
||||
use kameo::error::SendError;
|
||||
use tracing::error;
|
||||
|
||||
use crate::{
|
||||
actors::{
|
||||
client::{ClientConnection, ClientProfile},
|
||||
flow_coordinator::{self, RequestClientApproval},
|
||||
},
|
||||
db::{
|
||||
self,
|
||||
models::{ProgramClientMetadata, SqliteTimestamp},
|
||||
schema::program_client,
|
||||
},
|
||||
};
|
||||
|
||||
#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)]
|
||||
pub enum Error {
|
||||
#[error("Database pool unavailable")]
|
||||
DatabasePoolUnavailable,
|
||||
#[error("Database operation failed")]
|
||||
DatabaseOperationFailed,
|
||||
#[error("Invalid challenge solution")]
|
||||
InvalidChallengeSolution,
|
||||
#[error("Client approval request failed")]
|
||||
ApproveError(#[from] ApproveError),
|
||||
#[error("Transport error")]
|
||||
Transport,
|
||||
}
|
||||
|
||||
#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)]
|
||||
pub enum ApproveError {
|
||||
#[error("Internal error")]
|
||||
Internal,
|
||||
#[error("Client connection denied by user agents")]
|
||||
Denied,
|
||||
#[error("Upstream error: {0}")]
|
||||
Upstream(flow_coordinator::ApprovalError),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum Inbound {
|
||||
AuthChallengeRequest {
|
||||
pubkey: VerifyingKey,
|
||||
metadata: ClientMetadata,
|
||||
},
|
||||
AuthChallengeSolution {
|
||||
signature: Signature,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum Outbound {
|
||||
AuthChallenge { pubkey: VerifyingKey, nonce: i32 },
|
||||
AuthSuccess,
|
||||
}
|
||||
|
||||
pub struct ClientInfo {
|
||||
pub id: i32,
|
||||
pub current_nonce: i32,
|
||||
}
|
||||
|
||||
/// Atomically reads and increments the nonce for a known client.
|
||||
/// Returns `None` if the pubkey is not registered.
|
||||
async fn get_client_and_nonce(
|
||||
db: &db::DatabasePool,
|
||||
pubkey: &VerifyingKey,
|
||||
) -> Result<Option<ClientInfo>, Error> {
|
||||
let pubkey_bytes = pubkey.as_bytes().to_vec();
|
||||
|
||||
let mut conn = db.get().await.map_err(|e| {
|
||||
error!(error = ?e, "Database pool error");
|
||||
Error::DatabasePoolUnavailable
|
||||
})?;
|
||||
|
||||
conn.exclusive_transaction(|conn| {
|
||||
let pubkey_bytes = pubkey_bytes.clone();
|
||||
Box::pin(async move {
|
||||
let Some((client_id, current_nonce)) = program_client::table
|
||||
.filter(program_client::public_key.eq(&pubkey_bytes))
|
||||
.select((program_client::id, program_client::nonce))
|
||||
.first::<(i32, i32)>(conn)
|
||||
.await
|
||||
.optional()?
|
||||
else {
|
||||
return Result::<_, diesel::result::Error>::Ok(None);
|
||||
};
|
||||
|
||||
update(program_client::table)
|
||||
.filter(program_client::public_key.eq(&pubkey_bytes))
|
||||
.set(program_client::nonce.eq(current_nonce + 1))
|
||||
.execute(conn)
|
||||
.await?;
|
||||
|
||||
Ok(Some(ClientInfo {
|
||||
id: client_id,
|
||||
current_nonce,
|
||||
}))
|
||||
})
|
||||
})
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(error = ?e, "Database error");
|
||||
Error::DatabaseOperationFailed
|
||||
})
|
||||
}
|
||||
|
||||
async fn approve_new_client(
|
||||
actors: &crate::actors::GlobalActors,
|
||||
profile: ClientProfile,
|
||||
) -> Result<(), Error> {
|
||||
let result = actors
|
||||
.flow_coordinator
|
||||
.ask(RequestClientApproval { client: profile })
|
||||
.await;
|
||||
|
||||
match result {
|
||||
Ok(true) => Ok(()),
|
||||
Ok(false) => Err(Error::ApproveError(ApproveError::Denied)),
|
||||
Err(SendError::HandlerError(e)) => {
|
||||
error!(error = ?e, "Approval upstream error");
|
||||
Err(Error::ApproveError(ApproveError::Upstream(e)))
|
||||
}
|
||||
Err(e) => {
|
||||
error!(error = ?e, "Approval request to flow coordinator failed");
|
||||
Err(Error::ApproveError(ApproveError::Internal))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn insert_client(
|
||||
db: &db::DatabasePool,
|
||||
pubkey: &VerifyingKey,
|
||||
metadata: &ClientMetadata,
|
||||
) -> Result<i32, Error> {
|
||||
use crate::db::schema::{client_metadata, program_client};
|
||||
let mut conn = db.get().await.map_err(|e| {
|
||||
error!(error = ?e, "Database pool error");
|
||||
Error::DatabasePoolUnavailable
|
||||
})?;
|
||||
|
||||
let metadata_id = insert_into(client_metadata::table)
|
||||
.values((
|
||||
client_metadata::name.eq(&metadata.name),
|
||||
client_metadata::description.eq(&metadata.description),
|
||||
client_metadata::version.eq(&metadata.version),
|
||||
))
|
||||
.returning(client_metadata::id)
|
||||
.get_result::<i32>(&mut conn)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(error = ?e, "Failed to insert client metadata");
|
||||
Error::DatabaseOperationFailed
|
||||
})?;
|
||||
|
||||
let client_id = insert_into(program_client::table)
|
||||
.values((
|
||||
program_client::public_key.eq(pubkey.as_bytes().to_vec()),
|
||||
program_client::metadata_id.eq(metadata_id),
|
||||
program_client::nonce.eq(1), // pre-incremented; challenge uses 0
|
||||
))
|
||||
.on_conflict_do_nothing()
|
||||
.returning(program_client::id)
|
||||
.get_result::<i32>(&mut conn)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(error = ?e, "Failed to insert client metadata");
|
||||
Error::DatabaseOperationFailed
|
||||
})?;
|
||||
|
||||
Ok(client_id)
|
||||
}
|
||||
|
||||
async fn sync_client_metadata(
|
||||
db: &db::DatabasePool,
|
||||
client_id: i32,
|
||||
metadata: &ClientMetadata,
|
||||
) -> Result<(), Error> {
|
||||
use crate::db::schema::{client_metadata, client_metadata_history};
|
||||
|
||||
let now = SqliteTimestamp(Utc::now());
|
||||
|
||||
let mut conn = db.get().await.map_err(|e| {
|
||||
error!(error = ?e, "Database pool error");
|
||||
Error::DatabasePoolUnavailable
|
||||
})?;
|
||||
|
||||
conn.exclusive_transaction(|conn| {
|
||||
let metadata = metadata.clone();
|
||||
Box::pin(async move {
|
||||
let (current_metadata_id, current): (i32, ProgramClientMetadata) =
|
||||
program_client::table
|
||||
.find(client_id)
|
||||
.inner_join(client_metadata::table)
|
||||
.select((
|
||||
program_client::metadata_id,
|
||||
ProgramClientMetadata::as_select(),
|
||||
))
|
||||
.first(conn)
|
||||
.await?;
|
||||
|
||||
let unchanged = current.name == metadata.name
|
||||
&& current.description == metadata.description
|
||||
&& current.version == metadata.version;
|
||||
if unchanged {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
insert_into(client_metadata_history::table)
|
||||
.values((
|
||||
client_metadata_history::metadata_id.eq(current_metadata_id),
|
||||
client_metadata_history::client_id.eq(client_id),
|
||||
))
|
||||
.execute(conn)
|
||||
.await?;
|
||||
|
||||
let metadata_id = insert_into(client_metadata::table)
|
||||
.values((
|
||||
client_metadata::name.eq(&metadata.name),
|
||||
client_metadata::description.eq(&metadata.description),
|
||||
client_metadata::version.eq(&metadata.version),
|
||||
))
|
||||
.returning(client_metadata::id)
|
||||
.get_result::<i32>(conn)
|
||||
.await?;
|
||||
|
||||
update(program_client::table.find(client_id))
|
||||
.set((
|
||||
program_client::metadata_id.eq(metadata_id),
|
||||
program_client::updated_at.eq(now),
|
||||
))
|
||||
.execute(conn)
|
||||
.await?;
|
||||
|
||||
Ok::<(), diesel::result::Error>(())
|
||||
})
|
||||
})
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(error = ?e, "Database error");
|
||||
Error::DatabaseOperationFailed
|
||||
})
|
||||
}
|
||||
|
||||
async fn challenge_client<T>(
|
||||
transport: &mut T,
|
||||
pubkey: VerifyingKey,
|
||||
nonce: i32,
|
||||
) -> Result<(), Error>
|
||||
where
|
||||
T: Bi<Inbound, Result<Outbound, Error>> + ?Sized,
|
||||
{
|
||||
transport
|
||||
.send(Ok(Outbound::AuthChallenge { pubkey, nonce }))
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(error = ?e, "Failed to send auth challenge");
|
||||
Error::Transport
|
||||
})?;
|
||||
|
||||
let signature = expect_message(transport, |req: Inbound| match req {
|
||||
Inbound::AuthChallengeSolution { signature } => Some(signature),
|
||||
_ => None,
|
||||
})
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(error = ?e, "Failed to receive challenge solution");
|
||||
Error::Transport
|
||||
})?;
|
||||
|
||||
let formatted = format_challenge(nonce, pubkey.as_bytes());
|
||||
|
||||
pubkey.verify_strict(&formatted, &signature).map_err(|_| {
|
||||
error!("Challenge solution verification failed");
|
||||
Error::InvalidChallengeSolution
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn authenticate<T>(
|
||||
props: &mut ClientConnection,
|
||||
transport: &mut T,
|
||||
) -> Result<VerifyingKey, Error>
|
||||
where
|
||||
T: Bi<Inbound, Result<Outbound, Error>> + Send + ?Sized,
|
||||
{
|
||||
let Some(Inbound::AuthChallengeRequest { pubkey, metadata }) = transport.recv().await else {
|
||||
return Err(Error::Transport);
|
||||
};
|
||||
|
||||
let info = match get_client_and_nonce(&props.db, &pubkey).await? {
|
||||
Some(nonce) => nonce,
|
||||
None => {
|
||||
approve_new_client(
|
||||
&props.actors,
|
||||
ClientProfile {
|
||||
pubkey,
|
||||
metadata: metadata.clone(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
let client_id = insert_client(&props.db, &pubkey, &metadata).await?;
|
||||
ClientInfo {
|
||||
id: client_id,
|
||||
current_nonce: 0,
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
sync_client_metadata(&props.db, info.id, &metadata).await?;
|
||||
|
||||
challenge_client(transport, pubkey, info.current_nonce).await?;
|
||||
|
||||
transport
|
||||
.send(Ok(Outbound::AuthSuccess))
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(error = ?e, "Failed to send auth success");
|
||||
Error::Transport
|
||||
})?;
|
||||
|
||||
Ok(pubkey)
|
||||
}
|
||||
44
server/crates/arbiter-server/src/actors/client/mod.rs
Normal file
44
server/crates/arbiter-server/src/actors/client/mod.rs
Normal file
@@ -0,0 +1,44 @@
|
||||
use arbiter_proto::{ClientMetadata, transport::Bi};
|
||||
use kameo::actor::Spawn;
|
||||
use tracing::{error, info};
|
||||
|
||||
use crate::{
|
||||
actors::{GlobalActors, client::{ session::ClientSession}},
|
||||
db,
|
||||
};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ClientProfile {
|
||||
pub pubkey: ed25519_dalek::VerifyingKey,
|
||||
pub metadata: ClientMetadata,
|
||||
}
|
||||
|
||||
pub struct ClientConnection {
|
||||
pub(crate) db: db::DatabasePool,
|
||||
pub(crate) actors: GlobalActors,
|
||||
}
|
||||
|
||||
impl ClientConnection {
|
||||
pub fn new(db: db::DatabasePool, actors: GlobalActors) -> Self {
|
||||
Self { db, actors }
|
||||
}
|
||||
}
|
||||
|
||||
pub mod auth;
|
||||
pub mod session;
|
||||
|
||||
pub async fn connect_client<T>(mut props: ClientConnection, transport: &mut T)
|
||||
where
|
||||
T: Bi<auth::Inbound, Result<auth::Outbound, auth::Error>> + Send + ?Sized,
|
||||
{
|
||||
match auth::authenticate(&mut props, transport).await {
|
||||
Ok(_pubkey) => {
|
||||
ClientSession::spawn(ClientSession::new(props));
|
||||
info!("Client authenticated, session started");
|
||||
}
|
||||
Err(err) => {
|
||||
let _ = transport.send(Err(err.clone())).await;
|
||||
error!(?err, "Authentication failed, closing connection");
|
||||
}
|
||||
}
|
||||
}
|
||||
72
server/crates/arbiter-server/src/actors/client/session.rs
Normal file
72
server/crates/arbiter-server/src/actors/client/session.rs
Normal file
@@ -0,0 +1,72 @@
|
||||
use kameo::{Actor, messages};
|
||||
use tracing::error;
|
||||
|
||||
use crate::{
|
||||
actors::{
|
||||
GlobalActors, client::ClientConnection, flow_coordinator::RegisterClient,
|
||||
keyholder::KeyHolderState,
|
||||
},
|
||||
db,
|
||||
};
|
||||
|
||||
pub struct ClientSession {
|
||||
props: ClientConnection,
|
||||
}
|
||||
|
||||
impl ClientSession {
|
||||
pub(crate) fn new(props: ClientConnection) -> Self {
|
||||
Self { props }
|
||||
}
|
||||
}
|
||||
|
||||
#[messages]
|
||||
impl ClientSession {
|
||||
#[message]
|
||||
pub(crate) async fn handle_query_vault_state(&mut self) -> Result<KeyHolderState, Error> {
|
||||
use crate::actors::keyholder::GetState;
|
||||
|
||||
let vault_state = match self.props.actors.key_holder.ask(GetState {}).await {
|
||||
Ok(state) => state,
|
||||
Err(err) => {
|
||||
error!(?err, actor = "client", "keyholder.query.failed");
|
||||
return Err(Error::Internal);
|
||||
}
|
||||
};
|
||||
|
||||
Ok(vault_state)
|
||||
}
|
||||
}
|
||||
|
||||
impl Actor for ClientSession {
|
||||
type Args = Self;
|
||||
|
||||
type Error = Error;
|
||||
|
||||
async fn on_start(
|
||||
args: Self::Args,
|
||||
this: kameo::prelude::ActorRef<Self>,
|
||||
) -> Result<Self, Self::Error> {
|
||||
args.props
|
||||
.actors
|
||||
.flow_coordinator
|
||||
.ask(RegisterClient { actor: this })
|
||||
.await
|
||||
.map_err(|_| Error::ConnectionRegistrationFailed)?;
|
||||
Ok(args)
|
||||
}
|
||||
}
|
||||
|
||||
impl ClientSession {
|
||||
pub fn new_test(db: db::DatabasePool, actors: GlobalActors) -> Self {
|
||||
let props = ClientConnection::new(db, actors);
|
||||
Self { props }
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum Error {
|
||||
#[error("Connection registration failed")]
|
||||
ConnectionRegistrationFailed,
|
||||
#[error("Internal error")]
|
||||
Internal,
|
||||
}
|
||||
265
server/crates/arbiter-server/src/actors/evm/mod.rs
Normal file
265
server/crates/arbiter-server/src/actors/evm/mod.rs
Normal file
@@ -0,0 +1,265 @@
|
||||
use alloy::{consensus::TxEip1559, primitives::Address, signers::Signature};
|
||||
use diesel::{
|
||||
ExpressionMethods, OptionalExtension as _, QueryDsl, SelectableHelper as _, dsl::insert_into,
|
||||
};
|
||||
use diesel_async::RunQueryDsl;
|
||||
use kameo::{Actor, actor::ActorRef, messages};
|
||||
use rand::{SeedableRng, rng, rngs::StdRng};
|
||||
|
||||
use crate::{
|
||||
actors::keyholder::{CreateNew, Decrypt, KeyHolder},
|
||||
db::{
|
||||
self, DatabaseError, DatabasePool,
|
||||
models::{self, SqliteTimestamp},
|
||||
schema,
|
||||
},
|
||||
evm::{
|
||||
self, RunKind,
|
||||
policies::{
|
||||
FullGrant, Grant, SharedGrantSettings, SpecificGrant, SpecificMeaning,
|
||||
ether_transfer::EtherTransfer, token_transfers::TokenTransfer,
|
||||
},
|
||||
},
|
||||
safe_cell::{SafeCell, SafeCellHandle as _},
|
||||
};
|
||||
|
||||
pub use crate::evm::safe_signer;
|
||||
|
||||
/// Failure modes of the transaction analysis / signing flow.
#[derive(Debug, thiserror::Error, miette::Diagnostic)]
pub enum SignTransactionError {
    /// Either the wallet row does not exist or the client has no access row
    /// for it — both collapse into one variant so callers cannot probe
    /// which wallets exist.
    #[error("Wallet not found")]
    #[diagnostic(code(arbiter::evm::sign::wallet_not_found))]
    WalletNotFound,

    #[error("Database error: {0}")]
    #[diagnostic(code(arbiter::evm::sign::database))]
    Database(#[from] DatabaseError),

    #[error("Keyholder error: {0}")]
    #[diagnostic(code(arbiter::evm::sign::keyholder))]
    Keyholder(#[from] crate::actors::keyholder::Error),

    /// The ask to the keyholder actor itself failed (mailbox/actor level).
    #[error("Keyholder mailbox error")]
    #[diagnostic(code(arbiter::evm::sign::keyholder_send))]
    KeyholderSend,

    #[error("Signing error: {0}")]
    #[diagnostic(code(arbiter::evm::sign::signing))]
    Signing(#[from] alloy::signers::Error),

    /// The policy engine rejected the transaction.
    #[error("Policy error: {0}")]
    #[diagnostic(code(arbiter::evm::sign::vet))]
    Vet(#[from] evm::VetError),
}
|
||||
|
||||
/// General errors of the EVM actor (wallet generation, listing, grants).
#[derive(Debug, thiserror::Error, miette::Diagnostic)]
pub enum Error {
    #[error("Keyholder error: {0}")]
    #[diagnostic(code(arbiter::evm::keyholder))]
    Keyholder(#[from] crate::actors::keyholder::Error),

    /// The ask to the keyholder actor itself failed (mailbox/actor level).
    #[error("Keyholder mailbox error")]
    #[diagnostic(code(arbiter::evm::keyholder_send))]
    KeyholderSend,

    #[error("Database error: {0}")]
    #[diagnostic(code(arbiter::evm::database))]
    Database(#[from] DatabaseError),
}
|
||||
|
||||
/// Actor that owns EVM wallet management: key generation, grant policy
/// administration, transaction analysis and signing.
#[derive(Actor)]
pub struct EvmActor {
    /// Keyholder actor used to encrypt/decrypt wallet private keys.
    pub keyholder: ActorRef<KeyHolder>,
    /// Pool for the wallet / grant tables.
    pub db: DatabasePool,
    /// RNG used for key generation; seeded once at construction.
    pub rng: StdRng,
    /// Policy engine evaluating transactions against grants.
    pub engine: evm::Engine,
}
|
||||
|
||||
impl EvmActor {
|
||||
pub fn new(keyholder: ActorRef<KeyHolder>, db: DatabasePool) -> Self {
|
||||
// is it safe to seed rng from system once?
|
||||
// todo: audit
|
||||
let rng = StdRng::from_rng(&mut rng());
|
||||
let engine = evm::Engine::new(db.clone());
|
||||
Self {
|
||||
keyholder,
|
||||
db,
|
||||
rng,
|
||||
engine,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[messages]
impl EvmActor {
    /// Generates a fresh EVM keypair, hands the private key to the keyholder
    /// for encrypted storage, and records the wallet row.
    /// Returns `(wallet row id, address)`.
    #[message]
    pub async fn generate(&mut self) -> Result<(i32, Address), Error> {
        let (mut key_cell, address) = safe_signer::generate(&mut self.rng);

        // Copy the raw key into a fresh cell to hand off to the keyholder.
        // NOTE(review): assumes `read_inline` confines the plaintext to the
        // closure — confirm against SafeCell's contract.
        let plaintext = key_cell.read_inline(|reader| SafeCell::new(reader.to_vec()));

        // The keyholder encrypts the key and returns the `aead_encrypted` row id.
        let aead_id: i32 = self
            .keyholder
            .ask(CreateNew { plaintext })
            .await
            .map_err(|_| Error::KeyholderSend)?;

        // Only the (public) address and the encrypted-key reference hit the DB.
        let mut conn = self.db.get().await.map_err(DatabaseError::from)?;
        let wallet_id = insert_into(schema::evm_wallet::table)
            .values(&models::NewEvmWallet {
                address: address.as_slice().to_vec(),
                aead_encrypted_id: aead_id,
            })
            .returning(schema::evm_wallet::id)
            .get_result(&mut conn)
            .await
            .map_err(DatabaseError::from)?;

        Ok((wallet_id, address))
    }

    /// Lists every stored wallet as `(row id, address)` pairs.
    #[message]
    pub async fn list_wallets(&self) -> Result<Vec<(i32, Address)>, Error> {
        let mut conn = self.db.get().await.map_err(DatabaseError::from)?;
        let rows: Vec<models::EvmWallet> = schema::evm_wallet::table
            .select(models::EvmWallet::as_select())
            .load(&mut conn)
            .await
            .map_err(DatabaseError::from)?;

        Ok(rows
            .into_iter()
            .map(|w| (w.id, Address::from_slice(&w.address)))
            .collect())
    }
}
|
||||
|
||||
#[messages]
|
||||
impl EvmActor {
|
||||
#[message]
|
||||
pub async fn useragent_create_grant(
|
||||
&mut self,
|
||||
basic: SharedGrantSettings,
|
||||
grant: SpecificGrant,
|
||||
) -> Result<i32, DatabaseError> {
|
||||
match grant {
|
||||
SpecificGrant::EtherTransfer(settings) => {
|
||||
self.engine
|
||||
.create_grant::<EtherTransfer>(FullGrant {
|
||||
basic,
|
||||
specific: settings,
|
||||
})
|
||||
.await
|
||||
}
|
||||
SpecificGrant::TokenTransfer(settings) => {
|
||||
self.engine
|
||||
.create_grant::<TokenTransfer>(FullGrant {
|
||||
basic,
|
||||
specific: settings,
|
||||
})
|
||||
.await
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[message]
|
||||
pub async fn useragent_delete_grant(&mut self, grant_id: i32) -> Result<(), Error> {
|
||||
let mut conn = self.db.get().await.map_err(DatabaseError::from)?;
|
||||
diesel::update(schema::evm_basic_grant::table)
|
||||
.filter(schema::evm_basic_grant::id.eq(grant_id))
|
||||
.set(schema::evm_basic_grant::revoked_at.eq(SqliteTimestamp::now()))
|
||||
.execute(&mut conn)
|
||||
.await
|
||||
.map_err(DatabaseError::from)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[message]
|
||||
pub async fn useragent_list_grants(&mut self) -> Result<Vec<Grant<SpecificGrant>>, Error> {
|
||||
Ok(self
|
||||
.engine
|
||||
.list_all_grants()
|
||||
.await
|
||||
.map_err(DatabaseError::from)?)
|
||||
}
|
||||
|
||||
#[message]
|
||||
pub async fn shared_analyze_transaction(
|
||||
&mut self,
|
||||
client_id: i32,
|
||||
wallet_address: Address,
|
||||
transaction: TxEip1559,
|
||||
) -> Result<SpecificMeaning, SignTransactionError> {
|
||||
let mut conn = self.db.get().await.map_err(DatabaseError::from)?;
|
||||
let wallet = schema::evm_wallet::table
|
||||
.select(models::EvmWallet::as_select())
|
||||
.filter(schema::evm_wallet::address.eq(wallet_address.as_slice()))
|
||||
.first(&mut conn)
|
||||
.await
|
||||
.optional()
|
||||
.map_err(DatabaseError::from)?
|
||||
.ok_or(SignTransactionError::WalletNotFound)?;
|
||||
let wallet_access = schema::evm_wallet_access::table
|
||||
.select(models::EvmWalletAccess::as_select())
|
||||
.filter(schema::evm_wallet_access::wallet_id.eq(wallet.id))
|
||||
.filter(schema::evm_wallet_access::client_id.eq(client_id))
|
||||
.first(&mut conn)
|
||||
.await
|
||||
.optional()
|
||||
.map_err(DatabaseError::from)?
|
||||
.ok_or(SignTransactionError::WalletNotFound)?;
|
||||
drop(conn);
|
||||
|
||||
let meaning = self
|
||||
.engine
|
||||
.evaluate_transaction(wallet_access, transaction.clone(), RunKind::Execution)
|
||||
.await?;
|
||||
|
||||
Ok(meaning)
|
||||
}
|
||||
|
||||
#[message]
|
||||
pub async fn client_sign_transaction(
|
||||
&mut self,
|
||||
client_id: i32,
|
||||
wallet_address: Address,
|
||||
mut transaction: TxEip1559,
|
||||
) -> Result<Signature, SignTransactionError> {
|
||||
let mut conn = self.db.get().await.map_err(DatabaseError::from)?;
|
||||
let wallet = schema::evm_wallet::table
|
||||
.select(models::EvmWallet::as_select())
|
||||
.filter(schema::evm_wallet::address.eq(wallet_address.as_slice()))
|
||||
.first(&mut conn)
|
||||
.await
|
||||
.optional()
|
||||
.map_err(DatabaseError::from)?
|
||||
.ok_or(SignTransactionError::WalletNotFound)?;
|
||||
let wallet_access = schema::evm_wallet_access::table
|
||||
.select(models::EvmWalletAccess::as_select())
|
||||
.filter(schema::evm_wallet_access::wallet_id.eq(wallet.id))
|
||||
.filter(schema::evm_wallet_access::client_id.eq(client_id))
|
||||
.first(&mut conn)
|
||||
.await
|
||||
.optional()
|
||||
.map_err(DatabaseError::from)?
|
||||
.ok_or(SignTransactionError::WalletNotFound)?;
|
||||
drop(conn);
|
||||
|
||||
let raw_key: SafeCell<Vec<u8>> = self
|
||||
.keyholder
|
||||
.ask(Decrypt {
|
||||
aead_id: wallet.aead_encrypted_id,
|
||||
})
|
||||
.await
|
||||
.map_err(|_| SignTransactionError::KeyholderSend)?;
|
||||
|
||||
let signer = safe_signer::SafeSigner::from_cell(raw_key)?;
|
||||
|
||||
self.engine
|
||||
.evaluate_transaction(wallet_access, transaction.clone(), RunKind::Execution)
|
||||
.await?;
|
||||
|
||||
use alloy::network::TxSignerSync as _;
|
||||
Ok(signer.sign_transaction_sync(&mut transaction)?)
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,101 @@
|
||||
use std::ops::ControlFlow;
|
||||
|
||||
use kameo::{
|
||||
Actor, messages,
|
||||
prelude::{ActorId, ActorRef, ActorStopReason, Context, WeakActorRef},
|
||||
reply::ReplySender,
|
||||
};
|
||||
|
||||
use crate::actors::{
|
||||
client::ClientProfile,
|
||||
flow_coordinator::ApprovalError,
|
||||
user_agent::{UserAgentSession, session::BeginNewClientApproval},
|
||||
};
|
||||
|
||||
/// Spawn arguments for the client-approval controller actor.
pub struct Args {
    /// Profile of the client requesting connection approval.
    pub client: ClientProfile,
    /// All user-agent sessions that must vote on the request.
    pub user_agents: Vec<ActorRef<UserAgentSession>>,
    /// Channel used to deliver the final approve/deny verdict.
    pub reply: ReplySender<Result<bool, ApprovalError>>
}
|
||||
|
||||
/// One-shot actor that collects approve/deny votes from every connected
/// user agent for a single client connection request.
pub struct ClientApprovalController {
    /// Number of UAs that have not yet responded (approval or denial) or died.
    pending: usize,
    /// Number of approvals received so far.
    approved: usize,
    /// Reply channel; `take`n on the first decisive answer so the verdict is
    /// sent exactly once.
    reply: Option<ReplySender<Result<bool, ApprovalError>>>,
}
|
||||
|
||||
impl ClientApprovalController {
|
||||
fn send_reply(&mut self, result: Result<bool, ApprovalError>) {
|
||||
if let Some(reply) = self.reply.take() {
|
||||
reply.send(result);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Actor for ClientApprovalController {
|
||||
type Args = Args;
|
||||
type Error = ();
|
||||
|
||||
async fn on_start(
|
||||
Args { client, mut user_agents, reply }: Self::Args,
|
||||
actor_ref: ActorRef<Self>,
|
||||
) -> Result<Self, Self::Error> {
|
||||
let this = Self {
|
||||
pending: user_agents.len(),
|
||||
approved: 0,
|
||||
reply: Some(reply),
|
||||
};
|
||||
|
||||
for user_agent in user_agents.drain(..) {
|
||||
actor_ref.link(&user_agent).await;
|
||||
let _ = user_agent
|
||||
.tell(BeginNewClientApproval {
|
||||
client: client.clone(),
|
||||
controller: actor_ref.clone(),
|
||||
})
|
||||
.await;
|
||||
}
|
||||
|
||||
Ok(this)
|
||||
}
|
||||
|
||||
async fn on_link_died(
|
||||
&mut self,
|
||||
_: WeakActorRef<Self>,
|
||||
_: ActorId,
|
||||
_: ActorStopReason,
|
||||
) -> Result<ControlFlow<ActorStopReason>, Self::Error> {
|
||||
// A linked UA died before responding — counts as a non-approval.
|
||||
self.pending = self.pending.saturating_sub(1);
|
||||
if self.pending == 0 {
|
||||
// At least one UA didn't approve: deny.
|
||||
self.send_reply(Ok(false));
|
||||
return Ok(ControlFlow::Break(ActorStopReason::Normal));
|
||||
}
|
||||
Ok(ControlFlow::Continue(()))
|
||||
}
|
||||
}
|
||||
|
||||
#[messages]
|
||||
impl ClientApprovalController {
|
||||
#[message(ctx)]
|
||||
pub async fn client_approval_answer(&mut self, approved: bool, ctx: &mut Context<Self, ()>) {
|
||||
if !approved {
|
||||
// Denial wins immediately regardless of other pending responses.
|
||||
self.send_reply(Ok(false));
|
||||
ctx.stop();
|
||||
return;
|
||||
}
|
||||
|
||||
self.approved += 1;
|
||||
self.pending = self.pending.saturating_sub(1);
|
||||
|
||||
if self.pending == 0 {
|
||||
// Every connected UA approved.
|
||||
self.send_reply(Ok(true));
|
||||
ctx.stop();
|
||||
}
|
||||
}
|
||||
}
|
||||
118
server/crates/arbiter-server/src/actors/flow_coordinator/mod.rs
Normal file
118
server/crates/arbiter-server/src/actors/flow_coordinator/mod.rs
Normal file
@@ -0,0 +1,118 @@
|
||||
use std::{collections::HashMap, ops::ControlFlow};
|
||||
|
||||
use kameo::{
|
||||
Actor,
|
||||
actor::{ActorId, ActorRef, Spawn},
|
||||
messages,
|
||||
prelude::{ActorStopReason, Context, WeakActorRef},
|
||||
reply::DelegatedReply,
|
||||
};
|
||||
use tracing::info;
|
||||
|
||||
use crate::actors::{
|
||||
client::{ClientProfile, session::ClientSession},
|
||||
flow_coordinator::client_connect_approval::ClientApprovalController,
|
||||
user_agent::session::UserAgentSession,
|
||||
};
|
||||
|
||||
pub mod client_connect_approval;
|
||||
|
||||
/// Central registry of live sessions: routes approval flows and tracks which
/// user agents and clients are currently connected.
#[derive(Default)]
pub struct FlowCoordinator {
    /// Connected user-agent sessions, keyed by actor id.
    pub user_agents: HashMap<ActorId, ActorRef<UserAgentSession>>,
    /// Connected client sessions, keyed by actor id.
    pub clients: HashMap<ActorId, ActorRef<ClientSession>>,
}
|
||||
|
||||
impl Actor for FlowCoordinator {
|
||||
type Args = Self;
|
||||
|
||||
type Error = ();
|
||||
|
||||
async fn on_start(args: Self::Args, _: ActorRef<Self>) -> Result<Self, Self::Error> {
|
||||
Ok(args)
|
||||
}
|
||||
|
||||
async fn on_link_died(
|
||||
&mut self,
|
||||
_: WeakActorRef<Self>,
|
||||
id: ActorId,
|
||||
_: ActorStopReason,
|
||||
) -> Result<ControlFlow<ActorStopReason>, Self::Error> {
|
||||
if self.user_agents.remove(&id).is_some() {
|
||||
info!(
|
||||
?id,
|
||||
actor = "FlowCoordinator",
|
||||
event = "useragent.disconnected"
|
||||
);
|
||||
} else if self.clients.remove(&id).is_some() {
|
||||
info!(
|
||||
?id,
|
||||
actor = "FlowCoordinator",
|
||||
event = "client.disconnected"
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
?id,
|
||||
actor = "FlowCoordinator",
|
||||
event = "unknown.actor.disconnected"
|
||||
);
|
||||
}
|
||||
Ok(ControlFlow::Continue(()))
|
||||
}
|
||||
}
|
||||
|
||||
/// Errors a client-connection approval round can fail with.
#[derive(Debug, thiserror::Error, Clone, PartialEq, Eq, Hash)]
pub enum ApprovalError {
    /// Approval requires at least one connected user agent to vote.
    #[error("No user agents connected")]
    NoUserAgentsConnected,
}
|
||||
|
||||
#[messages]
|
||||
impl FlowCoordinator {
|
||||
#[message(ctx)]
|
||||
pub async fn register_user_agent(
|
||||
&mut self,
|
||||
actor: ActorRef<UserAgentSession>,
|
||||
ctx: &mut Context<Self, ()>,
|
||||
) {
|
||||
info!(id = %actor.id(), actor = "FlowCoordinator", event = "useragent.connected");
|
||||
ctx.actor_ref().link(&actor).await;
|
||||
self.user_agents.insert(actor.id(), actor);
|
||||
}
|
||||
|
||||
#[message(ctx)]
|
||||
pub async fn register_client(
|
||||
&mut self,
|
||||
actor: ActorRef<ClientSession>,
|
||||
ctx: &mut Context<Self, ()>,
|
||||
) {
|
||||
info!(id = %actor.id(), actor = "FlowCoordinator", event = "client.connected");
|
||||
ctx.actor_ref().link(&actor).await;
|
||||
self.clients.insert(actor.id(), actor);
|
||||
}
|
||||
|
||||
#[message(ctx)]
|
||||
pub async fn request_client_approval(
|
||||
&mut self,
|
||||
client: ClientProfile,
|
||||
ctx: &mut Context<Self, DelegatedReply<Result<bool, ApprovalError>>>,
|
||||
) -> DelegatedReply<Result<bool, ApprovalError>> {
|
||||
let (reply, Some(reply_sender)) = ctx.reply_sender() else {
|
||||
unreachable!("Expected `request_client_approval` to have callback channel");
|
||||
};
|
||||
|
||||
let refs: Vec<_> = self.user_agents.values().cloned().collect();
|
||||
if refs.is_empty() {
|
||||
reply_sender.send(Err(ApprovalError::NoUserAgentsConnected));
|
||||
return reply;
|
||||
}
|
||||
|
||||
ClientApprovalController::spawn(client_connect_approval::Args {
|
||||
client,
|
||||
user_agents: refs,
|
||||
reply: reply_sender,
|
||||
});
|
||||
|
||||
reply
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1 @@
|
||||
pub mod v1;
|
||||
@@ -0,0 +1,243 @@
|
||||
use std::ops::Deref as _;
|
||||
|
||||
use argon2::{Algorithm, Argon2, password_hash::Salt as ArgonSalt};
|
||||
use chacha20poly1305::{
|
||||
AeadInPlace, Key, KeyInit as _, XChaCha20Poly1305, XNonce,
|
||||
aead::{AeadMut, Error, Payload},
|
||||
};
|
||||
use rand::{
|
||||
Rng as _, SeedableRng,
|
||||
rngs::{StdRng, SysRng},
|
||||
};
|
||||
|
||||
use crate::safe_cell::{SafeCell, SafeCellHandle as _};
|
||||
|
||||
/// AAD tag bound to sealed root keys (v1 format).
pub const ROOT_KEY_TAG: &[u8] = b"arbiter/seal/v1";
/// AAD tag bound to individual encrypted secrets (v1 format).
pub const TAG: &[u8] = b"arbiter/private-key/v1";

/// XChaCha20-Poly1305 uses 192-bit (24-byte) nonces.
pub const NONCE_LENGTH: usize = 24;

/// A 24-byte big-endian counter nonce. `Default` yields the all-zero nonce.
#[derive(Default)]
pub struct Nonce([u8; NONCE_LENGTH]);
impl Nonce {
    /// Advances the nonce as a big-endian counter (last byte is least
    /// significant). NOTE(review): wraps silently back to all-zero after
    /// 2^192 increments — unreachable in practice, but a wrapped counter
    /// would reuse nonces.
    pub fn increment(&mut self) {
        for byte in self.0.iter_mut().rev() {
            let (next, carried) = byte.overflowing_add(1);
            *byte = next;
            if !carried {
                break;
            }
        }
    }

    /// Returns the nonce bytes as an owned vector (e.g. for DB storage).
    pub fn to_vec(&self) -> Vec<u8> {
        Vec::from(self.0)
    }
}
impl<'a> TryFrom<&'a [u8]> for Nonce {
    type Error = ();

    /// Parses a nonce from raw bytes; fails unless exactly `NONCE_LENGTH` long.
    fn try_from(value: &'a [u8]) -> Result<Self, Self::Error> {
        let bytes: [u8; NONCE_LENGTH] = value.try_into().map_err(|_| ())?;
        Ok(Self(bytes))
    }
}
|
||||
|
||||
/// A symmetric cipher key held inside a SafeCell so the raw bytes are only
/// exposed through guarded reads.
pub struct KeyCell(pub SafeCell<Key>);
impl From<SafeCell<Key>> for KeyCell {
    fn from(value: SafeCell<Key>) -> Self {
        Self(value)
    }
}
impl TryFrom<SafeCell<Vec<u8>>> for KeyCell {
    type Error = ();

    /// Converts raw bytes into a key cell; fails unless the byte length
    /// matches the cipher key size exactly.
    fn try_from(mut value: SafeCell<Vec<u8>>) -> Result<Self, Self::Error> {
        let value = value.read();
        if value.len() != size_of::<Key>() {
            return Err(());
        }
        // Copy the bytes into a fixed-size key cell; the source cell still
        // owns its own copy and is dropped by the caller.
        let cell = SafeCell::new_inline(|cell_write: &mut Key| {
            cell_write.copy_from_slice(&value);
        });
        Ok(Self(cell))
    }
}
|
||||
|
||||
impl KeyCell {
    /// Creates a key filled from the OS entropy source (via a freshly seeded
    /// `StdRng`).
    pub fn new_secure_random() -> Self {
        let key = SafeCell::new_inline(|key_buffer: &mut Key| {
            #[allow(
                clippy::unwrap_used,
                reason = "Rng failure is unrecoverable and should panic"
            )]
            let mut rng = StdRng::try_from_rng(&mut SysRng).unwrap();
            rng.fill_bytes(key_buffer);
        });

        key.into()
    }

    /// Encrypts `buffer` in place with XChaCha20-Poly1305, appending the
    /// authentication tag. The caller is responsible for never reusing
    /// `nonce` with this key.
    pub fn encrypt_in_place(
        &mut self,
        nonce: &Nonce,
        associated_data: &[u8],
        mut buffer: impl AsMut<Vec<u8>>,
    ) -> Result<(), Error> {
        let key_reader = self.0.read();
        let key_ref = key_reader.deref();
        let cipher = XChaCha20Poly1305::new(key_ref);
        let nonce = XNonce::from_slice(nonce.0.as_ref());
        let buffer = buffer.as_mut();
        cipher.encrypt_in_place(nonce, associated_data, buffer)
    }
    /// Decrypts `buffer` in place, verifying the tag against
    /// `associated_data`; the plaintext never leaves the SafeCell.
    pub fn decrypt_in_place(
        &mut self,
        nonce: &Nonce,
        associated_data: &[u8],
        buffer: &mut SafeCell<Vec<u8>>,
    ) -> Result<(), Error> {
        let key_reader = self.0.read();
        let key_ref = key_reader.deref();
        let cipher = XChaCha20Poly1305::new(key_ref);
        let nonce = XNonce::from_slice(nonce.0.as_ref());
        let mut buffer = buffer.write();
        let buffer: &mut Vec<u8> = buffer.as_mut();
        cipher.decrypt_in_place(nonce, associated_data, buffer)
    }

    /// Encrypts `plaintext` into a fresh ciphertext vector (tag appended),
    /// binding `associated_data` into the authentication tag.
    pub fn encrypt(
        &mut self,
        nonce: &Nonce,
        associated_data: &[u8],
        plaintext: impl AsRef<[u8]>,
    ) -> Result<Vec<u8>, Error> {
        let key_reader = self.0.read();
        let key_ref = key_reader.deref();
        let mut cipher = XChaCha20Poly1305::new(key_ref);
        let nonce = XNonce::from_slice(nonce.0.as_ref());

        let ciphertext = cipher.encrypt(
            nonce,
            Payload {
                msg: plaintext.as_ref(),
                aad: associated_data,
            },
        )?;
        Ok(ciphertext)
    }
}
|
||||
|
||||
pub type Salt = [u8; ArgonSalt::RECOMMENDED_LENGTH];
|
||||
|
||||
pub fn generate_salt() -> Salt {
|
||||
let mut salt = Salt::default();
|
||||
#[allow(
|
||||
clippy::unwrap_used,
|
||||
reason = "Rng failure is unrecoverable and should panic"
|
||||
)]
|
||||
let mut rng = StdRng::try_from_rng(&mut SysRng).unwrap();
|
||||
rng.fill_bytes(&mut salt);
|
||||
salt
|
||||
}
|
||||
|
||||
/// User password might be of different length, have not enough entropy, etc...
/// Derive a fixed-length key from the password using Argon2id, which is designed for password hashing and key derivation.
///
/// The same `(password, salt)` pair always yields the same key (see the
/// `derive_seal_key_deterministic` test below).
pub fn derive_seal_key(mut password: SafeCell<Vec<u8>>, salt: &Salt) -> KeyCell {
    // Argon2 params: memory cost 262_144 KiB (~256 MiB), 3 iterations,
    // 4 lanes, default output length.
    #[allow(clippy::unwrap_used)]
    let params = argon2::Params::new(262_144, 3, 4, None).unwrap();
    let hasher = Argon2::new(Algorithm::Argon2id, argon2::Version::V0x13, params);
    let mut key = SafeCell::new(Key::default());
    // Hash straight into the key cell so the derived bytes never live in an
    // unguarded buffer.
    password.read_inline(|password_source| {
        let mut key_buffer = key.write();
        let key_buffer: &mut [u8] = key_buffer.as_mut();

        #[allow(
            clippy::unwrap_used,
            reason = "Better fail completely than return a weak key"
        )]
        hasher
            .hash_password_into(password_source.deref(), salt, key_buffer)
            .unwrap();
    });

    key.into()
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::safe_cell::SafeCell;

    /// Same password + same salt must derive the same key.
    #[test]
    pub fn derive_seal_key_deterministic() {
        static PASSWORD: &[u8] = b"password";
        let password = SafeCell::new(PASSWORD.to_vec());
        let password2 = SafeCell::new(PASSWORD.to_vec());
        let salt = generate_salt();

        let mut key1 = derive_seal_key(password, &salt);
        let mut key2 = derive_seal_key(password2, &salt);

        let key1_reader = key1.0.read();
        let key2_reader = key2.0.read();

        assert_eq!(key1_reader.deref(), key2_reader.deref());
    }

    /// Derivation must not silently produce an all-zero key.
    #[test]
    pub fn successful_derive() {
        static PASSWORD: &[u8] = b"password";
        let password = SafeCell::new(PASSWORD.to_vec());
        let salt = generate_salt();

        let mut key = derive_seal_key(password, &salt);
        let key_reader = key.0.read();
        let key_ref = key_reader.deref();

        assert_ne!(key_ref.as_slice(), &[0u8; 32][..]);
    }

    /// Round-trip: ciphertext must differ from plaintext and decrypt back
    /// with the same nonce and associated data.
    #[test]
    pub fn encrypt_decrypt() {
        static PASSWORD: &[u8] = b"password";
        let password = SafeCell::new(PASSWORD.to_vec());
        let salt = generate_salt();

        let mut key = derive_seal_key(password, &salt);
        let nonce = Nonce(*b"unique nonce 123 1231233"); // 24 bytes for XChaCha20Poly1305
        let associated_data = b"associated data";
        let mut buffer = b"secret data".to_vec();

        key.encrypt_in_place(&nonce, associated_data, &mut buffer)
            .unwrap();
        assert_ne!(buffer, b"secret data");

        let mut buffer = SafeCell::new(buffer);

        key.decrypt_in_place(&nonce, associated_data, &mut buffer)
            .unwrap();

        let buffer = buffer.read();
        assert_eq!(*buffer, b"secret data");
    }

    /// Incrementing the zero nonce bumps only the last (least-significant) byte.
    #[test]
    // We should fuzz this
    pub fn test_nonce_increment() {
        let mut nonce = Nonce([0u8; NONCE_LENGTH]);
        nonce.increment();

        assert_eq!(
            nonce.0,
            [
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1
            ]
        );
    }
}
|
||||
412
server/crates/arbiter-server/src/actors/keyholder/mod.rs
Normal file
412
server/crates/arbiter-server/src/actors/keyholder/mod.rs
Normal file
@@ -0,0 +1,412 @@
|
||||
use chrono::Utc;
|
||||
use diesel::{
|
||||
ExpressionMethods as _, OptionalExtension, QueryDsl, SelectableHelper,
|
||||
dsl::{insert_into, update},
|
||||
};
|
||||
use diesel_async::{AsyncConnection, RunQueryDsl};
|
||||
use kameo::{Actor, Reply, messages};
|
||||
use strum::{EnumDiscriminants, IntoDiscriminant};
|
||||
use tracing::{error, info};
|
||||
|
||||
use crate::safe_cell::SafeCell;
|
||||
use crate::{
|
||||
db::{
|
||||
self,
|
||||
models::{self, RootKeyHistory},
|
||||
schema::{self},
|
||||
},
|
||||
safe_cell::SafeCellHandle as _,
|
||||
};
|
||||
use encryption::v1::{self, KeyCell, Nonce};
|
||||
|
||||
pub mod encryption;
|
||||
|
||||
/// Vault lifecycle, mirrored publicly as `KeyHolderState` via the strum
/// discriminant derive (the private payloads never leave this actor).
#[derive(Default, EnumDiscriminants)]
#[strum_discriminants(derive(Reply), vis(pub), name(KeyHolderState))]
enum State {
    /// No root key has ever been created.
    #[default]
    Unbootstrapped,
    /// A root key row exists in the database but has not been decrypted yet.
    Sealed {
        root_key_history_id: i32,
    },
    /// The root key is decrypted in memory and available for AEAD operations.
    Unsealed {
        root_key_history_id: i32,
        root_key: KeyCell,
    },
}
|
||||
|
||||
/// Errors surfaced by the keyholder actor.
#[derive(Debug, thiserror::Error, miette::Diagnostic)]
pub enum Error {
    #[error("Keyholder is already bootstrapped")]
    #[diagnostic(code(arbiter::keyholder::already_bootstrapped))]
    AlreadyBootstrapped,
    #[error("Keyholder is not bootstrapped")]
    #[diagnostic(code(arbiter::keyholder::not_bootstrapped))]
    NotBootstrapped,
    /// The provided seal key failed to decrypt the root key.
    #[error("Invalid key provided")]
    #[diagnostic(code(arbiter::keyholder::invalid_key))]
    InvalidKey,

    #[error("Requested aead entry not found")]
    #[diagnostic(code(arbiter::keyholder::aead_not_found))]
    NotFound,

    /// AEAD encryption/decryption failure (including tag mismatch).
    #[error("Encryption error: {0}")]
    #[diagnostic(code(arbiter::keyholder::encryption_error))]
    Encryption(#[from] chacha20poly1305::aead::Error),

    /// Could not obtain a connection from the pool.
    #[error("Database error: {0}")]
    #[diagnostic(code(arbiter::keyholder::database_error))]
    DatabaseConnection(#[from] db::PoolError),

    #[error("Database transaction error: {0}")]
    #[diagnostic(code(arbiter::keyholder::database_transaction_error))]
    DatabaseTransaction(#[from] diesel::result::Error),

    /// Stored rows violated an invariant (e.g. malformed nonce or salt).
    #[error("Broken database")]
    #[diagnostic(code(arbiter::keyholder::broken_database))]
    BrokenDatabase,
}
|
||||
|
||||
/// Manages vault root key and tracks current state of the vault (bootstrapped/unbootstrapped, sealed/unsealed).
/// Provides API for encrypting and decrypting data using the vault root key.
/// Abstraction over database to make sure nonces are never reused and encryption keys are never exposed in plaintext outside of this actor.
#[derive(Actor)]
pub struct KeyHolder {
    // Pool used for root-key and aead_encrypted persistence.
    db: db::DatabasePool,
    // Current vault lifecycle state; holds the decrypted root key when unsealed.
    state: State,
}
|
||||
|
||||
#[messages]
|
||||
impl KeyHolder {
|
||||
/// Builds the keyholder, restoring its state from the database:
/// `Sealed` when a root key row is linked from `arbiter_settings`,
/// `Unbootstrapped` otherwise.
/// NOTE(review): assumes exactly one `arbiter_settings` row exists — confirm.
pub async fn new(db: db::DatabasePool) -> Result<Self, Error> {
    let state = {
        let mut conn = db.get().await?;

        // Left join so a settings row without a root key yields `None`.
        let (root_key_history,) = schema::arbiter_settings::table
            .left_join(schema::root_key_history::table)
            .select((Option::<RootKeyHistory>::as_select(),))
            .get_result::<(Option<RootKeyHistory>,)>(&mut conn)
            .await?;

        match root_key_history {
            Some(root_key_history) => State::Sealed {
                root_key_history_id: root_key_history.id,
            },
            None => State::Unbootstrapped,
        }
    };

    Ok(Self { db, state })
}
|
||||
|
||||
// Exclusive transaction to avoid race conditions if multiple keyholders write
// additional layer of protection against nonce-reuse
/// Atomically increments and persists the data-encryption nonce for
/// `root_key_id`, returning the fresh value. Because the read-increment-write
/// happens inside one exclusive transaction, no two callers can ever obtain
/// the same nonce.
async fn get_new_nonce(pool: &db::DatabasePool, root_key_id: i32) -> Result<Nonce, Error> {
    let mut conn = pool.get().await?;

    let nonce = conn
        .exclusive_transaction(|conn| {
            Box::pin(async move {
                let current_nonce: Vec<u8> = schema::root_key_history::table
                    .filter(schema::root_key_history::id.eq(root_key_id))
                    .select(schema::root_key_history::data_encryption_nonce)
                    .first(conn)
                    .await?;

                let mut nonce =
                    v1::Nonce::try_from(current_nonce.as_slice()).map_err(|_| {
                        error!(
                            "Broken database: invalid nonce for root key history id={}",
                            root_key_id
                        );
                        Error::BrokenDatabase
                    })?;
                nonce.increment();

                // Persist the incremented value before handing it out.
                update(schema::root_key_history::table)
                    .filter(schema::root_key_history::id.eq(root_key_id))
                    .set(schema::root_key_history::data_encryption_nonce.eq(nonce.to_vec()))
                    .execute(conn)
                    .await?;

                Result::<_, Error>::Ok(nonce)
            })
        })
        .await?;

    Ok(nonce)
}
|
||||
|
||||
/// First-time vault setup: derives a seal key from the caller-provided
/// secret, generates a random root key, seals (encrypts) the root key with
/// the seal key, persists everything, and leaves the vault `Unsealed`.
/// Fails with `AlreadyBootstrapped` when a root key already exists.
#[message]
pub async fn bootstrap(&mut self, seal_key_raw: SafeCell<Vec<u8>>) -> Result<(), Error> {
    if !matches!(self.state, State::Unbootstrapped) {
        return Err(Error::AlreadyBootstrapped);
    }
    let salt = v1::generate_salt();
    let mut seal_key = v1::derive_seal_key(seal_key_raw, &salt);
    let mut root_key = KeyCell::new_secure_random();

    // Zero nonces are fine because they are one-time
    let root_key_nonce = v1::Nonce::default();
    let data_encryption_nonce = v1::Nonce::default();

    // Encrypt the root key under the seal key without letting the raw root
    // key bytes leave the SafeCell read closure.
    let root_key_ciphertext: Vec<u8> = root_key.0.read_inline(|reader| {
        let root_key_reader = reader.as_slice();
        seal_key
            .encrypt(&root_key_nonce, v1::ROOT_KEY_TAG, root_key_reader)
            .map_err(|err| {
                error!(?err, "Fatal bootstrap error");
                Error::Encryption(err)
            })
    })?;

    let mut conn = self.db.get().await?;

    let data_encryption_nonce_bytes = data_encryption_nonce.to_vec();
    // Insert the key row and point `arbiter_settings` at it atomically.
    let root_key_history_id = conn
        .transaction(|conn| {
            Box::pin(async move {
                let root_key_history_id: i32 = insert_into(schema::root_key_history::table)
                    .values(&models::NewRootKeyHistory {
                        ciphertext: root_key_ciphertext,
                        tag: v1::ROOT_KEY_TAG.to_vec(),
                        root_key_encryption_nonce: root_key_nonce.to_vec(),
                        data_encryption_nonce: data_encryption_nonce_bytes,
                        schema_version: 1,
                        salt: salt.to_vec(),
                    })
                    .returning(schema::root_key_history::id)
                    .get_result(conn)
                    .await?;

                update(schema::arbiter_settings::table)
                    .set(schema::arbiter_settings::root_key_id.eq(root_key_history_id))
                    .execute(conn)
                    .await?;

                Result::<_, diesel::result::Error>::Ok(root_key_history_id)
            })
        })
        .await?;

    self.state = State::Unsealed {
        root_key,
        root_key_history_id,
    };

    info!("Keyholder bootstrapped successfully");

    Ok(())
}
|
||||
|
||||
#[message]
|
||||
pub async fn try_unseal(&mut self, seal_key_raw: SafeCell<Vec<u8>>) -> Result<(), Error> {
|
||||
let State::Sealed {
|
||||
root_key_history_id,
|
||||
} = &self.state
|
||||
else {
|
||||
return Err(Error::NotBootstrapped);
|
||||
};
|
||||
|
||||
// We don't want to hold connection while doing expensive KDF work
|
||||
let current_key = {
|
||||
let mut conn = self.db.get().await?;
|
||||
schema::root_key_history::table
|
||||
.filter(schema::root_key_history::id.eq(*root_key_history_id))
|
||||
.select(schema::root_key_history::data_encryption_nonce)
|
||||
.select(RootKeyHistory::as_select())
|
||||
.first(&mut conn)
|
||||
.await?
|
||||
};
|
||||
|
||||
let salt = ¤t_key.salt;
|
||||
let salt = v1::Salt::try_from(salt.as_slice()).map_err(|_| {
|
||||
error!("Broken database: invalid salt for root key");
|
||||
Error::BrokenDatabase
|
||||
})?;
|
||||
let mut seal_key = v1::derive_seal_key(seal_key_raw, &salt);
|
||||
|
||||
let mut root_key = SafeCell::new(current_key.ciphertext.clone());
|
||||
|
||||
let nonce = v1::Nonce::try_from(current_key.root_key_encryption_nonce.as_slice()).map_err(
|
||||
|_| {
|
||||
error!("Broken database: invalid nonce for root key");
|
||||
Error::BrokenDatabase
|
||||
},
|
||||
)?;
|
||||
|
||||
seal_key
|
||||
.decrypt_in_place(&nonce, v1::ROOT_KEY_TAG, &mut root_key)
|
||||
.map_err(|err| {
|
||||
error!(?err, "Failed to unseal root key: invalid seal key");
|
||||
Error::InvalidKey
|
||||
})?;
|
||||
|
||||
self.state = State::Unsealed {
|
||||
root_key_history_id: current_key.id,
|
||||
root_key: v1::KeyCell::try_from(root_key).map_err(|err| {
|
||||
error!(?err, "Broken database: invalid encryption key size");
|
||||
Error::BrokenDatabase
|
||||
})?,
|
||||
};
|
||||
|
||||
info!("Keyholder unsealed successfully");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Decrypts the `aead_encrypted` entry with the given ID and returns the
/// plaintext inside a SafeCell. Requires the vault to be unsealed; returns
/// `NotFound` when no such row exists.
#[message]
pub async fn decrypt(&mut self, aead_id: i32) -> Result<SafeCell<Vec<u8>>, Error> {
    let State::Unsealed { root_key, .. } = &mut self.state else {
        return Err(Error::NotBootstrapped);
    };

    // Scope the pool connection so it is released before decryption work.
    let row: models::AeadEncrypted = {
        let mut conn = self.db.get().await?;
        schema::aead_encrypted::table
            .select(models::AeadEncrypted::as_select())
            .filter(schema::aead_encrypted::id.eq(aead_id))
            .first(&mut conn)
            .await
            .optional()?
            .ok_or(Error::NotFound)?
    };

    let nonce = v1::Nonce::try_from(row.current_nonce.as_slice()).map_err(|_| {
        error!(
            "Broken database: invalid nonce for aead_encrypted id={}",
            aead_id
        );
        Error::BrokenDatabase
    })?;
    // Decrypt in place inside the SafeCell so plaintext never sits in an
    // unguarded buffer.
    let mut output = SafeCell::new(row.ciphertext);
    root_key.decrypt_in_place(&nonce, v1::TAG, &mut output)?;
    Ok(output)
}
|
||||
|
||||
// Creates new `aead_encrypted` entry in the database and returns it's ID
|
||||
#[message]
|
||||
pub async fn create_new(&mut self, mut plaintext: SafeCell<Vec<u8>>) -> Result<i32, Error> {
|
||||
let State::Unsealed {
|
||||
root_key,
|
||||
root_key_history_id,
|
||||
} = &mut self.state
|
||||
else {
|
||||
return Err(Error::NotBootstrapped);
|
||||
};
|
||||
|
||||
// Order matters here - `get_new_nonce` acquires connection, so we need to call it before next acquire
|
||||
// Borrow checker note: &mut borrow a few lines above is disjoint from this field
|
||||
let nonce = Self::get_new_nonce(&self.db, *root_key_history_id).await?;
|
||||
|
||||
let mut ciphertext_buffer = plaintext.write();
|
||||
let ciphertext_buffer: &mut Vec<u8> = ciphertext_buffer.as_mut();
|
||||
root_key.encrypt_in_place(&nonce, v1::TAG, &mut *ciphertext_buffer)?;
|
||||
|
||||
let ciphertext = std::mem::take(ciphertext_buffer);
|
||||
|
||||
let mut conn = self.db.get().await?;
|
||||
let aead_id: i32 = insert_into(schema::aead_encrypted::table)
|
||||
.values(&models::NewAeadEncrypted {
|
||||
ciphertext,
|
||||
tag: v1::TAG.to_vec(),
|
||||
current_nonce: nonce.to_vec(),
|
||||
schema_version: 1,
|
||||
associated_root_key_id: *root_key_history_id,
|
||||
created_at: Utc::now().into(),
|
||||
})
|
||||
.returning(schema::aead_encrypted::id)
|
||||
.get_result(&mut conn)
|
||||
.await?;
|
||||
|
||||
Ok(aead_id)
|
||||
}
|
||||
|
||||
#[message]
|
||||
pub fn get_state(&self) -> KeyHolderState {
|
||||
self.state.discriminant()
|
||||
}
|
||||
|
||||
#[message]
|
||||
pub fn seal(&mut self) -> Result<(), Error> {
|
||||
let State::Unsealed {
|
||||
root_key_history_id,
|
||||
..
|
||||
} = &self.state
|
||||
else {
|
||||
return Err(Error::NotBootstrapped);
|
||||
};
|
||||
self.state = State::Sealed {
|
||||
root_key_history_id: *root_key_history_id,
|
||||
};
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use diesel::SelectableHelper;
|
||||
|
||||
use diesel_async::RunQueryDsl;
|
||||
|
||||
use crate::{
|
||||
db::{self},
|
||||
safe_cell::SafeCell,
|
||||
};
|
||||
|
||||
use super::*;
|
||||
|
||||
async fn bootstrapped_actor(db: &db::DatabasePool) -> KeyHolder {
|
||||
let mut actor = KeyHolder::new(db.clone()).await.unwrap();
|
||||
let seal_key = SafeCell::new(b"test-seal-key".to_vec());
|
||||
actor.bootstrap(seal_key).await.unwrap();
|
||||
actor
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[test_log::test]
|
||||
async fn nonce_monotonic_even_when_nonce_allocation_interleaves() {
|
||||
let db = db::create_test_pool().await;
|
||||
let mut actor = bootstrapped_actor(&db).await;
|
||||
let root_key_history_id = match actor.state {
|
||||
State::Unsealed {
|
||||
root_key_history_id,
|
||||
..
|
||||
} => root_key_history_id,
|
||||
_ => panic!("expected unsealed state"),
|
||||
};
|
||||
|
||||
let n1 = KeyHolder::get_new_nonce(&db, root_key_history_id)
|
||||
.await
|
||||
.unwrap();
|
||||
let n2 = KeyHolder::get_new_nonce(&db, root_key_history_id)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(n2.to_vec() > n1.to_vec(), "nonce must increase");
|
||||
|
||||
let mut conn = db.get().await.unwrap();
|
||||
let root_row: models::RootKeyHistory = schema::root_key_history::table
|
||||
.select(models::RootKeyHistory::as_select())
|
||||
.first(&mut conn)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(root_row.data_encryption_nonce, n2.to_vec());
|
||||
|
||||
let id = actor
|
||||
.create_new(SafeCell::new(b"post-interleave".to_vec()))
|
||||
.await
|
||||
.unwrap();
|
||||
let row: models::AeadEncrypted = schema::aead_encrypted::table
|
||||
.filter(schema::aead_encrypted::id.eq(id))
|
||||
.select(models::AeadEncrypted::as_select())
|
||||
.first(&mut conn)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(
|
||||
row.current_nonce > n2.to_vec(),
|
||||
"next write must advance nonce"
|
||||
);
|
||||
}
|
||||
}
|
||||
50
server/crates/arbiter-server/src/actors/mod.rs
Normal file
50
server/crates/arbiter-server/src/actors/mod.rs
Normal file
@@ -0,0 +1,50 @@
|
||||
use kameo::actor::{ActorRef, Spawn};
|
||||
use miette::Diagnostic;
|
||||
use thiserror::Error;
|
||||
|
||||
use crate::{
|
||||
actors::{
|
||||
bootstrap::Bootstrapper, evm::EvmActor, flow_coordinator::FlowCoordinator,
|
||||
keyholder::KeyHolder,
|
||||
},
|
||||
db,
|
||||
};
|
||||
|
||||
pub mod bootstrap;
|
||||
pub mod client;
|
||||
mod evm;
|
||||
pub mod flow_coordinator;
|
||||
pub mod keyholder;
|
||||
pub mod user_agent;
|
||||
|
||||
#[derive(Error, Debug, Diagnostic)]
|
||||
pub enum SpawnError {
|
||||
#[error("Failed to spawn Bootstrapper actor")]
|
||||
#[diagnostic(code(SpawnError::Bootstrapper))]
|
||||
Bootstrapper(#[from] bootstrap::Error),
|
||||
|
||||
#[error("Failed to spawn KeyHolder actor")]
|
||||
#[diagnostic(code(SpawnError::KeyHolder))]
|
||||
KeyHolder(#[from] keyholder::Error),
|
||||
}
|
||||
|
||||
/// Long-lived actors that are shared across all connections and handle global state and operations
|
||||
#[derive(Clone)]
|
||||
pub struct GlobalActors {
|
||||
pub key_holder: ActorRef<KeyHolder>,
|
||||
pub bootstrapper: ActorRef<Bootstrapper>,
|
||||
pub flow_coordinator: ActorRef<FlowCoordinator>,
|
||||
pub evm: ActorRef<EvmActor>,
|
||||
}
|
||||
|
||||
impl GlobalActors {
|
||||
pub async fn spawn(db: db::DatabasePool) -> Result<Self, SpawnError> {
|
||||
let key_holder = KeyHolder::spawn(KeyHolder::new(db.clone()).await?);
|
||||
Ok(Self {
|
||||
bootstrapper: Bootstrapper::spawn(Bootstrapper::new(&db).await?),
|
||||
evm: EvmActor::spawn(EvmActor::new(key_holder.clone(), db)),
|
||||
key_holder,
|
||||
flow_coordinator: FlowCoordinator::spawn(FlowCoordinator::default()),
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,374 +0,0 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use arbiter_proto::{
|
||||
proto::{
|
||||
UserAgentRequest, UserAgentResponse,
|
||||
auth::{
|
||||
self, AuthChallengeRequest, ClientMessage, ServerMessage as AuthServerMessage,
|
||||
client_message::Payload as ClientAuthPayload,
|
||||
server_message::Payload as ServerAuthPayload,
|
||||
},
|
||||
user_agent_request::Payload as UserAgentRequestPayload,
|
||||
user_agent_response::Payload as UserAgentResponsePayload,
|
||||
},
|
||||
transport::Bi,
|
||||
};
|
||||
use diesel::{ExpressionMethods as _, OptionalExtension as _, QueryDsl, dsl::update};
|
||||
use diesel_async::{AsyncConnection, RunQueryDsl};
|
||||
use ed25519_dalek::VerifyingKey;
|
||||
use futures::StreamExt;
|
||||
use kameo::{
|
||||
Actor,
|
||||
actor::{ActorRef, Spawn},
|
||||
error::SendError,
|
||||
messages,
|
||||
prelude::Context,
|
||||
};
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::sync::mpsc::Sender;
|
||||
use tonic::{Status, transport::Server};
|
||||
use tracing::{debug, error, info};
|
||||
|
||||
use crate::{
|
||||
ServerContext,
|
||||
actors::user_agent::auth::AuthChallenge,
|
||||
context::bootstrap::{BootstrapActor, ConsumeToken},
|
||||
db::{self, schema},
|
||||
errors::GrpcStatusExt,
|
||||
};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ChallengeContext {
|
||||
challenge: AuthChallenge,
|
||||
key: VerifyingKey,
|
||||
}
|
||||
|
||||
// Request context with deserialized public key for state machine.
|
||||
// This intermediate struct is needed because the state machine branches depending on presence of bootstrap token,
|
||||
// but we want to have the deserialized key in both branches.
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct AuthRequestContext {
|
||||
pubkey: VerifyingKey,
|
||||
bootstrap_token: Option<String>,
|
||||
}
|
||||
|
||||
smlang::statemachine!(
|
||||
name: UserAgent,
|
||||
derive_states: [Debug],
|
||||
custom_error: false,
|
||||
transitions: {
|
||||
*Init + AuthRequest(AuthRequestContext) / auth_request_context = ReceivedAuthRequest(AuthRequestContext),
|
||||
ReceivedAuthRequest(AuthRequestContext) + ReceivedBootstrapToken = Authenticated,
|
||||
|
||||
ReceivedAuthRequest(AuthRequestContext) + SentChallenge(ChallengeContext) / move_challenge = WaitingForChallengeSolution(ChallengeContext),
|
||||
|
||||
WaitingForChallengeSolution(ChallengeContext) + ReceivedGoodSolution = Authenticated,
|
||||
WaitingForChallengeSolution(ChallengeContext) + ReceivedBadSolution = AuthError, // block further transitions, but connection should close anyway
|
||||
}
|
||||
);
|
||||
|
||||
pub struct DummyContext;
|
||||
impl UserAgentStateMachineContext for DummyContext {
|
||||
#[allow(missing_docs)]
|
||||
#[allow(clippy::unused_unit)]
|
||||
fn move_challenge(
|
||||
&mut self,
|
||||
state_data: &AuthRequestContext,
|
||||
event_data: ChallengeContext,
|
||||
) -> Result<ChallengeContext, ()> {
|
||||
Ok(event_data)
|
||||
}
|
||||
|
||||
#[allow(missing_docs)]
|
||||
#[allow(clippy::unused_unit)]
|
||||
fn auth_request_context(
|
||||
&mut self,
|
||||
event_data: AuthRequestContext,
|
||||
) -> Result<AuthRequestContext, ()> {
|
||||
Ok(event_data)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Actor)]
|
||||
pub struct UserAgentActor {
|
||||
db: db::DatabasePool,
|
||||
bootstapper: ActorRef<BootstrapActor>,
|
||||
state: UserAgentStateMachine<DummyContext>,
|
||||
tx: Sender<Result<UserAgentResponse, Status>>,
|
||||
}
|
||||
|
||||
impl UserAgentActor {
|
||||
pub(crate) fn new(
|
||||
context: ServerContext,
|
||||
tx: Sender<Result<UserAgentResponse, Status>>,
|
||||
) -> Self {
|
||||
Self {
|
||||
db: context.db.clone(),
|
||||
bootstapper: context.bootstrapper.clone(),
|
||||
state: UserAgentStateMachine::new(DummyContext),
|
||||
tx,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn new_manual(
|
||||
db: db::DatabasePool,
|
||||
bootstapper: ActorRef<BootstrapActor>,
|
||||
tx: Sender<Result<UserAgentResponse, Status>>,
|
||||
) -> Self {
|
||||
Self {
|
||||
db,
|
||||
bootstapper,
|
||||
state: UserAgentStateMachine::new(DummyContext),
|
||||
tx,
|
||||
}
|
||||
}
|
||||
|
||||
fn transition(&mut self, event: UserAgentEvents) -> Result<(), Status> {
|
||||
self.state.process_event(event).map_err(|e| {
|
||||
error!(?e, "State transition failed");
|
||||
Status::internal("State machine error")
|
||||
})?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn auth_with_bootstrap_token(
|
||||
&mut self,
|
||||
pubkey: ed25519_dalek::VerifyingKey,
|
||||
token: String,
|
||||
) -> Result<UserAgentResponse, Status> {
|
||||
let token_ok: bool = self
|
||||
.bootstapper
|
||||
.ask(ConsumeToken { token })
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(?pubkey, "Failed to consume bootstrap token: {e}");
|
||||
Status::internal("Bootstrap token consumption failed")
|
||||
})?;
|
||||
|
||||
if !token_ok {
|
||||
error!(?pubkey, "Invalid bootstrap token provided");
|
||||
return Err(Status::invalid_argument("Invalid bootstrap token"));
|
||||
}
|
||||
|
||||
{
|
||||
let mut conn = self.db.get().await.to_status()?;
|
||||
|
||||
diesel::insert_into(schema::useragent_client::table)
|
||||
.values((
|
||||
schema::useragent_client::public_key.eq(pubkey.as_bytes().to_vec()),
|
||||
schema::useragent_client::nonce.eq(1),
|
||||
))
|
||||
.execute(&mut conn)
|
||||
.await
|
||||
.to_status()?;
|
||||
}
|
||||
|
||||
self.transition(UserAgentEvents::ReceivedBootstrapToken)?;
|
||||
|
||||
Ok(auth_response(ServerAuthPayload::AuthOk(auth::AuthOk {})))
|
||||
}
|
||||
|
||||
async fn auth_with_challenge(&mut self, pubkey: VerifyingKey, pubkey_bytes: Vec<u8>) -> Output {
|
||||
let nonce: Option<i32> = {
|
||||
let mut db_conn = self.db.get().await.to_status()?;
|
||||
db_conn
|
||||
.transaction(|conn| {
|
||||
Box::pin(async move {
|
||||
let current_nonce = schema::useragent_client::table
|
||||
.filter(
|
||||
schema::useragent_client::public_key.eq(pubkey.as_bytes().to_vec()),
|
||||
)
|
||||
.select(schema::useragent_client::nonce)
|
||||
.first::<i32>(conn)
|
||||
.await?;
|
||||
|
||||
update(schema::useragent_client::table)
|
||||
.filter(
|
||||
schema::useragent_client::public_key.eq(pubkey.as_bytes().to_vec()),
|
||||
)
|
||||
.set(schema::useragent_client::nonce.eq(current_nonce + 1))
|
||||
.execute(conn)
|
||||
.await?;
|
||||
|
||||
Result::<_, diesel::result::Error>::Ok(current_nonce)
|
||||
})
|
||||
})
|
||||
.await
|
||||
.optional()
|
||||
.to_status()?
|
||||
};
|
||||
|
||||
let Some(nonce) = nonce else {
|
||||
error!(?pubkey, "Public key not found in database");
|
||||
return Err(Status::unauthenticated("Public key not registered"));
|
||||
};
|
||||
|
||||
let challenge = auth::AuthChallenge {
|
||||
pubkey: pubkey_bytes,
|
||||
nonce,
|
||||
};
|
||||
|
||||
self.transition(UserAgentEvents::SentChallenge(ChallengeContext {
|
||||
challenge: challenge.clone(),
|
||||
key: pubkey,
|
||||
}))?;
|
||||
|
||||
info!(
|
||||
?pubkey,
|
||||
?challenge,
|
||||
"Sent authentication challenge to client"
|
||||
);
|
||||
|
||||
Ok(auth_response(ServerAuthPayload::AuthChallenge(challenge)))
|
||||
}
|
||||
|
||||
fn verify_challenge_solution(
|
||||
&self,
|
||||
solution: &auth::AuthChallengeSolution,
|
||||
) -> Result<(bool, &ChallengeContext), Status> {
|
||||
let UserAgentStates::WaitingForChallengeSolution(challenge_context) = self.state.state()
|
||||
else {
|
||||
error!("Received challenge solution in invalid state");
|
||||
return Err(Status::invalid_argument(
|
||||
"Invalid state for challenge solution",
|
||||
));
|
||||
};
|
||||
let formatted_challenge = arbiter_proto::format_challenge(&challenge_context.challenge);
|
||||
|
||||
let signature = solution.signature.as_slice().try_into().map_err(|_| {
|
||||
error!(?solution, "Invalid signature length");
|
||||
Status::invalid_argument("Invalid signature length")
|
||||
})?;
|
||||
|
||||
let valid = challenge_context
|
||||
.key
|
||||
.verify_strict(&formatted_challenge, &signature)
|
||||
.is_ok();
|
||||
|
||||
Ok((valid, challenge_context))
|
||||
}
|
||||
}
|
||||
|
||||
type Output = Result<UserAgentResponse, Status>;
|
||||
|
||||
fn auth_response(payload: ServerAuthPayload) -> UserAgentResponse {
|
||||
UserAgentResponse {
|
||||
payload: Some(UserAgentResponsePayload::AuthMessage(AuthServerMessage {
|
||||
payload: Some(payload),
|
||||
})),
|
||||
}
|
||||
}
|
||||
|
||||
#[messages]
|
||||
impl UserAgentActor {
|
||||
#[message(ctx)]
|
||||
pub async fn handle_auth_challenge_request(
|
||||
&mut self,
|
||||
req: AuthChallengeRequest,
|
||||
ctx: &mut Context<Self, Output>,
|
||||
) -> Output {
|
||||
let pubkey = req.pubkey.as_array().ok_or(Status::invalid_argument(
|
||||
"Expected pubkey to have specific length",
|
||||
))?;
|
||||
let pubkey = VerifyingKey::from_bytes(pubkey).map_err(|err| {
|
||||
error!(?pubkey, "Failed to convert to VerifyingKey");
|
||||
Status::invalid_argument("Failed to convert pubkey to VerifyingKey")
|
||||
})?;
|
||||
|
||||
self.transition(UserAgentEvents::AuthRequest(AuthRequestContext {
|
||||
pubkey,
|
||||
bootstrap_token: req.bootstrap_token.clone(),
|
||||
}))?;
|
||||
|
||||
match req.bootstrap_token {
|
||||
Some(token) => self.auth_with_bootstrap_token(pubkey, token).await,
|
||||
None => self.auth_with_challenge(pubkey, req.pubkey).await,
|
||||
}
|
||||
}
|
||||
|
||||
#[message(ctx)]
|
||||
pub async fn handle_auth_challenge_solution(
|
||||
&mut self,
|
||||
solution: auth::AuthChallengeSolution,
|
||||
ctx: &mut Context<Self, Output>,
|
||||
) -> Output {
|
||||
let (valid, challenge_context) = self.verify_challenge_solution(&solution)?;
|
||||
|
||||
if valid {
|
||||
info!(
|
||||
?challenge_context,
|
||||
"Client provided valid solution to authentication challenge"
|
||||
);
|
||||
self.transition(UserAgentEvents::ReceivedGoodSolution)?;
|
||||
Ok(auth_response(ServerAuthPayload::AuthOk(auth::AuthOk {})))
|
||||
} else {
|
||||
error!("Client provided invalid solution to authentication challenge");
|
||||
self.transition(UserAgentEvents::ReceivedBadSolution)?;
|
||||
Err(Status::unauthenticated("Invalid challenge solution"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use arbiter_proto::proto::{
|
||||
UserAgentResponse,
|
||||
auth::{AuthChallengeRequest, AuthOk},
|
||||
user_agent_response::Payload as UserAgentResponsePayload,
|
||||
};
|
||||
use kameo::actor::Spawn;
|
||||
|
||||
use crate::{
|
||||
actors::user_agent::HandleAuthChallengeRequest, context::bootstrap::BootstrapActor, db,
|
||||
};
|
||||
|
||||
use super::UserAgentActor;
|
||||
|
||||
#[tokio::test]
|
||||
#[test_log::test]
|
||||
pub async fn test_bootstrap_token_auth() {
|
||||
let db = db::create_test_pool().await;
|
||||
// explicitly not installing any user_agent pubkeys
|
||||
let bootstrapper = BootstrapActor::new(&db).await.unwrap(); // this will create bootstrap token
|
||||
let token = bootstrapper.get_token().unwrap();
|
||||
|
||||
let bootstrapper_ref = BootstrapActor::spawn(bootstrapper);
|
||||
let user_agent = UserAgentActor::new_manual(
|
||||
db.clone(),
|
||||
bootstrapper_ref,
|
||||
tokio::sync::mpsc::channel(1).0, // dummy channel, we won't actually send responses in this test
|
||||
);
|
||||
let user_agent_ref = UserAgentActor::spawn(user_agent);
|
||||
|
||||
// simulate client sending auth request with bootstrap token
|
||||
let new_key = ed25519_dalek::SigningKey::generate(&mut rand::rng());
|
||||
let pubkey_bytes = new_key.verifying_key().to_bytes().to_vec();
|
||||
|
||||
let result = user_agent_ref
|
||||
.ask(HandleAuthChallengeRequest {
|
||||
req: AuthChallengeRequest {
|
||||
pubkey: pubkey_bytes,
|
||||
bootstrap_token: Some(token),
|
||||
},
|
||||
})
|
||||
.await
|
||||
.expect("Shouldn't fail to send message");
|
||||
|
||||
// auth succeeded
|
||||
assert_eq!(
|
||||
result,
|
||||
UserAgentResponse {
|
||||
payload: Some(UserAgentResponsePayload::AuthMessage(
|
||||
arbiter_proto::proto::auth::ServerMessage {
|
||||
payload: Some(arbiter_proto::proto::auth::server_message::Payload::AuthOk(
|
||||
AuthOk {},
|
||||
)),
|
||||
},
|
||||
)),
|
||||
}
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
mod transport;
|
||||
pub(crate) use transport::handle_user_agent;
|
||||
101
server/crates/arbiter-server/src/actors/user_agent/auth.rs
Normal file
101
server/crates/arbiter-server/src/actors/user_agent/auth.rs
Normal file
@@ -0,0 +1,101 @@
|
||||
use arbiter_proto::transport::Bi;
|
||||
use tracing::error;
|
||||
|
||||
use crate::actors::user_agent::{
|
||||
AuthPublicKey, UserAgentConnection,
|
||||
auth::state::{AuthContext, AuthStateMachine},
|
||||
};
|
||||
|
||||
mod state;
|
||||
use state::*;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum Inbound {
|
||||
AuthChallengeRequest {
|
||||
pubkey: AuthPublicKey,
|
||||
bootstrap_token: Option<String>,
|
||||
},
|
||||
AuthChallengeSolution {
|
||||
signature: Vec<u8>,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum Error {
|
||||
UnregisteredPublicKey,
|
||||
InvalidChallengeSolution,
|
||||
InvalidBootstrapToken,
|
||||
Internal { details: String },
|
||||
Transport,
|
||||
}
|
||||
|
||||
impl Error {
|
||||
fn internal(details: impl Into<String>) -> Self {
|
||||
Self::Internal {
|
||||
details: details.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum Outbound {
|
||||
AuthChallenge { nonce: i32 },
|
||||
AuthSuccess,
|
||||
}
|
||||
|
||||
fn parse_auth_event(payload: Inbound) -> AuthEvents {
|
||||
match payload {
|
||||
Inbound::AuthChallengeRequest {
|
||||
pubkey,
|
||||
bootstrap_token: None,
|
||||
} => AuthEvents::AuthRequest(ChallengeRequest { pubkey }),
|
||||
Inbound::AuthChallengeRequest {
|
||||
pubkey,
|
||||
bootstrap_token: Some(token),
|
||||
} => AuthEvents::BootstrapAuthRequest(BootstrapAuthRequest { pubkey, token }),
|
||||
Inbound::AuthChallengeSolution { signature } => {
|
||||
AuthEvents::ReceivedSolution(ChallengeSolution {
|
||||
solution: signature,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn authenticate<T>(
|
||||
props: &mut UserAgentConnection,
|
||||
transport: T,
|
||||
) -> Result<AuthPublicKey, Error>
|
||||
where
|
||||
T: Bi<Inbound, Result<Outbound, Error>> + Send,
|
||||
{
|
||||
let mut state = AuthStateMachine::new(AuthContext::new(props, transport));
|
||||
|
||||
loop {
|
||||
// `state` holds a mutable reference to `props` so we can't access it directly here
|
||||
let Some(payload) = state.context_mut().transport.recv().await else {
|
||||
return Err(Error::Transport);
|
||||
};
|
||||
|
||||
match state.process_event(parse_auth_event(payload)).await {
|
||||
Ok(AuthStates::AuthOk(key)) => return Ok(key.clone()),
|
||||
Err(AuthError::ActionFailed(err)) => {
|
||||
error!(?err, "State machine action failed");
|
||||
return Err(err);
|
||||
}
|
||||
Err(AuthError::GuardFailed(err)) => {
|
||||
error!(?err, "State machine guard failed");
|
||||
return Err(err);
|
||||
}
|
||||
Err(AuthError::InvalidEvent) => {
|
||||
error!("Invalid event for current state");
|
||||
return Err(Error::InvalidChallengeSolution);
|
||||
}
|
||||
Err(AuthError::TransitionsFailed) => {
|
||||
error!("Invalid state transition");
|
||||
return Err(Error::InvalidChallengeSolution);
|
||||
}
|
||||
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
}
|
||||
222
server/crates/arbiter-server/src/actors/user_agent/auth/state.rs
Normal file
222
server/crates/arbiter-server/src/actors/user_agent/auth/state.rs
Normal file
@@ -0,0 +1,222 @@
|
||||
use arbiter_proto::transport::Bi;
|
||||
use diesel::{ExpressionMethods as _, OptionalExtension as _, QueryDsl, update};
|
||||
use diesel_async::RunQueryDsl;
|
||||
use tracing::error;
|
||||
|
||||
use super::Error;
|
||||
use crate::{
|
||||
actors::{
|
||||
bootstrap::ConsumeToken,
|
||||
user_agent::{AuthPublicKey, UserAgentConnection, auth::Outbound},
|
||||
},
|
||||
db::schema,
|
||||
};
|
||||
|
||||
pub struct ChallengeRequest {
|
||||
pub pubkey: AuthPublicKey,
|
||||
}
|
||||
|
||||
pub struct BootstrapAuthRequest {
|
||||
pub pubkey: AuthPublicKey,
|
||||
pub token: String,
|
||||
}
|
||||
|
||||
pub struct ChallengeContext {
|
||||
pub challenge_nonce: i32,
|
||||
pub key: AuthPublicKey,
|
||||
}
|
||||
|
||||
pub struct ChallengeSolution {
|
||||
pub solution: Vec<u8>,
|
||||
}
|
||||
|
||||
smlang::statemachine!(
|
||||
name: Auth,
|
||||
custom_error: true,
|
||||
transitions: {
|
||||
*Init + AuthRequest(ChallengeRequest) / async prepare_challenge = SentChallenge(ChallengeContext),
|
||||
Init + BootstrapAuthRequest(BootstrapAuthRequest) / async verify_bootstrap_token = AuthOk(AuthPublicKey),
|
||||
SentChallenge(ChallengeContext) + ReceivedSolution(ChallengeSolution) / async verify_solution = AuthOk(AuthPublicKey),
|
||||
}
|
||||
);
|
||||
|
||||
async fn create_nonce(db: &crate::db::DatabasePool, pubkey_bytes: &[u8]) -> Result<i32, Error> {
|
||||
let mut db_conn = db.get().await.map_err(|e| {
|
||||
error!(error = ?e, "Database pool error");
|
||||
Error::internal("Database unavailable")
|
||||
})?;
|
||||
db_conn
|
||||
.exclusive_transaction(|conn| {
|
||||
Box::pin(async move {
|
||||
let current_nonce = schema::useragent_client::table
|
||||
.filter(schema::useragent_client::public_key.eq(pubkey_bytes.to_vec()))
|
||||
.select(schema::useragent_client::nonce)
|
||||
.first::<i32>(conn)
|
||||
.await?;
|
||||
|
||||
update(schema::useragent_client::table)
|
||||
.filter(schema::useragent_client::public_key.eq(pubkey_bytes.to_vec()))
|
||||
.set(schema::useragent_client::nonce.eq(current_nonce + 1))
|
||||
.execute(conn)
|
||||
.await?;
|
||||
|
||||
Result::<_, diesel::result::Error>::Ok(current_nonce)
|
||||
})
|
||||
})
|
||||
.await
|
||||
.optional()
|
||||
.map_err(|e| {
|
||||
error!(error = ?e, "Database error");
|
||||
Error::internal("Database operation failed")
|
||||
})?
|
||||
.ok_or_else(|| {
|
||||
error!(?pubkey_bytes, "Public key not found in database");
|
||||
Error::UnregisteredPublicKey
|
||||
})
|
||||
}
|
||||
|
||||
async fn register_key(db: &crate::db::DatabasePool, pubkey: &AuthPublicKey) -> Result<(), Error> {
|
||||
let pubkey_bytes = pubkey.to_stored_bytes();
|
||||
let key_type = pubkey.key_type();
|
||||
let mut conn = db.get().await.map_err(|e| {
|
||||
error!(error = ?e, "Database pool error");
|
||||
Error::internal("Database unavailable")
|
||||
})?;
|
||||
|
||||
diesel::insert_into(schema::useragent_client::table)
|
||||
.values((
|
||||
schema::useragent_client::public_key.eq(pubkey_bytes),
|
||||
schema::useragent_client::nonce.eq(1),
|
||||
schema::useragent_client::key_type.eq(key_type),
|
||||
))
|
||||
.execute(&mut conn)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(error = ?e, "Database error");
|
||||
Error::internal("Database operation failed")
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub struct AuthContext<'a, T> {
|
||||
pub(super) conn: &'a mut UserAgentConnection,
|
||||
pub(super) transport: T,
|
||||
}
|
||||
|
||||
impl<'a, T> AuthContext<'a, T> {
|
||||
pub fn new(conn: &'a mut UserAgentConnection, transport: T) -> Self {
|
||||
Self { conn, transport }
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> AuthStateMachineContext for AuthContext<'_, T>
|
||||
where
|
||||
T: Bi<super::Inbound, Result<super::Outbound, Error>> + Send,
|
||||
{
|
||||
type Error = Error;
|
||||
|
||||
async fn prepare_challenge(
|
||||
&mut self,
|
||||
ChallengeRequest { pubkey }: ChallengeRequest,
|
||||
) -> Result<ChallengeContext, Self::Error> {
|
||||
let stored_bytes = pubkey.to_stored_bytes();
|
||||
let nonce = create_nonce(&self.conn.db, &stored_bytes).await?;
|
||||
|
||||
self.transport
|
||||
.send(Ok(Outbound::AuthChallenge { nonce }))
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(?e, "Failed to send auth challenge");
|
||||
Error::Transport
|
||||
})?;
|
||||
|
||||
Ok(ChallengeContext {
|
||||
challenge_nonce: nonce,
|
||||
key: pubkey,
|
||||
})
|
||||
}
|
||||
|
||||
#[allow(missing_docs)]
|
||||
#[allow(clippy::result_unit_err)]
|
||||
async fn verify_bootstrap_token(
|
||||
&mut self,
|
||||
BootstrapAuthRequest { pubkey, token }: BootstrapAuthRequest,
|
||||
) -> Result<AuthPublicKey, Self::Error> {
|
||||
let token_ok: bool = self
|
||||
.conn
|
||||
.actors
|
||||
.bootstrapper
|
||||
.ask(ConsumeToken {
|
||||
token: token.clone(),
|
||||
})
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(?e, "Failed to consume bootstrap token");
|
||||
Error::internal("Failed to consume bootstrap token")
|
||||
})?;
|
||||
|
||||
if !token_ok {
|
||||
error!("Invalid bootstrap token provided");
|
||||
return Err(Error::InvalidBootstrapToken);
|
||||
}
|
||||
|
||||
register_key(&self.conn.db, &pubkey).await?;
|
||||
|
||||
self.transport
|
||||
.send(Ok(Outbound::AuthSuccess))
|
||||
.await
|
||||
.map_err(|_| Error::Transport)?;
|
||||
|
||||
Ok(pubkey)
|
||||
}
|
||||
|
||||
#[allow(missing_docs)]
|
||||
#[allow(clippy::unused_unit)]
|
||||
async fn verify_solution(
|
||||
&mut self,
|
||||
ChallengeContext {
|
||||
challenge_nonce,
|
||||
key,
|
||||
}: &ChallengeContext,
|
||||
ChallengeSolution { solution }: ChallengeSolution,
|
||||
) -> Result<AuthPublicKey, Self::Error> {
|
||||
let formatted = arbiter_proto::format_challenge(*challenge_nonce, &key.to_stored_bytes());
|
||||
|
||||
let valid = match key {
|
||||
AuthPublicKey::Ed25519(vk) => {
|
||||
let sig = solution.as_slice().try_into().map_err(|_| {
|
||||
error!(?solution, "Invalid Ed25519 signature length");
|
||||
Error::InvalidChallengeSolution
|
||||
})?;
|
||||
vk.verify_strict(&formatted, &sig).is_ok()
|
||||
}
|
||||
AuthPublicKey::EcdsaSecp256k1(vk) => {
|
||||
use k256::ecdsa::signature::Verifier as _;
|
||||
let sig = k256::ecdsa::Signature::try_from(solution.as_slice()).map_err(|_| {
|
||||
error!(?solution, "Invalid ECDSA signature bytes");
|
||||
Error::InvalidChallengeSolution
|
||||
})?;
|
||||
vk.verify(&formatted, &sig).is_ok()
|
||||
}
|
||||
AuthPublicKey::Rsa(pk) => {
|
||||
use rsa::signature::Verifier as _;
|
||||
let verifying_key = rsa::pss::VerifyingKey::<sha2::Sha256>::new(pk.clone());
|
||||
let sig = rsa::pss::Signature::try_from(solution.as_slice()).map_err(|_| {
|
||||
error!(?solution, "Invalid RSA signature bytes");
|
||||
Error::InvalidChallengeSolution
|
||||
})?;
|
||||
verifying_key.verify(&formatted, &sig).is_ok()
|
||||
}
|
||||
};
|
||||
|
||||
if valid {
|
||||
self.transport
|
||||
.send(Ok(Outbound::AuthSuccess))
|
||||
.await
|
||||
.map_err(|_| Error::Transport)?;
|
||||
}
|
||||
|
||||
Ok(key.clone())
|
||||
}
|
||||
}
|
||||
94
server/crates/arbiter-server/src/actors/user_agent/mod.rs
Normal file
94
server/crates/arbiter-server/src/actors/user_agent/mod.rs
Normal file
@@ -0,0 +1,94 @@
|
||||
use crate::{
|
||||
actors::{GlobalActors, client::ClientProfile},
|
||||
db::{self, models::KeyType},
|
||||
};
|
||||
|
||||
/// Abstraction over Ed25519 / ECDSA-secp256k1 / RSA public keys used during the auth handshake.
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum AuthPublicKey {
|
||||
Ed25519(ed25519_dalek::VerifyingKey),
|
||||
/// Compressed SEC1 public key; signature bytes are raw 64-byte (r||s).
|
||||
EcdsaSecp256k1(k256::ecdsa::VerifyingKey),
|
||||
/// RSA-2048+ public key (Windows Hello / KeyCredentialManager); signature bytes are PSS+SHA-256.
|
||||
Rsa(rsa::RsaPublicKey),
|
||||
}
|
||||
|
||||
impl AuthPublicKey {
|
||||
/// Canonical bytes stored in DB and echoed back in the challenge.
|
||||
/// Ed25519: raw 32 bytes. ECDSA: SEC1 compressed 33 bytes. RSA: DER-encoded SPKI.
|
||||
pub fn to_stored_bytes(&self) -> Vec<u8> {
|
||||
match self {
|
||||
AuthPublicKey::Ed25519(k) => k.to_bytes().to_vec(),
|
||||
// SEC1 compressed (33 bytes) is the natural compact format for secp256k1
|
||||
AuthPublicKey::EcdsaSecp256k1(k) => k.to_encoded_point(true).as_bytes().to_vec(),
|
||||
AuthPublicKey::Rsa(k) => {
|
||||
use rsa::pkcs8::EncodePublicKey as _;
|
||||
#[allow(clippy::expect_used)]
|
||||
k.to_public_key_der()
|
||||
.expect("rsa SPKI encoding is infallible")
|
||||
.to_vec()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn key_type(&self) -> KeyType {
|
||||
match self {
|
||||
AuthPublicKey::Ed25519(_) => KeyType::Ed25519,
|
||||
AuthPublicKey::EcdsaSecp256k1(_) => KeyType::EcdsaSecp256k1,
|
||||
AuthPublicKey::Rsa(_) => KeyType::Rsa,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<(KeyType, Vec<u8>)> for AuthPublicKey {
|
||||
type Error = &'static str;
|
||||
|
||||
fn try_from(value: (KeyType, Vec<u8>)) -> Result<Self, Self::Error> {
|
||||
let (key_type, bytes) = value;
|
||||
match key_type {
|
||||
KeyType::Ed25519 => {
|
||||
let bytes: [u8; 32] = bytes.try_into().map_err(|_| "invalid Ed25519 key length")?;
|
||||
let key = ed25519_dalek::VerifyingKey::from_bytes(&bytes)
|
||||
.map_err(|_e| "invalid Ed25519 key")?;
|
||||
Ok(AuthPublicKey::Ed25519(key))
|
||||
}
|
||||
KeyType::EcdsaSecp256k1 => {
|
||||
let point =
|
||||
k256::EncodedPoint::from_bytes(&bytes).map_err(|_e| "invalid ECDSA key")?;
|
||||
let key = k256::ecdsa::VerifyingKey::from_encoded_point(&point)
|
||||
.map_err(|_e| "invalid ECDSA key")?;
|
||||
Ok(AuthPublicKey::EcdsaSecp256k1(key))
|
||||
}
|
||||
KeyType::Rsa => {
|
||||
use rsa::pkcs8::DecodePublicKey as _;
|
||||
let key = rsa::RsaPublicKey::from_public_key_der(&bytes)
|
||||
.map_err(|_e| "invalid RSA key")?;
|
||||
Ok(AuthPublicKey::Rsa(key))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Messages, sent by user agent to connection client without having a request
|
||||
#[derive(Debug)]
|
||||
pub enum OutOfBand {
|
||||
ClientConnectionRequest { profile: ClientProfile },
|
||||
ClientConnectionCancel { pubkey: ed25519_dalek::VerifyingKey },
|
||||
}
|
||||
|
||||
pub struct UserAgentConnection {
|
||||
pub(crate) db: db::DatabasePool,
|
||||
pub(crate) actors: GlobalActors,
|
||||
}
|
||||
|
||||
impl UserAgentConnection {
    /// Bundle the database pool and global actor handles for a session.
    pub fn new(db: db::DatabasePool, actors: GlobalActors) -> Self {
        Self { db, actors }
    }
}
|
||||
|
||||
pub mod auth;
|
||||
pub mod session;
|
||||
|
||||
pub use auth::authenticate;
|
||||
pub use session::UserAgentSession;
|
||||
181
server/crates/arbiter-server/src/actors/user_agent/session.rs
Normal file
181
server/crates/arbiter-server/src/actors/user_agent/session.rs
Normal file
@@ -0,0 +1,181 @@
|
||||
use std::{borrow::Cow, collections::HashMap};
|
||||
|
||||
use arbiter_proto::transport::Sender;
|
||||
use async_trait::async_trait;
|
||||
use ed25519_dalek::VerifyingKey;
|
||||
use kameo::{Actor, actor::ActorRef, messages};
|
||||
use thiserror::Error;
|
||||
use tracing::error;
|
||||
|
||||
use crate::actors::{
|
||||
client::ClientProfile,
|
||||
flow_coordinator::{RegisterUserAgent, client_connect_approval::ClientApprovalController},
|
||||
user_agent::{OutOfBand, UserAgentConnection},
|
||||
};
|
||||
|
||||
mod state;
|
||||
use state::{DummyContext, UserAgentEvents, UserAgentStateMachine};
|
||||
|
||||
/// Errors surfaced by the user-agent session actor.
#[derive(Debug, Error)]
pub enum Error {
    /// The session state machine rejected an event (see logs for the event).
    #[error("State transition failed")]
    State,

    /// Any other failure; the message is safe to show to operators.
    #[error("Internal error: {message}")]
    Internal { message: Cow<'static, str> },
}
|
||||
|
||||
// Pool failures are logged with full detail here and reduced to a generic
// internal error so DB internals don't leak to callers.
impl From<crate::db::PoolError> for Error {
    fn from(err: crate::db::PoolError) -> Self {
        error!(?err, "Database pool error");
        Self::internal("Database pool error")
    }
}
|
||||
// Same policy as the pool conversion: log the diesel error, return a
// generic internal error.
impl From<diesel::result::Error> for Error {
    fn from(err: diesel::result::Error) -> Self {
        error!(?err, "Database error");
        Self::internal("Database error")
    }
}
|
||||
|
||||
impl Error {
|
||||
pub fn internal(message: impl Into<Cow<'static, str>>) -> Self {
|
||||
Self::Internal {
|
||||
message: message.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Bookkeeping for a client connection awaiting the user agent's verdict.
pub struct PendingClientApproval {
    // Actor that receives the eventual `ClientApprovalAnswer`.
    controller: ActorRef<ClientApprovalController>,
}
|
||||
|
||||
/// Actor representing one live user-agent connection: tracks the unseal
/// state machine, the out-of-band channel back to the agent, and any client
/// connection requests awaiting approval.
pub struct UserAgentSession {
    // Shared server dependencies (DB pool + global actors).
    props: UserAgentConnection,
    // Unseal/bootstrap lifecycle state machine.
    state: UserAgentStateMachine<DummyContext>,
    // Channel for pushing `OutOfBand` notifications to the user agent.
    sender: Box<dyn Sender<OutOfBand>>,

    // Approval controllers keyed by the requesting client's pubkey.
    pending_client_approvals: HashMap<VerifyingKey, PendingClientApproval>,
}
|
||||
|
||||
pub mod connection;
|
||||
|
||||
impl UserAgentSession {
    /// Wire a session to its shared dependencies and out-of-band sender.
    pub(crate) fn new(props: UserAgentConnection, sender: Box<dyn Sender<OutOfBand>>) -> Self {
        Self {
            props,
            state: UserAgentStateMachine::new(DummyContext),
            sender,
            pending_client_approvals: Default::default(),
        }
    }

    /// Test constructor: like `new`, but with a sender that silently
    /// discards every out-of-band message.
    pub fn new_test(db: crate::db::DatabasePool, actors: crate::actors::GlobalActors) -> Self {
        struct DummySender;

        #[async_trait]
        impl Sender<OutOfBand> for DummySender {
            async fn send(
                &mut self,
                _item: OutOfBand,
            ) -> Result<(), arbiter_proto::transport::Error> {
                Ok(())
            }
        }

        Self::new(UserAgentConnection::new(db, actors), Box::new(DummySender))
    }

    /// Feed an event into the session state machine, collapsing any rejected
    /// transition into `Error::State` (details are logged, not returned).
    fn transition(&mut self, event: UserAgentEvents) -> Result<(), Error> {
        self.state.process_event(event).map_err(|e| {
            error!(?e, "State transition failed");
            Error::State
        })?;
        Ok(())
    }
}
|
||||
|
||||
#[messages]
impl UserAgentSession {
    /// Announce a client connection request to the user agent and remember
    /// the approval controller so the answer can be routed back later.
    ///
    /// If the out-of-band notification fails, no pending approval is
    /// recorded — the agent never saw the request.
    #[message]
    pub async fn begin_new_client_approval(
        &mut self,
        client: ClientProfile,
        controller: ActorRef<ClientApprovalController>,
    ) {
        if let Err(e) = self
            .sender
            .send(OutOfBand::ClientConnectionRequest {
                profile: client.clone(),
            })
            .await
        {
            error!(
                ?e,
                actor = "user_agent",
                event = "failed to announce new client connection"
            );
            return;
        }

        self.pending_client_approvals
            .insert(client.pubkey, PendingClientApproval { controller });
    }
}
|
||||
|
||||
impl Actor for UserAgentSession {
    type Args = Self;

    type Error = Error;

    /// Register this session with the flow coordinator before accepting
    /// messages; registration failure aborts actor startup.
    async fn on_start(
        args: Self::Args,
        this: kameo::prelude::ActorRef<Self>,
    ) -> Result<Self, Self::Error> {
        args.props
            .actors
            .flow_coordinator
            .ask(RegisterUserAgent {
                actor: this.clone(),
            })
            .await
            .map_err(|err| {
                error!(
                    ?err,
                    "Failed to register user agent connection with flow coordinator"
                );
                Error::internal("Failed to register user agent connection with flow coordinator")
            })?;
        Ok(args)
    }

    /// When a linked actor dies, check whether it was a pending client
    /// approval controller; if so, drop the pending entry and notify the
    /// user agent that the connection attempt was cancelled.
    async fn on_link_died(
        &mut self,
        _: kameo::prelude::WeakActorRef<Self>,
        id: kameo::prelude::ActorId,
        _: kameo::prelude::ActorStopReason,
    ) -> Result<std::ops::ControlFlow<kameo::prelude::ActorStopReason>, Self::Error> {
        // Linear scan is fine: the pending-approval map is small.
        let cancelled_pubkey = self
            .pending_client_approvals
            .iter()
            .find_map(|(k, v)| (v.controller.id() == id).then_some(*k));

        if let Some(pubkey) = cancelled_pubkey {
            self.pending_client_approvals.remove(&pubkey);

            if let Err(e) = self
                .sender
                .send(OutOfBand::ClientConnectionCancel { pubkey })
                .await
            {
                // Notification failure is logged but does not stop the actor.
                error!(
                    ?e,
                    actor = "user_agent",
                    event = "failed to announce client connection cancellation"
                );
            }
        }

        Ok(std::ops::ControlFlow::Continue(()))
    }
}
|
||||
@@ -0,0 +1,473 @@
|
||||
use std::sync::Mutex;
|
||||
|
||||
use alloy::primitives::Address;
|
||||
use chacha20poly1305::{AeadInPlace, XChaCha20Poly1305, XNonce, aead::KeyInit};
|
||||
use diesel::sql_types::ops::Add;
|
||||
use diesel::{BoolExpressionMethods as _, ExpressionMethods as _, QueryDsl as _, SelectableHelper};
|
||||
use diesel_async::{AsyncConnection, RunQueryDsl};
|
||||
use kameo::error::SendError;
|
||||
use kameo::prelude::Context;
|
||||
use kameo::{message, messages};
|
||||
use tracing::{error, info};
|
||||
use x25519_dalek::{EphemeralSecret, PublicKey};
|
||||
|
||||
use crate::actors::flow_coordinator::client_connect_approval::ClientApprovalAnswer;
|
||||
use crate::actors::keyholder::KeyHolderState;
|
||||
use crate::actors::user_agent::session::Error;
|
||||
use crate::db::models::{
|
||||
CoreEvmWalletAccess, EvmWalletAccess, NewEvmWalletAccess, ProgramClient, ProgramClientMetadata,
|
||||
};
|
||||
use crate::db::schema::evm_wallet_access;
|
||||
use crate::evm::policies::{Grant, SpecificGrant};
|
||||
use crate::safe_cell::SafeCell;
|
||||
use crate::{
|
||||
actors::{
|
||||
evm::{
|
||||
Generate, ListWallets, UseragentCreateGrant, UseragentDeleteGrant, UseragentListGrants,
|
||||
},
|
||||
keyholder::{self, Bootstrap, TryUnseal},
|
||||
user_agent::session::{
|
||||
UserAgentSession,
|
||||
state::{UnsealContext, UserAgentEvents, UserAgentStates},
|
||||
},
|
||||
},
|
||||
safe_cell::SafeCellHandle as _,
|
||||
};
|
||||
|
||||
impl UserAgentSession {
|
||||
fn take_unseal_secret(&mut self) -> Result<(EphemeralSecret, PublicKey), Error> {
|
||||
let UserAgentStates::WaitingForUnsealKey(unseal_context) = self.state.state() else {
|
||||
error!("Received encrypted key in invalid state");
|
||||
return Err(Error::internal("Invalid state for unseal encrypted key"));
|
||||
};
|
||||
|
||||
let ephemeral_secret = {
|
||||
#[allow(
|
||||
clippy::unwrap_used,
|
||||
reason = "Mutex poison is unrecoverable and should panic"
|
||||
)]
|
||||
let mut secret_lock = unseal_context.secret.lock().unwrap();
|
||||
let secret = secret_lock.take();
|
||||
match secret {
|
||||
Some(secret) => secret,
|
||||
None => {
|
||||
drop(secret_lock);
|
||||
error!("Ephemeral secret already taken");
|
||||
return Err(Error::internal("Ephemeral secret already taken"));
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
Ok((ephemeral_secret, unseal_context.client_public_key))
|
||||
}
|
||||
|
||||
fn decrypt_client_key_material(
|
||||
ephemeral_secret: EphemeralSecret,
|
||||
client_public_key: PublicKey,
|
||||
nonce: &[u8],
|
||||
ciphertext: &[u8],
|
||||
associated_data: &[u8],
|
||||
) -> Result<SafeCell<Vec<u8>>, ()> {
|
||||
let nonce = XNonce::from_slice(nonce);
|
||||
|
||||
let shared_secret = ephemeral_secret.diffie_hellman(&client_public_key);
|
||||
let cipher = XChaCha20Poly1305::new(shared_secret.as_bytes().into());
|
||||
|
||||
let mut key_buffer = SafeCell::new(ciphertext.to_vec());
|
||||
|
||||
let decryption_result = key_buffer.write_inline(|write_handle| {
|
||||
cipher.decrypt_in_place(nonce, associated_data, write_handle)
|
||||
});
|
||||
|
||||
match decryption_result {
|
||||
Ok(_) => Ok(key_buffer),
|
||||
Err(err) => {
|
||||
error!(?err, "Failed to decrypt encrypted key material");
|
||||
Err(())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Reply to an unseal request: the server's ephemeral X25519 public key the
/// client must use to encrypt the seal key.
pub struct UnsealStartResponse {
    pub server_pubkey: PublicKey,
}
|
||||
|
||||
/// Failures of the unseal handshake.
#[derive(Debug, Error)]
pub enum UnsealError {
    /// The key material did not decrypt/validate (wrong key or tampering).
    #[error("Invalid key provided for unsealing")]
    InvalidKey,
    /// Anything else (state machine, actor transport, ...).
    #[error("Internal error during unsealing process")]
    General(#[from] super::Error),
}
|
||||
|
||||
/// Failures of the vault bootstrap handshake.
#[derive(Debug, Error)]
pub enum BootstrapError {
    /// The key material did not decrypt/validate.
    #[error("Invalid key provided for bootstrapping")]
    InvalidKey,
    /// Bootstrap was attempted on an already-initialized vault.
    #[error("Vault is already bootstrapped")]
    AlreadyBootstrapped,

    /// Anything else (state machine, actor transport, ...).
    #[error("Internal error during bootstrapping process")]
    General(#[from] super::Error),
}
|
||||
|
||||
#[messages]
impl UserAgentSession {
    /// Start the unseal handshake: generate an ephemeral X25519 keypair,
    /// stash the secret in the state machine, and hand the public half back
    /// to the client.
    #[message]
    pub async fn handle_unseal_request(
        &mut self,
        client_pubkey: x25519_dalek::PublicKey,
    ) -> Result<UnsealStartResponse, Error> {
        let secret = EphemeralSecret::random();
        let public_key = PublicKey::from(&secret);

        self.transition(UserAgentEvents::UnsealRequest(UnsealContext {
            secret: Mutex::new(Some(secret)),
            client_public_key: client_pubkey,
        }))?;

        Ok(UnsealStartResponse {
            server_pubkey: public_key,
        })
    }

    /// Second unseal step: decrypt the client-encrypted seal key and hand it
    /// to the keyholder. Any failure transitions the state machine back via
    /// `ReceivedInvalidKey`.
    #[message]
    pub async fn handle_unseal_encrypted_key(
        &mut self,
        nonce: Vec<u8>,
        ciphertext: Vec<u8>,
        associated_data: Vec<u8>,
    ) -> Result<(), UnsealError> {
        let (ephemeral_secret, client_public_key) = match self.take_unseal_secret() {
            Ok(values) => values,
            // NOTE(review): `take_unseal_secret` appears to return only
            // `Error::Internal`, never `Error::State`, so this arm looks
            // unreachable — confirm intended error variant.
            Err(Error::State) => {
                self.transition(UserAgentEvents::ReceivedInvalidKey)?;
                return Err(UnsealError::InvalidKey);
            }
            Err(_err) => {
                return Err(Error::internal("Failed to take unseal secret").into());
            }
        };

        let seal_key_buffer = match Self::decrypt_client_key_material(
            ephemeral_secret,
            client_public_key,
            &nonce,
            &ciphertext,
            &associated_data,
        ) {
            Ok(buffer) => buffer,
            Err(()) => {
                self.transition(UserAgentEvents::ReceivedInvalidKey)?;
                return Err(UnsealError::InvalidKey);
            }
        };

        match self
            .props
            .actors
            .key_holder
            .ask(TryUnseal {
                seal_key_raw: seal_key_buffer,
            })
            .await
        {
            Ok(_) => {
                info!("Successfully unsealed key with client-provided key");
                self.transition(UserAgentEvents::ReceivedValidKey)?;
                Ok(())
            }
            Err(SendError::HandlerError(keyholder::Error::InvalidKey)) => {
                self.transition(UserAgentEvents::ReceivedInvalidKey)?;
                Err(UnsealError::InvalidKey)
            }
            // Other keyholder errors are collapsed into InvalidKey so the
            // client cannot distinguish failure modes.
            Err(SendError::HandlerError(err)) => {
                error!(?err, "Keyholder failed to unseal key");
                self.transition(UserAgentEvents::ReceivedInvalidKey)?;
                Err(UnsealError::InvalidKey)
            }
            // Transport failure: the keyholder actor itself is unreachable.
            Err(err) => {
                error!(?err, "Failed to send unseal request to keyholder");
                self.transition(UserAgentEvents::ReceivedInvalidKey)?;
                Err(Error::internal("Vault actor error").into())
            }
        }
    }

    /// Bootstrap variant of the encrypted-key step: decrypt the client-sent
    /// seal key and initialize the vault with it. Shares the unseal state
    /// machine and decryption path.
    #[message]
    pub(crate) async fn handle_bootstrap_encrypted_key(
        &mut self,
        nonce: Vec<u8>,
        ciphertext: Vec<u8>,
        associated_data: Vec<u8>,
    ) -> Result<(), BootstrapError> {
        let (ephemeral_secret, client_public_key) = match self.take_unseal_secret() {
            Ok(values) => values,
            // NOTE(review): same as above — `Error::State` looks unreachable
            // from `take_unseal_secret`; confirm.
            Err(Error::State) => {
                self.transition(UserAgentEvents::ReceivedInvalidKey)?;
                return Err(BootstrapError::InvalidKey);
            }
            Err(err) => return Err(err.into()),
        };

        let seal_key_buffer = match Self::decrypt_client_key_material(
            ephemeral_secret,
            client_public_key,
            &nonce,
            &ciphertext,
            &associated_data,
        ) {
            Ok(buffer) => buffer,
            Err(()) => {
                self.transition(UserAgentEvents::ReceivedInvalidKey)?;
                return Err(BootstrapError::InvalidKey);
            }
        };

        match self
            .props
            .actors
            .key_holder
            .ask(Bootstrap {
                seal_key_raw: seal_key_buffer,
            })
            .await
        {
            Ok(_) => {
                info!("Successfully bootstrapped vault with client-provided key");
                self.transition(UserAgentEvents::ReceivedValidKey)?;
                Ok(())
            }
            Err(SendError::HandlerError(keyholder::Error::AlreadyBootstrapped)) => {
                self.transition(UserAgentEvents::ReceivedInvalidKey)?;
                Err(BootstrapError::AlreadyBootstrapped)
            }
            Err(SendError::HandlerError(err)) => {
                error!(?err, "Keyholder failed to bootstrap vault");
                self.transition(UserAgentEvents::ReceivedInvalidKey)?;
                Err(BootstrapError::InvalidKey)
            }
            Err(err) => {
                error!(?err, "Failed to send bootstrap request to keyholder");
                self.transition(UserAgentEvents::ReceivedInvalidKey)?;
                Err(BootstrapError::General(Error::internal(
                    "Vault actor error",
                )))
            }
        }
    }
}
|
||||
|
||||
#[messages]
|
||||
impl UserAgentSession {
|
||||
#[message]
|
||||
pub(crate) async fn handle_query_vault_state(&mut self) -> Result<KeyHolderState, Error> {
|
||||
use crate::actors::keyholder::GetState;
|
||||
|
||||
let vault_state = match self.props.actors.key_holder.ask(GetState {}).await {
|
||||
Ok(state) => state,
|
||||
Err(err) => {
|
||||
error!(?err, actor = "useragent", "keyholder.query.failed");
|
||||
return Err(Error::internal("Vault is in broken state"));
|
||||
}
|
||||
};
|
||||
|
||||
Ok(vault_state)
|
||||
}
|
||||
}
|
||||
|
||||
#[messages]
impl UserAgentSession {
    /// Ask the EVM actor to generate a fresh wallet; returns its DB id and
    /// address.
    #[message]
    pub(crate) async fn handle_evm_wallet_create(&mut self) -> Result<(i32, Address), Error> {
        match self.props.actors.evm.ask(Generate {}).await {
            Ok(address) => Ok(address),
            // Handler-level failure: include the EVM actor's message.
            Err(SendError::HandlerError(err)) => Err(Error::internal(format!(
                "EVM wallet generation failed: {err}"
            ))),
            // Transport-level failure: actor unreachable.
            Err(err) => {
                error!(?err, "EVM actor unreachable during wallet create");
                Err(Error::internal("EVM actor unreachable"))
            }
        }
    }

    /// List all wallets known to the EVM actor as (id, address) pairs.
    #[message]
    pub(crate) async fn handle_evm_wallet_list(&mut self) -> Result<Vec<(i32, Address)>, Error> {
        match self.props.actors.evm.ask(ListWallets {}).await {
            Ok(wallets) => Ok(wallets),
            Err(err) => {
                error!(?err, "EVM wallet list failed");
                Err(Error::internal("Failed to list EVM wallets"))
            }
        }
    }
}
|
||||
|
||||
#[messages]
impl UserAgentSession {
    /// List every grant via the EVM actor.
    #[message]
    pub(crate) async fn handle_grant_list(&mut self) -> Result<Vec<Grant<SpecificGrant>>, Error> {
        match self.props.actors.evm.ask(UseragentListGrants {}).await {
            Ok(grants) => Ok(grants),
            Err(err) => {
                error!(?err, "EVM grant list failed");
                Err(Error::internal("Failed to list EVM grants"))
            }
        }
    }

    /// Create a grant from the shared settings plus the grant-specific part;
    /// returns the new grant's id.
    #[message]
    pub(crate) async fn handle_grant_create(
        &mut self,
        basic: crate::evm::policies::SharedGrantSettings,
        grant: crate::evm::policies::SpecificGrant,
    ) -> Result<i32, Error> {
        match self
            .props
            .actors
            .evm
            .ask(UseragentCreateGrant { basic, grant })
            .await
        {
            Ok(grant_id) => Ok(grant_id),
            Err(err) => {
                error!(?err, "EVM grant create failed");
                Err(Error::internal("Failed to create EVM grant"))
            }
        }
    }

    /// Delete the grant with the given id via the EVM actor.
    #[message]
    pub(crate) async fn handle_grant_delete(&mut self, grant_id: i32) -> Result<(), Error> {
        match self
            .props
            .actors
            .evm
            .ask(UseragentDeleteGrant { grant_id })
            .await
        {
            Ok(()) => Ok(()),
            Err(err) => {
                error!(?err, "EVM grant delete failed");
                Err(Error::internal("Failed to delete EVM grant"))
            }
        }
    }

    /// Insert wallet-access rows in one transaction; duplicates are ignored
    /// via `on_conflict_do_nothing`, so the call is idempotent per entry.
    #[message]
    pub(crate) async fn handle_grant_evm_wallet_access(
        &mut self,
        entries: Vec<NewEvmWalletAccess>,
    ) -> Result<(), Error> {
        let mut conn = self.props.db.get().await?;
        conn.transaction(|conn| {
            Box::pin(async move {
                use crate::db::schema::evm_wallet_access;

                for entry in entries {
                    diesel::insert_into(evm_wallet_access::table)
                        .values(&entry)
                        .on_conflict_do_nothing()
                        .execute(conn)
                        .await?;
                }

                Result::<_, Error>::Ok(())
            })
        })
        .await?;
        Ok(())
    }

    /// Remove wallet-access rows in one transaction.
    ///
    /// NOTE(review): each entry is matched against `wallet_id`, so one entry
    /// deletes ALL access rows for that wallet — confirm the parameter is
    /// meant to be wallet ids rather than access-row ids.
    #[message]
    pub(crate) async fn handle_revoke_evm_wallet_access(
        &mut self,
        entries: Vec<i32>,
    ) -> Result<(), Error> {
        let mut conn = self.props.db.get().await?;
        conn.transaction(|conn| {
            Box::pin(async move {
                use crate::db::schema::evm_wallet_access;
                for entry in entries {
                    diesel::delete(evm_wallet_access::table)
                        .filter(evm_wallet_access::wallet_id.eq(entry))
                        .execute(conn)
                        .await?;
                }

                Result::<_, Error>::Ok(())
            })
        })
        .await?;
        Ok(())
    }

    /// Load every wallet-access row from the database.
    #[message]
    pub(crate) async fn handle_list_wallet_access(
        &mut self,
    ) -> Result<Vec<EvmWalletAccess>, Error> {
        let mut conn = self.props.db.get().await?;
        use crate::db::schema::evm_wallet_access;
        let access_entries = evm_wallet_access::table
            .select(EvmWalletAccess::as_select())
            .load::<_>(&mut conn)
            .await?;
        Ok(access_entries)
    }
}
|
||||
|
||||
#[messages]
impl UserAgentSession {
    /// Deliver the user agent's verdict on a pending client connection to
    /// the approval controller, then unlink from it (its death no longer
    /// concerns this session once answered).
    #[message(ctx)]
    pub(crate) async fn handle_new_client_approve(
        &mut self,
        approved: bool,
        pubkey: ed25519_dalek::VerifyingKey,
        ctx: &mut Context<Self, Result<(), Error>>,
    ) -> Result<(), Error> {
        let pending_approval = match self.pending_client_approvals.remove(&pubkey) {
            Some(approval) => approval,
            None => {
                error!("Received client connection response for unknown client");
                return Err(Error::internal("Unknown client in connection response"));
            }
        };

        pending_approval
            .controller
            .tell(ClientApprovalAnswer { approved })
            .await
            .map_err(|err| {
                error!(
                    ?err,
                    "Failed to send client approval response to controller"
                );
                Error::internal("Failed to send client approval response to controller")
            })?;

        ctx.actor_ref().unlink(&pending_approval.controller).await;

        Ok(())
    }

    /// List all registered SDK clients joined with their metadata.
    #[message]
    pub(crate) async fn handle_sdk_client_list(
        &mut self,
    ) -> Result<Vec<(ProgramClient, ProgramClientMetadata)>, Error> {
        use crate::db::schema::{client_metadata, program_client};
        let mut conn = self.props.db.get().await?;

        let clients = program_client::table
            .inner_join(client_metadata::table)
            .select((
                ProgramClient::as_select(),
                ProgramClientMetadata::as_select(),
            ))
            .load::<(ProgramClient, ProgramClientMetadata)>(&mut conn)
            .await?;

        Ok(clients)
    }
}
|
||||
@@ -0,0 +1,27 @@
|
||||
use std::sync::Mutex;
|
||||
|
||||
use x25519_dalek::{EphemeralSecret, PublicKey};
|
||||
|
||||
/// Data carried by the `WaitingForUnsealKey` state: the client's X25519
/// public key and our ephemeral secret.
pub struct UnsealContext {
    pub client_public_key: PublicKey,
    // Option-in-Mutex so the secret can be moved out exactly once from a
    // shared-reference context.
    pub secret: Mutex<Option<EphemeralSecret>>,
}
|
||||
|
||||
// User-agent unseal lifecycle:
//   Idle --UnsealRequest--> WaitingForUnsealKey (context carries keypair)
//   WaitingForUnsealKey --ReceivedValidKey--> Unsealed
//   WaitingForUnsealKey --ReceivedInvalidKey--> Idle (retry allowed)
smlang::statemachine!(
    name: UserAgent,
    custom_error: false,
    transitions: {
        *Idle + UnsealRequest(UnsealContext) / generate_temp_keypair = WaitingForUnsealKey(UnsealContext),
        WaitingForUnsealKey(UnsealContext) + ReceivedValidKey = Unsealed,
        WaitingForUnsealKey(UnsealContext) + ReceivedInvalidKey = Idle,
    }
);
|
||||
|
||||
/// No-op state machine context: the `generate_temp_keypair` action simply
/// forwards the event data into the new state (the keypair itself is created
/// by the caller before raising the event).
pub struct DummyContext;
impl UserAgentStateMachineContext for DummyContext {
    #[allow(missing_docs)]
    #[allow(clippy::unused_unit)]
    fn generate_temp_keypair(&mut self, event_data: UnsealContext) -> Result<UnsealContext, ()> {
        Ok(event_data)
    }
}
|
||||
@@ -1,95 +0,0 @@
|
||||
use super::UserAgentActor;
|
||||
use arbiter_proto::proto::{
|
||||
UserAgentRequest, UserAgentResponse,
|
||||
auth::{
|
||||
self, AuthChallenge, AuthChallengeRequest, AuthOk, ClientMessage,
|
||||
ServerMessage as AuthServerMessage, client_message::Payload as ClientAuthPayload,
|
||||
server_message::Payload as ServerAuthPayload,
|
||||
},
|
||||
user_agent_request::Payload as UserAgentRequestPayload,
|
||||
user_agent_response::Payload as UserAgentResponsePayload,
|
||||
};
|
||||
use futures::StreamExt;
|
||||
use kameo::{
|
||||
actor::{ActorRef, Spawn as _},
|
||||
error::SendError,
|
||||
};
|
||||
use tokio::sync::mpsc;
|
||||
use tonic::Status;
|
||||
use tracing::error;
|
||||
|
||||
use crate::{
|
||||
actors::user_agent::{HandleAuthChallengeRequest, HandleAuthChallengeSolution},
|
||||
context::ServerContext,
|
||||
};
|
||||
|
||||
/// Drive one user-agent gRPC stream: spawn the session actor, forward each
/// incoming request, and push responses (or a terminal error status) back to
/// the client. The actor is killed when the stream ends.
pub(crate) async fn handle_user_agent(
    context: ServerContext,
    mut req_stream: tonic::Streaming<UserAgentRequest>,
    tx: mpsc::Sender<Result<UserAgentResponse, Status>>,
) {
    let actor = UserAgentActor::spawn(UserAgentActor::new(context, tx.clone()));

    // Stop pumping as soon as either the stream errors/closes or the actor
    // has died.
    while let Some(Ok(req)) = req_stream.next().await
        && actor.is_alive()
    {
        match process_message(&actor, req).await {
            Ok(resp) => {
                if tx.send(Ok(resp)).await.is_err() {
                    error!(actor = "useragent", "Failed to send response to client");
                    break;
                }
            }
            Err(status) => {
                // A status terminates the stream; the send result is
                // irrelevant since we break either way.
                let _ = tx.send(Err(status)).await;
                break;
            }
        }
    }

    actor.kill();
}
|
||||
|
||||
/// Validate and dispatch one user-agent request during authentication: only
/// `AuthMessage` payloads are accepted here; each variant is forwarded to
/// the session actor.
async fn process_message(
    actor: &ActorRef<UserAgentActor>,
    req: UserAgentRequest,
) -> Result<UserAgentResponse, Status> {
    let msg = req.payload.ok_or_else(|| {
        error!(actor = "useragent", "Received message with no payload");
        Status::invalid_argument("Expected message with payload")
    })?;

    let UserAgentRequestPayload::AuthMessage(ClientMessage {
        payload: Some(client_message),
    }) = msg
    else {
        error!(
            actor = "useragent",
            "Received unexpected message type during authentication"
        );
        return Err(Status::invalid_argument(
            "Expected AuthMessage with ClientMessage payload",
        ));
    };

    match client_message {
        ClientAuthPayload::AuthChallengeRequest(req) => actor
            .ask(HandleAuthChallengeRequest { req })
            .await
            .map_err(into_status),
        ClientAuthPayload::AuthChallengeSolution(solution) => actor
            .ask(HandleAuthChallengeSolution { solution })
            .await
            .map_err(into_status),
    }
}
|
||||
|
||||
fn into_status<M>(e: SendError<M, Status>) -> Status {
|
||||
match e {
|
||||
SendError::HandlerError(status) => status,
|
||||
_ => {
|
||||
error!(actor = "useragent", "Failed to send message to actor");
|
||||
Status::internal("session failure")
|
||||
}
|
||||
}
|
||||
}
|
||||
72
server/crates/arbiter-server/src/cli.rs
Normal file
72
server/crates/arbiter-server/src/cli.rs
Normal file
@@ -0,0 +1,72 @@
|
||||
use std::{
|
||||
net::{Ipv4Addr, SocketAddr, SocketAddrV4},
|
||||
path::PathBuf,
|
||||
};
|
||||
|
||||
use clap::{Args, Parser, Subcommand};
|
||||
|
||||
/// Default bind address: loopback IPv4 on the proto crate's default port.
const DEFAULT_LISTEN_ADDR: SocketAddr = SocketAddr::V4(SocketAddrV4::new(
    Ipv4Addr::LOCALHOST,
    arbiter_proto::DEFAULT_SERVER_PORT,
));
|
||||
|
||||
/// Top-level CLI; `command` is optional so bare invocation can fall back to
/// a default behavior chosen by the caller.
#[derive(Debug, Parser)]
#[command(name = "arbiter-server")]
#[command(about = "Arbiter gRPC server")]
pub struct Cli {
    #[command(subcommand)]
    pub command: Option<Command>,
}
|
||||
|
||||
/// Top-level subcommands.
#[derive(Debug, Subcommand)]
pub enum Command {
    /// Run server in foreground mode.
    Run(RunArgs),
    /// Manage service lifecycle.
    Service {
        #[command(subcommand)]
        command: ServiceCommand,
    },
}
|
||||
|
||||
/// Arguments for foreground `run`.
#[derive(Debug, Clone, Args)]
pub struct RunArgs {
    /// Address to bind the gRPC listener to.
    #[arg(long, default_value_t = DEFAULT_LISTEN_ADDR)]
    pub listen_addr: SocketAddr,
    /// Data directory override; `None` lets the server pick its default.
    #[arg(long)]
    pub data_dir: Option<PathBuf>,
}
|
||||
|
||||
// Manual impl because `SocketAddr` has no `Default`; mirrors the clap
// defaults so programmatic construction matches CLI parsing.
impl Default for RunArgs {
    fn default() -> Self {
        Self {
            listen_addr: DEFAULT_LISTEN_ADDR,
            data_dir: None,
        }
    }
}
|
||||
|
||||
/// Service-lifecycle subcommands (Windows SCM integration).
#[derive(Debug, Subcommand)]
pub enum ServiceCommand {
    /// Install Windows service in Service Control Manager.
    Install(ServiceInstallArgs),
    /// Internal service entrypoint. SCM only.
    #[command(hide = true)]
    Run(ServiceRunArgs),
}
|
||||
|
||||
/// Arguments for `service install`.
#[derive(Debug, Clone, Args)]
pub struct ServiceInstallArgs {
    /// Start the service immediately after installation.
    #[arg(long)]
    pub start: bool,
    /// Data directory recorded in the service configuration.
    #[arg(long)]
    pub data_dir: Option<PathBuf>,
}
|
||||
|
||||
/// Arguments for the hidden `service run` entrypoint invoked by the SCM.
#[derive(Debug, Clone, Args)]
pub struct ServiceRunArgs {
    /// Address to bind the gRPC listener to.
    #[arg(long, default_value_t = DEFAULT_LISTEN_ADDR)]
    pub listen_addr: SocketAddr,
    /// Data directory override.
    #[arg(long)]
    pub data_dir: Option<PathBuf>,
}
|
||||
@@ -1,402 +0,0 @@
|
||||
use std::collections::HashSet;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use diesel::OptionalExtension as _;
|
||||
use diesel_async::RunQueryDsl as _;
|
||||
use ed25519_dalek::VerifyingKey;
|
||||
use kameo::actor::{ActorRef, Spawn};
|
||||
use miette::Diagnostic;
|
||||
use rand::rngs::StdRng;
|
||||
use secrecy::{ExposeSecret, SecretBox};
|
||||
use smlang::statemachine;
|
||||
use thiserror::Error;
|
||||
use tokio::sync::{watch, RwLock};
|
||||
use zeroize::Zeroizing;
|
||||
|
||||
use crate::{
|
||||
context::{
|
||||
bootstrap::{BootstrapActor, generate_token},
|
||||
lease::LeaseHandler,
|
||||
tls::{RotationState, RotationTask, TlsDataRaw, TlsManager},
|
||||
},
|
||||
db::{
|
||||
self,
|
||||
models::ArbiterSetting,
|
||||
schema::{self, arbiter_settings},
|
||||
},
|
||||
};
|
||||
|
||||
pub(crate) mod bootstrap;
|
||||
pub(crate) mod lease;
|
||||
pub(crate) mod tls;
|
||||
|
||||
/// Errors that can occur while building the server context at startup.
#[derive(Error, Debug, Diagnostic)]
pub enum InitError {
    #[error("Database setup failed: {0}")]
    #[diagnostic(code(arbiter_server::init::database_setup))]
    DatabaseSetup(#[from] db::DatabaseSetupError),

    #[error("Connection acquire failed: {0}")]
    #[diagnostic(code(arbiter_server::init::database_pool))]
    DatabasePool(#[from] db::PoolError),

    #[error("Database query error: {0}")]
    #[diagnostic(code(arbiter_server::init::database_query))]
    DatabaseQuery(#[from] diesel::result::Error),

    #[error("TLS initialization failed: {0}")]
    #[diagnostic(code(arbiter_server::init::tls_init))]
    Tls(#[from] tls::TlsInitError),

    #[error("Bootstrap token generation failed: {0}")]
    #[diagnostic(code(arbiter_server::init::bootstrap_token))]
    BootstrapToken(#[from] bootstrap::BootstrapError),

    #[error("I/O Error: {0}")]
    #[diagnostic(code(arbiter_server::init::io))]
    Io(#[from] std::io::Error),
}
|
||||
|
||||
/// Errors that can occur while unsealing the server's root key.
#[derive(Error, Debug, Diagnostic)]
pub enum UnsealError {
    #[error("Database error: {0}")]
    #[diagnostic(code(arbiter_server::unseal::database_pool))]
    Database(#[from] db::PoolError),

    #[error("Query error: {0}")]
    #[diagnostic(code(arbiter_server::unseal::database_query))]
    Query(#[from] diesel::result::Error),

    #[error("Decryption failed: {0}")]
    #[diagnostic(code(arbiter_server::unseal::decryption))]
    DecryptionFailed(#[from] crate::crypto::CryptoError),

    /// Unseal attempted while the state machine was not in `Sealed`.
    #[error("Invalid state for unseal")]
    #[diagnostic(code(arbiter_server::unseal::invalid_state))]
    InvalidState,

    #[error("Missing salt in database")]
    #[diagnostic(code(arbiter_server::unseal::missing_salt))]
    MissingSalt,

    #[error("No root key configured in database")]
    #[diagnostic(code(arbiter_server::unseal::no_root_key))]
    NoRootKey,
}
|
||||
|
||||
/// Errors that can occur while sealing the server.
#[derive(Error, Debug, Diagnostic)]
pub enum SealError {
    /// Seal attempted while the state machine was not in `Ready`.
    #[error("Invalid state for seal")]
    #[diagnostic(code(arbiter_server::seal::invalid_state))]
    InvalidState,
}
|
||||
|
||||
/// Secure in-memory storage for the root encryption key.
///
/// Uses the `secrecy` crate for automatic zeroization on drop so key
/// material does not linger in memory after use. `SecretBox` provides
/// heap-allocated secret storage that is `Send + Sync`, making it safe to
/// hold in async contexts.
pub struct KeyStorage {
    /// 32-byte root key protected by `SecretBox`.
    key: SecretBox<[u8; 32]>,
}
|
||||
|
||||
impl KeyStorage {
    /// Create new `KeyStorage` from a 32-byte root key.
    /// Note: the caller's copy of `key` is moved in; `SecretBox` zeroizes
    /// it on drop.
    pub fn new(key: [u8; 32]) -> Self {
        Self {
            key: SecretBox::new(Box::new(key)),
        }
    }

    /// Borrow the key for cryptographic operations; callers must not copy
    /// it into long-lived buffers.
    pub fn key(&self) -> &[u8; 32] {
        self.key.expose_secret()
    }
}
|
||||
|
||||
// Drop is implemented automatically via secrecy's Zeroize,
// which zeroes the memory when it is released.
|
||||
|
||||
// Server lifecycle:
//   NotBootstrapped --Bootstrapped--> Sealed
//   Sealed --Unsealed(KeyStorage)--> Ready(KeyStorage)  (key moved into state)
//   Ready --Sealed--> Sealed                            (key dropped/zeroized)
statemachine! {
    name: Server,
    transitions: {
        *NotBootstrapped + Bootstrapped = Sealed,
        Sealed + Unsealed(KeyStorage) / move_key = Ready(KeyStorage),
        Ready(KeyStorage) + Sealed / dispose_key = Sealed,
    }
}
|
||||
/// State machine context for the server lifecycle actions.
pub struct _Context;
impl ServerStateMachineContext for _Context {
    /// Move key from unseal event into Ready state
    fn move_key(&mut self, event_data: KeyStorage) -> Result<KeyStorage, ()> {
        // Simply move the KeyStorage from the event into the state —
        // no cloning; the event data is consumed.
        Ok(event_data)
    }

    /// Securely dispose of key when sealing
    #[allow(missing_docs)]
    #[allow(clippy::unused_unit)]
    fn dispose_key(&mut self, _state_data: &KeyStorage) -> Result<(), ()> {
        // The KeyStorage is dropped after the state transition;
        // secrecy's Zeroize zeroes the memory automatically.
        Ok(())
    }
}
|
||||
|
||||
/// Inner, reference-counted payload of `ServerContext`; shared by every
/// request handler.
pub(crate) struct _ServerContextInner {
    pub db: db::DatabasePool,
    // Lifecycle state machine guarded by an async RwLock.
    pub state: RwLock<ServerStateMachine<_Context>>,
    pub rng: StdRng,
    pub tls: Arc<TlsManager>,
    pub bootstrapper: ActorRef<BootstrapActor>,
    pub rotation_state: RwLock<RotationState>,
    // Pubkeys of peers that acknowledged the current TLS rotation.
    pub rotation_acks: Arc<RwLock<HashSet<VerifyingKey>>>,
    pub user_agent_leases: LeaseHandler<VerifyingKey>,
    pub client_leases: LeaseHandler<VerifyingKey>,
}
|
||||
/// Cheaply clonable handle to the shared server state (`Arc` newtype).
#[derive(Clone)]
pub(crate) struct ServerContext(Arc<_ServerContextInner>);
|
||||
|
||||
impl std::ops::Deref for ServerContext {
    type Target = _ServerContextInner;

    // Delegate field access to the shared inner state.
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
|
||||
|
||||
impl ServerContext {
|
||||
/// Check if all active clients have acknowledged the rotation
///
/// NOTE(review): currently a stub that always reports "not ready";
/// see the TODO below.
pub async fn check_rotation_ready(&self) -> bool {
    // TODO: Implement proper rotation readiness check
    // For now, return false as placeholder
    false
}
|
||||
|
||||
async fn load_tls(
|
||||
db: &db::DatabasePool,
|
||||
settings: Option<&ArbiterSetting>,
|
||||
) -> Result<TlsManager, InitError> {
|
||||
match settings {
|
||||
Some(s) if s.current_cert_id.is_some() => {
|
||||
// Load active certificate from tls_certificates table
|
||||
Ok(TlsManager::load_from_db(
|
||||
db.clone(),
|
||||
s.current_cert_id.unwrap(),
|
||||
)
|
||||
.await?)
|
||||
}
|
||||
Some(s) => {
|
||||
// Legacy migration: extract validity and save to new table
|
||||
let tls_data_raw = TlsDataRaw {
|
||||
cert: s.cert.clone(),
|
||||
key: s.cert_key.clone(),
|
||||
};
|
||||
|
||||
// For legacy certificates, use current time as not_before
|
||||
// and current time + 90 days as not_after
|
||||
let not_before = chrono::Utc::now().timestamp();
|
||||
let not_after = not_before + (90 * 24 * 60 * 60); // 90 days
|
||||
|
||||
Ok(TlsManager::new_from_legacy(
|
||||
db.clone(),
|
||||
tls_data_raw,
|
||||
not_before,
|
||||
not_after,
|
||||
)
|
||||
.await?)
|
||||
}
|
||||
None => {
|
||||
// First startup - generate new certificate
|
||||
Ok(TlsManager::new(db.clone()).await?)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn new(db: db::DatabasePool) -> Result<Self, InitError> {
|
||||
let mut conn = db.get().await?;
|
||||
let rng = rand::make_rng();
|
||||
|
||||
let settings = arbiter_settings::table
|
||||
.first::<ArbiterSetting>(&mut conn)
|
||||
.await
|
||||
.optional()?;
|
||||
|
||||
drop(conn);
|
||||
|
||||
// Load TLS manager
|
||||
let tls = Self::load_tls(&db, settings.as_ref()).await?;
|
||||
|
||||
// Load rotation state from database
|
||||
let rotation_state = RotationState::load_from_db(&db)
|
||||
.await
|
||||
.unwrap_or(RotationState::Normal);
|
||||
|
||||
let bootstrap_token = generate_token().await?;
|
||||
|
||||
let mut state = ServerStateMachine::new(_Context);
|
||||
|
||||
if let Some(settings) = &settings
|
||||
&& settings.root_key_id.is_some()
|
||||
{
|
||||
// TODO: pass the encrypted root key to the state machine and let it handle decryption and transition to Sealed
|
||||
let _ = state.process_event(ServerEvents::Bootstrapped);
|
||||
}
|
||||
|
||||
// Create shutdown channel for rotation task
|
||||
let (rotation_shutdown_tx, rotation_shutdown_rx) = watch::channel(false);
|
||||
|
||||
// Initialize bootstrap actor
|
||||
let bootstrapper = BootstrapActor::spawn(BootstrapActor::new(&db).await?);
|
||||
|
||||
let context = Arc::new(_ServerContextInner {
|
||||
db: db.clone(),
|
||||
rng,
|
||||
tls: Arc::new(tls),
|
||||
state: RwLock::new(state),
|
||||
bootstrapper,
|
||||
rotation_state: RwLock::new(rotation_state),
|
||||
rotation_acks: Arc::new(RwLock::new(HashSet::new())),
|
||||
user_agent_leases: Default::default(),
|
||||
client_leases: Default::default(),
|
||||
});
|
||||
|
||||
Ok(Self(context))
|
||||
}
|
||||
|
||||
/// Unseal vault with password
///
/// Loads the encrypted root key referenced by the settings row, decrypts it
/// with a key derived from `password` and the stored `argon2_salt`, and
/// drives the state machine from `Sealed` into `Ready`.
pub async fn unseal(&self, password: &str) -> Result<(), UnsealError> {
    use crate::crypto::root_key;
    use diesel::QueryDsl as _;

    // 1. Get root_key_id from settings
    let mut conn = self.db.get().await?;

    let settings: db::models::ArbiterSetting = schema::arbiter_settings::table
        .first(&mut conn)
        .await?;

    let root_key_id = settings.root_key_id.ok_or(UnsealError::NoRootKey)?;

    // 2. Load encrypted root key
    let encrypted: db::models::AeadEncrypted = schema::aead_encrypted::table
        .find(root_key_id)
        .first(&mut conn)
        .await?;

    let salt = encrypted
        .argon2_salt
        .as_ref()
        .ok_or(UnsealError::MissingSalt)?;

    // Return the connection to the pool before the expensive KDF/decrypt work.
    drop(conn);

    // 3. Decrypt root key using password
    let root_key = root_key::decrypt_root_key(&encrypted, password, salt)
        .map_err(UnsealError::DecryptionFailed)?;

    // 4. Create secure storage
    let key_storage = KeyStorage::new(root_key);

    // 5. Transition state machine
    let mut state = self.state.write().await;
    state
        .process_event(ServerEvents::Unsealed(key_storage))
        .map_err(|_| UnsealError::InvalidState)?;

    Ok(())
}
|
||||
|
||||
/// Seal the server (lock the key)
|
||||
pub async fn seal(&self) -> Result<(), SealError> {
|
||||
let mut state = self.state.write().await;
|
||||
state
|
||||
.process_event(ServerEvents::Sealed)
|
||||
.map_err(|_| SealError::InvalidState)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_keystorage_creation() {
        let key = [42u8; 32];
        let storage = KeyStorage::new(key);
        assert_eq!(storage.key()[0], 42);
        assert_eq!(storage.key().len(), 32);
    }

    #[test]
    fn test_keystorage_zeroization() {
        let key = [99u8; 32];
        {
            let _storage = KeyStorage::new(key);
            // storage is dropped here
        }
        // After the drop the SecretBox must zeroize its memory.
        // This is guaranteed by secrecy::Zeroize.
    }

    #[test]
    fn test_state_machine_transitions() {
        let mut state = ServerStateMachine::new(_Context);

        // Initial state
        assert!(matches!(state.state(), &ServerStates::NotBootstrapped));

        // Bootstrapped transition
        state.process_event(ServerEvents::Bootstrapped).unwrap();
        assert!(matches!(state.state(), &ServerStates::Sealed));

        // Unsealed transition
        let key_storage = KeyStorage::new([1u8; 32]);
        state
            .process_event(ServerEvents::Unsealed(key_storage))
            .unwrap();
        assert!(matches!(state.state(), &ServerStates::Ready(_)));

        // Sealed transition
        state.process_event(ServerEvents::Sealed).unwrap();
        assert!(matches!(state.state(), &ServerStates::Sealed));
    }

    #[test]
    fn test_move_key_callback() {
        let mut ctx = _Context;
        let key_storage = KeyStorage::new([7u8; 32]);
        let result = ctx.move_key(key_storage);
        assert!(result.is_ok());
        assert_eq!(result.unwrap().key()[0], 7);
    }

    #[test]
    fn test_dispose_key_callback() {
        let mut ctx = _Context;
        let key_storage = KeyStorage::new([13u8; 32]);
        let result = ctx.dispose_key(&key_storage);
        assert!(result.is_ok());
    }

    #[test]
    fn test_invalid_state_transitions() {
        let mut state = ServerStateMachine::new(_Context);

        // Attempting to unseal before bootstrap must fail.
        let key_storage = KeyStorage::new([1u8; 32]);
        let result = state.process_event(ServerEvents::Unsealed(key_storage));
        assert!(result.is_err());

        // The valid path.
        state.process_event(ServerEvents::Bootstrapped).unwrap();

        // A repeated bootstrap must fail.
        let result = state.process_event(ServerEvents::Bootstrapped);
        assert!(result.is_err());
    }
}
|
||||
@@ -1,46 +0,0 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use dashmap::DashSet;
|
||||
|
||||
// Shared, thread-safe set of currently leased items.
#[derive(Clone, Default)]
struct LeaseStorage<T: Eq + std::hash::Hash>(Arc<DashSet<T>>);

// A lease that automatically releases the item when dropped
pub struct Lease<T: Clone + std::hash::Hash + Eq> {
    // The leased item; removed from `storage` on drop.
    item: T,
    storage: LeaseStorage<T>,
}
impl<T: Clone + std::hash::Hash + Eq> Drop for Lease<T> {
    fn drop(&mut self) {
        // Release the item so it can be leased again.
        self.storage.0.remove(&self.item);
    }
}

/// Hands out at most one `Lease` per distinct item.
#[derive(Clone, Default)]
pub struct LeaseHandler<T: Clone + std::hash::Hash + Eq> {
    storage: LeaseStorage<T>,
}
|
||||
|
||||
impl<T: Clone + std::hash::Hash + Eq> LeaseHandler<T> {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
storage: LeaseStorage(Arc::new(DashSet::new())),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn acquire(&self, item: T) -> Result<Lease<T>, ()> {
|
||||
if self.storage.0.insert(item.clone()) {
|
||||
Ok(Lease {
|
||||
item,
|
||||
storage: self.storage.clone(),
|
||||
})
|
||||
} else {
|
||||
Err(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Get all currently leased items
|
||||
pub fn get_all(&self) -> Vec<T> {
|
||||
self.storage.0.iter().map(|entry| entry.clone()).collect()
|
||||
}
|
||||
}
|
||||
65
server/crates/arbiter-server/src/context/mod.rs
Normal file
65
server/crates/arbiter-server/src/context/mod.rs
Normal file
@@ -0,0 +1,65 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use miette::Diagnostic;
|
||||
use thiserror::Error;
|
||||
|
||||
use crate::{
|
||||
actors::GlobalActors,
|
||||
context::tls::TlsManager,
|
||||
db::{self},
|
||||
};
|
||||
|
||||
pub mod tls;
|
||||
|
||||
/// Errors that can occur while constructing the server context.
#[derive(Error, Debug, Diagnostic)]
pub enum InitError {
    /// Database schema/setup failed.
    #[error("Database setup failed: {0}")]
    #[diagnostic(code(arbiter_server::init::database_setup))]
    DatabaseSetup(#[from] db::DatabaseSetupError),

    /// Could not check a connection out of the pool.
    #[error("Connection acquire failed: {0}")]
    #[diagnostic(code(arbiter_server::init::database_pool))]
    DatabasePool(#[from] db::PoolError),

    /// A query during initialization failed.
    #[error("Database query error: {0}")]
    #[diagnostic(code(arbiter_server::init::database_query))]
    DatabaseQuery(#[from] diesel::result::Error),

    /// The TLS manager could not be built.
    #[error("TLS initialization failed: {0}")]
    #[diagnostic(code(arbiter_server::init::tls_init))]
    Tls(#[from] tls::InitError),

    /// A global actor failed to start.
    #[error("Actor spawn failed: {0}")]
    #[diagnostic(code(arbiter_server::init::actor_spawn))]
    ActorSpawn(#[from] crate::actors::SpawnError),

    /// Underlying I/O failure.
    #[error("I/O Error: {0}")]
    #[diagnostic(code(arbiter_server::init::io))]
    Io(#[from] std::io::Error),
}
|
||||
|
||||
/// Shared server state; accessed through the `ServerContext` handle.
pub struct _ServerContextInner {
    // Database connection pool.
    pub db: db::DatabasePool,
    // TLS certificate manager.
    pub tls: TlsManager,
    // Handles to the globally spawned actors.
    pub actors: GlobalActors,
}
/// Cheaply cloneable handle (an `Arc`) to the shared server state.
#[derive(Clone)]
pub struct ServerContext(Arc<_ServerContextInner>);
|
||||
|
||||
impl std::ops::Deref for ServerContext {
    type Target = _ServerContextInner;

    // Delegate field access to the shared inner state.
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
|
||||
|
||||
impl ServerContext {
|
||||
pub async fn new(db: db::DatabasePool) -> Result<Self, InitError> {
|
||||
Ok(Self(Arc::new(_ServerContextInner {
|
||||
actors: GlobalActors::spawn(db.clone()).await?,
|
||||
tls: TlsManager::new(db.clone()).await?,
|
||||
db,
|
||||
})))
|
||||
}
|
||||
}
|
||||
258
server/crates/arbiter-server/src/context/tls.rs
Normal file
258
server/crates/arbiter-server/src/context/tls.rs
Normal file
@@ -0,0 +1,258 @@
|
||||
use std::{net::IpAddr, string::FromUtf8Error};
|
||||
|
||||
use diesel::{ExpressionMethods as _, QueryDsl, SelectableHelper as _};
|
||||
use diesel_async::{AsyncConnection, RunQueryDsl};
|
||||
use miette::Diagnostic;
|
||||
use pem::Pem;
|
||||
use rcgen::{
|
||||
BasicConstraints, Certificate, CertificateParams, CertifiedIssuer, DistinguishedName, DnType,
|
||||
IsCa, Issuer, KeyPair, KeyUsagePurpose, SanType,
|
||||
};
|
||||
use rustls::pki_types::pem::PemObject;
|
||||
use thiserror::Error;
|
||||
use tonic::transport::CertificateDer;
|
||||
|
||||
use crate::db::{
|
||||
self,
|
||||
models::{NewTlsHistory, TlsHistory},
|
||||
schema::{
|
||||
arbiter_settings,
|
||||
tls_history::{self},
|
||||
},
|
||||
};
|
||||
|
||||
/// PEM encoding configuration: CRLF line endings on Windows, LF elsewhere.
const ENCODE_CONFIG: pem::EncodeConfig = {
    let line_ending = if cfg!(target_family = "windows") {
        pem::LineEnding::CRLF
    } else {
        pem::LineEnding::LF
    };
    pem::EncodeConfig::new().set_line_ending(line_ending)
};
|
||||
|
||||
/// Errors that can occur while initializing TLS material.
#[derive(Error, Debug, Diagnostic)]
pub enum InitError {
    /// Key generation failed inside rcgen.
    #[error("Key generation error during TLS initialization: {0}")]
    #[diagnostic(code(arbiter_server::tls_init::key_generation))]
    KeyGeneration(#[from] rcgen::Error),

    /// Stored key bytes were not valid UTF-8.
    #[error("Key invalid format: {0}")]
    #[diagnostic(code(arbiter_server::tls_init::key_invalid_format))]
    KeyInvalidFormat(#[from] FromUtf8Error),

    /// Stored key PEM could not be parsed back into a key pair.
    #[error("Key deserialization error: {0}")]
    #[diagnostic(code(arbiter_server::tls_init::key_deserialization))]
    KeyDeserializationError(rcgen::Error),

    /// A database query failed during TLS setup.
    #[error("Database error during TLS initialization: {0}")]
    #[diagnostic(code(arbiter_server::tls_init::database_error))]
    DatabaseError(#[from] diesel::result::Error),

    /// Stored certificate PEM could not be parsed.
    #[error("Pem deserialization error during TLS initialization: {0}")]
    #[diagnostic(code(arbiter_server::tls_init::pem_deserialization))]
    PemDeserializationError(#[from] rustls::pki_types::pem::Error),

    /// Could not check a connection out of the pool.
    #[error("Database pool acquire error during TLS initialization: {0}")]
    #[diagnostic(code(arbiter_server::tls_init::database_pool_acquire))]
    DatabasePoolAcquire(#[from] db::PoolError),
}
|
||||
|
||||
pub type PemCert = String;
|
||||
|
||||
pub fn encode_cert_to_pem(cert: &CertificateDer) -> PemCert {
|
||||
pem::encode_config(&Pem::new("CERTIFICATE", cert.to_vec()), ENCODE_CONFIG)
|
||||
}
|
||||
|
||||
// PEM-serialized certificate/key pair.
#[allow(unused)]
struct SerializedTls {
    cert_pem: PemCert,
    cert_key_pem: String,
}

// Self-signed CA: an issuer handle for signing leaf certificates plus
// the CA's own DER certificate.
struct TlsCa {
    issuer: Issuer<'static, KeyPair>,
    cert: CertificateDer<'static>,
}
|
||||
|
||||
impl TlsCa {
    /// Generate a fresh self-signed CA ("Arbiter Instance CA") able to
    /// sign certificates and CRLs.
    fn generate() -> Result<Self, InitError> {
        let keypair = KeyPair::generate()?;
        let mut params = CertificateParams::new(["Arbiter Instance CA".into()])?;
        params.is_ca = IsCa::Ca(BasicConstraints::Unconstrained);
        params.key_usages = vec![
            KeyUsagePurpose::KeyCertSign,
            KeyUsagePurpose::CrlSign,
            KeyUsagePurpose::DigitalSignature,
        ];

        let mut dn = DistinguishedName::new();
        dn.push(DnType::CommonName, "Arbiter Instance CA");
        params.distinguished_name = dn;
        let certified_issuer = CertifiedIssuer::self_signed(params, keypair)?;

        let cert_key_pem = certified_issuer.key().serialize_pem();

        // Round-trip through PEM to obtain an `Issuer` that owns its key.
        #[allow(
            clippy::unwrap_used,
            reason = "Broken cert couldn't bootstrap server anyway"
        )]
        let issuer = Issuer::from_ca_cert_pem(
            &certified_issuer.pem(),
            KeyPair::from_pem(cert_key_pem.as_ref()).unwrap(),
        )
        .unwrap();

        Ok(Self {
            issuer,
            cert: certified_issuer.der().clone(),
        })
    }
    /// Issue a leaf certificate ("Arbiter Instance Leaf") signed by this CA.
    fn generate_leaf(&self) -> Result<TlsCert, InitError> {
        let cert_key = KeyPair::generate()?;
        let mut params = CertificateParams::new(["Arbiter Instance Leaf".into()])?;
        params.is_ca = IsCa::NoCa;
        params.key_usages = vec![
            KeyUsagePurpose::DigitalSignature,
            KeyUsagePurpose::KeyEncipherment,
        ];
        // NOTE(review): only 127.0.0.1 is listed as a SAN — confirm how
        // non-loopback clients are expected to validate this certificate.
        params
            .subject_alt_names
            .push(SanType::IpAddress(IpAddr::from([
                127, 0, 0, 1,
            ])));

        let mut dn = DistinguishedName::new();
        dn.push(DnType::CommonName, "Arbiter Instance Leaf");
        params.distinguished_name = dn;

        let new_cert = params.signed_by(&cert_key, &self.issuer)?;

        Ok(TlsCert {
            cert: new_cert,
            cert_key,
        })
    }

    /// Serialize the CA certificate and its key to PEM.
    #[allow(unused)]
    fn serialize(&self) -> Result<SerializedTls, InitError> {
        let cert_key_pem = self.issuer.key().serialize_pem();
        Ok(SerializedTls {
            cert_pem: encode_cert_to_pem(&self.cert),
            cert_key_pem,
        })
    }

    /// Rebuild a CA from previously serialized PEM strings.
    #[allow(unused)]
    fn try_deserialize(cert_pem: &str, cert_key_pem: &str) -> Result<Self, InitError> {
        let keypair =
            KeyPair::from_pem(cert_key_pem).map_err(InitError::KeyDeserializationError)?;
        let issuer = Issuer::from_ca_cert_pem(cert_pem, keypair)?;
        Ok(Self {
            issuer,
            cert: CertificateDer::from_pem_slice(cert_pem.as_bytes())?,
        })
    }
}
|
||||
|
||||
// A signed leaf certificate together with its private key.
struct TlsCert {
    cert: Certificate,
    cert_key: KeyPair,
}

// TODO: Implement cert rotation
/// Holds the active leaf certificate, its key, and the issuing CA cert.
pub struct TlsManager {
    // Active leaf certificate (DER).
    cert: CertificateDer<'static>,
    // Private key of the leaf certificate.
    keypair: KeyPair,
    // Certificate of the CA that signed the leaf (DER).
    ca_cert: CertificateDer<'static>,
    // Kept for future persistence work; currently unused.
    _db: db::DatabasePool,
}
|
||||
|
||||
impl TlsManager {
    /// Generate a new CA + leaf pair and persist both to the database.
    ///
    /// The insert into `tls_history` and the `arbiter_settings.tls_id`
    /// update run inside one transaction, so the settings row can never
    /// point at a half-written history row.
    pub async fn generate_new(db: &db::DatabasePool) -> Result<Self, InitError> {
        let ca = TlsCa::generate()?;
        let new_cert = ca.generate_leaf()?;

        {
            let mut conn = db.get().await?;
            conn.transaction(|conn| {
                Box::pin(async {
                    let new_tls_history = NewTlsHistory {
                        cert: new_cert.cert.pem(),
                        cert_key: new_cert.cert_key.serialize_pem(),
                        ca_cert: encode_cert_to_pem(&ca.cert),
                        ca_key: ca.issuer.key().serialize_pem(),
                    };

                    let inserted_tls_history: i32 = diesel::insert_into(tls_history::table)
                        .values(&new_tls_history)
                        .returning(tls_history::id)
                        .get_result(conn)
                        .await?;

                    diesel::update(arbiter_settings::table)
                        .set(arbiter_settings::tls_id.eq(inserted_tls_history))
                        .execute(conn)
                        .await?;

                    Result::<_, diesel::result::Error>::Ok(())
                })
            })
            .await?;
        }

        Ok(Self {
            cert: new_cert.cert.der().clone(),
            keypair: new_cert.cert_key,
            ca_cert: ca.cert,
            _db: db.clone(),
        })
    }

    /// Load the stored certificate chain, or generate a fresh one when it
    /// is missing or unreadable.
    pub async fn new(db: db::DatabasePool) -> Result<Self, InitError> {
        let cert_data: Option<TlsHistory> = {
            let mut conn = db.get().await?;
            arbiter_settings::table
                .left_join(tls_history::table)
                .select(Option::<TlsHistory>::as_select())
                .first(&mut conn)
                .await?
        };

        match cert_data {
            Some(data) => {
                // Parse all three PEM blobs; any failure falls back to
                // regenerating the whole chain.
                let try_load = || -> Result<_, Box<dyn std::error::Error>> {
                    let keypair = KeyPair::from_pem(&data.cert_key)?;
                    let cert = CertificateDer::from_pem_slice(data.cert.as_bytes())?;
                    let ca_cert = CertificateDer::from_pem_slice(data.ca_cert.as_bytes())?;
                    Ok(Self {
                        cert,
                        keypair,
                        ca_cert,
                        _db: db.clone(),
                    })
                };
                match try_load() {
                    Ok(manager) => Ok(manager),
                    Err(e) => {
                        // NOTE(review): consider tracing instead of eprintln!.
                        eprintln!("Failed to load existing TLS certs: {e}. Generating new ones.");
                        Self::generate_new(&db).await
                    }
                }
            }
            None => Self::generate_new(&db).await,
        }
    }

    /// Active leaf certificate in DER form.
    pub fn cert(&self) -> &CertificateDer<'static> {
        &self.cert
    }
    /// CA certificate in DER form.
    pub fn ca_cert(&self) -> &CertificateDer<'static> {
        &self.ca_cert
    }

    /// Active leaf certificate as PEM text.
    pub fn cert_pem(&self) -> PemCert {
        encode_cert_to_pem(&self.cert)
    }
    /// Leaf private key as PEM text.
    pub fn key_pem(&self) -> String {
        self.keypair.serialize_pem()
    }
}
|
||||
@@ -1,192 +0,0 @@
|
||||
use std::sync::Arc;
|
||||
use std::string::FromUtf8Error;
|
||||
|
||||
use miette::Diagnostic;
|
||||
use rcgen::{Certificate, KeyPair};
|
||||
use rustls::pki_types::CertificateDer;
|
||||
use thiserror::Error;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
use crate::db;
|
||||
|
||||
pub mod rotation;
|
||||
|
||||
pub use rotation::{RotationError, RotationState, RotationTask};
|
||||
|
||||
/// Errors raised while initializing (legacy) TLS material.
#[derive(Error, Debug, Diagnostic)]
#[expect(clippy::enum_variant_names)]
pub enum TlsInitError {
    /// Key generation failed inside rcgen.
    #[error("Key generation error during TLS initialization: {0}")]
    #[diagnostic(code(arbiter_server::tls_init::key_generation))]
    KeyGeneration(#[from] rcgen::Error),

    /// Stored key bytes were not valid UTF-8.
    #[error("Key invalid format: {0}")]
    #[diagnostic(code(arbiter_server::tls_init::key_invalid_format))]
    KeyInvalidFormat(#[from] FromUtf8Error),

    /// Stored key PEM could not be parsed back into a key pair.
    #[error("Key deserialization error: {0}")]
    #[diagnostic(code(arbiter_server::tls_init::key_deserialization))]
    KeyDeserializationError(rcgen::Error),
}
|
||||
|
||||
/// Parsed TLS material: DER certificate plus its key pair.
pub struct TlsData {
    pub cert: CertificateDer<'static>,
    pub keypair: KeyPair,
}

/// Raw serialized TLS material as produced by `TlsDataRaw::serialize`.
pub struct TlsDataRaw {
    // DER-encoded certificate bytes.
    pub cert: Vec<u8>,
    // PEM-encoded key as UTF-8 bytes.
    pub key: Vec<u8>,
}
|
||||
impl TlsDataRaw {
|
||||
pub fn serialize(cert: &TlsData) -> Self {
|
||||
Self {
|
||||
cert: cert.cert.as_ref().to_vec(),
|
||||
key: cert.keypair.serialize_pem().as_bytes().to_vec(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn deserialize(&self) -> Result<TlsData, TlsInitError> {
|
||||
let cert = CertificateDer::from_slice(&self.cert).into_owned();
|
||||
|
||||
let key =
|
||||
String::from_utf8(self.key.clone()).map_err(TlsInitError::KeyInvalidFormat)?;
|
||||
|
||||
let keypair = KeyPair::from_pem(&key).map_err(TlsInitError::KeyDeserializationError)?;
|
||||
|
||||
Ok(TlsData { cert, keypair })
|
||||
}
|
||||
}
|
||||
|
||||
/// Metadata about a certificate including validity period
pub struct CertificateMetadata {
    // Row id of the certificate (0 = not yet persisted, see `TlsManager::new`).
    pub cert_id: i32,
    // DER-encoded certificate.
    pub cert: CertificateDer<'static>,
    // Private key, shared via Arc so clones stay cheap.
    pub keypair: Arc<KeyPair>,
    // Validity window, unix timestamps (tracked separately from the cert).
    pub not_before: i64,
    pub not_after: i64,
    // Creation time, unix timestamp.
    pub created_at: i64,
}
|
||||
|
||||
/// Build a self-signed certificate for `arbiter.local`/`localhost` and
/// return it together with the tracked validity window
/// (now .. now + 90 days, as unix timestamps).
pub(crate) fn generate_cert(key: &KeyPair) -> Result<(Certificate, i64, i64), rcgen::Error> {
    let params = rcgen::CertificateParams::new(vec![
        "arbiter.local".to_string(),
        "localhost".to_string(),
    ])?;

    // Set validity period: 90 days from now
    let not_before = chrono::Utc::now();
    let not_after = not_before + chrono::Duration::days(90);

    // Note: rcgen doesn't directly expose not_before/not_after setting in all versions
    // For now, we'll generate the cert and track validity separately
    let cert = params.self_signed(key)?;

    Ok((cert, not_before.timestamp(), not_after.timestamp()))
}
|
||||
|
||||
// Certificate rotation enabled
/// TLS manager that supports atomically swapping the active certificate.
pub(crate) struct TlsManager {
    // Current active certificate (atomic replacement via RwLock)
    current_cert: Arc<RwLock<CertificateMetadata>>,

    // Database pool for persistence
    db: db::DatabasePool,
}
|
||||
|
||||
impl TlsManager {
    /// Create new TlsManager with a generated certificate
    pub async fn new(db: db::DatabasePool) -> Result<Self, TlsInitError> {
        let keypair = KeyPair::generate()?;
        let (cert, not_before, not_after) = generate_cert(&keypair)?;
        let cert_der = cert.der().clone();

        // For initial creation, cert_id will be set after DB insert
        let metadata = CertificateMetadata {
            cert_id: 0, // Temporary, will be updated after DB insert
            cert: cert_der,
            keypair: Arc::new(keypair),
            not_before,
            not_after,
            created_at: chrono::Utc::now().timestamp(),
        };

        Ok(Self {
            current_cert: Arc::new(RwLock::new(metadata)),
            db,
        })
    }

    /// Load TlsManager from database with specific certificate ID
    ///
    /// NOTE(review): stub — always fails until DB loading is implemented.
    pub async fn load_from_db(db: db::DatabasePool, cert_id: i32) -> Result<Self, TlsInitError> {
        // TODO: Load certificate from database
        // For now, return error - will be implemented when database access is ready
        Err(TlsInitError::KeyGeneration(rcgen::Error::CouldNotParseCertificate))
    }

    /// Create from legacy TlsDataRaw format
    ///
    /// The caller supplies the validity window, since legacy rows never
    /// stored one.
    pub async fn new_from_legacy(
        db: db::DatabasePool,
        data: TlsDataRaw,
        not_before: i64,
        not_after: i64,
    ) -> Result<Self, TlsInitError> {
        let tls_data = data.deserialize()?;

        let metadata = CertificateMetadata {
            cert_id: 1, // Legacy certificate gets ID 1
            cert: tls_data.cert,
            keypair: Arc::new(tls_data.keypair),
            not_before,
            not_after,
            created_at: chrono::Utc::now().timestamp(),
        };

        Ok(Self {
            current_cert: Arc::new(RwLock::new(metadata)),
            db,
        })
    }

    /// Get current certificate data
    pub async fn get_certificate(&self) -> (CertificateDer<'static>, Arc<KeyPair>) {
        let cert = self.current_cert.read().await;
        (cert.cert.clone(), cert.keypair.clone())
    }

    /// Replace certificate atomically
    pub async fn replace_certificate(&self, new_cert: CertificateMetadata) -> Result<(), TlsInitError> {
        let mut cert = self.current_cert.write().await;
        *cert = new_cert;
        Ok(())
    }

    /// Check if certificate is expiring soon
    ///
    /// Returns true when less than `threshold_secs` of validity remain.
    pub async fn check_expiration(&self, threshold_secs: i64) -> bool {
        let cert = self.current_cert.read().await;
        let now = chrono::Utc::now().timestamp();
        cert.not_after - now < threshold_secs
    }

    /// Get certificate metadata for rotation logic
    pub async fn get_certificate_metadata(&self) -> CertificateMetadata {
        let cert = self.current_cert.read().await;
        CertificateMetadata {
            cert_id: cert.cert_id,
            cert: cert.cert.clone(),
            keypair: cert.keypair.clone(),
            not_before: cert.not_before,
            not_after: cert.not_after,
            created_at: cert.created_at,
        }
    }

    /// NOTE(review): stub — returns empty buffers; see TODO below.
    pub fn bytes(&self) -> TlsDataRaw {
        // This method is now async-compatible but we keep sync interface
        // TODO: Make this async or remove if not needed
        TlsDataRaw {
            cert: vec![],
            key: vec![],
        }
    }
}
|
||||
@@ -1,552 +0,0 @@
|
||||
use std::collections::HashSet;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use diesel::prelude::*;
|
||||
use diesel_async::RunQueryDsl;
|
||||
use ed25519_dalek::VerifyingKey;
|
||||
use miette::Diagnostic;
|
||||
use rcgen::KeyPair;
|
||||
use thiserror::Error;
|
||||
use tokio::sync::watch;
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
use crate::context::ServerContext;
|
||||
use crate::db::models::{NewRotationClientAck, NewTlsCertificate, NewTlsRotationHistory};
|
||||
use crate::db::schema::{rotation_client_acks, tls_certificates, tls_rotation_history, tls_rotation_state};
|
||||
use crate::db::DatabasePool;
|
||||
|
||||
use super::{generate_cert, CertificateMetadata, TlsInitError};
|
||||
|
||||
/// Errors raised by the certificate rotation machinery.
#[derive(Error, Debug, Diagnostic)]
pub enum RotationError {
    /// rcgen could not generate the new certificate.
    #[error("Certificate generation failed: {0}")]
    #[diagnostic(code(arbiter_server::rotation::cert_generation))]
    CertGeneration(#[from] rcgen::Error),

    /// A query against the rotation tables failed.
    #[error("Database error: {0}")]
    #[diagnostic(code(arbiter_server::rotation::database))]
    Database(#[from] diesel::result::Error),

    /// Underlying TLS setup failed.
    #[error("TLS initialization error: {0}")]
    #[diagnostic(code(arbiter_server::rotation::tls_init))]
    TlsInit(#[from] TlsInitError),

    /// Persisted state was inconsistent or unreadable.
    #[error("Invalid rotation state: {0}")]
    #[diagnostic(code(arbiter_server::rotation::invalid_state))]
    InvalidState(String),

    /// Rotation was requested but no active certificate exists.
    #[error("No active certificate found")]
    #[diagnostic(code(arbiter_server::rotation::no_active_cert))]
    NoActiveCertificate,
}
|
||||
|
||||
/// State of the certificate rotation process.
#[derive(Debug, Clone)]
pub enum RotationState {
    /// Normal operation, no rotation required.
    Normal,

    /// Rotation initiated; a new certificate has been generated.
    RotationInitiated {
        initiated_at: i64,
        new_cert_id: i32,
    },

    /// Waiting for acknowledgements (ACKs) from clients.
    WaitingForAcks {
        new_cert_id: i32,
        initiated_at: i64,
        timeout_at: i64,
    },

    /// All ACKs received or the timeout expired; ready to rotate.
    ReadyToRotate {
        new_cert_id: i32,
    },
}
|
||||
|
||||
impl RotationState {
    /// Load the rotation state from the database.
    pub async fn load_from_db(db: &DatabasePool) -> Result<Self, RotationError> {
        use crate::db::schema::tls_rotation_state::dsl::*;

        let mut conn = db.get().await.map_err(|e| {
            RotationError::InvalidState(format!("Failed to get DB connection: {}", e))
        })?;

        // Single-row table: the state always lives at id = 1.
        let state_record: (i32, String, Option<i32>, Option<i32>, Option<i32>) =
            tls_rotation_state
                .select((id, state, new_cert_id, initiated_at, timeout_at))
                .filter(id.eq(1))
                .first(&mut conn)
                .await?;

        // NOTE(review): timestamps are stored in i32 columns and widened to
        // i64 here — confirm the schema before relying on post-2038 dates.
        let rotation_state = match state_record.1.as_str() {
            "normal" => RotationState::Normal,
            "initiated" => {
                let cert_id = state_record.2.ok_or_else(|| {
                    RotationError::InvalidState("Initiated state missing new_cert_id".into())
                })?;
                let init_at = state_record.3.ok_or_else(|| {
                    RotationError::InvalidState("Initiated state missing initiated_at".into())
                })?;
                RotationState::RotationInitiated {
                    initiated_at: init_at as i64,
                    new_cert_id: cert_id,
                }
            }
            "waiting_acks" => {
                let cert_id = state_record.2.ok_or_else(|| {
                    RotationError::InvalidState("WaitingForAcks state missing new_cert_id".into())
                })?;
                let init_at = state_record.3.ok_or_else(|| {
                    RotationError::InvalidState("WaitingForAcks state missing initiated_at".into())
                })?;
                let timeout = state_record.4.ok_or_else(|| {
                    RotationError::InvalidState("WaitingForAcks state missing timeout_at".into())
                })?;
                RotationState::WaitingForAcks {
                    new_cert_id: cert_id,
                    initiated_at: init_at as i64,
                    timeout_at: timeout as i64,
                }
            }
            "ready" => {
                let cert_id = state_record.2.ok_or_else(|| {
                    RotationError::InvalidState("Ready state missing new_cert_id".into())
                })?;
                RotationState::ReadyToRotate {
                    new_cert_id: cert_id,
                }
            }
            other => {
                return Err(RotationError::InvalidState(format!(
                    "Unknown state: {}",
                    other
                )))
            }
        };

        Ok(rotation_state)
    }

    /// Persist the rotation state to the database.
    pub async fn save_to_db(&self, db: &DatabasePool) -> Result<(), RotationError> {
        use crate::db::schema::tls_rotation_state::dsl::*;

        let mut conn = db.get().await.map_err(|e| {
            RotationError::InvalidState(format!("Failed to get DB connection: {}", e))
        })?;

        // Flatten the enum into the column tuple; absent fields become NULL.
        let (state_str, cert_id, init_at, timeout) = match self {
            RotationState::Normal => ("normal", None, None, None),
            RotationState::RotationInitiated {
                initiated_at: init,
                new_cert_id: cert,
            } => ("initiated", Some(*cert), Some(*init as i32), None),
            RotationState::WaitingForAcks {
                new_cert_id: cert,
                initiated_at: init,
                timeout_at: timeout_val,
            } => (
                "waiting_acks",
                Some(*cert),
                Some(*init as i32),
                Some(*timeout_val as i32),
            ),
            RotationState::ReadyToRotate { new_cert_id: cert } => ("ready", Some(*cert), None, None),
        };

        diesel::update(tls_rotation_state.filter(id.eq(1)))
            .set((
                state.eq(state_str),
                new_cert_id.eq(cert_id),
                initiated_at.eq(init_at),
                timeout_at.eq(timeout),
            ))
            .execute(&mut conn)
            .await?;

        Ok(())
    }
}
|
||||
|
||||
/// Background task for automatic certificate rotation.
pub struct RotationTask {
    // Shared server state (rotation state, TLS manager, DB pool).
    context: Arc<crate::context::_ServerContextInner>,
    // How often the rotation state is polled.
    check_interval: Duration,
    // Threshold used by the expiration check (see check_expiration_and_initiate).
    rotation_threshold: Duration,
    // How long to wait for client ACKs before proceeding.
    ack_timeout: Duration,
    // Signalled when the server shuts down.
    shutdown_rx: watch::Receiver<bool>,
}
|
||||
|
||||
impl RotationTask {
|
||||
    /// Create a new rotation task.
    ///
    /// Plain constructor: only stores the configuration; nothing runs until
    /// `run` is awaited.
    pub fn new(
        context: Arc<crate::context::_ServerContextInner>,
        check_interval: Duration,
        rotation_threshold: Duration,
        ack_timeout: Duration,
        shutdown_rx: watch::Receiver<bool>,
    ) -> Self {
        Self {
            context,
            check_interval,
            rotation_threshold,
            ack_timeout,
            shutdown_rx,
        }
    }
|
||||
|
||||
    /// Run the background monitoring/rotation loop. Consumes `self`.
    ///
    /// Sleeps for `check_interval` between iterations; an error from a single
    /// iteration is logged and does not stop the loop. Returns `Ok(())` when
    /// the shutdown watch channel fires.
    pub async fn run(mut self) -> Result<(), RotationError> {
        info!("Starting TLS certificate rotation task");

        loop {
            tokio::select! {
                _ = tokio::time::sleep(self.check_interval) => {
                    if let Err(e) = self.check_and_process().await {
                        // Log and keep going: a transient DB/TLS error must
                        // not kill the rotation task.
                        error!("Rotation task error: {}", e);
                    }
                }
                _ = self.shutdown_rx.changed() => {
                    info!("Rotation task shutting down");
                    break;
                }
            }
        }

        Ok(())
    }
|
||||
|
||||
    /// Inspect the current rotation state and perform the step it requires.
    ///
    /// The state is cloned out of the lock first so no lock is held across
    /// the awaited handler calls below.
    async fn check_and_process(&self) -> Result<(), RotationError> {
        let state = self.context.rotation_state.read().await.clone();

        match state {
            RotationState::Normal => {
                // Check whether the active certificate is close to expiry.
                self.check_expiration_and_initiate().await?;
            }
            RotationState::RotationInitiated { new_cert_id, .. } => {
                // Automatically advance to WaitingForAcks.
                self.transition_to_waiting_acks(new_cert_id).await?;
            }
            RotationState::WaitingForAcks {
                new_cert_id,
                timeout_at,
                ..
            } => {
                self.handle_waiting_for_acks(new_cert_id, timeout_at).await?;
            }
            RotationState::ReadyToRotate { new_cert_id } => {
                self.execute_rotation(new_cert_id).await?;
            }
        }

        Ok(())
    }
|
||||
|
||||
    /// Check the active certificate's remaining lifetime and initiate a
    /// rotation when it falls below `rotation_threshold`.
    async fn check_expiration_and_initiate(&self) -> Result<(), RotationError> {
        let threshold_secs = self.rotation_threshold.as_secs() as i64;

        // `check_expiration` returns true when the cert expires within the
        // given number of seconds.
        if self.context.tls.check_expiration(threshold_secs).await {
            info!("Certificate expiring soon, initiating rotation");
            self.initiate_rotation().await?;
        }

        Ok(())
    }
|
||||
|
||||
    /// Initiate a rotation: generate a new certificate, persist it, and move
    /// the state machine to `RotationInitiated`.
    ///
    /// Returns the database id of the freshly created (not yet active)
    /// certificate.
    pub async fn initiate_rotation(&self) -> Result<i32, RotationError> {
        info!("Initiating certificate rotation");

        // 1. Generate a fresh keypair and certificate.
        let keypair = KeyPair::generate()?;
        let (cert, not_before, not_after) = generate_cert(&keypair)?;
        let cert_der = cert.der().clone();

        // 2. Persist to the DB (is_active = false until activation).
        let new_cert_id = self
            .save_new_certificate(&cert_der, &keypair, not_before, not_after)
            .await?;

        info!(new_cert_id, "New certificate generated and saved");

        // 3. Update the in-memory state, then mirror it to the DB.
        let new_state = RotationState::RotationInitiated {
            initiated_at: chrono::Utc::now().timestamp(),
            new_cert_id,
        };
        *self.context.rotation_state.write().await = new_state.clone();
        new_state.save_to_db(&self.context.db).await?;

        // 4. Record the event in the audit trail.
        self.log_rotation_event(new_cert_id, "rotation_initiated", None)
            .await?;

        Ok(new_cert_id)
    }
|
||||
|
||||
    /// Move to the `WaitingForAcks` state and (eventually) notify clients.
    ///
    /// The ACK deadline is `now + ack_timeout`. Client broadcast is still a
    /// TODO, so currently only the state transition is performed.
    async fn transition_to_waiting_acks(&self, new_cert_id: i32) -> Result<(), RotationError> {
        info!(new_cert_id, "Transitioning to WaitingForAcks state");

        let initiated_at = chrono::Utc::now().timestamp();
        let timeout_at = initiated_at + self.ack_timeout.as_secs() as i64;

        // Update in-memory state, then persist it.
        let new_state = RotationState::WaitingForAcks {
            new_cert_id,
            initiated_at,
            timeout_at,
        };
        *self.context.rotation_state.write().await = new_state.clone();
        new_state.save_to_db(&self.context.db).await?;

        // TODO: broadcast rotation notifications to clients.
        // self.broadcast_rotation_notification(new_cert_id, timeout_at).await?;

        info!(timeout_at, "Rotation notifications sent, waiting for ACKs");

        Ok(())
    }
|
||||
|
||||
    /// Handle the `WaitingForAcks` state: advance to `ReadyToRotate` when the
    /// deadline passes or every client has acknowledged; otherwise just log
    /// progress and wait for the next tick.
    async fn handle_waiting_for_acks(
        &self,
        new_cert_id: i32,
        timeout_at: i64,
    ) -> Result<(), RotationError> {
        let now = chrono::Utc::now().timestamp();

        // Deadline reached: rotate anyway, recording how many clients never
        // acknowledged.
        if now > timeout_at {
            let missing = self.get_missing_acks(new_cert_id).await?;
            warn!(
                missing_count = missing.len(),
                "Rotation ACK timeout reached, proceeding with rotation"
            );

            // Advance to ReadyToRotate (memory first, then DB).
            let new_state = RotationState::ReadyToRotate { new_cert_id };
            *self.context.rotation_state.write().await = new_state.clone();
            new_state.save_to_db(&self.context.db).await?;

            self.log_rotation_event(
                new_cert_id,
                "timeout",
                Some(format!("Missing ACKs from {} clients", missing.len())),
            )
            .await?;

            return Ok(());
        }

        // Deadline not reached: check whether every ACK has arrived.
        let missing = self.get_missing_acks(new_cert_id).await?;

        if missing.is_empty() {
            info!("All clients acknowledged, ready to rotate");

            let new_state = RotationState::ReadyToRotate { new_cert_id };
            *self.context.rotation_state.write().await = new_state.clone();
            new_state.save_to_db(&self.context.db).await?;

            self.log_rotation_event(new_cert_id, "acks_complete", None)
                .await?;
        } else {
            // Still waiting: report progress at debug level only.
            let time_remaining = timeout_at - now;
            debug!(
                missing_count = missing.len(),
                time_remaining,
                "Waiting for rotation ACKs"
            );
        }

        Ok(())
    }
|
||||
|
||||
    /// Execute the certificate swap and reset the state machine to `Normal`.
    ///
    /// Ordering matters: the in-process TLS config is replaced before the DB
    /// `is_active` flags are flipped, so a failure in either step leaves the
    /// state machine in `ReadyToRotate` and the step is retried next tick.
    async fn execute_rotation(&self, new_cert_id: i32) -> Result<(), RotationError> {
        info!(new_cert_id, "Executing certificate rotation");

        // 1. Load the new certificate from the DB.
        let new_cert = self.load_certificate(new_cert_id).await?;

        // 2. Atomically swap it into the TlsManager.
        self.context
            .tls
            .replace_certificate(new_cert)
            .await
            .map_err(RotationError::TlsInit)?;

        // 3. Update the DB: old cert is_active=false, new cert is_active=true.
        self.activate_certificate(new_cert_id).await?;

        // 4. TODO: disconnect all clients so they re-handshake.
        // self.disconnect_all_clients().await?;

        // 5. Reset the rotation state (memory first, then DB).
        let new_state = RotationState::Normal;
        *self.context.rotation_state.write().await = new_state.clone();
        new_state.save_to_db(&self.context.db).await?;

        // 6. Clear ACK bookkeeping (in-memory set and DB rows).
        self.context.rotation_acks.write().await.clear();
        self.clear_rotation_acks(new_cert_id).await?;

        // 7. Audit trail.
        self.log_rotation_event(new_cert_id, "activated", None)
            .await?;

        info!(new_cert_id, "Certificate rotation completed successfully");

        Ok(())
    }
|
||||
|
||||
    /// Insert a freshly generated certificate into `tls_certificates`
    /// (inactive) and return its row id.
    ///
    /// Parameter names are prefixed `cert_` to avoid shadowing the column
    /// names imported by the dsl glob.
    async fn save_new_certificate(
        &self,
        cert_der: &[u8],
        keypair: &KeyPair,
        cert_not_before: i64,
        cert_not_after: i64,
    ) -> Result<i32, RotationError> {
        use crate::db::schema::tls_certificates::dsl::*;

        let mut conn = self.context.db.get().await.map_err(|e| {
            RotationError::InvalidState(format!("Failed to get DB connection: {}", e))
        })?;

        // NOTE(review): i64 validity timestamps are narrowed with `as i32`
        // to match the schema — wraps after 2038.
        let new_cert = NewTlsCertificate {
            cert: cert_der.to_vec(),
            cert_key: keypair.serialize_pem().as_bytes().to_vec(),
            not_before: cert_not_before as i32,
            not_after: cert_not_after as i32,
            is_active: false,
        };

        diesel::insert_into(tls_certificates)
            .values(&new_cert)
            .execute(&mut conn)
            .await?;

        // Fetch the id of the row just inserted. `last_insert_rowid()` is
        // per-connection state; this is correct only because the SELECT runs
        // on the same pooled connection as the INSERT above.
        let cert_id: i32 = diesel::select(diesel::dsl::sql::<diesel::sql_types::Integer>(
            "last_insert_rowid()",
        ))
        .first(&mut conn)
        .await?;

        self.log_rotation_event(cert_id, "created", None).await?;

        Ok(cert_id)
    }
|
||||
|
||||
    /// Load a certificate row from the DB and rebuild in-memory metadata
    /// (DER certificate plus the keypair parsed from its PEM encoding).
    ///
    /// Fails with `InvalidState` if the stored key bytes are not valid UTF-8,
    /// and propagates PEM-parse and query errors.
    async fn load_certificate(&self, cert_id: i32) -> Result<CertificateMetadata, RotationError> {
        use crate::db::schema::tls_certificates::dsl::*;

        let mut conn = self.context.db.get().await.map_err(|e| {
            RotationError::InvalidState(format!("Failed to get DB connection: {}", e))
        })?;

        // Tuple layout: (cert DER, key PEM bytes, not_before, not_after, created_at).
        let cert_record: (Vec<u8>, Vec<u8>, i32, i32, i32) = tls_certificates
            .select((cert, cert_key, not_before, not_after, created_at))
            .filter(id.eq(cert_id))
            .first(&mut conn)
            .await?;

        let cert_der = rustls::pki_types::CertificateDer::from(cert_record.0);
        // The private key is stored as PEM text; recover the String first.
        let key_pem = String::from_utf8(cert_record.1)
            .map_err(|e| RotationError::InvalidState(format!("Invalid key encoding: {}", e)))?;
        let keypair = KeyPair::from_pem(&key_pem)?;

        Ok(CertificateMetadata {
            cert_id,
            cert: cert_der,
            keypair: Arc::new(keypair),
            // Widen the i32 schema timestamps back to i64.
            not_before: cert_record.2 as i64,
            not_after: cert_record.3 as i64,
            created_at: cert_record.4 as i64,
        })
    }
|
||||
|
||||
/// Активировать сертификат (установить is_active=true)
|
||||
async fn activate_certificate(&self, cert_id: i32) -> Result<(), RotationError> {
|
||||
use crate::db::schema::tls_certificates::dsl::*;
|
||||
|
||||
let mut conn = self.context.db.get().await.map_err(|e| {
|
||||
RotationError::InvalidState(format!("Failed to get DB connection: {}", e))
|
||||
})?;
|
||||
|
||||
// Деактивировать все сертификаты
|
||||
diesel::update(tls_certificates)
|
||||
.set(is_active.eq(false))
|
||||
.execute(&mut conn)
|
||||
.await?;
|
||||
|
||||
// Активировать новый
|
||||
diesel::update(tls_certificates.filter(id.eq(cert_id)))
|
||||
.set(is_active.eq(true))
|
||||
.execute(&mut conn)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Получить список клиентов, которые ещё не отправили ACK
|
||||
async fn get_missing_acks(&self, rotation_id: i32) -> Result<Vec<VerifyingKey>, RotationError> {
|
||||
// TODO: Реализовать получение списка всех активных клиентов
|
||||
// и вычитание тех, кто уже отправил ACK
|
||||
|
||||
// Пока возвращаем пустой список
|
||||
Ok(Vec::new())
|
||||
}
|
||||
|
||||
/// Очистить ACKs для данной ротации из БД
|
||||
async fn clear_rotation_acks(&self, rotation_id: i32) -> Result<(), RotationError> {
|
||||
use crate::db::schema::rotation_client_acks::dsl::*;
|
||||
|
||||
let mut conn = self.context.db.get().await.map_err(|e| {
|
||||
RotationError::InvalidState(format!("Failed to get DB connection: {}", e))
|
||||
})?;
|
||||
|
||||
diesel::delete(rotation_client_acks.filter(rotation_id.eq(rotation_id)))
|
||||
.execute(&mut conn)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
    /// Append an event to the `tls_rotation_history` audit trail.
    ///
    /// Parameter names are prefixed `history_` so they do not shadow the
    /// column names imported by the dsl glob below.
    async fn log_rotation_event(
        &self,
        history_cert_id: i32,
        history_event_type: &str,
        history_details: Option<String>,
    ) -> Result<(), RotationError> {
        use crate::db::schema::tls_rotation_history::dsl::*;

        let mut conn = self.context.db.get().await.map_err(|e| {
            RotationError::InvalidState(format!("Failed to get DB connection: {}", e))
        })?;

        let new_history = NewTlsRotationHistory {
            cert_id: history_cert_id,
            event_type: history_event_type.to_string(),
            details: history_details,
        };

        diesel::insert_into(tls_rotation_history)
            .values(&new_history)
            .execute(&mut conn)
            .await?;

        Ok(())
    }
|
||||
}
|
||||
@@ -1,139 +0,0 @@
|
||||
use chacha20poly1305::{
|
||||
aead::{Aead, KeyInit},
|
||||
ChaCha20Poly1305, Key, Nonce,
|
||||
};
|
||||
|
||||
use super::CryptoError;
|
||||
|
||||
/// Encrypt plaintext with AEAD (ChaCha20Poly1305)
|
||||
///
|
||||
/// Returns (ciphertext, tag) on success
|
||||
pub fn encrypt(
|
||||
plaintext: &[u8],
|
||||
key: &[u8; 32],
|
||||
nonce: &[u8; 12],
|
||||
) -> Result<Vec<u8>, CryptoError> {
|
||||
let cipher_key = Key::from_slice(key);
|
||||
let cipher = ChaCha20Poly1305::new(cipher_key);
|
||||
let nonce_array = Nonce::from_slice(nonce);
|
||||
|
||||
cipher
|
||||
.encrypt(nonce_array, plaintext)
|
||||
.map_err(|e| CryptoError::AeadEncryption(e.to_string()))
|
||||
}
|
||||
|
||||
/// Decrypt ciphertext with AEAD (ChaCha20Poly1305)
|
||||
///
|
||||
/// The ciphertext должен содержать tag (последние 16 bytes)
|
||||
pub fn decrypt(
|
||||
ciphertext_with_tag: &[u8],
|
||||
key: &[u8; 32],
|
||||
nonce: &[u8; 12],
|
||||
) -> Result<Vec<u8>, CryptoError> {
|
||||
let cipher_key = Key::from_slice(key);
|
||||
let cipher = ChaCha20Poly1305::new(cipher_key);
|
||||
let nonce_array = Nonce::from_slice(nonce);
|
||||
|
||||
cipher
|
||||
.decrypt(nonce_array, ciphertext_with_tag)
|
||||
.map_err(|e| CryptoError::AeadDecryption(e.to_string()))
|
||||
}
|
||||
|
||||
/// Build a 12-byte AEAD nonce from a message counter.
///
/// Layout: 8 leading zero bytes followed by the counter encoded big-endian
/// in the last 4 bytes, so distinct counters always yield distinct nonces.
pub fn nonce_from_counter(counter: i32) -> [u8; 12] {
    let counter_be = counter.to_be_bytes();
    let mut nonce = [0u8; 12];
    for (dst, src) in nonce[8..].iter_mut().zip(counter_be) {
        *dst = src;
    }
    nonce
}
|
||||
|
||||
// Unit tests for the AEAD helpers: round-trip, key/nonce mismatch,
// tamper detection, and nonce layout.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_aead_encrypt_decrypt_round_trip() {
        let plaintext = b"Hello, World! This is a secret message.";
        let key = [42u8; 32];
        let nonce = nonce_from_counter(1);

        // Encrypt
        let ciphertext = encrypt(plaintext, &key, &nonce).expect("Encryption failed");

        // Verify ciphertext is different from plaintext
        assert_ne!(ciphertext.as_slice(), plaintext);

        // Decrypt
        let decrypted = decrypt(&ciphertext, &key, &nonce).expect("Decryption failed");

        // Verify round-trip
        assert_eq!(decrypted.as_slice(), plaintext);
    }

    #[test]
    fn test_aead_decrypt_with_wrong_key() {
        let plaintext = b"Secret data";
        let key = [1u8; 32];
        let wrong_key = [2u8; 32];
        let nonce = nonce_from_counter(1);

        let ciphertext = encrypt(plaintext, &key, &nonce).expect("Encryption failed");

        // Attempt decrypt with wrong key
        let result = decrypt(&ciphertext, &wrong_key, &nonce);

        // Should fail: the Poly1305 tag no longer verifies
        assert!(result.is_err());
    }

    #[test]
    fn test_aead_decrypt_with_wrong_nonce() {
        let plaintext = b"Secret data";
        let key = [1u8; 32];
        let nonce = nonce_from_counter(1);
        let wrong_nonce = nonce_from_counter(2);

        let ciphertext = encrypt(plaintext, &key, &nonce).expect("Encryption failed");

        // Attempt decrypt with wrong nonce
        let result = decrypt(&ciphertext, &key, &wrong_nonce);

        // Should fail: the nonce is bound into the authentication tag
        assert!(result.is_err());
    }

    #[test]
    fn test_nonce_generation_from_counter() {
        let nonce1 = nonce_from_counter(1);
        let nonce2 = nonce_from_counter(2);
        let nonce_max = nonce_from_counter(i32::MAX);

        // Verify nonces are different
        assert_ne!(nonce1, nonce2);

        // Verify nonce format (first 8 bytes should be zero, last 4 contain counter)
        assert_eq!(&nonce1[0..8], &[0u8; 8]);
        assert_eq!(&nonce1[8..12], &1i32.to_be_bytes());

        assert_eq!(&nonce_max[8..12], &i32::MAX.to_be_bytes());
    }

    #[test]
    fn test_aead_tampered_ciphertext() {
        let plaintext = b"Important message";
        let key = [7u8; 32];
        let nonce = nonce_from_counter(5);

        let mut ciphertext = encrypt(plaintext, &key, &nonce).expect("Encryption failed");

        // Tamper with ciphertext (flip a bit)
        if let Some(byte) = ciphertext.get_mut(5) {
            *byte ^= 0x01;
        }

        // Attempt decrypt - should fail due to authentication tag mismatch
        let result = decrypt(&ciphertext, &key, &nonce);
        assert!(result.is_err());
    }
}
|
||||
@@ -1,28 +0,0 @@
|
||||
pub mod aead;
|
||||
pub mod root_key;
|
||||
|
||||
use miette::Diagnostic;
|
||||
use thiserror::Error;
|
||||
|
||||
/// Errors produced by the crypto module (AEAD and key-derivation helpers).
///
/// Inner `String`s carry the underlying library's message; AEAD failures are
/// deliberately opaque beyond that (authentication errors reveal nothing).
#[derive(Error, Debug, Diagnostic)]
pub enum CryptoError {
    /// ChaCha20-Poly1305 encryption failed.
    #[error("AEAD encryption failed: {0}")]
    #[diagnostic(code(arbiter_server::crypto::aead_encryption))]
    AeadEncryption(String),

    /// ChaCha20-Poly1305 decryption/authentication failed.
    #[error("AEAD decryption failed: {0}")]
    #[diagnostic(code(arbiter_server::crypto::aead_decryption))]
    AeadDecryption(String),

    /// Argon2 password-based key derivation failed.
    #[error("Key derivation failed: {0}")]
    #[diagnostic(code(arbiter_server::crypto::key_derivation))]
    KeyDerivation(String),

    /// A nonce was malformed or out of range.
    #[error("Invalid nonce: {0}")]
    #[diagnostic(code(arbiter_server::crypto::invalid_nonce))]
    InvalidNonce(String),

    /// Key material had the wrong length or encoding.
    #[error("Invalid key format: {0}")]
    #[diagnostic(code(arbiter_server::crypto::invalid_key))]
    InvalidKey(String),
}
|
||||
@@ -1,240 +0,0 @@
|
||||
use argon2::{
|
||||
password_hash::{rand_core::OsRng, PasswordHasher, SaltString},
|
||||
Argon2, PasswordHash, PasswordVerifier,
|
||||
};
|
||||
|
||||
use crate::db::models::AeadEncrypted;
|
||||
|
||||
use super::{aead, CryptoError};
|
||||
|
||||
/// Encrypt the root key with a user password.
///
/// Derives a 32-byte key from the password with Argon2id (fresh random salt)
/// and encrypts the root key with ChaCha20-Poly1305 using a nonce built from
/// `nonce_counter`. Returns the storable `AeadEncrypted` record (ciphertext
/// and tag stored separately) plus the salt string the caller must keep for
/// decryption.
pub fn encrypt_root_key(
    root_key: &[u8; 32],
    password: &str,
    nonce_counter: i32,
) -> Result<(AeadEncrypted, String), CryptoError> {
    // Derive key from password using Argon2 (generates a new random salt).
    let (derived_key, salt) = derive_key_from_password(password)?;

    // Generate nonce from counter
    let nonce = aead::nonce_from_counter(nonce_counter);

    // Encrypt root key; result is ciphertext with the tag appended.
    let ciphertext_with_tag = aead::encrypt(root_key, &derived_key, &nonce)?;

    // Split off the 16-byte tag so ciphertext and tag are stored in
    // separate columns. checked_sub guards against an impossibly short buffer.
    let tag_start = ciphertext_with_tag
        .len()
        .checked_sub(16)
        .ok_or_else(|| CryptoError::AeadEncryption("Ciphertext too short".into()))?;

    let ciphertext = ciphertext_with_tag[..tag_start].to_vec();
    let tag = ciphertext_with_tag[tag_start..].to_vec();

    let aead_encrypted = AeadEncrypted {
        id: 1, // Placeholder; the real id is assigned by the database.
        current_nonce: nonce_counter,
        ciphertext,
        tag,
        schema_version: 1, // Current record-format version.
        argon2_salt: Some(salt.clone()),
    };

    Ok((aead_encrypted, salt))
}
|
||||
|
||||
/// Decrypt the root key with a user password.
///
/// Re-derives the AEAD key from `password` and the stored `salt`, rebuilds
/// the ciphertext||tag buffer, and decrypts it. A wrong password surfaces as
/// an `AeadDecryption` error (tag mismatch); a decrypted payload that is not
/// exactly 32 bytes yields `InvalidKey`.
pub fn decrypt_root_key(
    encrypted: &AeadEncrypted,
    password: &str,
    salt: &str,
) -> Result<[u8; 32], CryptoError> {
    // Derive key from password using stored salt
    let derived_key = derive_key_with_salt(password, salt)?;

    // Rebuild the nonce from the stored counter.
    let nonce = aead::nonce_from_counter(encrypted.current_nonce);

    // Reconstruct the ciphertext||tag layout that aead::decrypt expects.
    let mut ciphertext_with_tag = encrypted.ciphertext.clone();
    ciphertext_with_tag.extend_from_slice(&encrypted.tag);

    // Decrypt
    let plaintext = aead::decrypt(&ciphertext_with_tag, &derived_key, &nonce)?;

    // A root key is exactly 32 bytes; anything else is corrupt data.
    if plaintext.len() != 32 {
        return Err(CryptoError::InvalidKey(format!(
            "Expected 32 bytes, got {}",
            plaintext.len()
        )));
    }

    // Convert to fixed-size array
    let mut root_key = [0u8; 32];
    root_key.copy_from_slice(&plaintext);

    Ok(root_key)
}
|
||||
|
||||
/// Derive a 32-byte key from a password using Argon2id with a freshly
/// generated random salt.
///
/// Returns `(derived_key, salt_string)`; the salt must be stored so the same
/// key can be re-derived later with `derive_key_with_salt`.
fn derive_key_from_password(password: &str) -> Result<([u8; 32], String), CryptoError> {
    let salt = SaltString::generate(&mut OsRng);

    // Default Argon2id parameters; the default output length is 32 bytes,
    // which the length check below enforces.
    let argon2 = Argon2::default();

    let password_hash = argon2
        .hash_password(password.as_bytes(), &salt)
        .map_err(|e| CryptoError::KeyDerivation(e.to_string()))?;

    // Extract the raw hash output (the derived key material).
    let hash_output = password_hash
        .hash
        .ok_or_else(|| CryptoError::KeyDerivation("No hash output".into()))?;

    let hash_bytes = hash_output.as_bytes();

    if hash_bytes.len() != 32 {
        return Err(CryptoError::KeyDerivation(format!(
            "Expected 32 bytes, got {}",
            hash_bytes.len()
        )));
    }

    let mut key = [0u8; 32];
    key.copy_from_slice(hash_bytes);

    Ok((key, salt.to_string()))
}
|
||||
|
||||
/// Derive a 32-byte key from a password using an existing base64 salt string
/// (the counterpart to `derive_key_from_password` for re-derivation).
///
/// Returns `InvalidKey` if the salt fails to parse and `KeyDerivation` for
/// Argon2 failures or an unexpected output length.
fn derive_key_with_salt(password: &str, salt_str: &str) -> Result<[u8; 32], CryptoError> {
    let argon2 = Argon2::default();

    // Parse the stored base64 salt back into a SaltString.
    let salt =
        SaltString::from_b64(salt_str).map_err(|e| CryptoError::InvalidKey(e.to_string()))?;

    let password_hash = argon2
        .hash_password(password.as_bytes(), &salt)
        .map_err(|e| CryptoError::KeyDerivation(e.to_string()))?;

    // Extract the raw hash output (the derived key material).
    let hash_output = password_hash
        .hash
        .ok_or_else(|| CryptoError::KeyDerivation("No hash output".into()))?;

    let hash_bytes = hash_output.as_bytes();

    if hash_bytes.len() != 32 {
        return Err(CryptoError::KeyDerivation(format!(
            "Expected 32 bytes, got {}",
            hash_bytes.len()
        )));
    }

    let mut key = [0u8; 32];
    key.copy_from_slice(hash_bytes);

    Ok(key)
}
|
||||
|
||||
// Unit tests for root-key wrapping: password round-trip, wrong-password
// rejection, and Argon2 salt behavior.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_root_key_encrypt_decrypt_round_trip() {
        let root_key = [42u8; 32];
        let password = "super_secret_password_123";
        let nonce_counter = 1;

        // Encrypt
        let (encrypted, salt) =
            encrypt_root_key(&root_key, password, nonce_counter).expect("Encryption failed");

        // Verify structure
        assert_eq!(encrypted.current_nonce, nonce_counter);
        assert_eq!(encrypted.schema_version, 1);
        assert_eq!(encrypted.tag.len(), 16); // AEAD tag size

        // Decrypt
        let decrypted =
            decrypt_root_key(&encrypted, password, &salt).expect("Decryption failed");

        // Verify round-trip
        assert_eq!(decrypted, root_key);
    }

    #[test]
    fn test_decrypt_with_wrong_password() {
        let root_key = [99u8; 32];
        let correct_password = "correct_password";
        let wrong_password = "wrong_password";
        let nonce_counter = 1;

        // Encrypt with correct password
        let (encrypted, salt) =
            encrypt_root_key(&root_key, correct_password, nonce_counter).expect("Encryption failed");

        // Attempt decrypt with wrong password
        let result = decrypt_root_key(&encrypted, wrong_password, &salt);

        // Should fail due to authentication tag mismatch
        assert!(result.is_err());
    }

    #[test]
    fn test_password_derivation_different_salts() {
        let password = "same_password";

        // Derive key twice - should produce different salts
        let (key1, salt1) = derive_key_from_password(password).expect("Derivation 1 failed");
        let (key2, salt2) = derive_key_from_password(password).expect("Derivation 2 failed");

        // Salts should be different (randomly generated)
        assert_ne!(salt1, salt2);

        // Keys should be different (due to different salts)
        assert_ne!(key1, key2);
    }

    #[test]
    fn test_password_derivation_with_same_salt() {
        let password = "test_password";

        // Generate key and salt
        let (key1, salt) = derive_key_from_password(password).expect("Derivation failed");

        // Derive key again with same salt
        let key2 = derive_key_with_salt(password, &salt).expect("Re-derivation failed");

        // Keys should be identical (Argon2 is deterministic given salt)
        assert_eq!(key1, key2);
    }

    #[test]
    fn test_different_nonce_produces_different_ciphertext() {
        let root_key = [77u8; 32];
        let password = "password123";

        let (encrypted1, salt1) = encrypt_root_key(&root_key, password, 1).expect("Encryption 1 failed");
        let (encrypted2, salt2) = encrypt_root_key(&root_key, password, 2).expect("Encryption 2 failed");

        // Different nonces should produce different ciphertexts
        assert_ne!(encrypted1.ciphertext, encrypted2.ciphertext);

        // But both should decrypt correctly
        let decrypted1 = decrypt_root_key(&encrypted1, password, &salt1).expect("Decryption 1 failed");
        let decrypted2 = decrypt_root_key(&encrypted2, password, &salt2).expect("Decryption 2 failed");

        assert_eq!(decrypted1, root_key);
        assert_eq!(decrypted2, root_key);
    }
}
|
||||
@@ -1,12 +1,7 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use diesel::{
|
||||
Connection as _, SqliteConnection,
|
||||
connection::{SimpleConnection as _, TransactionManager},
|
||||
};
|
||||
use diesel::{Connection as _, SqliteConnection, connection::SimpleConnection as _};
|
||||
use diesel_async::{
|
||||
AsyncConnection, SimpleAsyncConnection,
|
||||
pooled_connection::{AsyncDieselConnectionManager, ManagerConfig, RecyclingMethod},
|
||||
pooled_connection::{AsyncDieselConnectionManager, ManagerConfig},
|
||||
sync_connection_wrapper::SyncConnectionWrapper,
|
||||
};
|
||||
use diesel_migrations::{EmbeddedMigrations, MigrationHarness, embed_migrations};
|
||||
@@ -29,26 +24,34 @@ const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations");
|
||||
#[derive(Error, Diagnostic, Debug)]
|
||||
pub enum DatabaseSetupError {
|
||||
#[error("Failed to determine home directory")]
|
||||
#[diagnostic(code(arbiter::db::home_dir_error))]
|
||||
#[diagnostic(code(arbiter::db::home_dir))]
|
||||
HomeDir(std::io::Error),
|
||||
|
||||
#[error(transparent)]
|
||||
#[diagnostic(code(arbiter::db::connection_error))]
|
||||
#[diagnostic(code(arbiter::db::connection))]
|
||||
Connection(diesel::ConnectionError),
|
||||
|
||||
#[error(transparent)]
|
||||
#[diagnostic(code(arbiter::db::concurrency_error))]
|
||||
#[diagnostic(code(arbiter::db::concurrency))]
|
||||
ConcurrencySetup(diesel::result::Error),
|
||||
|
||||
#[error(transparent)]
|
||||
#[diagnostic(code(arbiter::db::migration_error))]
|
||||
#[diagnostic(code(arbiter::db::migration))]
|
||||
Migration(Box<dyn std::error::Error + Send + Sync>),
|
||||
|
||||
#[error(transparent)]
|
||||
#[diagnostic(code(arbiter::db::pool_error))]
|
||||
#[diagnostic(code(arbiter::db::pool))]
|
||||
Pool(#[from] PoolInitError),
|
||||
}
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum DatabaseError {
|
||||
#[error("Database connection error")]
|
||||
Pool(#[from] PoolError),
|
||||
#[error("Database query error")]
|
||||
Connection(#[from] diesel::result::Error),
|
||||
}
|
||||
|
||||
#[tracing::instrument(level = "info")]
|
||||
fn database_path() -> Result<std::path::PathBuf, DatabaseSetupError> {
|
||||
let arbiter_home = arbiter_proto::home_path().map_err(DatabaseSetupError::HomeDir)?;
|
||||
@@ -96,12 +99,13 @@ fn initialize_database(url: &str) -> Result<(), DatabaseSetupError> {
|
||||
|
||||
#[tracing::instrument(level = "info")]
|
||||
pub async fn create_pool(url: Option<&str>) -> Result<DatabasePool, DatabaseSetupError> {
|
||||
let database_url = url.map(String::from).unwrap_or(format!(
|
||||
"{}?mode=rwc",
|
||||
(database_path()?
|
||||
let database_url = url.map(String::from).unwrap_or(
|
||||
#[allow(clippy::expect_used)]
|
||||
database_path()?
|
||||
.to_str()
|
||||
.expect("database path is not valid UTF-8"))
|
||||
));
|
||||
.expect("database path is not valid UTF-8")
|
||||
.to_string(),
|
||||
);
|
||||
|
||||
initialize_database(&database_url)?;
|
||||
|
||||
@@ -134,18 +138,19 @@ pub async fn create_pool(url: Option<&str>) -> Result<DatabasePool, DatabaseSetu
|
||||
Ok(pool)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub async fn create_test_pool() -> DatabasePool {
|
||||
use rand::distr::{Alphanumeric, SampleString as _};
|
||||
|
||||
let tempfile_name = Alphanumeric.sample_string(&mut rand::rng(), 16);
|
||||
|
||||
let file = std::env::temp_dir().join(tempfile_name);
|
||||
let url = format!(
|
||||
"{}?mode=rwc",
|
||||
file.to_str().expect("temp file path is not valid UTF-8")
|
||||
);
|
||||
#[allow(clippy::expect_used)]
|
||||
let url = file
|
||||
.to_str()
|
||||
.expect("temp file path is not valid UTF-8")
|
||||
.to_string();
|
||||
|
||||
#[allow(clippy::expect_used)]
|
||||
create_pool(Some(&url))
|
||||
.await
|
||||
.expect("Failed to create test database pool")
|
||||
@@ -1,118 +1,378 @@
|
||||
#![allow(unused)]
|
||||
#![allow(clippy::all)]
|
||||
|
||||
use crate::db::schema::{self, aead_encrypted, arbiter_settings};
|
||||
use crate::db::schema::{
|
||||
self, aead_encrypted, arbiter_settings, evm_basic_grant, evm_ether_transfer_grant,
|
||||
evm_ether_transfer_grant_target, evm_ether_transfer_limit, evm_token_transfer_grant,
|
||||
evm_token_transfer_log, evm_token_transfer_volume_limit, evm_transaction_log, evm_wallet,
|
||||
root_key_history, tls_history,
|
||||
};
|
||||
use chrono::{DateTime, Utc};
|
||||
use diesel::{prelude::*, sqlite::Sqlite};
|
||||
use restructed::Models;
|
||||
|
||||
pub mod types {
|
||||
use chrono::{DateTime, Utc};
|
||||
pub struct SqliteTimestamp(DateTime<Utc>);
|
||||
}
|
||||
use diesel::{
|
||||
deserialize::{FromSql, FromSqlRow},
|
||||
expression::AsExpression,
|
||||
serialize::{IsNull, ToSql},
|
||||
sql_types::Integer,
|
||||
sqlite::{Sqlite, SqliteType},
|
||||
};
|
||||
|
||||
#[derive(Queryable, Selectable, Debug, Insertable)]
|
||||
#[derive(Debug, FromSqlRow, AsExpression, Clone)]
|
||||
#[diesel(sql_type = Integer)]
|
||||
#[repr(transparent)] // hint compiler to optimize the wrapper struct away
|
||||
pub struct SqliteTimestamp(pub DateTime<Utc>);
|
||||
impl SqliteTimestamp {
|
||||
pub fn now() -> Self {
|
||||
SqliteTimestamp(Utc::now())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<chrono::DateTime<Utc>> for SqliteTimestamp {
|
||||
fn from(dt: chrono::DateTime<Utc>) -> Self {
|
||||
SqliteTimestamp(dt)
|
||||
}
|
||||
}
|
||||
impl From<SqliteTimestamp> for chrono::DateTime<Utc> {
|
||||
fn from(ts: SqliteTimestamp) -> Self {
|
||||
ts.0
|
||||
}
|
||||
}
|
||||
|
||||
impl ToSql<Integer, Sqlite> for SqliteTimestamp {
|
||||
fn to_sql<'b>(
|
||||
&'b self,
|
||||
out: &mut diesel::serialize::Output<'b, '_, Sqlite>,
|
||||
) -> diesel::serialize::Result {
|
||||
let unix_timestamp = self.0.timestamp() as i32;
|
||||
out.set_value(unix_timestamp);
|
||||
Ok(IsNull::No)
|
||||
}
|
||||
}
|
||||
|
||||
impl FromSql<Integer, Sqlite> for SqliteTimestamp {
|
||||
fn from_sql(
|
||||
mut bytes: <Sqlite as diesel::backend::Backend>::RawValue<'_>,
|
||||
) -> diesel::deserialize::Result<Self> {
|
||||
let Some(SqliteType::Long) = bytes.value_type() else {
|
||||
return Err(format!(
|
||||
"Expected Integer type for SqliteTimestamp, got {:?}",
|
||||
bytes.value_type()
|
||||
)
|
||||
.into());
|
||||
};
|
||||
|
||||
let unix_timestamp = bytes.read_long();
|
||||
let datetime =
|
||||
DateTime::from_timestamp(unix_timestamp, 0).ok_or("Timestamp is out of bounds")?;
|
||||
|
||||
Ok(SqliteTimestamp(datetime))
|
||||
}
|
||||
}
|
||||
|
||||
/// Key algorithm stored in the `useragent_client.key_type` column.
|
||||
/// Values must stay stable — they are persisted in the database.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, FromSqlRow, AsExpression, strum::FromRepr)]
|
||||
#[diesel(sql_type = Integer)]
|
||||
#[repr(i32)]
|
||||
pub enum KeyType {
|
||||
Ed25519 = 1,
|
||||
EcdsaSecp256k1 = 2,
|
||||
Rsa = 3,
|
||||
}
|
||||
|
||||
impl ToSql<Integer, Sqlite> for KeyType {
|
||||
fn to_sql<'b>(
|
||||
&'b self,
|
||||
out: &mut diesel::serialize::Output<'b, '_, Sqlite>,
|
||||
) -> diesel::serialize::Result {
|
||||
out.set_value(*self as i32);
|
||||
Ok(IsNull::No)
|
||||
}
|
||||
}
|
||||
|
||||
impl FromSql<Integer, Sqlite> for KeyType {
|
||||
fn from_sql(
|
||||
mut bytes: <Sqlite as diesel::backend::Backend>::RawValue<'_>,
|
||||
) -> diesel::deserialize::Result<Self> {
|
||||
let Some(SqliteType::Long) = bytes.value_type() else {
|
||||
return Err("Expected Integer for KeyType".into());
|
||||
};
|
||||
let discriminant = bytes.read_long();
|
||||
KeyType::from_repr(discriminant as i32)
|
||||
.ok_or_else(|| format!("Unknown KeyType discriminant: {discriminant}").into())
|
||||
}
|
||||
}
|
||||
}
|
||||
pub use types::*;
|
||||
|
||||
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||
#[view(
|
||||
NewAeadEncrypted,
|
||||
derive(Insertable),
|
||||
omit(id),
|
||||
attributes_with = "deriveless"
|
||||
)]
|
||||
#[diesel(table_name = aead_encrypted, check_for_backend(Sqlite))]
|
||||
pub struct AeadEncrypted {
|
||||
pub id: i32,
|
||||
pub current_nonce: i32,
|
||||
pub ciphertext: Vec<u8>,
|
||||
pub tag: Vec<u8>,
|
||||
pub current_nonce: Vec<u8>,
|
||||
pub schema_version: i32,
|
||||
pub argon2_salt: Option<String>,
|
||||
pub associated_root_key_id: i32, // references root_key_history.id
|
||||
pub created_at: SqliteTimestamp,
|
||||
}
|
||||
|
||||
#[derive(Queryable, Debug, Insertable)]
|
||||
#[diesel(table_name = arbiter_settings, check_for_backend(Sqlite))]
|
||||
pub struct ArbiterSetting {
|
||||
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||
#[diesel(table_name = root_key_history, check_for_backend(Sqlite))]
|
||||
#[view(
|
||||
NewRootKeyHistory,
|
||||
derive(Insertable),
|
||||
omit(id),
|
||||
attributes_with = "deriveless"
|
||||
)]
|
||||
pub struct RootKeyHistory {
|
||||
pub id: i32,
|
||||
pub root_key_id: Option<i32>, // references aead_encrypted.id
|
||||
pub cert_key: Vec<u8>,
|
||||
pub cert: Vec<u8>,
|
||||
pub current_cert_id: Option<i32>, // references tls_certificates.id
|
||||
pub ciphertext: Vec<u8>,
|
||||
pub tag: Vec<u8>,
|
||||
pub root_key_encryption_nonce: Vec<u8>,
|
||||
pub data_encryption_nonce: Vec<u8>,
|
||||
pub schema_version: i32,
|
||||
pub salt: Vec<u8>,
|
||||
}
|
||||
|
||||
#[derive(Queryable, Debug)]
|
||||
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||
#[diesel(table_name = tls_history, check_for_backend(Sqlite))]
|
||||
#[view(
|
||||
NewTlsHistory,
|
||||
derive(Insertable),
|
||||
omit(id, created_at),
|
||||
attributes_with = "deriveless"
|
||||
)]
|
||||
pub struct TlsHistory {
|
||||
pub id: i32,
|
||||
pub cert: String,
|
||||
pub cert_key: String, // PEM Encoded private key
|
||||
pub ca_cert: String, // PEM Encoded certificate for cert signing
|
||||
pub ca_key: String, // PEM Encoded public key for cert signing
|
||||
pub created_at: SqliteTimestamp,
|
||||
}
|
||||
|
||||
#[derive(Queryable, Debug, Insertable, Selectable)]
|
||||
#[diesel(table_name = arbiter_settings, check_for_backend(Sqlite))]
|
||||
pub struct ArbiterSettings {
|
||||
pub id: i32,
|
||||
pub root_key_id: Option<i32>, // references root_key_history.id
|
||||
pub tls_id: Option<i32>, // references tls_history.id
|
||||
}
|
||||
|
||||
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||
#[diesel(table_name = evm_wallet, check_for_backend(Sqlite))]
|
||||
#[view(
|
||||
NewEvmWallet,
|
||||
derive(Insertable),
|
||||
omit(id, created_at),
|
||||
attributes_with = "deriveless"
|
||||
)]
|
||||
pub struct EvmWallet {
|
||||
pub id: i32,
|
||||
pub address: Vec<u8>,
|
||||
pub aead_encrypted_id: i32,
|
||||
pub created_at: SqliteTimestamp,
|
||||
}
|
||||
|
||||
#[derive(Models, Queryable, Debug, Insertable, Selectable, Clone)]
|
||||
#[diesel(table_name = schema::evm_wallet_access, check_for_backend(Sqlite))]
|
||||
#[view(
|
||||
NewEvmWalletAccess,
|
||||
derive(Insertable),
|
||||
omit(id, created_at),
|
||||
attributes_with = "deriveless"
|
||||
)]
|
||||
#[view(
|
||||
CoreEvmWalletAccess,
|
||||
derive(Insertable),
|
||||
omit(created_at),
|
||||
attributes_with = "deriveless"
|
||||
)]
|
||||
pub struct EvmWalletAccess {
|
||||
pub id: i32,
|
||||
pub wallet_id: i32,
|
||||
pub client_id: i32,
|
||||
pub created_at: SqliteTimestamp,
|
||||
}
|
||||
|
||||
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||
#[diesel(table_name = schema::client_metadata, check_for_backend(Sqlite))]
|
||||
pub struct ProgramClientMetadata {
|
||||
pub id: i32,
|
||||
pub name: String,
|
||||
pub description: Option<String>,
|
||||
pub version: Option<String>,
|
||||
pub created_at: SqliteTimestamp,
|
||||
}
|
||||
|
||||
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||
#[diesel(table_name = schema::client_metadata_history, check_for_backend(Sqlite))]
|
||||
pub struct ProgramClientMetadataHistory {
|
||||
pub id: i32,
|
||||
pub metadata_id: i32,
|
||||
pub client_id: i32,
|
||||
pub created_at: SqliteTimestamp,
|
||||
}
|
||||
|
||||
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||
#[diesel(table_name = schema::program_client, check_for_backend(Sqlite))]
|
||||
pub struct ProgramClient {
|
||||
pub id: i32,
|
||||
pub public_key: Vec<u8>,
|
||||
pub nonce: i32,
|
||||
pub created_at: i32,
|
||||
pub updated_at: i32,
|
||||
pub public_key: Vec<u8>,
|
||||
pub metadata_id: i32,
|
||||
pub created_at: SqliteTimestamp,
|
||||
pub updated_at: SqliteTimestamp,
|
||||
}
|
||||
|
||||
#[derive(Queryable, Debug)]
|
||||
#[diesel(table_name = schema::useragent_client, check_for_backend(Sqlite))]
|
||||
pub struct UseragentClient {
|
||||
pub id: i32,
|
||||
pub public_key: Vec<u8>,
|
||||
pub nonce: i32,
|
||||
pub created_at: i32,
|
||||
pub updated_at: i32,
|
||||
pub public_key: Vec<u8>,
|
||||
pub created_at: SqliteTimestamp,
|
||||
pub updated_at: SqliteTimestamp,
|
||||
pub key_type: KeyType,
|
||||
}
|
||||
|
||||
// TLS Certificate Rotation Models
|
||||
|
||||
#[derive(Queryable, Debug, Insertable)]
|
||||
#[diesel(table_name = schema::tls_certificates, check_for_backend(Sqlite))]
|
||||
pub struct TlsCertificate {
|
||||
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||
#[diesel(table_name = evm_ether_transfer_limit, check_for_backend(Sqlite))]
|
||||
#[view(
|
||||
NewEvmEtherTransferLimit,
|
||||
derive(Insertable),
|
||||
omit(id, created_at),
|
||||
attributes_with = "deriveless"
|
||||
)]
|
||||
pub struct EvmEtherTransferLimit {
|
||||
pub id: i32,
|
||||
pub cert: Vec<u8>,
|
||||
pub cert_key: Vec<u8>,
|
||||
pub not_before: i32,
|
||||
pub not_after: i32,
|
||||
pub created_at: i32,
|
||||
pub is_active: bool,
|
||||
pub window_secs: i32,
|
||||
pub max_volume: Vec<u8>,
|
||||
}
|
||||
|
||||
#[derive(Insertable)]
|
||||
#[diesel(table_name = schema::tls_certificates)]
|
||||
pub struct NewTlsCertificate {
|
||||
pub cert: Vec<u8>,
|
||||
pub cert_key: Vec<u8>,
|
||||
pub not_before: i32,
|
||||
pub not_after: i32,
|
||||
pub is_active: bool,
|
||||
}
|
||||
|
||||
#[derive(Queryable, Debug, Insertable)]
|
||||
#[diesel(table_name = schema::tls_rotation_state, check_for_backend(Sqlite))]
|
||||
pub struct TlsRotationState {
|
||||
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||
#[diesel(table_name = evm_basic_grant, check_for_backend(Sqlite))]
|
||||
#[view(
|
||||
NewEvmBasicGrant,
|
||||
derive(Insertable),
|
||||
omit(id, created_at),
|
||||
attributes_with = "deriveless"
|
||||
)]
|
||||
pub struct EvmBasicGrant {
|
||||
pub id: i32,
|
||||
pub state: String,
|
||||
pub new_cert_id: Option<i32>,
|
||||
pub initiated_at: Option<i32>,
|
||||
pub timeout_at: Option<i32>,
|
||||
pub wallet_access_id: i32, // references evm_wallet_access.id
|
||||
pub chain_id: i32,
|
||||
pub valid_from: Option<SqliteTimestamp>,
|
||||
pub valid_until: Option<SqliteTimestamp>,
|
||||
pub max_gas_fee_per_gas: Option<Vec<u8>>,
|
||||
pub max_priority_fee_per_gas: Option<Vec<u8>>,
|
||||
pub rate_limit_count: Option<i32>,
|
||||
pub rate_limit_window_secs: Option<i32>,
|
||||
pub revoked_at: Option<SqliteTimestamp>,
|
||||
pub created_at: SqliteTimestamp,
|
||||
}
|
||||
|
||||
#[derive(Queryable, Debug, Insertable)]
|
||||
#[diesel(table_name = schema::rotation_client_acks, check_for_backend(Sqlite))]
|
||||
pub struct RotationClientAck {
|
||||
pub rotation_id: i32,
|
||||
pub client_key: String,
|
||||
pub ack_received_at: i32,
|
||||
}
|
||||
|
||||
#[derive(Insertable)]
|
||||
#[diesel(table_name = schema::rotation_client_acks)]
|
||||
pub struct NewRotationClientAck {
|
||||
pub rotation_id: i32,
|
||||
pub client_key: String,
|
||||
}
|
||||
|
||||
#[derive(Queryable, Debug, Insertable)]
|
||||
#[diesel(table_name = schema::tls_rotation_history, check_for_backend(Sqlite))]
|
||||
pub struct TlsRotationHistory {
|
||||
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||
#[diesel(table_name = evm_transaction_log, check_for_backend(Sqlite))]
|
||||
#[view(
|
||||
NewEvmTransactionLog,
|
||||
derive(Insertable),
|
||||
omit(id),
|
||||
attributes_with = "deriveless"
|
||||
)]
|
||||
pub struct EvmTransactionLog {
|
||||
pub id: i32,
|
||||
pub cert_id: i32,
|
||||
pub event_type: String,
|
||||
pub timestamp: i32,
|
||||
pub details: Option<String>,
|
||||
pub grant_id: i32,
|
||||
pub wallet_access_id: i32,
|
||||
pub chain_id: i32,
|
||||
pub eth_value: Vec<u8>,
|
||||
pub signed_at: SqliteTimestamp,
|
||||
}
|
||||
|
||||
#[derive(Insertable)]
|
||||
#[diesel(table_name = schema::tls_rotation_history)]
|
||||
pub struct NewTlsRotationHistory {
|
||||
pub cert_id: i32,
|
||||
pub event_type: String,
|
||||
pub details: Option<String>,
|
||||
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||
#[diesel(table_name = evm_ether_transfer_grant, check_for_backend(Sqlite))]
|
||||
#[view(
|
||||
NewEvmEtherTransferGrant,
|
||||
derive(Insertable),
|
||||
omit(id),
|
||||
attributes_with = "deriveless"
|
||||
)]
|
||||
pub struct EvmEtherTransferGrant {
|
||||
pub id: i32,
|
||||
pub basic_grant_id: i32,
|
||||
pub limit_id: i32, // references evm_ether_transfer_limit.id
|
||||
}
|
||||
|
||||
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||
#[diesel(table_name = evm_ether_transfer_grant_target, check_for_backend(Sqlite))]
|
||||
#[view(
|
||||
NewEvmEtherTransferGrantTarget,
|
||||
derive(Insertable),
|
||||
omit(id),
|
||||
attributes_with = "deriveless"
|
||||
)]
|
||||
pub struct EvmEtherTransferGrantTarget {
|
||||
pub id: i32,
|
||||
pub grant_id: i32,
|
||||
pub address: Vec<u8>,
|
||||
}
|
||||
|
||||
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||
#[diesel(table_name = evm_token_transfer_grant, check_for_backend(Sqlite))]
|
||||
#[view(
|
||||
NewEvmTokenTransferGrant,
|
||||
derive(Insertable),
|
||||
omit(id),
|
||||
attributes_with = "deriveless"
|
||||
)]
|
||||
pub struct EvmTokenTransferGrant {
|
||||
pub id: i32,
|
||||
pub basic_grant_id: i32,
|
||||
pub token_contract: Vec<u8>,
|
||||
pub receiver: Option<Vec<u8>>,
|
||||
}
|
||||
|
||||
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||
#[diesel(table_name = evm_token_transfer_volume_limit, check_for_backend(Sqlite))]
|
||||
#[view(
|
||||
NewEvmTokenTransferVolumeLimit,
|
||||
derive(Insertable),
|
||||
omit(id),
|
||||
attributes_with = "deriveless"
|
||||
)]
|
||||
pub struct EvmTokenTransferVolumeLimit {
|
||||
pub id: i32,
|
||||
pub grant_id: i32,
|
||||
pub window_secs: i32,
|
||||
pub max_volume: Vec<u8>,
|
||||
}
|
||||
|
||||
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||
#[diesel(table_name = evm_token_transfer_log, check_for_backend(Sqlite))]
|
||||
#[view(
|
||||
NewEvmTokenTransferLog,
|
||||
derive(Insertable),
|
||||
omit(id, created_at),
|
||||
attributes_with = "deriveless"
|
||||
)]
|
||||
pub struct EvmTokenTransferLog {
|
||||
pub id: i32,
|
||||
pub grant_id: i32,
|
||||
pub log_id: i32,
|
||||
pub chain_id: i32,
|
||||
pub token_contract: Vec<u8>,
|
||||
pub recipient_address: Vec<u8>,
|
||||
pub value: Vec<u8>,
|
||||
pub created_at: SqliteTimestamp,
|
||||
}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user