Compare commits
154 Commits
push-lspny
...
enforcing-
| Author | SHA1 | Date |
|---|---|---|
|
|
763058b014 | ||
|
|
1497884ce6 | ||
|
|
b3464cf8a6 | ||
|
|
46d1318b6f | ||
| 9c80d51d45 | |||
|
|
33456a644d | ||
|
|
5bc0c42cc7 | ||
|
|
f6b62ab884 | ||
|
|
2dd5a3f32f | ||
|
|
1aca9d4007 | ||
| 5ee1b49c43 | |||
|
|
00745bb381 | ||
|
|
b122aa464c | ||
|
|
9fab945a00 | ||
|
|
aeed664e9a | ||
|
|
4057c1fc12 | ||
|
|
f5eb51978d | ||
|
|
d997e0f843 | ||
|
|
7aca281a81 | ||
| 0daad1dd37 | |||
| 9ea474e1b2 | |||
|
|
c6f440fdad | ||
| e17c25a604 | |||
|
|
01b12515bd | ||
|
|
4a50daa7ea | ||
|
|
352ee3ee63 | ||
|
|
dd51d756da | ||
|
|
0bb6e596ac | ||
|
|
083ff66af2 | ||
|
|
881f16bb1a | ||
|
|
78895bca5b | ||
| 1495fbe754 | |||
| ab8cf877d7 | |||
|
|
146f7a419e | ||
|
|
0362044b83 | ||
| 72618c186f | |||
|
|
e47ccc3108 | ||
| 90d8ae3c6c | |||
| 4af172e49a | |||
|
|
bc45b9b9ce | ||
|
|
5bce9fd68e | ||
|
|
63a4875fdb | ||
|
|
d5ec303b9a | ||
|
|
82b5b85f52 | ||
|
|
e2d8b7841b | ||
|
|
8feda7990c | ||
|
|
16f0e67d02 | ||
|
|
b5507e7d0f | ||
|
|
0388fa2c8b | ||
|
|
cfe01ba1ad | ||
|
|
59c7091cba | ||
|
|
523bf783ac | ||
|
|
643f251419 | ||
|
|
bce6ecd409 | ||
|
|
f32728a277 | ||
|
|
32743741e1 | ||
|
|
54b2183be5 | ||
|
|
ca35b9fed7 | ||
|
|
27428f709a | ||
|
|
78006e90f2 | ||
|
|
29cc4d9e5b | ||
|
|
7f8b9cc63e | ||
|
|
a02ef68a70 | ||
|
|
e5be55e141 | ||
|
|
8f0eb7130b | ||
|
|
94fe04a6a4 | ||
|
|
976c11902c | ||
|
|
c8d2662a36 | ||
|
|
ac5fedddd1 | ||
|
|
0c2d4986a2 | ||
|
|
a3203936d2 | ||
|
|
fb1c0ec130 | ||
|
|
2a21758369 | ||
|
|
1abb5fa006 | ||
|
|
e1b1c857fa | ||
|
|
4216007af3 | ||
|
|
6987e5f70f | ||
|
|
bbf8a8019c | ||
|
|
ac04495480 | ||
|
|
eb25d31361 | ||
|
|
056ff3470b | ||
|
|
c0b08e84cc | ||
|
|
ddd6e7910f | ||
|
|
d9b3694cab | ||
|
|
4ebe7b6fc4 | ||
|
|
8043cdf8d8 | ||
| 2148faa376 | |||
|
|
eb37ee0a0c | ||
|
|
1f07fd6a98 | ||
|
|
e135519c06 | ||
|
|
f015d345f4 | ||
|
|
51674bb39c | ||
|
|
cd07ab7a78 | ||
|
|
cfa6e068eb | ||
|
|
784261f4d8 | ||
|
|
971db0e919 | ||
|
|
e1a8553142 | ||
|
|
ec70561c93 | ||
|
|
3993d3a8cc | ||
|
|
c87456ae2f | ||
|
|
e89983de3a | ||
|
|
f56668d9f6 | ||
|
|
434738bae5 | ||
|
|
915540de32 | ||
|
|
5a5008080a | ||
|
|
3bc423f9b2 | ||
|
|
f2c33a5bf4 | ||
|
|
3e8b26418a | ||
|
|
60ce1cc110 | ||
|
|
2ff4d0961c | ||
|
|
d61dab3285 | ||
|
|
c439c9645d | ||
|
|
c2883704e6 | ||
| 47caec38a6 | |||
|
|
77c3babec7 | ||
|
|
6f03ce4d1d | ||
|
|
712f114763 | ||
|
|
c56184d30b | ||
|
|
9017ea4017 | ||
|
|
088fa6fe72 | ||
|
|
c90af9c196 | ||
|
|
a5a9bc73b0 | ||
|
|
6ed8150e48 | ||
|
|
fac312d860 | ||
|
|
549a0f5f52 | ||
|
|
4db102b3d1 | ||
|
|
c61a9e30ac | ||
|
|
27836beb75 | ||
|
|
099f76166e | ||
|
|
66026e903a | ||
|
|
3360d3c8c7 | ||
|
|
02980468db | ||
|
|
ec0e8a980c | ||
|
|
16d5b9a233 | ||
|
|
62c4bc5ade | ||
|
|
ccd657c9ec | ||
|
|
013af7e65f | ||
| 84978afd58 | |||
|
|
4cb5b303dc | ||
| 8fde3cec41 | |||
| 17ac195c5d | |||
| c1c5d14133 | |||
| 47144bdf81 | |||
| 42760bbd79 | |||
| d29bca853b | |||
| f8d27a1454 | |||
| 6030f30901 | |||
| a3c401194f | |||
|
|
6386510f52 | ||
| ec36e5c2ea | |||
|
|
ba86d18250 | ||
|
|
606a1f3774 | ||
|
|
b3a67ffc00 | ||
|
|
168290040c |
11
.claude/memory/feedback_widget_decomposition.md
Normal file
11
.claude/memory/feedback_widget_decomposition.md
Normal file
@@ -0,0 +1,11 @@
|
||||
---
|
||||
name: Widget decomposition and provider subscriptions
|
||||
description: Prefer splitting screens into multiple focused files/widgets; each widget subscribes to its own relevant providers
|
||||
type: feedback
|
||||
---
|
||||
|
||||
Split screens into multiple smaller widgets across multiple files. Each widget should subscribe only to the providers it needs (`ref.watch` at lowest possible level), rather than having one large screen widget that watches everything and passes data down as parameters.
|
||||
|
||||
**Why:** Reduces unnecessary rebuilds; improves readability; each file has one clear responsibility.
|
||||
|
||||
**How to apply:** When building a new screen, identify which sub-widgets need their own provider subscriptions and extract them into separate files (e.g., `widgets/grant_card.dart` watches enrichment providers itself, rather than the screen doing it and passing resolved strings down).
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -1,3 +1,6 @@
|
||||
target/
|
||||
scripts/__pycache__/
|
||||
.DS_Store
|
||||
.cargo/config.toml
|
||||
.vscode/
|
||||
docs/
|
||||
|
||||
3
.vscode/settings.json
vendored
3
.vscode/settings.json
vendored
@@ -1,3 +0,0 @@
|
||||
{
|
||||
"git.enabled": false
|
||||
}
|
||||
@@ -22,4 +22,4 @@ steps:
|
||||
- apt-get update && apt-get install -y pkg-config
|
||||
- mise install rust
|
||||
- mise install protoc
|
||||
- mise exec rust -- cargo clippy --all-targets --all-features -- -D warnings
|
||||
- mise exec rust -- cargo clippy --all -- -D warnings
|
||||
@@ -24,4 +24,4 @@ steps:
|
||||
- mise install rust
|
||||
- mise install protoc
|
||||
- mise install cargo:cargo-nextest
|
||||
- mise exec cargo:cargo-nextest -- cargo nextest run --no-fail-fast
|
||||
- mise exec cargo:cargo-nextest -- cargo nextest run --no-fail-fast --all-features
|
||||
18
.woodpecker/useragent-analyze.yaml
Normal file
18
.woodpecker/useragent-analyze.yaml
Normal file
@@ -0,0 +1,18 @@
|
||||
when:
|
||||
- event: pull_request
|
||||
path:
|
||||
include: ['.woodpecker/useragent-*.yaml', 'useragent/**']
|
||||
- event: push
|
||||
branch: main
|
||||
path:
|
||||
include: ['.woodpecker/useragent-*.yaml', 'useragent/**']
|
||||
|
||||
steps:
|
||||
- name: analyze
|
||||
image: jdxcode/mise:latest
|
||||
commands:
|
||||
- mise install flutter
|
||||
- mise install protoc
|
||||
# Reruns codegen to catch protocol drift
|
||||
- mise codegen
|
||||
- cd useragent/ && flutter analyze
|
||||
128
AGENTS.md
Normal file
128
AGENTS.md
Normal file
@@ -0,0 +1,128 @@
|
||||
# AGENTS.md
|
||||
|
||||
This file provides guidance to Codex and other AI coding agents when working with code in this repository.
|
||||
|
||||
## Project Overview
|
||||
|
||||
Arbiter is a **permissioned signing service** for cryptocurrency wallets. It consists of:
|
||||
- **`server/`** — Rust gRPC daemon that holds encrypted keys and enforces policies
|
||||
- **`useragent/`** — Flutter desktop app (macOS/Windows) with a Rust backend via Rinf
|
||||
- **`protobufs/`** — Protocol Buffer definitions shared between server and client
|
||||
|
||||
The vault never exposes key material; it only produces signatures when requests satisfy configured policies.
|
||||
|
||||
## Toolchain Setup
|
||||
|
||||
Tools are managed via [mise](https://mise.jdx.dev/). Install all required tools:
|
||||
```sh
|
||||
mise install
|
||||
```
|
||||
|
||||
Key versions: Rust 1.93.0 (with clippy), Flutter 3.38.9-stable, protoc 29.6, diesel_cli 2.3.6 (sqlite).
|
||||
|
||||
## Server (Rust workspace at `server/`)
|
||||
|
||||
### Crates
|
||||
|
||||
| Crate | Purpose |
|
||||
|---|---|
|
||||
| `arbiter-proto` | Generated gRPC stubs + protobuf types; compiled from `protobufs/*.proto` via `tonic-prost-build` |
|
||||
| `arbiter-server` | Main daemon — actors, DB, EVM policy engine, gRPC service implementation |
|
||||
| `arbiter-useragent` | Rust client library for the user agent side of the gRPC protocol |
|
||||
| `arbiter-client` | Rust client library for SDK clients |
|
||||
|
||||
### Common Commands
|
||||
|
||||
```sh
|
||||
cd server
|
||||
|
||||
# Build
|
||||
cargo build
|
||||
|
||||
# Run the server daemon
|
||||
cargo run -p arbiter-server
|
||||
|
||||
# Run all tests (preferred over cargo test)
|
||||
cargo nextest run
|
||||
|
||||
# Run a single test
|
||||
cargo nextest run <test_name>
|
||||
|
||||
# Lint
|
||||
cargo clippy
|
||||
|
||||
# Security audit
|
||||
cargo audit
|
||||
|
||||
# Check unused dependencies
|
||||
cargo shear
|
||||
|
||||
# Run snapshot tests and update snapshots
|
||||
cargo insta review
|
||||
```
|
||||
|
||||
### Architecture
|
||||
|
||||
The server is actor-based using the **kameo** crate. All long-lived state lives in `GlobalActors`:
|
||||
|
||||
- **`Bootstrapper`** — Manages the one-time bootstrap token written to `~/.arbiter/bootstrap_token` on first run.
|
||||
- **`KeyHolder`** — Holds the encrypted root key and manages the Sealed/Unsealed vault state machine. On unseal, decrypts the root key into a `memsafe` hardened memory cell.
|
||||
- **`FlowCoordinator`** — Coordinates cross-connection flow between user agents and SDK clients.
|
||||
- **`EvmActor`** — Handles EVM transaction policy enforcement and signing.
|
||||
|
||||
Per-connection actors live under `actors/user_agent/` and `actors/client/`, each with `auth` (challenge-response authentication) and `session` (post-auth operations) sub-modules.
|
||||
|
||||
**Database:** SQLite via `diesel-async` + `bb8` connection pool. Schema managed by embedded Diesel migrations in `crates/arbiter-server/migrations/`. DB file lives at `~/.arbiter/arbiter.sqlite`. Tests use a temp-file DB via `db::create_test_pool()`.
|
||||
|
||||
**Cryptography:**
|
||||
- Authentication: ed25519 (challenge-response, nonce-tracked per peer)
|
||||
- Encryption at rest: XChaCha20-Poly1305 (versioned via `scheme` field for transparent migration on unseal)
|
||||
- Password KDF: Argon2
|
||||
- Unseal transport: X25519 ephemeral key exchange
|
||||
- TLS: self-signed certificate (aws-lc-rs backend), fingerprint distributed via `ArbiterUrl`
|
||||
|
||||
**Protocol:** gRPC with Protocol Buffers. The `ArbiterUrl` type encodes host, port, CA cert, and bootstrap token into a single shareable string (printed to console on first run).
|
||||
|
||||
### Proto Regeneration
|
||||
|
||||
When `.proto` files in `protobufs/` change, rebuild to regenerate:
|
||||
```sh
|
||||
cd server && cargo build -p arbiter-proto
|
||||
```
|
||||
|
||||
### Database Migrations
|
||||
|
||||
```sh
|
||||
# Create a new migration
|
||||
diesel migration generate <name> --migration-dir crates/arbiter-server/migrations
|
||||
|
||||
# Run migrations manually (server also runs them on startup)
|
||||
diesel migration run --migration-dir crates/arbiter-server/migrations
|
||||
```
|
||||
|
||||
## User Agent (Flutter + Rinf at `useragent/`)
|
||||
|
||||
The Flutter app uses [Rinf](https://rinf.cunarist.org) to call Rust code. The Rust logic lives in `useragent/native/hub/` as a separate crate that uses `arbiter-useragent` for the gRPC client.
|
||||
|
||||
Communication between Dart and Rust uses typed **signals** defined in `useragent/native/hub/src/signals/`. After modifying signal structs, regenerate Dart bindings:
|
||||
|
||||
```sh
|
||||
cd useragent && rinf gen
|
||||
```
|
||||
|
||||
### Common Commands
|
||||
|
||||
```sh
|
||||
cd useragent
|
||||
|
||||
# Run the app (macOS or Windows)
|
||||
flutter run
|
||||
|
||||
# Regenerate Rust↔Dart signal bindings
|
||||
rinf gen
|
||||
|
||||
# Analyze Dart code
|
||||
flutter analyze
|
||||
```
|
||||
|
||||
The Rinf Rust entry point is `useragent/native/hub/src/lib.rs`. It spawns actors defined in `useragent/native/hub/src/actors/` which handle Dart↔server communication via signals.
|
||||
203
ARCHITECTURE.md
203
ARCHITECTURE.md
@@ -11,6 +11,7 @@ Arbiter distinguishes two kinds of peers:
|
||||
|
||||
- **User Agent** — A client application used by the owner to manage the vault (create wallets, approve SDK clients, configure policies).
|
||||
- **SDK Client** — A consumer of signing capabilities, typically an automation tool. In the future, this could include a browser-based wallet.
|
||||
- **Recovery Operator** — A dormant recovery participant with narrowly scoped authority used only for custody recovery and operator replacement.
|
||||
|
||||
---
|
||||
|
||||
@@ -42,7 +43,149 @@ There is no bootstrap mechanism for SDK clients. They must be explicitly approve
|
||||
|
||||
---
|
||||
|
||||
## 3. Server Identity
|
||||
## 3. Multi-Operator Governance
|
||||
|
||||
When more than one User Agent is registered, the vault is treated as having multiple operators. In that mode, sensitive actions are governed by voting rather than by a single operator decision.
|
||||
|
||||
### 3.1 Voting Rules
|
||||
|
||||
Voting is based on the total number of registered operators:
|
||||
|
||||
- **1 operator:** no vote is needed; the single operator decides directly.
|
||||
- **2 operators:** full consensus is required; both operators must approve.
|
||||
- **3 or more operators:** quorum is `floor(N / 2) + 1`.
|
||||
|
||||
For a decision to count, the operator's approval or rejection must be signed by that operator's associated key. Unsigned votes, or votes that fail signature verification, are ignored.
|
||||
|
||||
Examples:
|
||||
|
||||
- **3 operators:** 2 approvals required
|
||||
- **4 operators:** 3 approvals required
|
||||
|
||||
### 3.2 Actions Requiring a Vote
|
||||
|
||||
In multi-operator mode, a successful vote is required for:
|
||||
|
||||
- approving new SDK clients
|
||||
- granting an SDK client visibility to a wallet
|
||||
- approving a one-off transaction
|
||||
- approving creation of a persistent grant
|
||||
- approving operator replacement
|
||||
- approving server updates
|
||||
- updating Shamir secret-sharing parameters
|
||||
|
||||
### 3.3 Special Rule for Key Rotation
|
||||
|
||||
Key rotation always requires full quorum, regardless of the normal voting threshold.
|
||||
|
||||
This is stricter than ordinary governance actions because rotating the root key requires every operator to participate in coordinated share refresh/update steps. The root key itself is not redistributed directly, but each operator's share material must be changed consistently.
|
||||
|
||||
### 3.4 Root Key Custody
|
||||
|
||||
When the vault has multiple operators, the vault root key is protected using Shamir secret sharing.
|
||||
|
||||
The vault root key is encrypted in a way that requires reconstruction from user-held shares rather than from a single shared password.
|
||||
|
||||
For ordinary operators, the Shamir threshold matches the ordinary governance quorum. For example:
|
||||
|
||||
- **2 operators:** `2-of-2`
|
||||
- **3 operators:** `2-of-3`
|
||||
- **4 operators:** `3-of-4`
|
||||
|
||||
In practice, the Shamir share set also includes Recovery Operator shares. This means the effective Shamir parameters are computed over the combined share pool while keeping the same threshold. For example:
|
||||
|
||||
- **3 ordinary operators + 2 recovery shares:** `2-of-5`
|
||||
|
||||
This ensures that the normal custody threshold follows the ordinary operator quorum, while still allowing dormant recovery shares to exist for break-glass recovery flows.
|
||||
|
||||
### 3.5 Recovery Operators
|
||||
|
||||
Recovery Operators are a separate peer type from ordinary vault operators.
|
||||
|
||||
Their role is intentionally narrow. They can only:
|
||||
|
||||
- participate in unsealing the vault
|
||||
- vote for operator replacement
|
||||
|
||||
Recovery Operators do not participate in routine governance such as approving SDK clients, granting wallet visibility, approving transactions, creating grants, approving server updates, or changing Shamir parameters.
|
||||
|
||||
### 3.6 Sleeping and Waking Recovery Operators
|
||||
|
||||
By default, Recovery Operators are **sleeping** and do not participate in any active flow.
|
||||
|
||||
Any ordinary operator may request that Recovery Operators **wake up**.
|
||||
|
||||
Any ordinary operator may also cancel a pending wake-up request.
|
||||
|
||||
This creates a dispute window before recovery powers become active. The default wake-up delay is **14 days**.
|
||||
|
||||
Recovery Operators are therefore part of the break-glass recovery path rather than the normal operating quorum.
|
||||
|
||||
The high-level recovery flow is:
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
autonumber
|
||||
actor Op as Ordinary Operator
|
||||
participant Server
|
||||
actor Other as Other Operator
|
||||
actor Rec as Recovery Operator
|
||||
|
||||
Op->>Server: Request recovery wake-up
|
||||
Server-->>Op: Wake-up pending
|
||||
Note over Server: Default dispute window: 14 days
|
||||
|
||||
alt Wake-up cancelled during dispute window
|
||||
Other->>Server: Cancel wake-up
|
||||
Server-->>Op: Recovery cancelled
|
||||
Server-->>Rec: Stay sleeping
|
||||
else No cancellation for 14 days
|
||||
Server-->>Rec: Wake up
|
||||
Rec->>Server: Join recovery flow
|
||||
critical Recovery authority
|
||||
Rec->>Server: Participate in unseal
|
||||
Rec->>Server: Vote on operator replacement
|
||||
end
|
||||
Server-->>Op: Recovery mode active
|
||||
end
|
||||
```
|
||||
|
||||
### 3.7 Committee Formation
|
||||
|
||||
There are two ways to form a multi-operator committee:
|
||||
|
||||
- convert an existing single-operator vault by adding new operators
|
||||
- bootstrap an unbootstrapped vault directly into multi-operator mode
|
||||
|
||||
In both cases, committee formation is a coordinated process. Arbiter does not allow multi-operator custody to emerge implicitly from unrelated registrations.
|
||||
|
||||
### 3.8 Bootstrapping an Unbootstrapped Vault into Multi-Operator Mode
|
||||
|
||||
When an unbootstrapped vault is initialized as a multi-operator vault, the setup proceeds as follows:
|
||||
|
||||
1. An operator connects to the unbootstrapped vault using a User Agent and the bootstrap token.
|
||||
2. During bootstrap setup, that operator declares:
|
||||
- the total number of ordinary operators
|
||||
- the total number of Recovery Operators
|
||||
3. The vault enters **multi-bootstrap mode**.
|
||||
4. While in multi-bootstrap mode:
|
||||
- every ordinary operator must connect with a User Agent using the bootstrap token
|
||||
- every Recovery Operator must also connect using the bootstrap token
|
||||
- each participant is registered individually
|
||||
- each participant's share is created and protected with that participant's credentials
|
||||
5. The vault is considered fully bootstrapped only after all declared operator and recovery-share registrations have completed successfully.
|
||||
|
||||
This means the operator and recovery set is fixed at bootstrap completion time, based on the counts declared when multi-bootstrap mode was entered.
|
||||
|
||||
### 3.9 Special Bootstrap Constraint for Two-Operator Vaults
|
||||
|
||||
If a vault is declared with exactly **2 ordinary operators**, Arbiter requires at least **1 Recovery Operator** to be configured during bootstrap.
|
||||
|
||||
This prevents the worst-case custody failure in which a `2-of-2` operator set becomes permanently unrecoverable after loss of a single operator.
|
||||
|
||||
---
|
||||
|
||||
## 4. Server Identity
|
||||
|
||||
The server proves its identity using TLS with a self-signed certificate. The TLS private key is generated on first run and is long-term; no rotation mechanism exists yet due to the complexity of multi-peer coordination.
|
||||
|
||||
@@ -55,9 +198,9 @@ Peers verify the server by its **public key fingerprint**:
|
||||
|
||||
---
|
||||
|
||||
## 4. Key Management
|
||||
## 5. Key Management
|
||||
|
||||
### 4.1 Key Hierarchy
|
||||
### 5.1 Key Hierarchy
|
||||
|
||||
There are three layers of keys:
|
||||
|
||||
@@ -72,19 +215,19 @@ This layered design enables:
|
||||
- **Password rotation** without re-encrypting every wallet key (only the root key is re-encrypted).
|
||||
- **Root key rotation** without requiring the user to change their password.
|
||||
|
||||
### 4.2 Encryption at Rest
|
||||
### 5.2 Encryption at Rest
|
||||
|
||||
The database stores everything in encrypted form using symmetric AEAD. The encryption scheme is versioned to support transparent migration — when the vault unseals, Arbiter automatically re-encrypts any entries that are behind the current scheme version. See [IMPLEMENTATION.md](IMPLEMENTATION.md) for the specific scheme and versioning mechanism.
|
||||
|
||||
---
|
||||
|
||||
## 5. Vault Lifecycle
|
||||
## 6. Vault Lifecycle
|
||||
|
||||
### 5.1 Sealed State
|
||||
### 6.1 Sealed State
|
||||
|
||||
On boot, the root key is encrypted and the server cannot perform any signing operations. This state is called **Sealed**.
|
||||
|
||||
### 5.2 Unseal Flow
|
||||
### 6.2 Unseal Flow
|
||||
|
||||
To transition to the **Unsealed** state, a User Agent must provide the password:
|
||||
|
||||
@@ -95,7 +238,7 @@ To transition to the **Unsealed** state, a User Agent must provide the password:
|
||||
- **Success:** The root key is decrypted and placed into a hardened memory cell. The server transitions to `Unsealed`. Any entries pending encryption scheme migration are re-encrypted.
|
||||
- **Failure:** The server returns an error indicating the password is incorrect.
|
||||
|
||||
### 5.3 Memory Protection
|
||||
### 6.3 Memory Protection
|
||||
|
||||
Once unsealed, the root key must be protected in memory against:
|
||||
|
||||
@@ -107,9 +250,9 @@ See [IMPLEMENTATION.md](IMPLEMENTATION.md) for the current and planned memory pr
|
||||
|
||||
---
|
||||
|
||||
## 6. Permission Engine
|
||||
## 7. Permission Engine
|
||||
|
||||
### 6.1 Fundamental Rules
|
||||
### 7.1 Fundamental Rules
|
||||
|
||||
- SDK clients have **no access by default**.
|
||||
- Access is granted **explicitly** by a User Agent.
|
||||
@@ -119,11 +262,45 @@ Each blockchain requires its own policy system due to differences in static tran
|
||||
|
||||
Arbiter is also responsible for ensuring that **transaction nonces are never reused**.
|
||||
|
||||
### 6.2 EVM Policies
|
||||
### 7.2 EVM Policies
|
||||
|
||||
Every EVM grant is scoped to a specific **wallet** and **chain ID**.
|
||||
|
||||
#### 6.2.1 Transaction Sub-Grants
|
||||
#### 7.2.0 Transaction Signing Sequence
|
||||
|
||||
The high-level interaction order is:
|
||||
|
||||
```mermaid
|
||||
sequenceDiagram
|
||||
autonumber
|
||||
actor SDK as SDK Client
|
||||
participant Server
|
||||
participant UA as User Agent
|
||||
|
||||
SDK->>Server: SignTransactionRequest
|
||||
Server->>Server: Resolve wallet and wallet visibility
|
||||
alt Visibility approval required
|
||||
Server->>UA: Ask for wallet visibility approval
|
||||
UA-->>Server: Vote result
|
||||
end
|
||||
Server->>Server: Evaluate transaction
|
||||
Server->>Server: Load grant and limits context
|
||||
alt Grant approval required
|
||||
Server->>UA: Ask for execution / grant approval
|
||||
UA-->>Server: Vote result
|
||||
opt Create persistent grant
|
||||
Server->>Server: Create and store grant
|
||||
end
|
||||
Server->>Server: Retry evaluation
|
||||
end
|
||||
critical Final authorization path
|
||||
Server->>Server: Check limits and record execution
|
||||
Server-->>Server: Signature or evaluation error
|
||||
end
|
||||
Server-->>SDK: Signature or error
|
||||
```
|
||||
|
||||
#### 7.2.1 Transaction Sub-Grants
|
||||
|
||||
Arbiter maintains an ever-expanding database of known contracts and their ABIs. Based on contract knowledge, transaction requests fall into three categories:
|
||||
|
||||
@@ -147,7 +324,7 @@ Available restrictions:
|
||||
|
||||
These transactions have no `calldata` and therefore cannot interact with contracts. They can be subject to the same volume and rate restrictions as above.
|
||||
|
||||
#### 6.2.2 Global Limits
|
||||
#### 7.2.2 Global Limits
|
||||
|
||||
In addition to sub-grant-specific restrictions, the following limits can be applied across all grant types:
|
||||
|
||||
|
||||
128
CLAUDE.md
Normal file
128
CLAUDE.md
Normal file
@@ -0,0 +1,128 @@
|
||||
# CLAUDE.md
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
|
||||
## Project Overview
|
||||
|
||||
Arbiter is a **permissioned signing service** for cryptocurrency wallets. It consists of:
|
||||
- **`server/`** — Rust gRPC daemon that holds encrypted keys and enforces policies
|
||||
- **`useragent/`** — Flutter desktop app (macOS/Windows) with a Rust backend via Rinf
|
||||
- **`protobufs/`** — Protocol Buffer definitions shared between server and client
|
||||
|
||||
The vault never exposes key material; it only produces signatures when requests satisfy configured policies.
|
||||
|
||||
## Toolchain Setup
|
||||
|
||||
Tools are managed via [mise](https://mise.jdx.dev/). Install all required tools:
|
||||
```sh
|
||||
mise install
|
||||
```
|
||||
|
||||
Key versions: Rust 1.93.0 (with clippy), Flutter 3.38.9-stable, protoc 29.6, diesel_cli 2.3.6 (sqlite).
|
||||
|
||||
## Server (Rust workspace at `server/`)
|
||||
|
||||
### Crates
|
||||
|
||||
| Crate | Purpose |
|
||||
|---|---|
|
||||
| `arbiter-proto` | Generated gRPC stubs + protobuf types; compiled from `protobufs/*.proto` via `tonic-prost-build` |
|
||||
| `arbiter-server` | Main daemon — actors, DB, EVM policy engine, gRPC service implementation |
|
||||
| `arbiter-useragent` | Rust client library for the user agent side of the gRPC protocol |
|
||||
| `arbiter-client` | Rust client library for SDK clients |
|
||||
|
||||
### Common Commands
|
||||
|
||||
```sh
|
||||
cd server
|
||||
|
||||
# Build
|
||||
cargo build
|
||||
|
||||
# Run the server daemon
|
||||
cargo run -p arbiter-server
|
||||
|
||||
# Run all tests (preferred over cargo test)
|
||||
cargo nextest run
|
||||
|
||||
# Run a single test
|
||||
cargo nextest run <test_name>
|
||||
|
||||
# Lint
|
||||
cargo clippy
|
||||
|
||||
# Security audit
|
||||
cargo audit
|
||||
|
||||
# Check unused dependencies
|
||||
cargo shear
|
||||
|
||||
# Run snapshot tests and update snapshots
|
||||
cargo insta review
|
||||
```
|
||||
|
||||
### Architecture
|
||||
|
||||
The server is actor-based using the **kameo** crate. All long-lived state lives in `GlobalActors`:
|
||||
|
||||
- **`Bootstrapper`** — Manages the one-time bootstrap token written to `~/.arbiter/bootstrap_token` on first run.
|
||||
- **`KeyHolder`** — Holds the encrypted root key and manages the Sealed/Unsealed vault state machine. On unseal, decrypts the root key into a `memsafe` hardened memory cell.
|
||||
- **`FlowCoordinator`** — Coordinates cross-connection flow between user agents and SDK clients.
|
||||
- **`EvmActor`** — Handles EVM transaction policy enforcement and signing.
|
||||
|
||||
Per-connection actors live under `actors/user_agent/` and `actors/client/`, each with `auth` (challenge-response authentication) and `session` (post-auth operations) sub-modules.
|
||||
|
||||
**Database:** SQLite via `diesel-async` + `bb8` connection pool. Schema managed by embedded Diesel migrations in `crates/arbiter-server/migrations/`. DB file lives at `~/.arbiter/arbiter.sqlite`. Tests use a temp-file DB via `db::create_test_pool()`.
|
||||
|
||||
**Cryptography:**
|
||||
- Authentication: ed25519 (challenge-response, nonce-tracked per peer)
|
||||
- Encryption at rest: XChaCha20-Poly1305 (versioned via `scheme` field for transparent migration on unseal)
|
||||
- Password KDF: Argon2
|
||||
- Unseal transport: X25519 ephemeral key exchange
|
||||
- TLS: self-signed certificate (aws-lc-rs backend), fingerprint distributed via `ArbiterUrl`
|
||||
|
||||
**Protocol:** gRPC with Protocol Buffers. The `ArbiterUrl` type encodes host, port, CA cert, and bootstrap token into a single shareable string (printed to console on first run).
|
||||
|
||||
### Proto Regeneration
|
||||
|
||||
When `.proto` files in `protobufs/` change, rebuild to regenerate:
|
||||
```sh
|
||||
cd server && cargo build -p arbiter-proto
|
||||
```
|
||||
|
||||
### Database Migrations
|
||||
|
||||
```sh
|
||||
# Create a new migration
|
||||
diesel migration generate <name> --migration-dir crates/arbiter-server/migrations
|
||||
|
||||
# Run migrations manually (server also runs them on startup)
|
||||
diesel migration run --migration-dir crates/arbiter-server/migrations
|
||||
```
|
||||
|
||||
## User Agent (Flutter + Rinf at `useragent/`)
|
||||
|
||||
The Flutter app uses [Rinf](https://rinf.cunarist.org) to call Rust code. The Rust logic lives in `useragent/native/hub/` as a separate crate that uses `arbiter-useragent` for the gRPC client.
|
||||
|
||||
Communication between Dart and Rust uses typed **signals** defined in `useragent/native/hub/src/signals/`. After modifying signal structs, regenerate Dart bindings:
|
||||
|
||||
```sh
|
||||
cd useragent && rinf gen
|
||||
```
|
||||
|
||||
### Common Commands
|
||||
|
||||
```sh
|
||||
cd useragent
|
||||
|
||||
# Run the app (macOS or Windows)
|
||||
flutter run
|
||||
|
||||
# Regenerate Rust↔Dart signal bindings
|
||||
rinf gen
|
||||
|
||||
# Analyze Dart code
|
||||
flutter analyze
|
||||
```
|
||||
|
||||
The Rinf Rust entry point is `useragent/native/hub/src/lib.rs`. It spawns actors defined in `useragent/native/hub/src/actors/` which handle Dart↔server communication via signals.
|
||||
@@ -4,10 +4,81 @@ This document covers concrete technology choices and dependencies. For the archi
|
||||
|
||||
---
|
||||
|
||||
## Client Connection Flow
|
||||
|
||||
### Authentication Result Semantics
|
||||
|
||||
Authentication no longer uses an implicit success-only response shape. Both `client` and `user-agent` return explicit auth status enums over the wire.
|
||||
|
||||
- **Client:** `AuthResult` may return `SUCCESS`, `INVALID_KEY`, `INVALID_SIGNATURE`, `APPROVAL_DENIED`, `NO_USER_AGENTS_ONLINE`, or `INTERNAL`
|
||||
- **User-agent:** `AuthResult` may return `SUCCESS`, `INVALID_KEY`, `INVALID_SIGNATURE`, `BOOTSTRAP_REQUIRED`, `TOKEN_INVALID`, or `INTERNAL`
|
||||
|
||||
This makes transport-level failures and actor/domain-level auth failures distinct:
|
||||
|
||||
- **Transport/protocol failures** are surfaced as stream/status errors
|
||||
- **Authentication failures** are surfaced as successful protocol responses carrying an explicit auth status
|
||||
|
||||
Clients are expected to handle these status codes directly and present the concrete failure reason to the user.
|
||||
|
||||
### New Client Approval
|
||||
|
||||
When a client whose public key is not yet in the database connects, all connected user agents are asked to approve the connection. The first agent to respond determines the outcome; remaining requests are cancelled via a watch channel.
|
||||
|
||||
```mermaid
|
||||
flowchart TD
|
||||
A([Client connects]) --> B[Receive AuthChallengeRequest]
|
||||
B --> C{pubkey in DB?}
|
||||
|
||||
C -- yes --> D[Read nonce\nIncrement nonce in DB]
|
||||
D --> G
|
||||
|
||||
C -- no --> E[Ask all UserAgents:\nClientConnectionRequest]
|
||||
E --> F{First response}
|
||||
F -- denied --> Z([Reject connection])
|
||||
F -- approved --> F2[Cancel remaining\nUserAgent requests]
|
||||
F2 --> F3[INSERT client\nnonce = 1]
|
||||
F3 --> G[Send AuthChallenge\nwith nonce]
|
||||
|
||||
G --> H[Receive AuthChallengeSolution]
|
||||
H --> I{Signature valid?}
|
||||
I -- no --> Z
|
||||
I -- yes --> J([Session started])
|
||||
```
|
||||
|
||||
### Known Issue: Concurrent Registration Race (TOCTOU)
|
||||
|
||||
Two connections presenting the same previously-unknown public key can race through the approval flow simultaneously:
|
||||
|
||||
1. Both check the DB → neither is registered.
|
||||
2. Both request approval from user agents → both receive approval.
|
||||
3. Both `INSERT` the client record → the second insert silently overwrites the first, resetting the nonce.
|
||||
|
||||
This means the first connection's nonce is invalidated by the second, causing its challenge verification to fail. A fix requires either serialising new-client registration (e.g. an in-memory lock keyed on pubkey) or replacing the separate check + insert with an `INSERT OR IGNORE` / upsert guarded by a unique constraint on `public_key`.
|
||||
|
||||
### Nonce Semantics
|
||||
|
||||
The `program_client.nonce` column stores the **next usable nonce** — i.e. it is always one ahead of the nonce last issued in a challenge.
|
||||
|
||||
- **New client:** inserted with `nonce = 1`; the first challenge is issued with `nonce = 0`.
|
||||
- **Existing client:** the current DB value is read and used as the challenge nonce, then immediately incremented within the same exclusive transaction, preventing replay.
|
||||
|
||||
---
|
||||
|
||||
## Cryptography
|
||||
|
||||
### Authentication
|
||||
- **Signature scheme:** ed25519
|
||||
- **Client protocol:** ed25519
|
||||
|
||||
### User-Agent Authentication
|
||||
|
||||
User-agent authentication supports multiple signature schemes because platform-provided "hardware-bound" keys do not expose a uniform algorithm across operating systems and hardware.
|
||||
|
||||
- **Supported schemes:** RSA, Ed25519, ECDSA (secp256k1)
|
||||
- **Why:** the user agent authenticates with keys backed by platform facilities, and those facilities differ by platform
|
||||
- **Apple Silicon Secure Enclave / Secure Element:** ECDSA-only in practice
|
||||
- **Windows Hello / TPM 2.0:** currently RSA-backed in our integration
|
||||
|
||||
This is why the user-agent auth protocol carries an explicit `KeyType`, while the SDK client protocol remains fixed to ed25519.
|
||||
|
||||
### Encryption at Rest
|
||||
- **Scheme:** Symmetric AEAD — currently **XChaCha20-Poly1305**
|
||||
@@ -22,9 +93,21 @@ This document covers concrete technology choices and dependencies. For the archi
|
||||
## Communication
|
||||
|
||||
- **Protocol:** gRPC with Protocol Buffers
|
||||
- **Request/response matching:** multiplexed over a single bidirectional stream using per-connection request IDs
|
||||
- **Server identity distribution:** `ServerInfo` protobuf struct containing the TLS public key fingerprint
|
||||
- **Future consideration:** grpc-web lacks bidirectional stream support, so a browser-based wallet may require protojson over WebSocket
|
||||
|
||||
### Request Multiplexing
|
||||
|
||||
Both `client` and `user-agent` connections support multiple in-flight requests over one gRPC bidi stream.
|
||||
|
||||
- Every request carries a monotonically increasing request ID
|
||||
- Every normal response echoes the request ID it corresponds to
|
||||
- Out-of-band server messages omit the echoed request ID entirely
|
||||
- The server rejects already-seen request IDs at the transport adapter boundary before business logic sees the message
|
||||
|
||||
This keeps request correlation entirely in transport/client connection code while leaving actor and domain handlers unaware of request IDs.
|
||||
|
||||
---
|
||||
|
||||
## EVM Policy Engine
|
||||
@@ -45,6 +128,52 @@ The central abstraction is the `Policy` trait. Each implementation handles one s
|
||||
4. **Evaluate** — `Policy::evaluate` checks the decoded meaning against the grant's policy-specific constraints and returns any violations.
|
||||
5. **Record** — If `RunKind::Execution` and there are no violations, the engine writes to `evm_transaction_log` and calls `Policy::record_transaction` for any policy-specific logging (e.g., token transfer volume).
|
||||
|
||||
The detailed branch structure is shown below:
|
||||
|
||||
```mermaid
|
||||
flowchart TD
|
||||
A[SDK Client sends sign transaction request] --> B[Server resolves wallet]
|
||||
B --> C{Wallet exists?}
|
||||
|
||||
C -- No --> Z1[Return wallet not found error]
|
||||
C -- Yes --> D[Check SDK client wallet visibility]
|
||||
|
||||
D --> E{Wallet visible to SDK client?}
|
||||
E -- No --> F[Start wallet visibility voting flow]
|
||||
F --> G{Vote approved?}
|
||||
G -- No --> Z2[Return wallet access denied error]
|
||||
G -- Yes --> H[Persist wallet visibility]
|
||||
E -- Yes --> I[Classify transaction meaning]
|
||||
H --> I
|
||||
|
||||
I --> J{Meaning supported?}
|
||||
J -- No --> Z3[Return unsupported transaction error]
|
||||
J -- Yes --> K[Find matching grant]
|
||||
|
||||
K --> L{Grant exists?}
|
||||
L -- Yes --> M[Check grant limits]
|
||||
L -- No --> N[Start execution or grant voting flow]
|
||||
|
||||
N --> O{User-agent decision}
|
||||
O -- Reject --> Z4[Return no matching grant error]
|
||||
O -- Allow once --> M
|
||||
O -- Create grant --> P[Create grant with user-selected limits]
|
||||
P --> Q[Persist grant]
|
||||
Q --> M
|
||||
|
||||
M --> R{Limits exceeded?}
|
||||
R -- Yes --> Z5[Return evaluation error]
|
||||
R -- No --> S[Record transaction in logs]
|
||||
S --> T[Produce signature]
|
||||
T --> U[Return signature to SDK client]
|
||||
|
||||
note1[Limit checks include volume, count, and gas constraints.]
|
||||
note2[Grant lookup depends on classified meaning, such as ether transfer or token transfer.]
|
||||
|
||||
K -. uses .-> note2
|
||||
M -. checks .-> note1
|
||||
```
|
||||
|
||||
### Policy Trait
|
||||
|
||||
| Method | Purpose |
|
||||
@@ -76,7 +205,7 @@ The central abstraction is the `Policy` trait. Each implementation handles one s
|
||||
Every grant has two layers:
|
||||
|
||||
- **Shared (`evm_basic_grant`)** — wallet, chain, validity period, gas fee caps, transaction count rate limit. One row per grant regardless of type.
|
||||
- **Specific** — policy-owned tables (`evm_ether_transfer_grant`, `evm_token_transfer_grant`) holding type-specific configuration.
|
||||
|
||||
`find_all_grants` uses a `#[diesel::auto_type]` base join between the specific and shared tables, then batch-loads related rows (targets, volume limits) in two additional queries to avoid N+1.
|
||||
|
||||
@@ -99,7 +228,6 @@ These are checked centrally in `check_shared_constraints` before policy evaluati
|
||||
- **Only EIP-1559 transactions are supported.** Legacy and EIP-2930 types are rejected outright.
|
||||
- **No opaque-calldata (unknown contract) grant type.** The architecture describes a category for unrecognised contracts, but no policy implements it yet. Any transaction that is not a plain ETH transfer or a known ERC-20 transfer is unconditionally rejected.
|
||||
- **Token registry is static.** Tokens are recognised only if they appear in the hard-coded `arbiter_tokens_registry` crate. There is no mechanism to register additional contracts at runtime.
|
||||
- **Nonce management is not implemented.** The architecture lists nonce deduplication as a core responsibility, but no nonce tracking or enforcement exists yet.
|
||||
|
||||
---
|
||||
|
||||
@@ -107,5 +235,5 @@ These are checked centrally in `check_shared_constraints` before policy evaluati
|
||||
|
||||
The unsealed root key must be held in a hardened memory cell resistant to dumps, page swaps, and hibernation.
|
||||
|
||||
- **Current:** A dedicated memory-protection abstraction is in place, with `memsafe` used behind that abstraction today
- **Planned:** Additional backends can be introduced behind the same abstraction, including a custom implementation based on `mlock` (Unix) and `VirtualProtect` (Windows)
|
||||
|
||||
1308
docs/superpowers/plans/2026-03-28-grant-creation-refactor.md
Normal file
1308
docs/superpowers/plans/2026-03-28-grant-creation-refactor.md
Normal file
File diff suppressed because it is too large
Load Diff
821
docs/superpowers/plans/2026-03-28-grant-grid-view.md
Normal file
821
docs/superpowers/plans/2026-03-28-grant-grid-view.md
Normal file
@@ -0,0 +1,821 @@
|
||||
# Grant Grid View Implementation Plan
|
||||
|
||||
> **For agentic workers:** REQUIRED SUB-SKILL: Use superpowers:subagent-driven-development (recommended) or superpowers:executing-plans to implement this plan task-by-task. Steps use checkbox (`- [ ]`) syntax for tracking.
|
||||
|
||||
**Goal:** Add an "EVM Grants" dashboard tab that displays all grants as enriched cards (type, chain, wallet address, client name) with per-card revoke support.
|
||||
|
||||
**Architecture:** A new `walletAccessListProvider` fetches wallet accesses with their DB row IDs. The screen (`grants.dart`) watches only `evmGrantsProvider` for top-level state. Each `GrantCard` widget (its own file) watches enrichment providers (`walletAccessListProvider`, `evmProvider`, `sdkClientsProvider`) and the revoke mutation directly — keeping rebuilds scoped to the card. The screen is registered as a dashboard tab in `AdaptiveScaffold`.
|
||||
|
||||
**Tech Stack:** Flutter, Riverpod (`riverpod_annotation` + `build_runner` codegen), `sizer` (adaptive sizing), `auto_route`, Protocol Buffers (Dart), `Palette` design tokens.
|
||||
|
||||
---
|
||||
|
||||
## File Map
|
||||
|
||||
| File | Action | Responsibility |
|
||||
|---|---|---|
|
||||
| `useragent/lib/theme/palette.dart` | Modify | Add `Palette.token` (indigo accent for token-transfer cards) |
|
||||
| `useragent/lib/features/connection/evm/wallet_access.dart` | Modify | Add `listAllWalletAccesses()` function |
|
||||
| `useragent/lib/providers/sdk_clients/wallet_access_list.dart` | Create | `WalletAccessListProvider` — fetches full wallet access list with IDs |
|
||||
| `useragent/lib/screens/dashboard/evm/grants/widgets/grant_card.dart` | Create | `GrantCard` widget — watches enrichment providers + revoke mutation; one card per grant |
|
||||
| `useragent/lib/screens/dashboard/evm/grants/grants.dart` | Create | `EvmGrantsScreen` — watches `evmGrantsProvider`; handles loading/error/empty/data states; renders `GrantCard` list |
|
||||
| `useragent/lib/router.dart` | Modify | Register `EvmGrantsRoute` in dashboard children |
|
||||
| `useragent/lib/screens/dashboard.dart` | Modify | Add Grants entry to `routes` list and `NavigationDestination` list |
|
||||
|
||||
---
|
||||
|
||||
## Task 1: Add `Palette.token`
|
||||
|
||||
**Files:**
|
||||
- Modify: `useragent/lib/theme/palette.dart`
|
||||
|
||||
- [ ] **Step 1: Add the color**
|
||||
|
||||
Replace the contents of `useragent/lib/theme/palette.dart` with:
|
||||
|
||||
```dart
|
||||
import 'package:flutter/material.dart';
|
||||
|
||||
class Palette {
|
||||
static const ink = Color(0xFF15263C);
|
||||
static const coral = Color(0xFFE26254);
|
||||
static const cream = Color(0xFFFFFAF4);
|
||||
static const line = Color(0x1A15263C);
|
||||
static const token = Color(0xFF5C6BC0);
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Verify**
|
||||
|
||||
```sh
|
||||
cd useragent && flutter analyze lib/theme/palette.dart
|
||||
```
|
||||
|
||||
Expected: no issues.
|
||||
|
||||
- [ ] **Step 3: Commit**
|
||||
|
||||
```sh
|
||||
jj describe -m "feat(theme): add Palette.token for token-transfer grant cards"
|
||||
jj new
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Task 2: Add `listAllWalletAccesses` feature function
|
||||
|
||||
**Files:**
|
||||
- Modify: `useragent/lib/features/connection/evm/wallet_access.dart`
|
||||
|
||||
`readClientWalletAccess` (existing) filters the list to one client's wallet IDs and returns `Set<int>`. This new function returns the complete unfiltered list with row IDs so the grant cards can resolve wallet_access_id → wallet + client.
|
||||
|
||||
- [ ] **Step 1: Append function**
|
||||
|
||||
Add at the bottom of `useragent/lib/features/connection/evm/wallet_access.dart`:
|
||||
|
||||
```dart
|
||||
Future<List<SdkClientWalletAccess>> listAllWalletAccesses(
|
||||
Connection connection,
|
||||
) async {
|
||||
final response = await connection.ask(
|
||||
UserAgentRequest(listWalletAccess: Empty()),
|
||||
);
|
||||
if (!response.hasListWalletAccessResponse()) {
|
||||
throw Exception(
|
||||
'Expected list wallet access response, got ${response.whichPayload()}',
|
||||
);
|
||||
}
|
||||
return response.listWalletAccessResponse.accesses.toList(growable: false);
|
||||
}
|
||||
```
|
||||
|
||||
Each returned `SdkClientWalletAccess` has:
|
||||
- `.id` — the `evm_wallet_access` row ID (same value as `wallet_access_id` in a `GrantEntry`)
|
||||
- `.access.walletId` — the EVM wallet DB ID
|
||||
- `.access.sdkClientId` — the SDK client DB ID
|
||||
|
||||
- [ ] **Step 2: Verify**
|
||||
|
||||
```sh
|
||||
cd useragent && flutter analyze lib/features/connection/evm/wallet_access.dart
|
||||
```
|
||||
|
||||
Expected: no issues.
|
||||
|
||||
- [ ] **Step 3: Commit**
|
||||
|
||||
```sh
|
||||
jj describe -m "feat(evm): add listAllWalletAccesses feature function"
|
||||
jj new
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Task 3: Create `WalletAccessListProvider`
|
||||
|
||||
**Files:**
|
||||
- Create: `useragent/lib/providers/sdk_clients/wallet_access_list.dart`
|
||||
- Generated: `useragent/lib/providers/sdk_clients/wallet_access_list.g.dart`
|
||||
|
||||
Mirrors the structure of `EvmGrants` in `providers/evm/evm_grants.dart` — class-based `@riverpod` with a `refresh()` method.
|
||||
|
||||
- [ ] **Step 1: Write the provider**
|
||||
|
||||
Create `useragent/lib/providers/sdk_clients/wallet_access_list.dart`:
|
||||
|
||||
```dart
|
||||
import 'package:arbiter/features/connection/evm/wallet_access.dart';
|
||||
import 'package:arbiter/proto/user_agent.pb.dart';
|
||||
import 'package:arbiter/providers/connection/connection_manager.dart';
|
||||
import 'package:mtcore/markettakers.dart';
|
||||
import 'package:riverpod_annotation/riverpod_annotation.dart';
|
||||
|
||||
part 'wallet_access_list.g.dart';
|
||||
|
||||
@riverpod
|
||||
class WalletAccessList extends _$WalletAccessList {
|
||||
@override
|
||||
Future<List<SdkClientWalletAccess>?> build() async {
|
||||
final connection = await ref.watch(connectionManagerProvider.future);
|
||||
if (connection == null) {
|
||||
return null;
|
||||
}
|
||||
|
||||
try {
|
||||
return await listAllWalletAccesses(connection);
|
||||
} catch (e, st) {
|
||||
talker.handle(e, st);
|
||||
rethrow;
|
||||
}
|
||||
}
|
||||
|
||||
Future<void> refresh() async {
|
||||
final connection = await ref.read(connectionManagerProvider.future);
|
||||
if (connection == null) {
|
||||
state = const AsyncData(null);
|
||||
return;
|
||||
}
|
||||
|
||||
state = const AsyncLoading();
|
||||
state = await AsyncValue.guard(() => listAllWalletAccesses(connection));
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Run code generation**
|
||||
|
||||
```sh
|
||||
cd useragent && dart run build_runner build --delete-conflicting-outputs
|
||||
```
|
||||
|
||||
Expected: `useragent/lib/providers/sdk_clients/wallet_access_list.g.dart` created. No errors.
|
||||
|
||||
- [ ] **Step 3: Verify**
|
||||
|
||||
```sh
|
||||
cd useragent && flutter analyze lib/providers/sdk_clients/
|
||||
```
|
||||
|
||||
Expected: no issues.
|
||||
|
||||
- [ ] **Step 4: Commit**
|
||||
|
||||
```sh
|
||||
jj describe -m "feat(providers): add WalletAccessListProvider"
|
||||
jj new
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Task 4: Create `GrantCard` widget
|
||||
|
||||
**Files:**
|
||||
- Create: `useragent/lib/screens/dashboard/evm/grants/widgets/grant_card.dart`
|
||||
|
||||
This widget owns all per-card logic: enrichment lookups, revoke action, and rebuild scope. The screen only passes it a `GrantEntry` — the card fetches everything else itself.
|
||||
|
||||
**Key types:**
|
||||
- `GrantEntry` (from `proto/evm.pb.dart`): `.id`, `.shared.walletAccessId`, `.shared.chainId`, `.specific.whichGrant()`
|
||||
- `SpecificGrant_Grant.etherTransfer` / `.tokenTransfer` — enum values for the oneof
|
||||
- `SdkClientWalletAccess` (from `proto/user_agent.pb.dart`): `.id`, `.access.walletId`, `.access.sdkClientId`
|
||||
- `WalletEntry` (from `proto/evm.pb.dart`): `.id`, `.address` (List<int>)
|
||||
- `SdkClientEntry` (from `proto/user_agent.pb.dart`): `.id`, `.info.name`
|
||||
- `revokeEvmGrantMutation` — `Mutation<void>` (global; all revoke buttons disable together while any revoke is in flight)
|
||||
- `executeRevokeEvmGrant(ref, grantId: int)` — `Future<void>`
|
||||
|
||||
- [ ] **Step 1: Write the widget**
|
||||
|
||||
Create `useragent/lib/screens/dashboard/evm/grants/widgets/grant_card.dart`:
|
||||
|
||||
```dart
|
||||
import 'package:arbiter/proto/evm.pb.dart';
|
||||
import 'package:arbiter/proto/user_agent.pb.dart';
|
||||
import 'package:arbiter/providers/evm/evm.dart';
|
||||
import 'package:arbiter/providers/evm/evm_grants.dart';
|
||||
import 'package:arbiter/providers/sdk_clients/list.dart';
|
||||
import 'package:arbiter/providers/sdk_clients/wallet_access_list.dart';
|
||||
import 'package:arbiter/theme/palette.dart';
|
||||
import 'package:flutter/material.dart';
|
||||
import 'package:hooks_riverpod/experimental/mutation.dart';
|
||||
import 'package:hooks_riverpod/hooks_riverpod.dart';
|
||||
import 'package:sizer/sizer.dart';
|
||||
|
||||
String _shortAddress(List<int> bytes) {
|
||||
final hex = bytes.map((b) => b.toRadixString(16).padLeft(2, '0')).join();
|
||||
return '0x${hex.substring(0, 6)}...${hex.substring(hex.length - 4)}';
|
||||
}
|
||||
|
||||
String _formatError(Object error) {
|
||||
final message = error.toString();
|
||||
if (message.startsWith('Exception: ')) {
|
||||
return message.substring('Exception: '.length);
|
||||
}
|
||||
return message;
|
||||
}
|
||||
|
||||
class GrantCard extends ConsumerWidget {
|
||||
const GrantCard({super.key, required this.grant});
|
||||
|
||||
final GrantEntry grant;
|
||||
|
||||
@override
|
||||
Widget build(BuildContext context, WidgetRef ref) {
|
||||
// Enrichment lookups — each watch scopes rebuilds to this card only
|
||||
final walletAccesses =
|
||||
ref.watch(walletAccessListProvider).asData?.value ?? const [];
|
||||
final wallets = ref.watch(evmProvider).asData?.value ?? const [];
|
||||
final clients = ref.watch(sdkClientsProvider).asData?.value ?? const [];
|
||||
final revoking = ref.watch(revokeEvmGrantMutation) is MutationPending;
|
||||
|
||||
final isEther =
|
||||
grant.specific.whichGrant() == SpecificGrant_Grant.etherTransfer;
|
||||
final accent = isEther ? Palette.coral : Palette.token;
|
||||
final typeLabel = isEther ? 'Ether' : 'Token';
|
||||
final theme = Theme.of(context);
|
||||
final muted = Palette.ink.withValues(alpha: 0.62);
|
||||
|
||||
// Resolve wallet_access_id → wallet address + client name
|
||||
final accessById = <int, SdkClientWalletAccess>{
|
||||
for (final a in walletAccesses) a.id: a,
|
||||
};
|
||||
final walletById = <int, WalletEntry>{
|
||||
for (final w in wallets) w.id: w,
|
||||
};
|
||||
final clientNameById = <int, String>{
|
||||
for (final c in clients) c.id: c.info.name,
|
||||
};
|
||||
|
||||
final accessId = grant.shared.walletAccessId;
|
||||
final access = accessById[accessId];
|
||||
final wallet = access != null ? walletById[access.access.walletId] : null;
|
||||
|
||||
final walletLabel = wallet != null
|
||||
? _shortAddress(wallet.address)
|
||||
: 'Access #$accessId';
|
||||
|
||||
final clientLabel = () {
|
||||
if (access == null) return '';
|
||||
final name = clientNameById[access.access.sdkClientId] ?? '';
|
||||
return name.isEmpty ? 'Client #${access.access.sdkClientId}' : name;
|
||||
}();
|
||||
|
||||
void showError(String message) {
|
||||
if (!context.mounted) return;
|
||||
ScaffoldMessenger.of(context).showSnackBar(
|
||||
SnackBar(content: Text(message), behavior: SnackBarBehavior.floating),
|
||||
);
|
||||
}
|
||||
|
||||
Future<void> revoke() async {
|
||||
try {
|
||||
await executeRevokeEvmGrant(ref, grantId: grant.id);
|
||||
} catch (e) {
|
||||
showError(_formatError(e));
|
||||
}
|
||||
}
|
||||
|
||||
return Container(
|
||||
decoration: BoxDecoration(
|
||||
borderRadius: BorderRadius.circular(24),
|
||||
color: Palette.cream.withValues(alpha: 0.92),
|
||||
border: Border.all(color: Palette.line),
|
||||
),
|
||||
child: IntrinsicHeight(
|
||||
child: Row(
|
||||
crossAxisAlignment: CrossAxisAlignment.stretch,
|
||||
children: [
|
||||
// Accent strip
|
||||
Container(
|
||||
width: 0.8.w,
|
||||
decoration: BoxDecoration(
|
||||
color: accent,
|
||||
borderRadius: const BorderRadius.horizontal(
|
||||
left: Radius.circular(24),
|
||||
),
|
||||
),
|
||||
),
|
||||
// Card body
|
||||
Expanded(
|
||||
child: Padding(
|
||||
padding: EdgeInsets.symmetric(
|
||||
horizontal: 1.6.w,
|
||||
vertical: 1.4.h,
|
||||
),
|
||||
child: Column(
|
||||
crossAxisAlignment: CrossAxisAlignment.start,
|
||||
children: [
|
||||
// Row 1: type badge · chain · spacer · revoke button
|
||||
Row(
|
||||
children: [
|
||||
Container(
|
||||
padding: EdgeInsets.symmetric(
|
||||
horizontal: 1.w,
|
||||
vertical: 0.4.h,
|
||||
),
|
||||
decoration: BoxDecoration(
|
||||
color: accent.withValues(alpha: 0.15),
|
||||
borderRadius: BorderRadius.circular(8),
|
||||
),
|
||||
child: Text(
|
||||
typeLabel,
|
||||
style: theme.textTheme.labelSmall?.copyWith(
|
||||
color: accent,
|
||||
fontWeight: FontWeight.w800,
|
||||
),
|
||||
),
|
||||
),
|
||||
SizedBox(width: 1.w),
|
||||
Container(
|
||||
padding: EdgeInsets.symmetric(
|
||||
horizontal: 1.w,
|
||||
vertical: 0.4.h,
|
||||
),
|
||||
decoration: BoxDecoration(
|
||||
color: Palette.ink.withValues(alpha: 0.06),
|
||||
borderRadius: BorderRadius.circular(8),
|
||||
),
|
||||
child: Text(
|
||||
'Chain ${grant.shared.chainId}',
|
||||
style: theme.textTheme.labelSmall?.copyWith(
|
||||
color: muted,
|
||||
fontWeight: FontWeight.w700,
|
||||
),
|
||||
),
|
||||
),
|
||||
const Spacer(),
|
||||
if (revoking)
|
||||
SizedBox(
|
||||
width: 1.8.h,
|
||||
height: 1.8.h,
|
||||
child: CircularProgressIndicator(
|
||||
strokeWidth: 2,
|
||||
color: Palette.coral,
|
||||
),
|
||||
)
|
||||
else
|
||||
OutlinedButton.icon(
|
||||
onPressed: revoke,
|
||||
style: OutlinedButton.styleFrom(
|
||||
foregroundColor: Palette.coral,
|
||||
side: BorderSide(
|
||||
color: Palette.coral.withValues(alpha: 0.4),
|
||||
),
|
||||
padding: EdgeInsets.symmetric(
|
||||
horizontal: 1.w,
|
||||
vertical: 0.6.h,
|
||||
),
|
||||
shape: RoundedRectangleBorder(
|
||||
borderRadius: BorderRadius.circular(10),
|
||||
),
|
||||
),
|
||||
icon: const Icon(Icons.block_rounded, size: 16),
|
||||
label: const Text('Revoke'),
|
||||
),
|
||||
],
|
||||
),
|
||||
SizedBox(height: 0.8.h),
|
||||
// Row 2: wallet address · client name
|
||||
Row(
|
||||
children: [
|
||||
Text(
|
||||
walletLabel,
|
||||
style: theme.textTheme.bodySmall?.copyWith(
|
||||
color: Palette.ink,
|
||||
fontFamily: 'monospace',
|
||||
),
|
||||
),
|
||||
Padding(
|
||||
padding: EdgeInsets.symmetric(horizontal: 0.8.w),
|
||||
child: Text(
|
||||
'·',
|
||||
style: theme.textTheme.bodySmall
|
||||
?.copyWith(color: muted),
|
||||
),
|
||||
),
|
||||
Expanded(
|
||||
child: Text(
|
||||
clientLabel,
|
||||
maxLines: 1,
|
||||
overflow: TextOverflow.ellipsis,
|
||||
style: theme.textTheme.bodySmall
|
||||
?.copyWith(color: muted),
|
||||
),
|
||||
),
|
||||
],
|
||||
),
|
||||
],
|
||||
),
|
||||
),
|
||||
),
|
||||
],
|
||||
),
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Verify**
|
||||
|
||||
```sh
|
||||
cd useragent && flutter analyze lib/screens/dashboard/evm/grants/widgets/grant_card.dart
|
||||
```
|
||||
|
||||
Expected: no issues.
|
||||
|
||||
- [ ] **Step 3: Commit**
|
||||
|
||||
```sh
|
||||
jj describe -m "feat(grants): add GrantCard widget with self-contained enrichment"
|
||||
jj new
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Task 5: Create `EvmGrantsScreen`
|
||||
|
||||
**Files:**
|
||||
- Create: `useragent/lib/screens/dashboard/evm/grants/grants.dart`
|
||||
|
||||
The screen watches only `evmGrantsProvider` for top-level state (loading / error / no connection / empty / data). When there is data it renders a list of `GrantCard` widgets — each card manages its own enrichment subscriptions.
|
||||
|
||||
- [ ] **Step 1: Write the screen**
|
||||
|
||||
Create `useragent/lib/screens/dashboard/evm/grants/grants.dart`:
|
||||
|
||||
```dart
|
||||
import 'package:arbiter/proto/evm.pb.dart';
|
||||
import 'package:arbiter/providers/evm/evm_grants.dart';
|
||||
import 'package:arbiter/providers/sdk_clients/wallet_access_list.dart';
|
||||
import 'package:arbiter/router.gr.dart';
|
||||
import 'package:arbiter/screens/dashboard/evm/grants/widgets/grant_card.dart';
|
||||
import 'package:arbiter/theme/palette.dart';
|
||||
import 'package:arbiter/widgets/page_header.dart';
|
||||
import 'package:auto_route/auto_route.dart';
|
||||
import 'package:flutter/material.dart';
|
||||
import 'package:hooks_riverpod/hooks_riverpod.dart';
|
||||
import 'package:sizer/sizer.dart';
|
||||
|
||||
String _formatError(Object error) {
|
||||
final message = error.toString();
|
||||
if (message.startsWith('Exception: ')) {
|
||||
return message.substring('Exception: '.length);
|
||||
}
|
||||
return message;
|
||||
}
|
||||
|
||||
// ─── State panel ──────────────────────────────────────────────────────────────
|
||||
|
||||
class _StatePanel extends StatelessWidget {
|
||||
const _StatePanel({
|
||||
required this.icon,
|
||||
required this.title,
|
||||
required this.body,
|
||||
this.actionLabel,
|
||||
this.onAction,
|
||||
this.busy = false,
|
||||
});
|
||||
|
||||
final IconData icon;
|
||||
final String title;
|
||||
final String body;
|
||||
final String? actionLabel;
|
||||
final Future<void> Function()? onAction;
|
||||
final bool busy;
|
||||
|
||||
@override
|
||||
Widget build(BuildContext context) {
|
||||
final theme = Theme.of(context);
|
||||
|
||||
return Container(
|
||||
decoration: BoxDecoration(
|
||||
borderRadius: BorderRadius.circular(24),
|
||||
color: Palette.cream.withValues(alpha: 0.92),
|
||||
border: Border.all(color: Palette.line),
|
||||
),
|
||||
child: Padding(
|
||||
padding: EdgeInsets.all(2.8.h),
|
||||
child: Column(
|
||||
crossAxisAlignment: CrossAxisAlignment.start,
|
||||
children: [
|
||||
if (busy)
|
||||
SizedBox(
|
||||
width: 2.8.h,
|
||||
height: 2.8.h,
|
||||
child: const CircularProgressIndicator(strokeWidth: 2.5),
|
||||
)
|
||||
else
|
||||
Icon(icon, size: 34, color: Palette.coral),
|
||||
SizedBox(height: 1.8.h),
|
||||
Text(
|
||||
title,
|
||||
style: theme.textTheme.headlineSmall?.copyWith(
|
||||
color: Palette.ink,
|
||||
fontWeight: FontWeight.w800,
|
||||
),
|
||||
),
|
||||
SizedBox(height: 1.h),
|
||||
Text(
|
||||
body,
|
||||
style: theme.textTheme.bodyLarge?.copyWith(
|
||||
color: Palette.ink.withValues(alpha: 0.72),
|
||||
height: 1.5,
|
||||
),
|
||||
),
|
||||
if (actionLabel != null && onAction != null) ...[
|
||||
SizedBox(height: 2.h),
|
||||
OutlinedButton.icon(
|
||||
onPressed: () => onAction!(),
|
||||
icon: const Icon(Icons.refresh),
|
||||
label: Text(actionLabel!),
|
||||
),
|
||||
],
|
||||
],
|
||||
),
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// ─── Grant list ───────────────────────────────────────────────────────────────
|
||||
|
||||
class _GrantList extends StatelessWidget {
|
||||
const _GrantList({required this.grants});
|
||||
|
||||
final List<GrantEntry> grants;
|
||||
|
||||
@override
|
||||
Widget build(BuildContext context) {
|
||||
return Column(
|
||||
children: [
|
||||
for (var i = 0; i < grants.length; i++)
|
||||
Padding(
|
||||
padding: EdgeInsets.only(
|
||||
bottom: i == grants.length - 1 ? 0 : 1.8.h,
|
||||
),
|
||||
child: GrantCard(grant: grants[i]),
|
||||
),
|
||||
],
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// ─── Screen ───────────────────────────────────────────────────────────────────
|
||||
|
||||
@RoutePage()
|
||||
class EvmGrantsScreen extends ConsumerWidget {
|
||||
const EvmGrantsScreen({super.key});
|
||||
|
||||
@override
|
||||
Widget build(BuildContext context, WidgetRef ref) {
|
||||
// Screen watches only the grant list for top-level state decisions
|
||||
final grantsAsync = ref.watch(evmGrantsProvider);
|
||||
|
||||
Future<void> refresh() async {
|
||||
await Future.wait([
|
||||
ref.read(evmGrantsProvider.notifier).refresh(),
|
||||
ref.read(walletAccessListProvider.notifier).refresh(),
|
||||
]);
|
||||
}
|
||||
|
||||
void showMessage(String message) {
|
||||
if (!context.mounted) return;
|
||||
ScaffoldMessenger.of(context).showSnackBar(
|
||||
SnackBar(content: Text(message), behavior: SnackBarBehavior.floating),
|
||||
);
|
||||
}
|
||||
|
||||
Future<void> safeRefresh() async {
|
||||
try {
|
||||
await refresh();
|
||||
} catch (e) {
|
||||
showMessage(_formatError(e));
|
||||
}
|
||||
}
|
||||
|
||||
final grantsState = grantsAsync.asData?.value;
|
||||
final grants = grantsState?.grants;
|
||||
|
||||
final content = switch (grantsAsync) {
|
||||
AsyncLoading() when grantsState == null => const _StatePanel(
|
||||
icon: Icons.hourglass_top,
|
||||
title: 'Loading grants',
|
||||
body: 'Pulling grant registry from Arbiter.',
|
||||
busy: true,
|
||||
),
|
||||
AsyncError(:final error) => _StatePanel(
|
||||
icon: Icons.sync_problem,
|
||||
title: 'Grant registry unavailable',
|
||||
body: _formatError(error),
|
||||
actionLabel: 'Retry',
|
||||
onAction: safeRefresh,
|
||||
),
|
||||
AsyncData(:final value) when value == null => _StatePanel(
|
||||
icon: Icons.portable_wifi_off,
|
||||
title: 'No active server connection',
|
||||
body: 'Reconnect to Arbiter to list EVM grants.',
|
||||
actionLabel: 'Refresh',
|
||||
onAction: safeRefresh,
|
||||
),
|
||||
_ when grants != null && grants.isEmpty => _StatePanel(
|
||||
icon: Icons.policy_outlined,
|
||||
title: 'No grants yet',
|
||||
body: 'Create a grant to allow SDK clients to sign transactions.',
|
||||
actionLabel: 'Create grant',
|
||||
onAction: () => context.router.push(const CreateEvmGrantRoute()),
|
||||
),
|
||||
_ => _GrantList(grants: grants ?? const []),
|
||||
};
|
||||
|
||||
return Scaffold(
|
||||
body: SafeArea(
|
||||
child: RefreshIndicator.adaptive(
|
||||
color: Palette.ink,
|
||||
backgroundColor: Colors.white,
|
||||
onRefresh: safeRefresh,
|
||||
child: ListView(
|
||||
physics: const BouncingScrollPhysics(
|
||||
parent: AlwaysScrollableScrollPhysics(),
|
||||
),
|
||||
padding: EdgeInsets.fromLTRB(2.4.w, 2.4.h, 2.4.w, 3.2.h),
|
||||
children: [
|
||||
PageHeader(
|
||||
title: 'EVM Grants',
|
||||
isBusy: grantsAsync.isLoading,
|
||||
actions: [
|
||||
FilledButton.icon(
|
||||
onPressed: () =>
|
||||
context.router.push(const CreateEvmGrantRoute()),
|
||||
icon: const Icon(Icons.add_rounded),
|
||||
label: const Text('Create grant'),
|
||||
),
|
||||
SizedBox(width: 1.w),
|
||||
OutlinedButton.icon(
|
||||
onPressed: safeRefresh,
|
||||
style: OutlinedButton.styleFrom(
|
||||
foregroundColor: Palette.ink,
|
||||
side: BorderSide(color: Palette.line),
|
||||
padding: EdgeInsets.symmetric(
|
||||
horizontal: 1.4.w,
|
||||
vertical: 1.2.h,
|
||||
),
|
||||
shape: RoundedRectangleBorder(
|
||||
borderRadius: BorderRadius.circular(14),
|
||||
),
|
||||
),
|
||||
icon: const Icon(Icons.refresh, size: 18),
|
||||
label: const Text('Refresh'),
|
||||
),
|
||||
],
|
||||
),
|
||||
SizedBox(height: 1.8.h),
|
||||
content,
|
||||
],
|
||||
),
|
||||
),
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Verify**
|
||||
|
||||
```sh
|
||||
cd useragent && flutter analyze lib/screens/dashboard/evm/grants/
|
||||
```
|
||||
|
||||
Expected: no issues.
|
||||
|
||||
- [ ] **Step 3: Commit**
|
||||
|
||||
```sh
|
||||
jj describe -m "feat(grants): add EvmGrantsScreen"
|
||||
jj new
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Task 6: Wire router and dashboard tab
|
||||
|
||||
**Files:**
|
||||
- Modify: `useragent/lib/router.dart`
|
||||
- Modify: `useragent/lib/screens/dashboard.dart`
|
||||
- Regenerated: `useragent/lib/router.gr.dart`
|
||||
|
||||
- [ ] **Step 1: Add route to `router.dart`**
|
||||
|
||||
Replace the contents of `useragent/lib/router.dart` with:
|
||||
|
||||
```dart
|
||||
import 'package:auto_route/auto_route.dart';
|
||||
|
||||
import 'router.gr.dart';
|
||||
|
||||
@AutoRouterConfig(generateForDir: ['lib/screens'])
|
||||
class Router extends RootStackRouter {
|
||||
@override
|
||||
List<AutoRoute> get routes => [
|
||||
AutoRoute(page: Bootstrap.page, path: '/bootstrap', initial: true),
|
||||
AutoRoute(page: ServerInfoSetupRoute.page, path: '/server-info'),
|
||||
AutoRoute(page: ServerConnectionRoute.page, path: '/server-connection'),
|
||||
AutoRoute(page: VaultSetupRoute.page, path: '/vault'),
|
||||
AutoRoute(page: ClientDetailsRoute.page, path: '/clients/:clientId'),
|
||||
AutoRoute(page: CreateEvmGrantRoute.page, path: '/evm-grants/create'),
|
||||
|
||||
AutoRoute(
|
||||
page: DashboardRouter.page,
|
||||
path: '/dashboard',
|
||||
children: [
|
||||
AutoRoute(page: EvmRoute.page, path: 'evm'),
|
||||
AutoRoute(page: ClientsRoute.page, path: 'clients'),
|
||||
AutoRoute(page: EvmGrantsRoute.page, path: 'grants'),
|
||||
AutoRoute(page: AboutRoute.page, path: 'about'),
|
||||
],
|
||||
),
|
||||
];
|
||||
}
|
||||
```
|
||||
|
||||
- [ ] **Step 2: Update `dashboard.dart`**
|
||||
|
||||
In `useragent/lib/screens/dashboard.dart`, replace the `routes` constant:
|
||||
|
||||
```dart
|
||||
final routes = [
|
||||
const EvmRoute(),
|
||||
const ClientsRoute(),
|
||||
const EvmGrantsRoute(),
|
||||
const AboutRoute(),
|
||||
];
|
||||
```
|
||||
|
||||
And replace the `destinations` list inside `AdaptiveScaffold`:
|
||||
|
||||
```dart
|
||||
destinations: const [
|
||||
NavigationDestination(
|
||||
icon: Icon(Icons.account_balance_wallet_outlined),
|
||||
selectedIcon: Icon(Icons.account_balance_wallet),
|
||||
label: 'Wallets',
|
||||
),
|
||||
NavigationDestination(
|
||||
icon: Icon(Icons.devices_other_outlined),
|
||||
selectedIcon: Icon(Icons.devices_other),
|
||||
label: 'Clients',
|
||||
),
|
||||
NavigationDestination(
|
||||
icon: Icon(Icons.policy_outlined),
|
||||
selectedIcon: Icon(Icons.policy),
|
||||
label: 'Grants',
|
||||
),
|
||||
NavigationDestination(
|
||||
icon: Icon(Icons.info_outline),
|
||||
selectedIcon: Icon(Icons.info),
|
||||
label: 'About',
|
||||
),
|
||||
],
|
||||
```
|
||||
|
||||
- [ ] **Step 3: Regenerate router**
|
||||
|
||||
```sh
|
||||
cd useragent && dart run build_runner build --delete-conflicting-outputs
|
||||
```
|
||||
|
||||
Expected: `lib/router.gr.dart` updated, `EvmGrantsRoute` now available, no errors.
|
||||
|
||||
- [ ] **Step 4: Full project verify**
|
||||
|
||||
```sh
|
||||
cd useragent && flutter analyze
|
||||
```
|
||||
|
||||
Expected: no issues.
|
||||
|
||||
- [ ] **Step 5: Commit**
|
||||
|
||||
```sh
|
||||
jj describe -m "feat(nav): add Grants dashboard tab"
|
||||
jj new
|
||||
```
|
||||
170
docs/superpowers/specs/2026-03-28-grant-grid-view-design.md
Normal file
170
docs/superpowers/specs/2026-03-28-grant-grid-view-design.md
Normal file
@@ -0,0 +1,170 @@
|
||||
# Grant Grid View — Design Spec
|
||||
|
||||
**Date:** 2026-03-28
|
||||
|
||||
## Overview
|
||||
|
||||
Add a "Grants" dashboard tab to the Flutter user-agent app that displays all EVM grants as a card-based grid. Each card shows a compact summary (type, chain, wallet address, client name) with a revoke action. The tab integrates into the existing `AdaptiveScaffold` navigation alongside Wallets, Clients, and About.
|
||||
|
||||
## Scope
|
||||
|
||||
- New `walletAccessListProvider` for fetching wallet access entries with their DB row IDs
|
||||
- New `EvmGrantsScreen` as a dashboard tab
|
||||
- Grant card widget with enriched display (type, chain, wallet, client)
|
||||
- Revoke action wired to existing `executeRevokeEvmGrant` mutation
|
||||
- Dashboard tab bar and router updated
|
||||
- New token-transfer accent color added to `Palette`
|
||||
|
||||
**Out of scope:** Fixing grant creation (separate task).
|
||||
|
||||
---
|
||||
|
||||
## Data Layer
|
||||
|
||||
### `walletAccessListProvider`
|
||||
|
||||
**File:** `useragent/lib/providers/sdk_clients/wallet_access_list.dart`
|
||||
|
||||
- `@riverpod` class, watches `connectionManagerProvider.future`
|
||||
- Returns `List<SdkClientWalletAccess>?` (null when not connected)
|
||||
- Each entry: `.id` (wallet_access_id), `.access.walletId`, `.access.sdkClientId`
|
||||
- Exposes a `refresh()` method following the same pattern as `EvmGrants.refresh()`
|
||||
|
||||
### Enrichment at render time (Approach A)
|
||||
|
||||
The `EvmGrantsScreen` watches four providers:
|
||||
1. `evmGrantsProvider` — the grant list
|
||||
2. `walletAccessListProvider` — to resolve wallet_access_id → (wallet_id, sdk_client_id)
|
||||
3. `evmProvider` — to resolve wallet_id → wallet address
|
||||
4. `sdkClientsProvider` — to resolve sdk_client_id → client name
|
||||
|
||||
All lookups are in-memory Maps built inside the build method; no extra model class needed.
|
||||
|
||||
Fallbacks:
|
||||
- Wallet address not found → `"Access #N"` where N is the wallet_access_id
|
||||
- Client name not found → `"Client #N"` where N is the sdk_client_id
|
||||
|
||||
---
|
||||
|
||||
## Route Structure
|
||||
|
||||
```
|
||||
/dashboard
|
||||
/evm ← existing (Wallets tab)
|
||||
/clients ← existing (Clients tab)
|
||||
/grants ← NEW (Grants tab)
|
||||
/about ← existing
|
||||
|
||||
/evm-grants/create ← existing push route (unchanged)
|
||||
```
|
||||
|
||||
### Changes to `router.dart`
|
||||
|
||||
Add inside dashboard children:
|
||||
```dart
|
||||
AutoRoute(page: EvmGrantsRoute.page, path: 'grants'),
|
||||
```
|
||||
|
||||
### Changes to `dashboard.dart`
|
||||
|
||||
Add to `routes` list:
|
||||
```dart
|
||||
const EvmGrantsRoute()
|
||||
```
|
||||
|
||||
Add `NavigationDestination`:
|
||||
```dart
|
||||
NavigationDestination(
|
||||
icon: Icon(Icons.policy_outlined),
|
||||
selectedIcon: Icon(Icons.policy),
|
||||
label: 'Grants',
|
||||
),
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Screen: `EvmGrantsScreen`
|
||||
|
||||
**File:** `useragent/lib/screens/dashboard/evm/grants/grants.dart`
|
||||
|
||||
```
|
||||
Scaffold
|
||||
└─ SafeArea
|
||||
└─ RefreshIndicator.adaptive (refreshes evmGrantsProvider + walletAccessListProvider)
|
||||
└─ ListView (BouncingScrollPhysics + AlwaysScrollableScrollPhysics)
|
||||
├─ PageHeader
|
||||
│ title: 'EVM Grants'
|
||||
│ isBusy: evmGrantsProvider.isLoading
|
||||
│ actions: [CreateGrantButton, RefreshButton]
|
||||
├─ SizedBox(height: 1.8.h)
|
||||
└─ <content>
|
||||
```
|
||||
|
||||
### State handling
|
||||
|
||||
Matches the pattern from `EvmScreen` and `ClientsScreen`:
|
||||
|
||||
| State | Display |
|
||||
|---|---|
|
||||
| Loading (no data yet) | `_StatePanel` with spinner, "Loading grants" |
|
||||
| Error | `_StatePanel` with coral icon, error message, Retry button |
|
||||
| No connection | `_StatePanel`, "No active server connection" |
|
||||
| Empty list | `_StatePanel`, "No grants yet", with Create Grant shortcut |
|
||||
| Data | Column of `_GrantCard` widgets |
|
||||
|
||||
### Header actions
|
||||
|
||||
**CreateGrantButton:** `FilledButton.icon` with `Icons.add_rounded`, pushes `CreateEvmGrantRoute()` via `context.router.push(...)`.
|
||||
|
||||
**RefreshButton:** `OutlinedButton.icon` with `Icons.refresh`, calls `ref.read(evmGrantsProvider.notifier).refresh()`.
|
||||
|
||||
---
|
||||
|
||||
## Grant Card: `_GrantCard`
|
||||
|
||||
**Layout:**
|
||||
|
||||
```
|
||||
Container (rounded 24, Palette.cream bg, Palette.line border)
|
||||
└─ IntrinsicHeight > Row
|
||||
├─ Accent strip (0.8.w wide, full height, rounded left)
|
||||
└─ Padding > Column
|
||||
├─ Row 1: TypeBadge + ChainChip + Spacer + RevokeButton
|
||||
└─ Row 2: WalletText + "·" + ClientText
|
||||
```
|
||||
|
||||
**Accent color by grant type:**
|
||||
- Ether transfer → `Palette.coral`
|
||||
- Token transfer → `Palette.token` (new entry in `Palette` — indigo, e.g. `Color(0xFF5C6BC0)`)
|
||||
|
||||
**TypeBadge:** Small pill container with accent color background at 15% opacity, accent-colored text. Label: `'Ether'` or `'Token'`.
|
||||
|
||||
**ChainChip:** Small container: `'Chain ${grant.shared.chainId}'`, muted ink color.
|
||||
|
||||
**WalletText:** Short hex address (`0xabc...def`) from wallet lookup, `bodySmall`, monospace font family.
|
||||
|
||||
**ClientText:** Client name from `sdkClientsProvider` lookup, or fallback string. `bodySmall`, muted ink.
|
||||
|
||||
**RevokeButton:**
|
||||
- `OutlinedButton` with `Icons.block_rounded` icon, label `'Revoke'`
|
||||
- `foregroundColor: Palette.coral`, `side: BorderSide(color: Palette.coral.withValues(alpha: 0.4))`
|
||||
- Disabled (replaced with `CircularProgressIndicator`) while `revokeEvmGrantMutation` is pending — note: this is a single global mutation, so all revoke buttons disable while any revoke is in flight
|
||||
- On press: calls `executeRevokeEvmGrant(ref, grantId: grant.id)`; shows `SnackBar` on error
|
||||
|
||||
---
|
||||
|
||||
## Adaptive Sizing
|
||||
|
||||
All sizing uses `sizer` units (`1.h`, `1.w`, etc.). No hardcoded pixel values.
|
||||
|
||||
---
|
||||
|
||||
## Files to Create / Modify
|
||||
|
||||
| File | Action |
|
||||
|---|---|
|
||||
| `lib/theme/palette.dart` | Modify — add `Palette.token` color |
|
||||
| `lib/providers/sdk_clients/wallet_access_list.dart` | Create |
|
||||
| `lib/screens/dashboard/evm/grants/grants.dart` | Create |
|
||||
| `lib/router.dart` | Modify — add grants route to dashboard children |
|
||||
| `lib/screens/dashboard.dart` | Modify — add tab to routes list and NavigationDestinations |
|
||||
127
mise.lock
127
mise.lock
@@ -1,10 +1,44 @@
|
||||
# @generated - this file is auto-generated by `mise lock` https://mise.jdx.dev/dev-tools/mise-lock.html
|
||||
|
||||
[[tools.ast-grep]]
|
||||
version = "0.42.0"
|
||||
backend = "aqua:ast-grep/ast-grep"
|
||||
|
||||
[tools.ast-grep."platforms.linux-arm64"]
|
||||
checksum = "sha256:5c830eae8456569e2f7212434ed9c238f58dca412d76045418ed6d394a755836"
|
||||
url = "https://github.com/ast-grep/ast-grep/releases/download/0.42.0/app-aarch64-unknown-linux-gnu.zip"
|
||||
|
||||
[tools.ast-grep."platforms.linux-arm64-musl"]
|
||||
checksum = "sha256:5c830eae8456569e2f7212434ed9c238f58dca412d76045418ed6d394a755836"
|
||||
url = "https://github.com/ast-grep/ast-grep/releases/download/0.42.0/app-aarch64-unknown-linux-gnu.zip"
|
||||
|
||||
[tools.ast-grep."platforms.linux-x64"]
|
||||
checksum = "sha256:e825a05603f0bcc4cd9076c4cc8c9abd6d008b7cd07d9aa3cc323ba4b8606651"
|
||||
url = "https://github.com/ast-grep/ast-grep/releases/download/0.42.0/app-x86_64-unknown-linux-gnu.zip"
|
||||
|
||||
[tools.ast-grep."platforms.linux-x64-musl"]
|
||||
checksum = "sha256:e825a05603f0bcc4cd9076c4cc8c9abd6d008b7cd07d9aa3cc323ba4b8606651"
|
||||
url = "https://github.com/ast-grep/ast-grep/releases/download/0.42.0/app-x86_64-unknown-linux-gnu.zip"
|
||||
|
||||
[tools.ast-grep."platforms.macos-arm64"]
|
||||
checksum = "sha256:fc300d5293b1c770a5aece03a8a193b92e71e87cec726c28096990691a582620"
|
||||
url = "https://github.com/ast-grep/ast-grep/releases/download/0.42.0/app-aarch64-apple-darwin.zip"
|
||||
|
||||
[tools.ast-grep."platforms.macos-x64"]
|
||||
checksum = "sha256:979ffe611327056f4730a1ae71b0209b3b830f58b22c6ed194cda34f55400db2"
|
||||
url = "https://github.com/ast-grep/ast-grep/releases/download/0.42.0/app-x86_64-apple-darwin.zip"
|
||||
|
||||
[tools.ast-grep."platforms.windows-x64"]
|
||||
checksum = "sha256:55836fa1b2c65dc7d61615a4d9368622a0d2371a76d28b9a165e5a3ab6ae32a4"
|
||||
url = "https://github.com/ast-grep/ast-grep/releases/download/0.42.0/app-x86_64-pc-windows-msvc.zip"
|
||||
|
||||
[[tools."cargo:cargo-audit"]]
|
||||
version = "0.22.1"
|
||||
backend = "cargo:cargo-audit"
|
||||
|
||||
[[tools."cargo:cargo-features"]]
|
||||
version = "1.0.0"
|
||||
backend = "cargo:cargo-features"
|
||||
[[tools."cargo:cargo-edit"]]
|
||||
version = "0.13.9"
|
||||
backend = "cargo:cargo-edit"
|
||||
|
||||
[[tools."cargo:cargo-features-manager"]]
|
||||
version = "0.11.1"
|
||||
@@ -14,26 +48,22 @@ backend = "cargo:cargo-features-manager"
|
||||
version = "1.46.3"
|
||||
backend = "cargo:cargo-insta"
|
||||
|
||||
[[tools."cargo:cargo-mutants"]]
|
||||
version = "27.0.0"
|
||||
backend = "cargo:cargo-mutants"
|
||||
|
||||
[[tools."cargo:cargo-nextest"]]
|
||||
version = "0.9.126"
|
||||
backend = "cargo:cargo-nextest"
|
||||
|
||||
[[tools."cargo:cargo-shear"]]
|
||||
version = "1.9.1"
|
||||
version = "1.11.2"
|
||||
backend = "cargo:cargo-shear"
|
||||
|
||||
[[tools."cargo:cargo-vet"]]
|
||||
version = "0.10.2"
|
||||
backend = "cargo:cargo-vet"
|
||||
|
||||
[[tools."cargo:diesel-cli"]]
|
||||
version = "2.3.6"
|
||||
backend = "cargo:diesel-cli"
|
||||
|
||||
[tools."cargo:diesel-cli".options]
|
||||
default-features = "false"
|
||||
features = "sqlite,sqlite-bundled"
|
||||
|
||||
[[tools."cargo:diesel_cli"]]
|
||||
version = "2.3.6"
|
||||
backend = "cargo:diesel_cli"
|
||||
@@ -49,20 +79,73 @@ backend = "asdf:flutter"
|
||||
[[tools.protoc]]
|
||||
version = "29.6"
|
||||
backend = "aqua:protocolbuffers/protobuf/protoc"
|
||||
"platforms.linux-arm64" = { checksum = "sha256:2594ff4fcae8cb57310d394d0961b236190ad9c5efbfdf1f597ea471d424fe79", url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-linux-aarch_64.zip"}
|
||||
"platforms.linux-x64" = { checksum = "sha256:48785a926e73ffa3f68e2f22b14e7b849620c7a1d36809ac9249a5495e280323", url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-linux-x86_64.zip"}
|
||||
"platforms.macos-arm64" = { checksum = "sha256:b9576b5fa1a1ef3fe13a8c91d9d8204b46545759bea5ae155cd6ba2ea4cdaeed", url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-osx-aarch_64.zip"}
|
||||
"platforms.macos-x64" = { checksum = "sha256:312f04713946921cc0187ef34df80241ddca1bab6f564c636885fd2cc90d3f88", url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-osx-x86_64.zip"}
|
||||
"platforms.windows-x64" = { checksum = "sha256:1ebd7c87baffb9f1c47169b640872bf5fb1e4408079c691af527be9561d8f6f7", url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-win64.zip"}
|
||||
|
||||
[tools.protoc."platforms.linux-arm64"]
|
||||
checksum = "sha256:2594ff4fcae8cb57310d394d0961b236190ad9c5efbfdf1f597ea471d424fe79"
|
||||
url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-linux-aarch_64.zip"
|
||||
|
||||
[tools.protoc."platforms.linux-arm64-musl"]
|
||||
checksum = "sha256:2594ff4fcae8cb57310d394d0961b236190ad9c5efbfdf1f597ea471d424fe79"
|
||||
url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-linux-aarch_64.zip"
|
||||
|
||||
[tools.protoc."platforms.linux-x64"]
|
||||
checksum = "sha256:48785a926e73ffa3f68e2f22b14e7b849620c7a1d36809ac9249a5495e280323"
|
||||
url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-linux-x86_64.zip"
|
||||
|
||||
[tools.protoc."platforms.linux-x64-musl"]
|
||||
checksum = "sha256:48785a926e73ffa3f68e2f22b14e7b849620c7a1d36809ac9249a5495e280323"
|
||||
url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-linux-x86_64.zip"
|
||||
|
||||
[tools.protoc."platforms.macos-arm64"]
|
||||
checksum = "sha256:b9576b5fa1a1ef3fe13a8c91d9d8204b46545759bea5ae155cd6ba2ea4cdaeed"
|
||||
url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-osx-aarch_64.zip"
|
||||
|
||||
[tools.protoc."platforms.macos-x64"]
|
||||
checksum = "sha256:312f04713946921cc0187ef34df80241ddca1bab6f564c636885fd2cc90d3f88"
|
||||
url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-osx-x86_64.zip"
|
||||
|
||||
[tools.protoc."platforms.windows-x64"]
|
||||
checksum = "sha256:1ebd7c87baffb9f1c47169b640872bf5fb1e4408079c691af527be9561d8f6f7"
|
||||
url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-win64.zip"
|
||||
|
||||
[[tools.python]]
|
||||
version = "3.14.3"
|
||||
backend = "core:python"
|
||||
"platforms.linux-arm64" = { checksum = "sha256:be0f4dc2932f762292b27d46ea7d3e8e66ddf3969a5eb0254a229015ed402625", url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260303/cpython-3.14.3+20260303-aarch64-unknown-linux-gnu-install_only_stripped.tar.gz"}
|
||||
"platforms.linux-x64" = { checksum = "sha256:0a73413f89efd417871876c9accaab28a9d1e3cd6358fbfff171a38ec99302f0", url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260303/cpython-3.14.3+20260303-x86_64-unknown-linux-gnu-install_only_stripped.tar.gz"}
|
||||
"platforms.macos-arm64" = { checksum = "sha256:4703cdf18b26798fde7b49b6b66149674c25f97127be6a10dbcf29309bdcdcdb", url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260303/cpython-3.14.3+20260303-aarch64-apple-darwin-install_only_stripped.tar.gz"}
|
||||
"platforms.macos-x64" = { checksum = "sha256:76f1cc26e3d262eae8ca546a93e8bded10cf0323613f7e246fea2e10a8115eb7", url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260303/cpython-3.14.3+20260303-x86_64-apple-darwin-install_only_stripped.tar.gz"}
|
||||
"platforms.windows-x64" = { checksum = "sha256:950c5f21a015c1bdd1337f233456df2470fab71e4d794407d27a84cb8b9909a0", url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260303/cpython-3.14.3+20260303-x86_64-pc-windows-msvc-install_only_stripped.tar.gz"}
|
||||
|
||||
[tools.python."platforms.linux-arm64"]
|
||||
checksum = "sha256:53700338695e402a1a1fe22be4a41fbdacc70e22bb308a48eca8ed67cb7992be"
|
||||
url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260324/cpython-3.14.3+20260324-aarch64-unknown-linux-gnu-install_only_stripped.tar.gz"
|
||||
provenance = "github-attestations"
|
||||
|
||||
[tools.python."platforms.linux-arm64-musl"]
|
||||
checksum = "sha256:53700338695e402a1a1fe22be4a41fbdacc70e22bb308a48eca8ed67cb7992be"
|
||||
url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260324/cpython-3.14.3+20260324-aarch64-unknown-linux-gnu-install_only_stripped.tar.gz"
|
||||
provenance = "github-attestations"
|
||||
|
||||
[tools.python."platforms.linux-x64"]
|
||||
checksum = "sha256:d7a9f970914bb4c88756fe3bdcc186d4feb90e9500e54f1db47dae4dc9687e39"
|
||||
url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260324/cpython-3.14.3+20260324-x86_64-unknown-linux-gnu-install_only_stripped.tar.gz"
|
||||
provenance = "github-attestations"
|
||||
|
||||
[tools.python."platforms.linux-x64-musl"]
|
||||
checksum = "sha256:d7a9f970914bb4c88756fe3bdcc186d4feb90e9500e54f1db47dae4dc9687e39"
|
||||
url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260324/cpython-3.14.3+20260324-x86_64-unknown-linux-gnu-install_only_stripped.tar.gz"
|
||||
provenance = "github-attestations"
|
||||
|
||||
[tools.python."platforms.macos-arm64"]
|
||||
checksum = "sha256:c43aecde4a663aebff99b9b83da0efec506479f1c3f98331442f33d2c43501f9"
|
||||
url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260324/cpython-3.14.3+20260324-aarch64-apple-darwin-install_only_stripped.tar.gz"
|
||||
provenance = "github-attestations"
|
||||
|
||||
[tools.python."platforms.macos-x64"]
|
||||
checksum = "sha256:9ab41dbc2f100a2a45d1833b9c11165f51051c558b5213eda9a9731d5948a0c0"
|
||||
url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260324/cpython-3.14.3+20260324-x86_64-apple-darwin-install_only_stripped.tar.gz"
|
||||
provenance = "github-attestations"
|
||||
|
||||
[tools.python."platforms.windows-x64"]
|
||||
checksum = "sha256:bbe19034b35b0267176a7442575ae7dc6343480fd4d35598cb7700173d431e09"
|
||||
url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260324/cpython-3.14.3+20260324-x86_64-pc-windows-msvc-install_only_stripped.tar.gz"
|
||||
provenance = "github-attestations"
|
||||
|
||||
[[tools.rust]]
|
||||
version = "1.93.0"
|
||||
|
||||
11
mise.toml
11
mise.toml
@@ -10,3 +10,14 @@ protoc = "29.6"
|
||||
"cargo:cargo-shear" = "latest"
|
||||
"cargo:cargo-insta" = "1.46.3"
|
||||
python = "3.14.3"
|
||||
ast-grep = "0.42.0"
|
||||
"cargo:cargo-edit" = "0.13.9"
|
||||
"cargo:cargo-mutants" = "27.0.0"
|
||||
|
||||
[tasks.codegen]
|
||||
sources = ['protobufs/*.proto', 'protobufs/**/*.proto']
|
||||
outputs = ['useragent/lib/proto/**']
|
||||
run = '''
|
||||
dart pub global activate protoc_plugin && \
|
||||
protoc --dart_out=grpc:useragent/lib/proto --proto_path=protobufs/ $(find protobufs -name '*.proto' | sort)
|
||||
'''
|
||||
|
||||
@@ -2,37 +2,24 @@ syntax = "proto3";
|
||||
|
||||
package arbiter.client;
|
||||
|
||||
import "evm.proto";
|
||||
|
||||
message AuthChallengeRequest {
|
||||
bytes pubkey = 1;
|
||||
}
|
||||
|
||||
message AuthChallenge {
|
||||
bytes pubkey = 1;
|
||||
int32 nonce = 2;
|
||||
}
|
||||
|
||||
message AuthChallengeSolution {
|
||||
bytes signature = 1;
|
||||
}
|
||||
|
||||
message AuthOk {}
|
||||
import "client/auth.proto";
|
||||
import "client/evm.proto";
|
||||
import "client/vault.proto";
|
||||
|
||||
message ClientRequest {
|
||||
int32 request_id = 4;
|
||||
oneof payload {
|
||||
AuthChallengeRequest auth_challenge_request = 1;
|
||||
AuthChallengeSolution auth_challenge_solution = 2;
|
||||
arbiter.evm.EvmSignTransactionRequest evm_sign_transaction = 3;
|
||||
arbiter.evm.EvmAnalyzeTransactionRequest evm_analyze_transaction = 4;
|
||||
auth.Request auth = 1;
|
||||
vault.Request vault = 2;
|
||||
evm.Request evm = 3;
|
||||
}
|
||||
}
|
||||
|
||||
message ClientResponse {
|
||||
optional int32 request_id = 7;
|
||||
oneof payload {
|
||||
AuthChallenge auth_challenge = 1;
|
||||
AuthOk auth_ok = 2;
|
||||
arbiter.evm.EvmSignTransactionResponse evm_sign_transaction = 3;
|
||||
arbiter.evm.EvmAnalyzeTransactionResponse evm_analyze_transaction = 4;
|
||||
auth.Response auth = 1;
|
||||
vault.Response vault = 2;
|
||||
evm.Response evm = 3;
|
||||
}
|
||||
}
|
||||
|
||||
43
protobufs/client/auth.proto
Normal file
43
protobufs/client/auth.proto
Normal file
@@ -0,0 +1,43 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package arbiter.client.auth;
|
||||
|
||||
import "shared/client.proto";
|
||||
|
||||
message AuthChallengeRequest {
|
||||
bytes pubkey = 1;
|
||||
arbiter.shared.ClientInfo client_info = 2;
|
||||
}
|
||||
|
||||
message AuthChallenge {
|
||||
bytes pubkey = 1;
|
||||
int32 nonce = 2;
|
||||
}
|
||||
|
||||
message AuthChallengeSolution {
|
||||
bytes signature = 1;
|
||||
}
|
||||
|
||||
enum AuthResult {
|
||||
AUTH_RESULT_UNSPECIFIED = 0;
|
||||
AUTH_RESULT_SUCCESS = 1;
|
||||
AUTH_RESULT_INVALID_KEY = 2;
|
||||
AUTH_RESULT_INVALID_SIGNATURE = 3;
|
||||
AUTH_RESULT_APPROVAL_DENIED = 4;
|
||||
AUTH_RESULT_NO_USER_AGENTS_ONLINE = 5;
|
||||
AUTH_RESULT_INTERNAL = 6;
|
||||
}
|
||||
|
||||
message Request {
|
||||
oneof payload {
|
||||
AuthChallengeRequest challenge_request = 1;
|
||||
AuthChallengeSolution challenge_solution = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message Response {
|
||||
oneof payload {
|
||||
AuthChallenge challenge = 1;
|
||||
AuthResult result = 2;
|
||||
}
|
||||
}
|
||||
19
protobufs/client/evm.proto
Normal file
19
protobufs/client/evm.proto
Normal file
@@ -0,0 +1,19 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package arbiter.client.evm;
|
||||
|
||||
import "evm.proto";
|
||||
|
||||
message Request {
|
||||
oneof payload {
|
||||
arbiter.evm.EvmSignTransactionRequest sign_transaction = 1;
|
||||
arbiter.evm.EvmAnalyzeTransactionRequest analyze_transaction = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message Response {
|
||||
oneof payload {
|
||||
arbiter.evm.EvmSignTransactionResponse sign_transaction = 1;
|
||||
arbiter.evm.EvmAnalyzeTransactionResponse analyze_transaction = 2;
|
||||
}
|
||||
}
|
||||
18
protobufs/client/vault.proto
Normal file
18
protobufs/client/vault.proto
Normal file
@@ -0,0 +1,18 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package arbiter.client.vault;
|
||||
|
||||
import "google/protobuf/empty.proto";
|
||||
import "shared/vault.proto";
|
||||
|
||||
message Request {
|
||||
oneof payload {
|
||||
google.protobuf.Empty query_state = 1;
|
||||
}
|
||||
}
|
||||
|
||||
message Response {
|
||||
oneof payload {
|
||||
arbiter.shared.VaultState state = 1;
|
||||
}
|
||||
}
|
||||
@@ -4,6 +4,7 @@ package arbiter.evm;
|
||||
|
||||
import "google/protobuf/empty.proto";
|
||||
import "google/protobuf/timestamp.proto";
|
||||
import "shared/evm.proto";
|
||||
|
||||
enum EvmError {
|
||||
EVM_ERROR_UNSPECIFIED = 0;
|
||||
@@ -12,7 +13,8 @@ enum EvmError {
|
||||
}
|
||||
|
||||
message WalletEntry {
|
||||
bytes address = 1; // 20-byte Ethereum address
|
||||
int32 id = 1;
|
||||
bytes address = 2; // 20-byte Ethereum address
|
||||
}
|
||||
|
||||
message WalletList {
|
||||
@@ -46,7 +48,7 @@ message VolumeRateLimit {
|
||||
}
|
||||
|
||||
message SharedSettings {
|
||||
int32 wallet_id = 1;
|
||||
int32 wallet_access_id = 1;
|
||||
uint64 chain_id = 2;
|
||||
optional google.protobuf.Timestamp valid_from = 3;
|
||||
optional google.protobuf.Timestamp valid_until = 4;
|
||||
@@ -73,75 +75,10 @@ message SpecificGrant {
|
||||
}
|
||||
}
|
||||
|
||||
message EtherTransferMeaning {
|
||||
bytes to = 1; // 20-byte Ethereum address
|
||||
bytes value = 2; // U256 as big-endian bytes
|
||||
}
|
||||
|
||||
message TokenInfo {
|
||||
string symbol = 1;
|
||||
bytes address = 2; // 20-byte Ethereum address
|
||||
uint64 chain_id = 3;
|
||||
}
|
||||
|
||||
// Mirror of token_transfers::Meaning
|
||||
message TokenTransferMeaning {
|
||||
TokenInfo token = 1;
|
||||
bytes to = 2; // 20-byte Ethereum address
|
||||
bytes value = 3; // U256 as big-endian bytes
|
||||
}
|
||||
|
||||
// Mirror of policies::SpecificMeaning
|
||||
message SpecificMeaning {
|
||||
oneof meaning {
|
||||
EtherTransferMeaning ether_transfer = 1;
|
||||
TokenTransferMeaning token_transfer = 2;
|
||||
}
|
||||
}
|
||||
|
||||
// --- Eval error types ---
|
||||
message GasLimitExceededViolation {
|
||||
optional bytes max_gas_fee_per_gas = 1; // U256 as big-endian bytes
|
||||
optional bytes max_priority_fee_per_gas = 2; // U256 as big-endian bytes
|
||||
}
|
||||
|
||||
message EvalViolation {
|
||||
oneof kind {
|
||||
bytes invalid_target = 1; // 20-byte Ethereum address
|
||||
GasLimitExceededViolation gas_limit_exceeded = 2;
|
||||
google.protobuf.Empty rate_limit_exceeded = 3;
|
||||
google.protobuf.Empty volumetric_limit_exceeded = 4;
|
||||
google.protobuf.Empty invalid_time = 5;
|
||||
google.protobuf.Empty invalid_transaction_type = 6;
|
||||
}
|
||||
}
|
||||
|
||||
// Transaction was classified but no grant covers it
|
||||
message NoMatchingGrantError {
|
||||
SpecificMeaning meaning = 1;
|
||||
}
|
||||
|
||||
// Transaction was classified and a grant was found, but constraints were violated
|
||||
message PolicyViolationsError {
|
||||
SpecificMeaning meaning = 1;
|
||||
repeated EvalViolation violations = 2;
|
||||
}
|
||||
|
||||
// top-level error returned when transaction evaluation fails
|
||||
message TransactionEvalError {
|
||||
oneof kind {
|
||||
google.protobuf.Empty contract_creation_not_supported = 1;
|
||||
google.protobuf.Empty unsupported_transaction_type = 2;
|
||||
NoMatchingGrantError no_matching_grant = 3;
|
||||
PolicyViolationsError policy_violations = 4;
|
||||
}
|
||||
}
|
||||
|
||||
// --- UserAgent grant management ---
|
||||
message EvmGrantCreateRequest {
|
||||
int32 client_id = 1;
|
||||
SharedSettings shared = 2;
|
||||
SpecificGrant specific = 3;
|
||||
SharedSettings shared = 1;
|
||||
SpecificGrant specific = 2;
|
||||
}
|
||||
|
||||
message EvmGrantCreateResponse {
|
||||
@@ -165,13 +102,13 @@ message EvmGrantDeleteResponse {
|
||||
// Basic grant info returned in grant listings
|
||||
message GrantEntry {
|
||||
int32 id = 1;
|
||||
int32 client_id = 2;
|
||||
int32 wallet_access_id = 2;
|
||||
SharedSettings shared = 3;
|
||||
SpecificGrant specific = 4;
|
||||
}
|
||||
|
||||
message EvmGrantListRequest {
|
||||
optional int32 wallet_id = 1;
|
||||
optional int32 wallet_access_id = 1;
|
||||
}
|
||||
|
||||
message EvmGrantListResponse {
|
||||
@@ -197,7 +134,7 @@ message EvmSignTransactionRequest {
|
||||
message EvmSignTransactionResponse {
|
||||
oneof result {
|
||||
bytes signature = 1; // 65-byte signature: r[32] || s[32] || v[1]
|
||||
TransactionEvalError eval_error = 2;
|
||||
arbiter.shared.evm.TransactionEvalError eval_error = 2;
|
||||
EvmError error = 3;
|
||||
}
|
||||
}
|
||||
@@ -209,8 +146,8 @@ message EvmAnalyzeTransactionRequest {
|
||||
|
||||
message EvmAnalyzeTransactionResponse {
|
||||
oneof result {
|
||||
SpecificMeaning meaning = 1;
|
||||
TransactionEvalError eval_error = 2;
|
||||
arbiter.shared.evm.SpecificMeaning meaning = 1;
|
||||
arbiter.shared.evm.TransactionEvalError eval_error = 2;
|
||||
EvmError error = 3;
|
||||
}
|
||||
}
|
||||
|
||||
9
protobufs/shared/client.proto
Normal file
9
protobufs/shared/client.proto
Normal file
@@ -0,0 +1,9 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package arbiter.shared;
|
||||
|
||||
message ClientInfo {
|
||||
string name = 1;
|
||||
optional string description = 2;
|
||||
optional string version = 3;
|
||||
}
|
||||
74
protobufs/shared/evm.proto
Normal file
74
protobufs/shared/evm.proto
Normal file
@@ -0,0 +1,74 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package arbiter.shared.evm;
|
||||
|
||||
import "google/protobuf/empty.proto";
|
||||
|
||||
message EtherTransferMeaning {
|
||||
bytes to = 1; // 20-byte Ethereum address
|
||||
bytes value = 2; // U256 as big-endian bytes
|
||||
}
|
||||
|
||||
message TokenInfo {
|
||||
string symbol = 1;
|
||||
bytes address = 2; // 20-byte Ethereum address
|
||||
uint64 chain_id = 3;
|
||||
}
|
||||
|
||||
// Mirror of token_transfers::Meaning
|
||||
message TokenTransferMeaning {
|
||||
TokenInfo token = 1;
|
||||
bytes to = 2; // 20-byte Ethereum address
|
||||
bytes value = 3; // U256 as big-endian bytes
|
||||
}
|
||||
|
||||
// Mirror of policies::SpecificMeaning
|
||||
message SpecificMeaning {
|
||||
oneof meaning {
|
||||
EtherTransferMeaning ether_transfer = 1;
|
||||
TokenTransferMeaning token_transfer = 2;
|
||||
}
|
||||
}
|
||||
|
||||
message GasLimitExceededViolation {
|
||||
optional bytes max_gas_fee_per_gas = 1; // U256 as big-endian bytes
|
||||
optional bytes max_priority_fee_per_gas = 2; // U256 as big-endian bytes
|
||||
}
|
||||
|
||||
message EvalViolation {
|
||||
message ChainIdMismatch {
|
||||
uint64 expected = 1;
|
||||
uint64 actual = 2;
|
||||
}
|
||||
oneof kind {
|
||||
bytes invalid_target = 1; // 20-byte Ethereum address
|
||||
GasLimitExceededViolation gas_limit_exceeded = 2;
|
||||
google.protobuf.Empty rate_limit_exceeded = 3;
|
||||
google.protobuf.Empty volumetric_limit_exceeded = 4;
|
||||
google.protobuf.Empty invalid_time = 5;
|
||||
google.protobuf.Empty invalid_transaction_type = 6;
|
||||
|
||||
ChainIdMismatch chain_id_mismatch = 7;
|
||||
}
|
||||
}
|
||||
|
||||
// Transaction was classified but no grant covers it
|
||||
message NoMatchingGrantError {
|
||||
SpecificMeaning meaning = 1;
|
||||
}
|
||||
|
||||
// Transaction was classified and a grant was found, but constraints were violated
|
||||
message PolicyViolationsError {
|
||||
SpecificMeaning meaning = 1;
|
||||
repeated EvalViolation violations = 2;
|
||||
}
|
||||
|
||||
// top-level error returned when transaction evaluation fails
|
||||
message TransactionEvalError {
|
||||
oneof kind {
|
||||
google.protobuf.Empty contract_creation_not_supported = 1;
|
||||
google.protobuf.Empty unsupported_transaction_type = 2;
|
||||
NoMatchingGrantError no_matching_grant = 3;
|
||||
PolicyViolationsError policy_violations = 4;
|
||||
}
|
||||
}
|
||||
11
protobufs/shared/vault.proto
Normal file
11
protobufs/shared/vault.proto
Normal file
@@ -0,0 +1,11 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package arbiter.shared;
|
||||
|
||||
enum VaultState {
|
||||
VAULT_STATE_UNSPECIFIED = 0;
|
||||
VAULT_STATE_UNBOOTSTRAPPED = 1;
|
||||
VAULT_STATE_SEALED = 2;
|
||||
VAULT_STATE_UNSEALED = 3;
|
||||
VAULT_STATE_ERROR = 4;
|
||||
}
|
||||
@@ -2,78 +2,27 @@ syntax = "proto3";
|
||||
|
||||
package arbiter.user_agent;
|
||||
|
||||
import "google/protobuf/empty.proto";
|
||||
import "evm.proto";
|
||||
|
||||
message AuthChallengeRequest {
|
||||
bytes pubkey = 1;
|
||||
optional string bootstrap_token = 2;
|
||||
}
|
||||
|
||||
message AuthChallenge {
|
||||
bytes pubkey = 1;
|
||||
int32 nonce = 2;
|
||||
}
|
||||
|
||||
message AuthChallengeSolution {
|
||||
bytes signature = 1;
|
||||
}
|
||||
|
||||
message AuthOk {}
|
||||
|
||||
message UnsealStart {
|
||||
bytes client_pubkey = 1;
|
||||
}
|
||||
|
||||
message UnsealStartResponse {
|
||||
bytes server_pubkey = 1;
|
||||
}
|
||||
message UnsealEncryptedKey {
|
||||
bytes nonce = 1;
|
||||
bytes ciphertext = 2;
|
||||
bytes associated_data = 3;
|
||||
}
|
||||
|
||||
enum UnsealResult {
|
||||
UNSEAL_RESULT_UNSPECIFIED = 0;
|
||||
UNSEAL_RESULT_SUCCESS = 1;
|
||||
UNSEAL_RESULT_INVALID_KEY = 2;
|
||||
UNSEAL_RESULT_UNBOOTSTRAPPED = 3;
|
||||
}
|
||||
|
||||
enum VaultState {
|
||||
VAULT_STATE_UNSPECIFIED = 0;
|
||||
VAULT_STATE_UNBOOTSTRAPPED = 1;
|
||||
VAULT_STATE_SEALED = 2;
|
||||
VAULT_STATE_UNSEALED = 3;
|
||||
VAULT_STATE_ERROR = 4;
|
||||
}
|
||||
import "user_agent/auth.proto";
|
||||
import "user_agent/evm.proto";
|
||||
import "user_agent/sdk_client.proto";
|
||||
import "user_agent/vault/vault.proto";
|
||||
|
||||
message UserAgentRequest {
|
||||
int32 id = 16;
|
||||
oneof payload {
|
||||
AuthChallengeRequest auth_challenge_request = 1;
|
||||
AuthChallengeSolution auth_challenge_solution = 2;
|
||||
UnsealStart unseal_start = 3;
|
||||
UnsealEncryptedKey unseal_encrypted_key = 4;
|
||||
google.protobuf.Empty query_vault_state = 5;
|
||||
google.protobuf.Empty evm_wallet_create = 6;
|
||||
google.protobuf.Empty evm_wallet_list = 7;
|
||||
arbiter.evm.EvmGrantCreateRequest evm_grant_create = 8;
|
||||
arbiter.evm.EvmGrantDeleteRequest evm_grant_delete = 9;
|
||||
arbiter.evm.EvmGrantListRequest evm_grant_list = 10;
|
||||
auth.Request auth = 1;
|
||||
vault.Request vault = 2;
|
||||
evm.Request evm = 3;
|
||||
sdk_client.Request sdk_client = 4;
|
||||
}
|
||||
}
|
||||
|
||||
message UserAgentResponse {
|
||||
optional int32 id = 16;
|
||||
oneof payload {
|
||||
AuthChallenge auth_challenge = 1;
|
||||
AuthOk auth_ok = 2;
|
||||
UnsealStartResponse unseal_start_response = 3;
|
||||
UnsealResult unseal_result = 4;
|
||||
VaultState vault_state = 5;
|
||||
arbiter.evm.WalletCreateResponse evm_wallet_create = 6;
|
||||
arbiter.evm.WalletListResponse evm_wallet_list = 7;
|
||||
arbiter.evm.EvmGrantCreateResponse evm_grant_create = 8;
|
||||
arbiter.evm.EvmGrantDeleteResponse evm_grant_delete = 9;
|
||||
arbiter.evm.EvmGrantListResponse evm_grant_list = 10;
|
||||
auth.Response auth = 1;
|
||||
vault.Response vault = 2;
|
||||
evm.Response evm = 3;
|
||||
sdk_client.Response sdk_client = 4;
|
||||
}
|
||||
}
|
||||
|
||||
48
protobufs/user_agent/auth.proto
Normal file
48
protobufs/user_agent/auth.proto
Normal file
@@ -0,0 +1,48 @@
|
||||
syntax = "proto3";

package arbiter.user_agent.auth;

// Key algorithms a user agent may authenticate with.
enum KeyType {
  KEY_TYPE_UNSPECIFIED = 0;
  KEY_TYPE_ED25519 = 1;
  KEY_TYPE_ECDSA_SECP256K1 = 2;
  // NOTE(review): RSA presumably exists for Windows Hello keys — confirm.
  KEY_TYPE_RSA = 3;
}

// Starts the challenge/response handshake for the given public key.
message AuthChallengeRequest {
  bytes pubkey = 1;
  // One-time token required when the key has not been seen before.
  optional string bootstrap_token = 2;
  KeyType key_type = 3;
}

// Server-issued nonce the client must sign to prove key possession.
message AuthChallenge {
  int32 nonce = 1;
}

// Client's signature over the formatted challenge payload.
message AuthChallengeSolution {
  bytes signature = 1;
}

// Terminal outcome of the handshake.
enum AuthResult {
  AUTH_RESULT_UNSPECIFIED = 0;
  AUTH_RESULT_SUCCESS = 1;
  AUTH_RESULT_INVALID_KEY = 2;
  AUTH_RESULT_INVALID_SIGNATURE = 3;
  AUTH_RESULT_BOOTSTRAP_REQUIRED = 4;
  AUTH_RESULT_TOKEN_INVALID = 5;
  AUTH_RESULT_INTERNAL = 6;
}

// Client -> server auth messages.
message Request {
  oneof payload {
    AuthChallengeRequest challenge_request = 1;
    AuthChallengeSolution challenge_solution = 2;
  }
}

// Server -> client auth messages.
message Response {
  oneof payload {
    AuthChallenge challenge = 1;
    AuthResult result = 2;
  }
}
|
||||
33
protobufs/user_agent/evm.proto
Normal file
33
protobufs/user_agent/evm.proto
Normal file
@@ -0,0 +1,33 @@
|
||||
syntax = "proto3";

package arbiter.user_agent.evm;

import "evm.proto";
import "google/protobuf/empty.proto";

// Sign request issued on behalf of a specific SDK client.
message SignTransactionRequest {
  int32 client_id = 1;
  arbiter.evm.EvmSignTransactionRequest request = 2;
}

// User-agent -> server EVM operations.
message Request {
  oneof payload {
    google.protobuf.Empty wallet_create = 1;
    google.protobuf.Empty wallet_list = 2;
    arbiter.evm.EvmGrantCreateRequest grant_create = 3;
    arbiter.evm.EvmGrantDeleteRequest grant_delete = 4;
    arbiter.evm.EvmGrantListRequest grant_list = 5;
    SignTransactionRequest sign_transaction = 6;
  }
}

// Server -> user-agent EVM results; field numbers mirror Request.
message Response {
  oneof payload {
    arbiter.evm.WalletCreateResponse wallet_create = 1;
    arbiter.evm.WalletListResponse wallet_list = 2;
    arbiter.evm.EvmGrantCreateResponse grant_create = 3;
    arbiter.evm.EvmGrantDeleteResponse grant_delete = 4;
    arbiter.evm.EvmGrantListResponse grant_list = 5;
    arbiter.evm.EvmSignTransactionResponse sign_transaction = 6;
  }
}
|
||||
100
protobufs/user_agent/sdk_client.proto
Normal file
100
protobufs/user_agent/sdk_client.proto
Normal file
@@ -0,0 +1,100 @@
|
||||
syntax = "proto3";

package arbiter.user_agent.sdk_client;

import "shared/client.proto";
import "google/protobuf/empty.proto";

// Failure codes for SDK-client management operations.
enum Error {
  ERROR_UNSPECIFIED = 0;
  ERROR_ALREADY_EXISTS = 1;
  ERROR_NOT_FOUND = 2;
  ERROR_HAS_RELATED_DATA = 3; // hard-delete blocked by FK (client has grants or transaction logs)
  ERROR_INTERNAL = 4;
}

// Revoke (delete) a registered SDK client by id.
message RevokeRequest {
  int32 client_id = 1;
}

// One registered SDK client.
message Entry {
  int32 id = 1;
  bytes pubkey = 2;
  arbiter.shared.ClientInfo info = 3;
  // NOTE(review): int32 looks like a Unix timestamp (seconds) — confirm; it
  // overflows in 2038 if so.
  int32 created_at = 4;
}

message List {
  repeated Entry clients = 1;
}

message RevokeResponse {
  oneof result {
    google.protobuf.Empty ok = 1;
    Error error = 2;
  }
}

message ListResponse {
  oneof result {
    List clients = 1;
    Error error = 2;
  }
}

// Server asks the user agent to approve a new SDK client connection.
message ConnectionRequest {
  bytes pubkey = 1;
  arbiter.shared.ClientInfo info = 2;
}

// User agent's approval decision for the client identified by `pubkey`.
message ConnectionResponse {
  bool approved = 1;
  bytes pubkey = 2;
}

// Server withdraws a pending approval request (e.g. the client disconnected).
message ConnectionCancel {
  bytes pubkey = 1;
}

// Pairs a wallet with an SDK client that may use it.
message WalletAccess {
  int32 wallet_id = 1;
  int32 sdk_client_id = 2;
}

// A stored access pairing with its database id.
message WalletAccessEntry {
  int32 id = 1;
  WalletAccess access = 2;
}

message GrantWalletAccess {
  repeated WalletAccess accesses = 1;
}

// Revokes access entries by their WalletAccessEntry ids.
message RevokeWalletAccess {
  repeated int32 accesses = 1;
}

message ListWalletAccessResponse {
  repeated WalletAccessEntry accesses = 1;
}

// User-agent -> server SDK-client management messages.
message Request {
  oneof payload {
    ConnectionResponse connection_response = 1;
    RevokeRequest revoke = 2;
    google.protobuf.Empty list = 3;
    GrantWalletAccess grant_wallet_access = 4;
    RevokeWalletAccess revoke_wallet_access = 5;
    google.protobuf.Empty list_wallet_access = 6;
  }
}

// Server -> user-agent messages (approval prompts and query results).
message Response {
  oneof payload {
    ConnectionRequest connection_request = 1;
    ConnectionCancel connection_cancel = 2;
    RevokeResponse revoke = 3;
    ListResponse list = 4;
    ListWalletAccessResponse list_wallet_access = 5;
  }
}
|
||||
24
protobufs/user_agent/vault/bootstrap.proto
Normal file
24
protobufs/user_agent/vault/bootstrap.proto
Normal file
@@ -0,0 +1,24 @@
|
||||
syntax = "proto3";

package arbiter.user_agent.vault.bootstrap;

// Master key material, encrypted for transport to the server.
message BootstrapEncryptedKey {
  bytes nonce = 1;
  bytes ciphertext = 2;
  bytes associated_data = 3;
}

enum BootstrapResult {
  BOOTSTRAP_RESULT_UNSPECIFIED = 0;
  BOOTSTRAP_RESULT_SUCCESS = 1;
  BOOTSTRAP_RESULT_ALREADY_BOOTSTRAPPED = 2;
  BOOTSTRAP_RESULT_INVALID_KEY = 3;
}

message Request {
  // NOTE(review): field number starts at 2 with no field 1 present — looks
  // intentional (reserved slot?) but confirm; renumbering would break the wire
  // format.
  BootstrapEncryptedKey encrypted_key = 2;
}

message Response {
  BootstrapResult result = 1;
}
|
||||
37
protobufs/user_agent/vault/unseal.proto
Normal file
37
protobufs/user_agent/vault/unseal.proto
Normal file
@@ -0,0 +1,37 @@
|
||||
syntax = "proto3";

package arbiter.user_agent.vault.unseal;

// Begins an unseal session; carries the client's ephemeral public key.
message UnsealStart {
  bytes client_pubkey = 1;
}

// Server's ephemeral public key for the key-exchange.
message UnsealStartResponse {
  bytes server_pubkey = 1;
}

// The unseal key, encrypted under the negotiated session key.
message UnsealEncryptedKey {
  bytes nonce = 1;
  bytes ciphertext = 2;
  bytes associated_data = 3;
}

enum UnsealResult {
  UNSEAL_RESULT_UNSPECIFIED = 0;
  UNSEAL_RESULT_SUCCESS = 1;
  UNSEAL_RESULT_INVALID_KEY = 2;
  // Unseal attempted before the vault was ever bootstrapped.
  UNSEAL_RESULT_UNBOOTSTRAPPED = 3;
}

message Request {
  oneof payload {
    UnsealStart start = 1;
    UnsealEncryptedKey encrypted_key = 2;
  }
}

message Response {
  oneof payload {
    UnsealStartResponse start = 1;
    UnsealResult result = 2;
  }
}
|
||||
24
protobufs/user_agent/vault/vault.proto
Normal file
24
protobufs/user_agent/vault/vault.proto
Normal file
@@ -0,0 +1,24 @@
|
||||
syntax = "proto3";

package arbiter.user_agent.vault;

import "google/protobuf/empty.proto";
import "shared/vault.proto";
import "user_agent/vault/bootstrap.proto";
import "user_agent/vault/unseal.proto";

// User-agent -> server vault operations.
message Request {
  oneof payload {
    google.protobuf.Empty query_state = 1;
    unseal.Request unseal = 2;
    bootstrap.Request bootstrap = 3;
  }
}

// Server -> user-agent vault results; field numbers mirror Request.
message Response {
  oneof payload {
    arbiter.shared.VaultState state = 1;
    unseal.Response unseal = 2;
    bootstrap.Response bootstrap = 3;
  }
}
|
||||
13
server/.cargo/audit.toml
Normal file
13
server/.cargo/audit.toml
Normal file
@@ -0,0 +1,13 @@
|
||||
[advisories]
|
||||
# RUSTSEC-2023-0071: Marvin Attack timing side-channel in rsa crate.
|
||||
# No fixed version is available upstream.
|
||||
# RSA support is required for Windows Hello / KeyCredentialManager
|
||||
# (https://learn.microsoft.com/en-us/uwp/api/windows.security.credentials.keycredentialmanager.requestcreateasync),
|
||||
# which only issues RSA-2048 keys.
|
||||
# Mitigations in place:
|
||||
# - Signing uses BlindedSigningKey (PSS+SHA-256), which applies blinding to
|
||||
# protect the private key from timing recovery during signing.
|
||||
# - RSA decryption is never performed; we only verify public-key signatures.
|
||||
# - The attack requires local, high-resolution timing access against the
|
||||
# signing process, which is not exposed in our threat model.
|
||||
ignore = ["RUSTSEC-2023-0071"]
|
||||
1
server/.cargo/mutants.toml
Normal file
1
server/.cargo/mutants.toml
Normal file
@@ -0,0 +1 @@
|
||||
test_tool = "nextest"
|
||||
2
server/.gitignore
vendored
Normal file
2
server/.gitignore
vendored
Normal file
@@ -0,0 +1,2 @@
|
||||
mutants.out/
|
||||
mutants.out.old/
|
||||
547
server/Cargo.lock
generated
547
server/Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -4,25 +4,27 @@ members = [
|
||||
]
|
||||
resolver = "3"
|
||||
|
||||
[workspace.lints.clippy]
|
||||
disallowed-methods = "deny"
|
||||
|
||||
|
||||
[workspace.dependencies]
|
||||
tonic = { version = "0.14.3", features = [
|
||||
tonic = { version = "0.14.5", features = [
|
||||
"deflate",
|
||||
"gzip",
|
||||
"tls-connect-info",
|
||||
"zstd",
|
||||
] }
|
||||
tracing = "0.1.44"
|
||||
tokio = { version = "1.49.0", features = ["full"] }
|
||||
tokio = { version = "1.50.0", features = ["full"] }
|
||||
ed25519-dalek = { version = "3.0.0-pre.6", features = ["rand_core"] }
|
||||
chrono = { version = "0.4.43", features = ["serde"] }
|
||||
chrono = { version = "0.4.44", features = ["serde"] }
|
||||
rand = "0.10.0"
|
||||
rustls = "0.23.36"
|
||||
rustls = { version = "0.23.37", features = ["aws-lc-rs"] }
|
||||
smlang = "0.8.0"
|
||||
miette = { version = "7.6.0", features = ["fancy", "serde"] }
|
||||
thiserror = "2.0.18"
|
||||
async-trait = "0.1.89"
|
||||
futures = "0.3.31"
|
||||
futures = "0.3.32"
|
||||
tokio-stream = { version = "0.1.18", features = ["full"] }
|
||||
kameo = "0.19.2"
|
||||
prost-types = { version = "0.14.3", features = ["chrono"] }
|
||||
@@ -36,3 +38,10 @@ rcgen = { version = "0.14.7", features = [
|
||||
"x509-parser",
|
||||
"zeroize",
|
||||
], default-features = false }
|
||||
k256 = { version = "0.13.4", features = ["ecdsa", "pkcs8"] }
|
||||
rsa = { version = "0.9", features = ["sha2"] }
|
||||
sha2 = "0.10"
|
||||
spki = "0.7"
|
||||
prost = "0.14.3"
|
||||
miette = { version = "7.6.0", features = ["fancy", "serde"] }
|
||||
mutants = "0.0.4"
|
||||
|
||||
11
server/clippy.toml
Normal file
11
server/clippy.toml
Normal file
@@ -0,0 +1,11 @@
|
||||
disallowed-methods = [
|
||||
# RSA decryption is forbidden: the rsa crate has RUSTSEC-2023-0071 (Marvin Attack).
|
||||
# We only use RSA for Windows Hello (KeyCredentialManager) public-key verification — decryption
|
||||
# is never required and must not be introduced.
|
||||
{ path = "rsa::RsaPrivateKey::decrypt", reason = "RSA decryption is forbidden (RUSTSEC-2023-0071 Marvin Attack). Only PSS signing/verification is permitted." },
|
||||
{ path = "rsa::RsaPrivateKey::decrypt_blinded", reason = "RSA decryption is forbidden (RUSTSEC-2023-0071 Marvin Attack). Only PSS signing/verification is permitted." },
|
||||
{ path = "rsa::traits::Decryptor::decrypt", reason = "RSA decryption is forbidden (RUSTSEC-2023-0071 Marvin Attack). This blocks decrypt() on rsa::{pkcs1v15,oaep}::DecryptingKey." },
|
||||
{ path = "rsa::traits::RandomizedDecryptor::decrypt_with_rng", reason = "RSA decryption is forbidden (RUSTSEC-2023-0071 Marvin Attack). This blocks decrypt_with_rng() on rsa::{pkcs1v15,oaep}::DecryptingKey." },
|
||||
|
||||
{ path = "arbiter_server::crypto::integrity::v1::lookup_verified_allow_unavailable", reason = "This function allows integrity checks to be bypassed when vault key material is unavailable, which can lead to silent security failures if used incorrectly. It should only be used in specific contexts where this behavior is acceptable, and its use should be carefully audited." },
|
||||
]
|
||||
@@ -5,4 +5,22 @@ edition = "2024"
|
||||
repository = "https://git.markettakers.org/MarketTakers/arbiter"
|
||||
license = "Apache-2.0"
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[features]
|
||||
evm = ["dep:alloy"]
|
||||
|
||||
[dependencies]
|
||||
arbiter-proto.path = "../arbiter-proto"
|
||||
alloy = { workspace = true, optional = true }
|
||||
tonic.workspace = true
|
||||
tonic.features = ["tls-aws-lc"]
|
||||
tokio.workspace = true
|
||||
tokio-stream.workspace = true
|
||||
ed25519-dalek.workspace = true
|
||||
thiserror.workspace = true
|
||||
http = "1.4.0"
|
||||
rustls-webpki = { version = "0.103.10", features = ["aws-lc-rs"] }
|
||||
async-trait.workspace = true
|
||||
rand.workspace = true
|
||||
|
||||
149
server/crates/arbiter-client/src/auth.rs
Normal file
149
server/crates/arbiter-client/src/auth.rs
Normal file
@@ -0,0 +1,149 @@
|
||||
use arbiter_proto::{
|
||||
ClientMetadata, format_challenge,
|
||||
proto::{
|
||||
client::{
|
||||
ClientRequest,
|
||||
auth::{
|
||||
self as proto_auth, AuthChallenge, AuthChallengeRequest, AuthChallengeSolution,
|
||||
AuthResult, request::Payload as AuthRequestPayload,
|
||||
response::Payload as AuthResponsePayload,
|
||||
},
|
||||
client_request::Payload as ClientRequestPayload,
|
||||
client_response::Payload as ClientResponsePayload,
|
||||
},
|
||||
shared::ClientInfo as ProtoClientInfo,
|
||||
},
|
||||
};
|
||||
use ed25519_dalek::Signer as _;
|
||||
|
||||
use crate::{
|
||||
storage::StorageError,
|
||||
transport::{ClientTransport, next_request_id},
|
||||
};
|
||||
|
||||
/// Failures the client-side authentication handshake can surface to callers.
#[derive(Debug, thiserror::Error)]
pub enum AuthError {
    /// The server ended or diverted the stream before sending a challenge.
    #[error("Auth challenge was not returned by server")]
    MissingAuthChallenge,

    /// A user agent explicitly rejected this client's approval prompt.
    #[error("Client approval denied by User Agent")]
    ApprovalDenied,

    /// No user agent was connected to approve the new client.
    #[error("No User Agents online to approve client")]
    NoUserAgentsOnline,

    /// The server replied with a payload that does not fit the handshake
    /// protocol (wrong message type, or a result code we cannot interpret).
    #[error("Unexpected auth response payload")]
    UnexpectedAuthResponse,

    /// Loading or creating the local signing key failed.
    #[error("Signing key storage error")]
    Storage(#[from] StorageError),
}
|
||||
|
||||
fn map_auth_result(code: i32) -> AuthError {
|
||||
match AuthResult::try_from(code).unwrap_or(AuthResult::Unspecified) {
|
||||
AuthResult::ApprovalDenied => AuthError::ApprovalDenied,
|
||||
AuthResult::NoUserAgentsOnline => AuthError::NoUserAgentsOnline,
|
||||
AuthResult::Unspecified
|
||||
| AuthResult::Success
|
||||
| AuthResult::InvalidKey
|
||||
| AuthResult::InvalidSignature
|
||||
| AuthResult::Internal => AuthError::UnexpectedAuthResponse,
|
||||
}
|
||||
}
|
||||
|
||||
async fn send_auth_challenge_request(
|
||||
transport: &mut ClientTransport,
|
||||
metadata: ClientMetadata,
|
||||
key: &ed25519_dalek::SigningKey,
|
||||
) -> std::result::Result<(), AuthError> {
|
||||
transport
|
||||
.send(ClientRequest {
|
||||
request_id: next_request_id(),
|
||||
payload: Some(ClientRequestPayload::Auth(proto_auth::Request {
|
||||
payload: Some(AuthRequestPayload::ChallengeRequest(AuthChallengeRequest {
|
||||
pubkey: key.verifying_key().to_bytes().to_vec(),
|
||||
client_info: Some(ProtoClientInfo {
|
||||
name: metadata.name,
|
||||
description: metadata.description,
|
||||
version: metadata.version,
|
||||
}),
|
||||
})),
|
||||
})),
|
||||
})
|
||||
.await
|
||||
.map_err(|_| AuthError::UnexpectedAuthResponse)
|
||||
}
|
||||
|
||||
async fn receive_auth_challenge(
|
||||
transport: &mut ClientTransport,
|
||||
) -> std::result::Result<AuthChallenge, AuthError> {
|
||||
let response = transport
|
||||
.recv()
|
||||
.await
|
||||
.map_err(|_| AuthError::MissingAuthChallenge)?;
|
||||
|
||||
let payload = response.payload.ok_or(AuthError::MissingAuthChallenge)?;
|
||||
match payload {
|
||||
ClientResponsePayload::Auth(response) => match response.payload {
|
||||
Some(AuthResponsePayload::Challenge(challenge)) => Ok(challenge),
|
||||
Some(AuthResponsePayload::Result(result)) => Err(map_auth_result(result)),
|
||||
None => Err(AuthError::MissingAuthChallenge),
|
||||
},
|
||||
_ => Err(AuthError::UnexpectedAuthResponse),
|
||||
}
|
||||
}
|
||||
|
||||
async fn send_auth_challenge_solution(
|
||||
transport: &mut ClientTransport,
|
||||
key: &ed25519_dalek::SigningKey,
|
||||
challenge: AuthChallenge,
|
||||
) -> std::result::Result<(), AuthError> {
|
||||
let challenge_payload = format_challenge(challenge.nonce, &challenge.pubkey);
|
||||
let signature = key.sign(&challenge_payload).to_bytes().to_vec();
|
||||
|
||||
transport
|
||||
.send(ClientRequest {
|
||||
request_id: next_request_id(),
|
||||
payload: Some(ClientRequestPayload::Auth(proto_auth::Request {
|
||||
payload: Some(AuthRequestPayload::ChallengeSolution(
|
||||
AuthChallengeSolution { signature },
|
||||
)),
|
||||
})),
|
||||
})
|
||||
.await
|
||||
.map_err(|_| AuthError::UnexpectedAuthResponse)
|
||||
}
|
||||
|
||||
async fn receive_auth_confirmation(
|
||||
transport: &mut ClientTransport,
|
||||
) -> std::result::Result<(), AuthError> {
|
||||
let response = transport
|
||||
.recv()
|
||||
.await
|
||||
.map_err(|_| AuthError::UnexpectedAuthResponse)?;
|
||||
|
||||
let payload = response.payload.ok_or(AuthError::UnexpectedAuthResponse)?;
|
||||
match payload {
|
||||
ClientResponsePayload::Auth(response) => match response.payload {
|
||||
Some(AuthResponsePayload::Result(result))
|
||||
if AuthResult::try_from(result).ok() == Some(AuthResult::Success) =>
|
||||
{
|
||||
Ok(())
|
||||
}
|
||||
Some(AuthResponsePayload::Result(result)) => Err(map_auth_result(result)),
|
||||
_ => Err(AuthError::UnexpectedAuthResponse),
|
||||
},
|
||||
_ => Err(AuthError::UnexpectedAuthResponse),
|
||||
}
|
||||
}
|
||||
|
||||
/// Run the full challenge/response handshake over `transport`.
///
/// Four fixed steps, in order: request a challenge for our public key,
/// receive it, send back a signature over it, and wait for the server's
/// confirmation. Any failure aborts the handshake with an [`AuthError`].
pub(crate) async fn authenticate(
    transport: &mut ClientTransport,
    metadata: ClientMetadata,
    key: &ed25519_dalek::SigningKey,
) -> std::result::Result<(), AuthError> {
    send_auth_challenge_request(transport, metadata, key).await?;
    let challenge = receive_auth_challenge(transport).await?;
    send_auth_challenge_solution(transport, key, challenge).await?;
    receive_auth_confirmation(transport).await
}
|
||||
44
server/crates/arbiter-client/src/bin/test_connect.rs
Normal file
44
server/crates/arbiter-client/src/bin/test_connect.rs
Normal file
@@ -0,0 +1,44 @@
|
||||
use std::io::{self, Write};
|
||||
|
||||
use arbiter_client::ArbiterClient;
|
||||
use arbiter_proto::{ClientMetadata, url::ArbiterUrl};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
println!("Testing connection to Arbiter server...");
|
||||
print!("Enter ArbiterUrl: ");
|
||||
let _ = io::stdout().flush();
|
||||
|
||||
let mut input = String::new();
|
||||
if let Err(err) = io::stdin().read_line(&mut input) {
|
||||
eprintln!("Failed to read input: {err}");
|
||||
return;
|
||||
}
|
||||
|
||||
let input = input.trim();
|
||||
if input.is_empty() {
|
||||
eprintln!("ArbiterUrl cannot be empty");
|
||||
return;
|
||||
}
|
||||
|
||||
let url = match ArbiterUrl::try_from(input) {
|
||||
Ok(url) => url,
|
||||
Err(err) => {
|
||||
eprintln!("Invalid ArbiterUrl: {err}");
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
println!("{:#?}", url);
|
||||
|
||||
let metadata = ClientMetadata {
|
||||
name: "arbiter-client test_connect".to_string(),
|
||||
description: Some("Manual connection smoke test".to_string()),
|
||||
version: Some(env!("CARGO_PKG_VERSION").to_string()),
|
||||
};
|
||||
|
||||
match ArbiterClient::connect(url, metadata).await {
|
||||
Ok(_) => println!("Connected and authenticated successfully."),
|
||||
Err(err) => eprintln!("Failed to connect: {:#?}", err),
|
||||
}
|
||||
}
|
||||
94
server/crates/arbiter-client/src/client.rs
Normal file
94
server/crates/arbiter-client/src/client.rs
Normal file
@@ -0,0 +1,94 @@
|
||||
use arbiter_proto::{
|
||||
ClientMetadata, proto::arbiter_service_client::ArbiterServiceClient, url::ArbiterUrl,
|
||||
};
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::{Mutex, mpsc};
|
||||
use tokio_stream::wrappers::ReceiverStream;
|
||||
use tonic::transport::ClientTlsConfig;
|
||||
|
||||
use crate::{
|
||||
StorageError,
|
||||
auth::{AuthError, authenticate},
|
||||
storage::{FileSigningKeyStorage, SigningKeyStorage},
|
||||
transport::{BUFFER_LENGTH, ClientTransport},
|
||||
};
|
||||
|
||||
#[cfg(feature = "evm")]
|
||||
use crate::wallets::evm::ArbiterEvmWallet;
|
||||
|
||||
/// Everything that can go wrong while establishing an authenticated session.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    /// A gRPC call failed after the channel was up.
    #[error("gRPC error")]
    Grpc(#[from] tonic::Status),

    /// The underlying TLS/TCP channel could not be established.
    #[error("Could not establish connection")]
    Connection(#[from] tonic::transport::Error),

    /// The host/port from the ArbiterUrl did not form a valid URI.
    #[error("Invalid server URI")]
    InvalidUri(#[from] http::uri::InvalidUri),

    /// The CA certificate embedded in the ArbiterUrl could not be parsed
    /// into a trust anchor.
    #[error("Invalid CA certificate")]
    InvalidCaCert(#[from] webpki::Error),

    /// The challenge/response handshake failed.
    #[error("Authentication error")]
    Authentication(#[from] AuthError),

    /// Loading or creating the local signing key failed.
    #[error("Storage error")]
    Storage(#[from] StorageError),
}

/// Handle to an authenticated Arbiter session.
///
/// Wraps the bidirectional stream transport; the mutex serializes access so
/// request/response pairs are not interleaved by concurrent callers.
pub struct ArbiterClient {
    #[allow(dead_code)]
    transport: Arc<Mutex<ClientTransport>>,
}
|
||||
|
||||
impl ArbiterClient {
    /// Connect using the signing key at the default on-disk location,
    /// creating the key file if it does not exist yet.
    pub async fn connect(url: ArbiterUrl, metadata: ClientMetadata) -> Result<Self, Error> {
        let storage = FileSigningKeyStorage::from_default_location()?;
        Self::connect_with_storage(url, metadata, &storage).await
    }

    /// Connect using a caller-supplied key storage backend.
    pub async fn connect_with_storage<S: SigningKeyStorage>(
        url: ArbiterUrl,
        metadata: ClientMetadata,
        storage: &S,
    ) -> Result<Self, Error> {
        let key = storage.load_or_create()?;
        Self::connect_with_key(url, metadata, key).await
    }

    /// Connect with an explicit ed25519 signing key.
    ///
    /// Builds a TLS channel that trusts only the CA certificate carried in
    /// `url`, opens the bidirectional `client` stream, and runs the
    /// authentication handshake before returning — so a constructed
    /// `ArbiterClient` always represents an authenticated session.
    pub async fn connect_with_key(
        url: ArbiterUrl,
        metadata: ClientMetadata,
        key: ed25519_dalek::SigningKey,
    ) -> Result<Self, Error> {
        // Pin trust to the single CA cert embedded in the ArbiterUrl.
        let anchor = webpki::anchor_from_trusted_cert(&url.ca_cert)?.to_owned();
        let tls = ClientTlsConfig::new().trust_anchor(anchor);

        let channel =
            tonic::transport::Channel::from_shared(format!("https://{}:{}", url.host, url.port))?
                .tls_config(tls)?
                .connect()
                .await?;

        let mut client = ArbiterServiceClient::new(channel);
        let (tx, rx) = mpsc::channel(BUFFER_LENGTH);
        let response_stream = client.client(ReceiverStream::new(rx)).await?.into_inner();

        let mut transport = ClientTransport {
            sender: tx,
            receiver: response_stream,
        };

        // Authenticate before handing the transport to the caller.
        authenticate(&mut transport, metadata, &key).await?;

        Ok(Self {
            transport: Arc::new(Mutex::new(transport)),
        })
    }

    /// List EVM wallets this client may sign with. Not implemented yet.
    #[cfg(feature = "evm")]
    pub async fn evm_wallets(&self) -> Result<Vec<ArbiterEvmWallet>, Error> {
        todo!("fetch EVM wallet list from server")
    }
}
|
||||
@@ -1,14 +1,12 @@
|
||||
pub fn add(left: u64, right: u64) -> u64 {
|
||||
left + right
|
||||
}
|
||||
mod auth;
|
||||
mod client;
|
||||
mod storage;
|
||||
mod transport;
|
||||
pub mod wallets;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
pub use auth::AuthError;
|
||||
pub use client::{ArbiterClient, Error};
|
||||
pub use storage::{FileSigningKeyStorage, SigningKeyStorage, StorageError};
|
||||
|
||||
#[test]
|
||||
fn it_works() {
|
||||
let result = add(2, 2);
|
||||
assert_eq!(result, 4);
|
||||
}
|
||||
}
|
||||
#[cfg(feature = "evm")]
|
||||
pub use wallets::evm::{ArbiterEvmSignTransactionError, ArbiterEvmWallet};
|
||||
|
||||
132
server/crates/arbiter-client/src/storage.rs
Normal file
132
server/crates/arbiter-client/src/storage.rs
Normal file
@@ -0,0 +1,132 @@
|
||||
use arbiter_proto::home_path;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
/// Errors from loading or persisting the local signing key.
#[derive(Debug, thiserror::Error)]
pub enum StorageError {
    #[error("I/O error")]
    Io(#[from] std::io::Error),

    /// The key file exists but does not hold exactly 32 bytes of ed25519 seed.
    #[error("Invalid signing key length in storage: expected {expected} bytes, got {actual} bytes")]
    InvalidKeyLength { expected: usize, actual: usize },
}

/// Abstraction over where the client's ed25519 signing key lives.
pub trait SigningKeyStorage {
    /// Return the stored key, generating and persisting a fresh one if none exists.
    fn load_or_create(&self) -> std::result::Result<ed25519_dalek::SigningKey, StorageError>;
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct FileSigningKeyStorage {
|
||||
path: PathBuf,
|
||||
}
|
||||
|
||||
impl FileSigningKeyStorage {
|
||||
pub const DEFAULT_FILE_NAME: &str = "sdk_client_ed25519.key";
|
||||
|
||||
pub fn new(path: impl Into<PathBuf>) -> Self {
|
||||
Self { path: path.into() }
|
||||
}
|
||||
|
||||
pub fn from_default_location() -> std::result::Result<Self, StorageError> {
|
||||
Ok(Self::new(home_path()?.join(Self::DEFAULT_FILE_NAME)))
|
||||
}
|
||||
|
||||
fn read_key(path: &Path) -> std::result::Result<ed25519_dalek::SigningKey, StorageError> {
|
||||
let bytes = std::fs::read(path)?;
|
||||
let raw: [u8; 32] =
|
||||
bytes
|
||||
.try_into()
|
||||
.map_err(|v: Vec<u8>| StorageError::InvalidKeyLength {
|
||||
expected: 32,
|
||||
actual: v.len(),
|
||||
})?;
|
||||
Ok(ed25519_dalek::SigningKey::from_bytes(&raw))
|
||||
}
|
||||
}
|
||||
|
||||
impl SigningKeyStorage for FileSigningKeyStorage {
|
||||
fn load_or_create(&self) -> std::result::Result<ed25519_dalek::SigningKey, StorageError> {
|
||||
if let Some(parent) = self.path.parent() {
|
||||
std::fs::create_dir_all(parent)?;
|
||||
}
|
||||
|
||||
if self.path.exists() {
|
||||
return Self::read_key(&self.path);
|
||||
}
|
||||
|
||||
let key = ed25519_dalek::SigningKey::generate(&mut rand::rng());
|
||||
let raw_key = key.to_bytes();
|
||||
|
||||
// Use create_new to prevent accidental overwrite if another process creates the key first.
|
||||
match std::fs::OpenOptions::new()
|
||||
.create_new(true)
|
||||
.write(true)
|
||||
.open(&self.path)
|
||||
{
|
||||
Ok(mut file) => {
|
||||
use std::io::Write as _;
|
||||
file.write_all(&raw_key)?;
|
||||
Ok(key)
|
||||
}
|
||||
Err(err) if err.kind() == std::io::ErrorKind::AlreadyExists => {
|
||||
Self::read_key(&self.path)
|
||||
}
|
||||
Err(err) => Err(StorageError::Io(err)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::{FileSigningKeyStorage, SigningKeyStorage, StorageError};

    // Build a path in the system temp dir that is unique per process and per
    // call (pid + nanosecond timestamp), so parallel tests cannot collide.
    fn unique_temp_key_path() -> std::path::PathBuf {
        let nanos = std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .expect("clock should be after unix epoch")
            .as_nanos();
        std::env::temp_dir().join(format!(
            "arbiter-client-key-{}-{}.bin",
            std::process::id(),
            nanos
        ))
    }

    // First call creates the key file; second call must return the same key.
    #[test]
    fn file_storage_creates_and_reuses_key() {
        let path = unique_temp_key_path();
        let storage = FileSigningKeyStorage::new(path.clone());

        let key_a = storage
            .load_or_create()
            .expect("first load_or_create should create key");
        let key_b = storage
            .load_or_create()
            .expect("second load_or_create should read same key");

        assert_eq!(key_a.to_bytes(), key_b.to_bytes());
        assert!(path.exists());

        std::fs::remove_file(path).expect("temp key file should be removable");
    }

    // A pre-existing file with the wrong length must be rejected, not padded
    // or silently regenerated.
    #[test]
    fn file_storage_rejects_invalid_key_length() {
        let path = unique_temp_key_path();
        std::fs::write(&path, [42u8; 31]).expect("should write invalid key file");
        let storage = FileSigningKeyStorage::new(path.clone());

        let err = storage
            .load_or_create()
            .expect_err("storage should reject non-32-byte key file");

        match err {
            StorageError::InvalidKeyLength { expected, actual } => {
                assert_eq!(expected, 32);
                assert_eq!(actual, 31);
            }
            other => panic!("unexpected error: {other:?}"),
        }

        std::fs::remove_file(path).expect("temp key file should be removable");
    }
}
|
||||
44
server/crates/arbiter-client/src/transport.rs
Normal file
44
server/crates/arbiter-client/src/transport.rs
Normal file
@@ -0,0 +1,44 @@
|
||||
use arbiter_proto::proto::client::{ClientRequest, ClientResponse};
|
||||
use std::sync::atomic::{AtomicI32, Ordering};
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
/// Capacity of the outbound request channel.
pub(crate) const BUFFER_LENGTH: usize = 16;

/// Process-wide source of request ids, starting at 1.
static NEXT_REQUEST_ID: AtomicI32 = AtomicI32::new(1);

/// Hand out the next request id.
///
/// Relaxed ordering suffices: ids only need to be unique across threads,
/// not ordered relative to any other memory operation.
pub(crate) fn next_request_id() -> i32 {
    NEXT_REQUEST_ID.fetch_add(1, Ordering::Relaxed)
}
|
||||
|
||||
/// Transport-level failures while exchanging messages with the server.
#[derive(Debug, thiserror::Error)]
pub(crate) enum ClientSignError {
    /// The local mpsc sender found no receiver (stream torn down on our side).
    #[error("Transport channel closed")]
    ChannelClosed,

    /// The server ended the response stream or the stream errored.
    #[error("Connection closed by server")]
    ConnectionClosed,
}
|
||||
|
||||
/// The two halves of the bidirectional gRPC stream: an mpsc sender feeding
/// the outbound request stream, and the inbound tonic response stream.
pub(crate) struct ClientTransport {
    pub(crate) sender: mpsc::Sender<ClientRequest>,
    pub(crate) receiver: tonic::Streaming<ClientResponse>,
}
|
||||
|
||||
impl ClientTransport {
|
||||
pub(crate) async fn send(
|
||||
&mut self,
|
||||
request: ClientRequest,
|
||||
) -> std::result::Result<(), ClientSignError> {
|
||||
self.sender
|
||||
.send(request)
|
||||
.await
|
||||
.map_err(|_| ClientSignError::ChannelClosed)
|
||||
}
|
||||
|
||||
pub(crate) async fn recv(&mut self) -> std::result::Result<ClientResponse, ClientSignError> {
|
||||
match self.receiver.message().await {
|
||||
Ok(Some(resp)) => Ok(resp),
|
||||
Ok(None) => Err(ClientSignError::ConnectionClosed),
|
||||
Err(_) => Err(ClientSignError::ConnectionClosed),
|
||||
}
|
||||
}
|
||||
}
|
||||
196
server/crates/arbiter-client/src/wallets/evm.rs
Normal file
196
server/crates/arbiter-client/src/wallets/evm.rs
Normal file
@@ -0,0 +1,196 @@
|
||||
use alloy::{
|
||||
consensus::SignableTransaction,
|
||||
network::TxSigner,
|
||||
primitives::{Address, B256, ChainId, Signature},
|
||||
signers::{Error, Result, Signer},
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::Mutex;
|
||||
|
||||
use arbiter_proto::proto::{
|
||||
client::{
|
||||
ClientRequest,
|
||||
client_request::Payload as ClientRequestPayload,
|
||||
client_response::Payload as ClientResponsePayload,
|
||||
evm::{
|
||||
self as proto_evm, request::Payload as EvmRequestPayload,
|
||||
response::Payload as EvmResponsePayload,
|
||||
},
|
||||
},
|
||||
evm::{
|
||||
EvmSignTransactionRequest,
|
||||
evm_sign_transaction_response::Result as EvmSignTransactionResult,
|
||||
},
|
||||
shared::evm::TransactionEvalError,
|
||||
};
|
||||
|
||||
use crate::transport::{ClientTransport, next_request_id};
|
||||
|
||||
/// A typed error payload returned by [`ArbiterEvmWallet`] transaction signing.
|
||||
///
|
||||
/// This is wrapped into `alloy::signers::Error::Other`, so consumers can downcast by [`TryFrom`] and
|
||||
/// interpret the concrete policy evaluation failure instead of parsing strings.
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[non_exhaustive]
|
||||
pub enum ArbiterEvmSignTransactionError {
|
||||
#[error("transaction rejected by policy: {0:?}")]
|
||||
PolicyEval(TransactionEvalError),
|
||||
}
|
||||
|
||||
impl<'a> TryFrom<&'a Error> for &'a ArbiterEvmSignTransactionError {
|
||||
type Error = ();
|
||||
|
||||
fn try_from(value: &'a Error) -> Result<Self, Self::Error> {
|
||||
if let Error::Other(inner) = value
|
||||
&& let Some(eval_error) = inner.downcast_ref()
|
||||
{
|
||||
Ok(eval_error)
|
||||
} else {
|
||||
Err(())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ArbiterEvmWallet {
|
||||
transport: Arc<Mutex<ClientTransport>>,
|
||||
address: Address,
|
||||
chain_id: Option<ChainId>,
|
||||
}
|
||||
|
||||
impl ArbiterEvmWallet {
|
||||
#[expect(
|
||||
dead_code,
|
||||
reason = "constructor may be used in future extensions, e.g. to support wallet listing"
|
||||
)]
|
||||
pub(crate) fn new(transport: Arc<Mutex<ClientTransport>>, address: Address) -> Self {
|
||||
Self {
|
||||
transport,
|
||||
address,
|
||||
chain_id: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn address(&self) -> Address {
|
||||
self.address
|
||||
}
|
||||
|
||||
pub fn with_chain_id(mut self, chain_id: ChainId) -> Self {
|
||||
self.chain_id = Some(chain_id);
|
||||
self
|
||||
}
|
||||
|
||||
fn validate_chain_id(&self, tx: &mut dyn SignableTransaction<Signature>) -> Result<()> {
|
||||
if let Some(chain_id) = self.chain_id
|
||||
&& !tx.set_chain_id_checked(chain_id)
|
||||
{
|
||||
return Err(Error::TransactionChainIdMismatch {
|
||||
signer: chain_id,
|
||||
tx: tx.chain_id().unwrap(),
|
||||
});
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Signer for ArbiterEvmWallet {
|
||||
async fn sign_hash(&self, _hash: &B256) -> Result<Signature> {
|
||||
Err(Error::other(
|
||||
"hash-only signing is not supported for ArbiterEvmWallet; use transaction signing",
|
||||
))
|
||||
}
|
||||
|
||||
fn address(&self) -> Address {
|
||||
self.address
|
||||
}
|
||||
|
||||
fn chain_id(&self) -> Option<ChainId> {
|
||||
self.chain_id
|
||||
}
|
||||
|
||||
fn set_chain_id(&mut self, chain_id: Option<ChainId>) {
|
||||
self.chain_id = chain_id;
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl TxSigner<Signature> for ArbiterEvmWallet {
|
||||
fn address(&self) -> Address {
|
||||
self.address
|
||||
}
|
||||
|
||||
async fn sign_transaction(
|
||||
&self,
|
||||
tx: &mut dyn SignableTransaction<Signature>,
|
||||
) -> Result<Signature> {
|
||||
self.validate_chain_id(tx)?;
|
||||
|
||||
let mut transport = self.transport.lock().await;
|
||||
let request_id = next_request_id();
|
||||
let rlp_transaction = tx.encoded_for_signing();
|
||||
|
||||
transport
|
||||
.send(ClientRequest {
|
||||
request_id,
|
||||
payload: Some(ClientRequestPayload::Evm(proto_evm::Request {
|
||||
payload: Some(EvmRequestPayload::SignTransaction(
|
||||
EvmSignTransactionRequest {
|
||||
wallet_address: self.address.to_vec(),
|
||||
rlp_transaction,
|
||||
},
|
||||
)),
|
||||
})),
|
||||
})
|
||||
.await
|
||||
.map_err(|_| Error::other("failed to send evm sign transaction request"))?;
|
||||
|
||||
let response = transport
|
||||
.recv()
|
||||
.await
|
||||
.map_err(|_| Error::other("failed to receive evm sign transaction response"))?;
|
||||
|
||||
if response.request_id != Some(request_id) {
|
||||
return Err(Error::other(
|
||||
"received mismatched response id for evm sign transaction",
|
||||
));
|
||||
}
|
||||
|
||||
let payload = response
|
||||
.payload
|
||||
.ok_or_else(|| Error::other("missing evm sign transaction response payload"))?;
|
||||
|
||||
let ClientResponsePayload::Evm(proto_evm::Response {
|
||||
payload: Some(payload),
|
||||
}) = payload
|
||||
else {
|
||||
return Err(Error::other(
|
||||
"unexpected response payload for evm sign transaction request",
|
||||
));
|
||||
};
|
||||
|
||||
let EvmResponsePayload::SignTransaction(response) = payload else {
|
||||
return Err(Error::other(
|
||||
"unexpected evm response payload for sign transaction request",
|
||||
));
|
||||
};
|
||||
|
||||
let result = response
|
||||
.result
|
||||
.ok_or_else(|| Error::other("missing evm sign transaction result"))?;
|
||||
|
||||
match result {
|
||||
EvmSignTransactionResult::Signature(signature) => {
|
||||
Signature::try_from(signature.as_slice())
|
||||
.map_err(|_| Error::other("invalid signature returned by server"))
|
||||
}
|
||||
EvmSignTransactionResult::EvalError(eval_error) => Err(Error::other(
|
||||
ArbiterEvmSignTransactionError::PolicyEval(eval_error),
|
||||
)),
|
||||
EvmSignTransactionResult::Error(code) => Err(Error::other(format!(
|
||||
"server failed to sign transaction with error code {code}"
|
||||
))),
|
||||
}
|
||||
}
|
||||
}
|
||||
2
server/crates/arbiter-client/src/wallets/mod.rs
Normal file
2
server/crates/arbiter-client/src/wallets/mod.rs
Normal file
@@ -0,0 +1,2 @@
|
||||
#[cfg(feature = "evm")]
|
||||
pub mod evm;
|
||||
@@ -10,8 +10,8 @@ tonic.workspace = true
|
||||
tokio.workspace = true
|
||||
futures.workspace = true
|
||||
hex = "0.4.3"
|
||||
tonic-prost = "0.14.3"
|
||||
prost = "0.14.3"
|
||||
tonic-prost = "0.14.5"
|
||||
prost.workspace = true
|
||||
kameo.workspace = true
|
||||
url = "2.5.8"
|
||||
miette.workspace = true
|
||||
@@ -21,9 +21,11 @@ base64 = "0.22.1"
|
||||
prost-types.workspace = true
|
||||
tracing.workspace = true
|
||||
async-trait.workspace = true
|
||||
tokio-stream.workspace = true
|
||||
|
||||
[build-dependencies]
|
||||
tonic-prost-build = "0.14.3"
|
||||
tonic-prost-build = "0.14.5"
|
||||
protoc-bin-vendored = "3"
|
||||
|
||||
[dev-dependencies]
|
||||
rstest.workspace = true
|
||||
@@ -32,5 +34,3 @@ rcgen.workspace = true
|
||||
|
||||
[package.metadata.cargo-shear]
|
||||
ignored = ["tonic-prost", "prost", "kameo"]
|
||||
|
||||
|
||||
|
||||
@@ -3,7 +3,6 @@ use tonic_prost_build::configure;
|
||||
static PROTOBUF_DIR: &str = "../../../protobufs";
|
||||
|
||||
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
|
||||
println!("cargo::rerun-if-changed={PROTOBUF_DIR}");
|
||||
|
||||
configure()
|
||||
@@ -17,7 +16,6 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
],
|
||||
&[PROTOBUF_DIR.to_string()],
|
||||
)
|
||||
|
||||
.unwrap();
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -6,12 +6,56 @@ use base64::{Engine, prelude::BASE64_STANDARD};
|
||||
pub mod proto {
|
||||
tonic::include_proto!("arbiter");
|
||||
|
||||
pub mod shared {
|
||||
tonic::include_proto!("arbiter.shared");
|
||||
|
||||
pub mod evm {
|
||||
tonic::include_proto!("arbiter.shared.evm");
|
||||
}
|
||||
}
|
||||
|
||||
pub mod user_agent {
|
||||
tonic::include_proto!("arbiter.user_agent");
|
||||
|
||||
pub mod auth {
|
||||
tonic::include_proto!("arbiter.user_agent.auth");
|
||||
}
|
||||
|
||||
pub mod evm {
|
||||
tonic::include_proto!("arbiter.user_agent.evm");
|
||||
}
|
||||
|
||||
pub mod sdk_client {
|
||||
tonic::include_proto!("arbiter.user_agent.sdk_client");
|
||||
}
|
||||
|
||||
pub mod vault {
|
||||
tonic::include_proto!("arbiter.user_agent.vault");
|
||||
|
||||
pub mod bootstrap {
|
||||
tonic::include_proto!("arbiter.user_agent.vault.bootstrap");
|
||||
}
|
||||
|
||||
pub mod unseal {
|
||||
tonic::include_proto!("arbiter.user_agent.vault.unseal");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub mod client {
|
||||
tonic::include_proto!("arbiter.client");
|
||||
|
||||
pub mod auth {
|
||||
tonic::include_proto!("arbiter.client.auth");
|
||||
}
|
||||
|
||||
pub mod evm {
|
||||
tonic::include_proto!("arbiter.client.evm");
|
||||
}
|
||||
|
||||
pub mod vault {
|
||||
tonic::include_proto!("arbiter.client.vault");
|
||||
}
|
||||
}
|
||||
|
||||
pub mod evm {
|
||||
@@ -19,6 +63,13 @@ pub mod proto {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct ClientMetadata {
|
||||
pub name: String,
|
||||
pub description: Option<String>,
|
||||
pub version: Option<String>,
|
||||
}
|
||||
|
||||
pub static BOOTSTRAP_PATH: &str = "bootstrap_token";
|
||||
|
||||
pub fn home_path() -> Result<std::path::PathBuf, std::io::Error> {
|
||||
|
||||
@@ -1,78 +1,59 @@
|
||||
//! Transport-facing abstractions for protocol/session code.
|
||||
//! Transport-facing abstractions shared by protocol/session code.
|
||||
//!
|
||||
//! This module separates three concerns:
|
||||
//! This module defines a small set of transport traits that actors and other
|
||||
//! protocol code can depend on without knowing anything about the concrete
|
||||
//! transport underneath.
|
||||
//!
|
||||
//! - protocol/session logic wants a small duplex interface ([`Bi`])
|
||||
//! - transport adapters push concrete stream items to an underlying IO layer
|
||||
//! - transport boundaries translate between protocol-facing and transport-facing
|
||||
//! item types via direction-specific converters
|
||||
//! The abstraction is split into:
|
||||
//! - [`Sender`] for outbound delivery
|
||||
//! - [`Receiver`] for inbound delivery
|
||||
//! - [`Bi`] as the combined duplex form (`Sender + Receiver`)
|
||||
//!
|
||||
//! [`Bi`] is intentionally minimal and transport-agnostic:
|
||||
//! - [`Bi::recv`] yields inbound protocol messages
|
||||
//! - [`Bi::send`] accepts outbound protocol/domain items
|
||||
//! This split lets code depend only on the half it actually needs. For
|
||||
//! example, some actor/session code only sends out-of-band messages, while
|
||||
//! auth/state-machine code may need full duplex access.
|
||||
//!
|
||||
//! [`Bi`] remains intentionally minimal and transport-agnostic:
|
||||
//! - [`Receiver::recv`] yields inbound messages
|
||||
//! - [`Sender::send`] accepts outbound messages
|
||||
//!
|
||||
//! Transport-specific adapters, including protobuf or gRPC bridges, live in the
|
||||
//! crates that own those boundaries rather than in `arbiter-proto`.
|
||||
//!
|
||||
//! [`Bi`] deliberately does not model request/response correlation. Some
|
||||
//! transports may carry multiplexed request/response traffic, some may emit
|
||||
//! out-of-band messages, and some may be one-message-at-a-time state machines.
|
||||
//! Correlation concerns such as request IDs, pending response maps, and
|
||||
//! out-of-band routing belong in the adapter or connection layer built on top
|
||||
//! of [`Bi`], not in this abstraction itself.
|
||||
//!
|
||||
//! # Generic Ordering Rule
|
||||
//!
|
||||
//! This module uses a single convention consistently: when a type or trait is
|
||||
//! parameterized by protocol message directions, the generic parameters are
|
||||
//! declared as `Inbound` first, then `Outbound`.
|
||||
//! This module consistently uses `Inbound` first and `Outbound` second in
|
||||
//! generic parameter lists.
|
||||
//!
|
||||
//! For [`Bi`], that means `Bi<Inbound, Outbound>`:
|
||||
//! For [`Receiver`], [`Sender`], and [`Bi`], this means:
|
||||
//! - `Receiver<Inbound>`
|
||||
//! - `Sender<Outbound>`
|
||||
//! - `Bi<Inbound, Outbound>`
|
||||
//!
|
||||
//! Concretely, for [`Bi`]:
|
||||
//! - `recv() -> Option<Inbound>`
|
||||
//! - `send(Outbound)`
|
||||
//!
|
||||
//! For adapter types that are parameterized by direction-specific converters,
|
||||
//! inbound-related converter parameters are declared before outbound-related
|
||||
//! converter parameters.
|
||||
//! [`expect_message`] is a small helper for linear protocol steps: it reads one
|
||||
//! inbound message from a transport and extracts a typed value from it, failing
|
||||
//! if the channel closes or the message shape is not what the caller expected.
|
||||
//!
|
||||
//! [`RecvConverter`] and [`SendConverter`] are infallible conversion traits used
|
||||
//! by adapters to map between protocol-facing and transport-facing item types.
|
||||
//! The traits themselves are not result-aware; adapters decide how transport
|
||||
//! errors are handled before (or instead of) conversion.
|
||||
//!
|
||||
//! [`grpc::GrpcAdapter`] combines:
|
||||
//! - a tonic inbound stream
|
||||
//! - a Tokio sender for outbound transport items
|
||||
//! - a [`RecvConverter`] for the receive path
|
||||
//! - a [`SendConverter`] for the send path
|
||||
//!
|
||||
//! [`DummyTransport`] is a no-op implementation useful for tests and local actor
|
||||
//! execution where no real network stream exists.
|
||||
//!
|
||||
//! # Component Interaction
|
||||
//!
|
||||
//! ```text
|
||||
//! inbound (network -> protocol)
|
||||
//! ============================
|
||||
//!
|
||||
//! tonic::Streaming<RecvTransport>
|
||||
//! -> grpc::GrpcAdapter::recv()
|
||||
//! |
|
||||
//! +--> on `Ok(item)`: RecvConverter::convert(RecvTransport) -> Inbound
|
||||
//! +--> on `Err(status)`: log error and close stream (`None`)
|
||||
//! -> Bi::recv()
|
||||
//! -> protocol/session actor
|
||||
//!
|
||||
//! outbound (protocol -> network)
|
||||
//! ==============================
|
||||
//!
|
||||
//! protocol/session actor
|
||||
//! -> Bi::send(Outbound)
|
||||
//! -> grpc::GrpcAdapter::send()
|
||||
//! |
|
||||
//! +--> SendConverter::convert(Outbound) -> SendTransport
|
||||
//! -> Tokio mpsc::Sender<SendTransport>
|
||||
//! -> tonic response stream
|
||||
//! ```
|
||||
//! [`DummyTransport`] is a no-op implementation useful for tests and local
|
||||
//! actor execution where no real stream exists.
|
||||
//!
|
||||
//! # Design Notes
|
||||
//!
|
||||
//! - `send()` returns [`Error`] only for transport delivery failures (for
|
||||
//! example, when the outbound channel is closed).
|
||||
//! - [`grpc::GrpcAdapter`] logs tonic receive errors and treats them as stream
|
||||
//! closure (`None`).
|
||||
//! - When protocol-facing and transport-facing types are identical, use
|
||||
//! [`IdentityRecvConverter`] / [`IdentitySendConverter`].
|
||||
//! - [`Bi::send`] returns [`Error`] only for transport delivery failures, such
|
||||
//! as a closed outbound channel.
|
||||
//! - [`Bi::recv`] returns `None` when the underlying transport closes.
|
||||
//! - Message translation is intentionally out of scope for this module.
|
||||
|
||||
use std::marker::PhantomData;
|
||||
|
||||
@@ -83,175 +64,54 @@ use async_trait::async_trait;
|
||||
pub enum Error {
|
||||
#[error("Transport channel is closed")]
|
||||
ChannelClosed,
|
||||
#[error("Unexpected message received")]
|
||||
UnexpectedMessage,
|
||||
}
|
||||
|
||||
/// Receives one message from `transport` and extracts a value from it using
|
||||
/// `extractor`. Returns [`Error::ChannelClosed`] if the transport closes and
|
||||
/// [`Error::UnexpectedMessage`] if `extractor` returns `None`.
|
||||
pub async fn expect_message<T, Inbound, Outbound, Target, F>(
|
||||
transport: &mut T,
|
||||
extractor: F,
|
||||
) -> Result<Target, Error>
|
||||
where
|
||||
T: Bi<Inbound, Outbound> + ?Sized,
|
||||
F: FnOnce(Inbound) -> Option<Target>,
|
||||
{
|
||||
let msg = transport.recv().await.ok_or(Error::ChannelClosed)?;
|
||||
extractor(msg).ok_or(Error::UnexpectedMessage)
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait Sender<Outbound>: Send + Sync {
|
||||
async fn send(&mut self, item: Outbound) -> Result<(), Error>;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
pub trait Receiver<Inbound>: Send + Sync {
|
||||
async fn recv(&mut self) -> Option<Inbound>;
|
||||
}
|
||||
|
||||
/// Minimal bidirectional transport abstraction used by protocol code.
|
||||
///
|
||||
/// `Bi<Inbound, Outbound>` models a duplex channel with:
|
||||
/// `Bi<Inbound, Outbound>` is the combined duplex form of [`Sender`] and
|
||||
/// [`Receiver`].
|
||||
///
|
||||
/// It models a channel with:
|
||||
/// - inbound items of type `Inbound` read via [`Bi::recv`]
|
||||
/// - outbound items of type `Outbound` written via [`Bi::send`]
|
||||
#[async_trait]
|
||||
pub trait Bi<Inbound, Outbound>: Send + Sync + 'static {
|
||||
async fn send(&mut self, item: Outbound) -> Result<(), Error>;
|
||||
///
|
||||
/// It does not imply request/response sequencing, one-at-a-time exchange, or
|
||||
/// any built-in correlation mechanism between inbound and outbound items.
|
||||
pub trait Bi<Inbound, Outbound>: Sender<Outbound> + Receiver<Inbound> + Send + Sync {}
|
||||
|
||||
async fn recv(&mut self) -> Option<Inbound>;
|
||||
}
|
||||
pub trait SplittableBi<Inbound, Outbound>: Bi<Inbound, Outbound> {
|
||||
type Sender: Sender<Outbound>;
|
||||
type Receiver: Receiver<Inbound>;
|
||||
|
||||
/// Converts transport-facing inbound items into protocol-facing inbound items.
|
||||
pub trait RecvConverter: Send + Sync + 'static {
|
||||
type Input;
|
||||
type Output;
|
||||
|
||||
fn convert(&self, item: Self::Input) -> Self::Output;
|
||||
}
|
||||
|
||||
/// Converts protocol/domain outbound items into transport-facing outbound items.
|
||||
pub trait SendConverter: Send + Sync + 'static {
|
||||
type Input;
|
||||
type Output;
|
||||
|
||||
fn convert(&self, item: Self::Input) -> Self::Output;
|
||||
}
|
||||
|
||||
/// A [`RecvConverter`] that forwards values unchanged.
|
||||
pub struct IdentityRecvConverter<T> {
|
||||
_marker: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<T> IdentityRecvConverter<T> {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
_marker: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Default for IdentityRecvConverter<T> {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> RecvConverter for IdentityRecvConverter<T>
|
||||
where
|
||||
T: Send + Sync + 'static,
|
||||
{
|
||||
type Input = T;
|
||||
type Output = T;
|
||||
|
||||
fn convert(&self, item: Self::Input) -> Self::Output {
|
||||
item
|
||||
}
|
||||
}
|
||||
|
||||
/// A [`SendConverter`] that forwards values unchanged.
|
||||
pub struct IdentitySendConverter<T> {
|
||||
_marker: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<T> IdentitySendConverter<T> {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
_marker: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Default for IdentitySendConverter<T> {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> SendConverter for IdentitySendConverter<T>
|
||||
where
|
||||
T: Send + Sync + 'static,
|
||||
{
|
||||
type Input = T;
|
||||
type Output = T;
|
||||
|
||||
fn convert(&self, item: Self::Input) -> Self::Output {
|
||||
item
|
||||
}
|
||||
}
|
||||
|
||||
/// gRPC-specific transport adapters and helpers.
|
||||
pub mod grpc {
|
||||
use async_trait::async_trait;
|
||||
use futures::StreamExt;
|
||||
use tokio::sync::mpsc;
|
||||
use tonic::Streaming;
|
||||
|
||||
use super::{Bi, Error, RecvConverter, SendConverter};
|
||||
|
||||
/// [`Bi`] adapter backed by a tonic gRPC bidirectional stream.
|
||||
///
|
||||
|
||||
/// Tonic receive errors are logged and treated as stream closure (`None`).
|
||||
/// The receive converter is only invoked for successful inbound transport
|
||||
/// items.
|
||||
pub struct GrpcAdapter<InboundConverter, OutboundConverter>
|
||||
where
|
||||
InboundConverter: RecvConverter,
|
||||
OutboundConverter: SendConverter,
|
||||
{
|
||||
sender: mpsc::Sender<OutboundConverter::Output>,
|
||||
receiver: Streaming<InboundConverter::Input>,
|
||||
inbound_converter: InboundConverter,
|
||||
outbound_converter: OutboundConverter,
|
||||
}
|
||||
|
||||
impl<InboundTransport, Inbound, InboundConverter, OutboundConverter>
|
||||
GrpcAdapter<InboundConverter, OutboundConverter>
|
||||
where
|
||||
InboundConverter: RecvConverter<Input = InboundTransport, Output = Inbound>,
|
||||
OutboundConverter: SendConverter,
|
||||
{
|
||||
pub fn new(
|
||||
sender: mpsc::Sender<OutboundConverter::Output>,
|
||||
receiver: Streaming<InboundTransport>,
|
||||
inbound_converter: InboundConverter,
|
||||
outbound_converter: OutboundConverter,
|
||||
) -> Self {
|
||||
Self {
|
||||
sender,
|
||||
receiver,
|
||||
inbound_converter,
|
||||
outbound_converter,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<InboundConverter, OutboundConverter> Bi<InboundConverter::Output, OutboundConverter::Input>
|
||||
for GrpcAdapter<InboundConverter, OutboundConverter>
|
||||
where
|
||||
InboundConverter: RecvConverter,
|
||||
OutboundConverter: SendConverter,
|
||||
OutboundConverter::Input: Send + 'static,
|
||||
OutboundConverter::Output: Send + 'static,
|
||||
{
|
||||
#[tracing::instrument(level = "trace", skip(self, item))]
|
||||
async fn send(&mut self, item: OutboundConverter::Input) -> Result<(), Error> {
|
||||
let outbound = self.outbound_converter.convert(item);
|
||||
self.sender
|
||||
.send(outbound)
|
||||
.await
|
||||
.map_err(|_| Error::ChannelClosed)
|
||||
}
|
||||
|
||||
#[tracing::instrument(level = "trace", skip(self))]
|
||||
async fn recv(&mut self) -> Option<InboundConverter::Output> {
|
||||
match self.receiver.next().await {
|
||||
Some(Ok(item)) => Some(self.inbound_converter.convert(item)),
|
||||
Some(Err(error)) => {
|
||||
tracing::error!(error = ?error, "grpc transport recv failed; closing stream");
|
||||
None
|
||||
}
|
||||
None => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
fn split(self) -> (Self::Sender, Self::Receiver);
|
||||
fn from_parts(sender: Self::Sender, receiver: Self::Receiver) -> Self;
|
||||
}
|
||||
|
||||
/// No-op [`Bi`] transport for tests and manual actor usage.
|
||||
@@ -262,22 +122,16 @@ pub struct DummyTransport<Inbound, Outbound> {
|
||||
_marker: PhantomData<(Inbound, Outbound)>,
|
||||
}
|
||||
|
||||
impl<Inbound, Outbound> DummyTransport<Inbound, Outbound> {
|
||||
pub fn new() -> Self {
|
||||
impl<Inbound, Outbound> Default for DummyTransport<Inbound, Outbound> {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
_marker: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<Inbound, Outbound> Default for DummyTransport<Inbound, Outbound> {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<Inbound, Outbound> Bi<Inbound, Outbound> for DummyTransport<Inbound, Outbound>
|
||||
impl<Inbound, Outbound> Sender<Outbound> for DummyTransport<Inbound, Outbound>
|
||||
where
|
||||
Inbound: Send + Sync + 'static,
|
||||
Outbound: Send + Sync + 'static,
|
||||
@@ -285,9 +139,25 @@ where
|
||||
async fn send(&mut self, _item: Outbound) -> Result<(), Error> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<Inbound, Outbound> Receiver<Inbound> for DummyTransport<Inbound, Outbound>
|
||||
where
|
||||
Inbound: Send + Sync + 'static,
|
||||
Outbound: Send + Sync + 'static,
|
||||
{
|
||||
async fn recv(&mut self) -> Option<Inbound> {
|
||||
std::future::pending::<()>().await;
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl<Inbound, Outbound> Bi<Inbound, Outbound> for DummyTransport<Inbound, Outbound>
|
||||
where
|
||||
Inbound: Send + Sync + 'static,
|
||||
Outbound: Send + Sync + 'static,
|
||||
{
|
||||
}
|
||||
|
||||
pub mod grpc;
|
||||
|
||||
106
server/crates/arbiter-proto/src/transport/grpc.rs
Normal file
106
server/crates/arbiter-proto/src/transport/grpc.rs
Normal file
@@ -0,0 +1,106 @@
|
||||
use async_trait::async_trait;
|
||||
use futures::StreamExt;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio_stream::wrappers::ReceiverStream;
|
||||
|
||||
use super::{Bi, Receiver, Sender};
|
||||
|
||||
pub struct GrpcSender<Outbound> {
|
||||
tx: mpsc::Sender<Result<Outbound, tonic::Status>>,
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<Outbound> Sender<Result<Outbound, tonic::Status>> for GrpcSender<Outbound>
|
||||
where
|
||||
Outbound: Send + Sync + 'static,
|
||||
{
|
||||
async fn send(&mut self, item: Result<Outbound, tonic::Status>) -> Result<(), super::Error> {
|
||||
self.tx
|
||||
.send(item)
|
||||
.await
|
||||
.map_err(|_| super::Error::ChannelClosed)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct GrpcReceiver<Inbound> {
|
||||
rx: tonic::Streaming<Inbound>,
|
||||
}
|
||||
#[async_trait]
|
||||
impl<Inbound> Receiver<Result<Inbound, tonic::Status>> for GrpcReceiver<Inbound>
|
||||
where
|
||||
Inbound: Send + Sync + 'static,
|
||||
{
|
||||
async fn recv(&mut self) -> Option<Result<Inbound, tonic::Status>> {
|
||||
self.rx.next().await
|
||||
}
|
||||
}
|
||||
|
||||
pub struct GrpcBi<Inbound, Outbound> {
|
||||
sender: GrpcSender<Outbound>,
|
||||
receiver: GrpcReceiver<Inbound>,
|
||||
}
|
||||
|
||||
impl<Inbound, Outbound> GrpcBi<Inbound, Outbound>
|
||||
where
|
||||
Inbound: Send + Sync + 'static,
|
||||
Outbound: Send + Sync + 'static,
|
||||
{
|
||||
pub fn from_bi_stream(
|
||||
receiver: tonic::Streaming<Inbound>,
|
||||
) -> (Self, ReceiverStream<Result<Outbound, tonic::Status>>) {
|
||||
let (tx, rx) = mpsc::channel(10);
|
||||
let sender = GrpcSender { tx };
|
||||
let receiver = GrpcReceiver { rx: receiver };
|
||||
let bi = GrpcBi { sender, receiver };
|
||||
(bi, ReceiverStream::new(rx))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<Inbound, Outbound> Sender<Result<Outbound, tonic::Status>> for GrpcBi<Inbound, Outbound>
|
||||
where
|
||||
Inbound: Send + Sync + 'static,
|
||||
Outbound: Send + Sync + 'static,
|
||||
{
|
||||
async fn send(&mut self, item: Result<Outbound, tonic::Status>) -> Result<(), super::Error> {
|
||||
self.sender.send(item).await
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<Inbound, Outbound> Receiver<Result<Inbound, tonic::Status>> for GrpcBi<Inbound, Outbound>
|
||||
where
|
||||
Inbound: Send + Sync + 'static,
|
||||
Outbound: Send + Sync + 'static,
|
||||
{
|
||||
async fn recv(&mut self) -> Option<Result<Inbound, tonic::Status>> {
|
||||
self.receiver.recv().await
|
||||
}
|
||||
}
|
||||
|
||||
impl<Inbound, Outbound> Bi<Result<Inbound, tonic::Status>, Result<Outbound, tonic::Status>>
|
||||
for GrpcBi<Inbound, Outbound>
|
||||
where
|
||||
Inbound: Send + Sync + 'static,
|
||||
Outbound: Send + Sync + 'static,
|
||||
{
|
||||
}
|
||||
|
||||
impl<Inbound, Outbound>
|
||||
super::SplittableBi<Result<Inbound, tonic::Status>, Result<Outbound, tonic::Status>>
|
||||
for GrpcBi<Inbound, Outbound>
|
||||
where
|
||||
Inbound: Send + Sync + 'static,
|
||||
Outbound: Send + Sync + 'static,
|
||||
{
|
||||
type Sender = GrpcSender<Outbound>;
|
||||
type Receiver = GrpcReceiver<Inbound>;
|
||||
|
||||
fn split(self) -> (Self::Sender, Self::Receiver) {
|
||||
(self.sender, self.receiver)
|
||||
}
|
||||
|
||||
fn from_parts(sender: Self::Sender, receiver: Self::Receiver) -> Self {
|
||||
GrpcBi { sender, receiver }
|
||||
}
|
||||
}
|
||||
@@ -7,6 +7,7 @@ const ARBITER_URL_SCHEME: &str = "arbiter";
|
||||
const CERT_QUERY_KEY: &str = "cert";
|
||||
const BOOTSTRAP_TOKEN_QUERY_KEY: &str = "bootstrap_token";
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ArbiterUrl {
|
||||
pub host: String,
|
||||
pub port: u16,
|
||||
@@ -20,7 +21,7 @@ impl Display for ArbiterUrl {
|
||||
"{ARBITER_URL_SCHEME}://{}:{}?{CERT_QUERY_KEY}={}",
|
||||
self.host,
|
||||
self.port,
|
||||
BASE64_URL_SAFE.encode(self.ca_cert.to_vec())
|
||||
BASE64_URL_SAFE.encode(&self.ca_cert)
|
||||
);
|
||||
if let Some(token) = &self.bootstrap_token {
|
||||
base.push_str(&format!("&{BOOTSTRAP_TOKEN_QUERY_KEY}={}", token));
|
||||
|
||||
@@ -5,15 +5,19 @@ edition = "2024"
|
||||
repository = "https://git.markettakers.org/MarketTakers/arbiter"
|
||||
license = "Apache-2.0"
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
diesel = { version = "2.3.6", features = ["chrono", "returning_clauses_for_sqlite_3_35", "serde_json", "time", "uuid"] }
|
||||
diesel-async = { version = "0.7.4", features = [
|
||||
diesel = { version = "2.3.7", features = ["chrono", "returning_clauses_for_sqlite_3_35", "serde_json", "time", "uuid"] }
|
||||
diesel-async = { version = "0.8.0", features = [
|
||||
"bb8",
|
||||
"migrations",
|
||||
"sqlite",
|
||||
"tokio",
|
||||
] }
|
||||
ed25519-dalek.workspace = true
|
||||
ed25519-dalek.features = ["serde"]
|
||||
arbiter-proto.path = "../arbiter-proto"
|
||||
tracing.workspace = true
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
@@ -22,8 +26,8 @@ tonic.features = ["tls-aws-lc"]
|
||||
tokio.workspace = true
|
||||
rustls.workspace = true
|
||||
smlang.workspace = true
|
||||
miette.workspace = true
|
||||
thiserror.workspace = true
|
||||
fatality = "0.1.1"
|
||||
diesel_migrations = { version = "2.3.1", features = ["sqlite"] }
|
||||
async-trait.workspace = true
|
||||
secrecy = "0.10.3"
|
||||
@@ -40,12 +44,26 @@ x25519-dalek.workspace = true
|
||||
chacha20poly1305 = { version = "0.10.1", features = ["std"] }
|
||||
argon2 = { version = "0.5.3", features = ["zeroize"] }
|
||||
restructed = "0.2.2"
|
||||
strum = { version = "0.27.2", features = ["derive"] }
|
||||
strum = { version = "0.28.0", features = ["derive"] }
|
||||
pem = "3.0.6"
|
||||
k256 = "0.13.4"
|
||||
k256.workspace = true
|
||||
k256.features = ["serde"]
|
||||
rsa.workspace = true
|
||||
rsa.features = ["serde"]
|
||||
sha2.workspace = true
|
||||
hmac = "0.12"
|
||||
spki.workspace = true
|
||||
alloy.workspace = true
|
||||
prost-types.workspace = true
|
||||
prost.workspace = true
|
||||
arbiter-tokens-registry.path = "../arbiter-tokens-registry"
|
||||
anyhow = "1.0.102"
|
||||
serde_with = "3.18.0"
|
||||
mutants.workspace = true
|
||||
subtle = "2.6.1"
|
||||
|
||||
[dev-dependencies]
|
||||
insta = "1.46.3"
|
||||
proptest = "1.11.0"
|
||||
rstest.workspace = true
|
||||
test-log = { version = "0.2", default-features = false, features = ["trace"] }
|
||||
|
||||
@@ -40,24 +40,51 @@ create table if not exists arbiter_settings (
|
||||
tls_id integer references tls_history (id) on delete RESTRICT
|
||||
) STRICT;
|
||||
|
||||
insert into arbiter_settings (id) values (1) on conflict do nothing; -- ensure singleton row exists
|
||||
insert into arbiter_settings (id) values (1) on conflict do nothing;
|
||||
-- ensure singleton row exists
|
||||
|
||||
create table if not exists useragent_client (
|
||||
id integer not null primary key,
|
||||
nonce integer not null default(1), -- used for auth challenge
|
||||
public_key blob not null,
|
||||
key_type integer not null default(1), -- 1=Ed25519, 2=ECDSA(secp256k1)
|
||||
created_at integer not null default(unixepoch ('now')),
|
||||
updated_at integer not null default(unixepoch ('now'))
|
||||
) STRICT;
|
||||
create unique index if not exists uniq_useragent_client_public_key on useragent_client (public_key, key_type);
|
||||
|
||||
create table if not exists client_metadata (
|
||||
id integer not null primary key,
|
||||
name text not null, -- human-readable name for the client
|
||||
description text, -- optional description for the client
|
||||
version text, -- client version for tracking and debugging
|
||||
created_at integer not null default(unixepoch ('now'))
|
||||
) STRICT;
|
||||
|
||||
-- created to track history of changes
|
||||
create table if not exists client_metadata_history (
|
||||
id integer not null primary key,
|
||||
metadata_id integer not null references client_metadata (id) on delete cascade,
|
||||
client_id integer not null references program_client (id) on delete cascade,
|
||||
created_at integer not null default(unixepoch ('now'))
|
||||
) STRICT;
|
||||
|
||||
create unique index if not exists uniq_metadata_binding_client on client_metadata_history (client_id);
|
||||
|
||||
create table if not exists program_client (
|
||||
id integer not null primary key,
|
||||
nonce integer not null default(1), -- used for auth challenge
|
||||
public_key blob not null,
|
||||
metadata_id integer not null references client_metadata (id) on delete cascade,
|
||||
created_at integer not null default(unixepoch ('now')),
|
||||
updated_at integer not null default(unixepoch ('now'))
|
||||
) STRICT;
|
||||
|
||||
create unique index if not exists program_client_public_key_unique
|
||||
on program_client (public_key);
|
||||
|
||||
create unique index if not exists uniq_program_client_public_key on program_client (public_key);
|
||||
|
||||
create table if not exists evm_wallet (
|
||||
id integer not null primary key,
|
||||
address blob not null, -- 20-byte Ethereum address
|
||||
@@ -66,8 +93,18 @@ create table if not exists evm_wallet (
|
||||
) STRICT;
|
||||
|
||||
create unique index if not exists uniq_evm_wallet_address on evm_wallet (address);
|
||||
|
||||
create unique index if not exists uniq_evm_wallet_aead on evm_wallet (aead_encrypted_id);
|
||||
|
||||
create table if not exists evm_wallet_access (
|
||||
id integer not null primary key,
|
||||
wallet_id integer not null references evm_wallet (id) on delete cascade,
|
||||
client_id integer not null references program_client (id) on delete cascade,
|
||||
created_at integer not null default(unixepoch ('now'))
|
||||
) STRICT;
|
||||
|
||||
create unique index if not exists uniq_wallet_access on evm_wallet_access (wallet_id, client_id);
|
||||
|
||||
create table if not exists evm_ether_transfer_limit (
|
||||
id integer not null primary key,
|
||||
window_secs integer not null, -- window duration in seconds
|
||||
@@ -77,8 +114,7 @@ create table if not exists evm_ether_transfer_limit (
|
||||
-- Shared grant properties: client scope, timeframe, fee caps, and rate limit
|
||||
create table if not exists evm_basic_grant (
|
||||
id integer not null primary key,
|
||||
wallet_id integer not null references evm_wallet(id) on delete restrict,
|
||||
client_id integer not null references program_client(id) on delete restrict,
|
||||
wallet_access_id integer not null references evm_wallet_access (id) on delete restrict,
|
||||
chain_id integer not null, -- EIP-155 chain ID
|
||||
valid_from integer, -- unix timestamp (seconds), null = no lower bound
|
||||
valid_until integer, -- unix timestamp (seconds), null = no upper bound
|
||||
@@ -87,28 +123,27 @@ create table if not exists evm_basic_grant (
|
||||
rate_limit_count integer, -- max transactions in window, null = unlimited
|
||||
rate_limit_window_secs integer, -- window duration in seconds, null = unlimited
|
||||
revoked_at integer, -- unix timestamp when revoked, null = still active
|
||||
created_at integer not null default(unixepoch('now'))
|
||||
created_at integer not null default(unixepoch ('now'))
|
||||
) STRICT;
|
||||
|
||||
-- Shared transaction log for all EVM grants, used for rate limit tracking and auditing
|
||||
create table if not exists evm_transaction_log (
|
||||
id integer not null primary key,
|
||||
grant_id integer not null references evm_basic_grant(id) on delete restrict,
|
||||
client_id integer not null references program_client(id) on delete restrict,
|
||||
wallet_id integer not null references evm_wallet(id) on delete restrict,
|
||||
wallet_access_id integer not null references evm_wallet_access (id) on delete restrict,
|
||||
grant_id integer not null references evm_basic_grant (id) on delete restrict,
|
||||
chain_id integer not null,
|
||||
eth_value blob not null, -- always present on any EVM tx
|
||||
signed_at integer not null default(unixepoch('now'))
|
||||
signed_at integer not null default(unixepoch ('now'))
|
||||
) STRICT;
|
||||
|
||||
create index if not exists idx_evm_basic_grant_wallet_chain on evm_basic_grant(client_id, wallet_id, chain_id);
|
||||
create index if not exists idx_evm_basic_grant_access_chain on evm_basic_grant (wallet_access_id, chain_id);
|
||||
|
||||
-- ===============================
|
||||
-- ERC20 token transfer grant
|
||||
-- ===============================
|
||||
create table if not exists evm_token_transfer_grant (
|
||||
id integer not null primary key,
|
||||
basic_grant_id integer not null unique references evm_basic_grant(id) on delete cascade,
|
||||
basic_grant_id integer not null unique references evm_basic_grant (id) on delete cascade,
|
||||
token_contract blob not null, -- 20-byte ERC20 contract address
|
||||
receiver blob -- 20-byte recipient address or null if every recipient allowed
|
||||
) STRICT;
|
||||
@@ -116,7 +151,7 @@ create table if not exists evm_token_transfer_grant (
|
||||
-- Per-window volume limits for token transfer grants
|
||||
create table if not exists evm_token_transfer_volume_limit (
|
||||
id integer not null primary key,
|
||||
grant_id integer not null references evm_token_transfer_grant(id) on delete cascade,
|
||||
grant_id integer not null references evm_token_transfer_grant (id) on delete cascade,
|
||||
window_secs integer not null, -- window duration in seconds
|
||||
max_volume blob not null -- big-endian 32-byte U256
|
||||
) STRICT;
|
||||
@@ -124,35 +159,51 @@ create table if not exists evm_token_transfer_volume_limit (
|
||||
-- Log table for token transfer grant usage
|
||||
create table if not exists evm_token_transfer_log (
|
||||
id integer not null primary key,
|
||||
grant_id integer not null references evm_token_transfer_grant(id) on delete restrict,
|
||||
log_id integer not null references evm_transaction_log(id) on delete restrict,
|
||||
grant_id integer not null references evm_token_transfer_grant (id) on delete restrict,
|
||||
log_id integer not null references evm_transaction_log (id) on delete restrict,
|
||||
chain_id integer not null, -- EIP-155 chain ID
|
||||
token_contract blob not null, -- 20-byte ERC20 contract address
|
||||
recipient_address blob not null, -- 20-byte recipient address
|
||||
value blob not null, -- big-endian 32-byte U256
|
||||
created_at integer not null default(unixepoch('now'))
|
||||
created_at integer not null default(unixepoch ('now'))
|
||||
) STRICT;
|
||||
|
||||
create index if not exists idx_token_transfer_log_grant on evm_token_transfer_log(grant_id);
|
||||
create index if not exists idx_token_transfer_log_log_id on evm_token_transfer_log(log_id);
|
||||
create index if not exists idx_token_transfer_log_chain on evm_token_transfer_log(chain_id);
|
||||
create index if not exists idx_token_transfer_log_grant on evm_token_transfer_log (grant_id);
|
||||
|
||||
create index if not exists idx_token_transfer_log_log_id on evm_token_transfer_log (log_id);
|
||||
|
||||
create index if not exists idx_token_transfer_log_chain on evm_token_transfer_log (chain_id);
|
||||
|
||||
-- ===============================
|
||||
-- Ether transfer grant (uses base log)
|
||||
-- ===============================
|
||||
create table if not exists evm_ether_transfer_grant (
|
||||
id integer not null primary key,
|
||||
basic_grant_id integer not null unique references evm_basic_grant(id) on delete cascade,
|
||||
limit_id integer not null references evm_ether_transfer_limit(id) on delete restrict
|
||||
basic_grant_id integer not null unique references evm_basic_grant (id) on delete cascade,
|
||||
limit_id integer not null references evm_ether_transfer_limit (id) on delete restrict
|
||||
) STRICT;
|
||||
|
||||
-- Specific recipient addresses for an ether transfer grant
|
||||
create table if not exists evm_ether_transfer_grant_target (
|
||||
id integer not null primary key,
|
||||
grant_id integer not null references evm_ether_transfer_grant(id) on delete cascade,
|
||||
grant_id integer not null references evm_ether_transfer_grant (id) on delete cascade,
|
||||
address blob not null -- 20-byte recipient address
|
||||
) STRICT;
|
||||
|
||||
create unique index if not exists uniq_ether_transfer_target on evm_ether_transfer_grant_target(grant_id, address);
|
||||
create unique index if not exists uniq_ether_transfer_target on evm_ether_transfer_grant_target (grant_id, address);
|
||||
|
||||
-- ===============================
|
||||
-- Integrity Envelopes
|
||||
-- ===============================
|
||||
create table if not exists integrity_envelope (
|
||||
id integer not null primary key,
|
||||
entity_kind text not null,
|
||||
entity_id blob not null,
|
||||
payload_version integer not null,
|
||||
key_version integer not null,
|
||||
mac blob not null, -- 20-byte recipient address
|
||||
signed_at integer not null default(unixepoch ('now')),
|
||||
created_at integer not null default(unixepoch ('now'))
|
||||
) STRICT;
|
||||
|
||||
create unique index if not exists uniq_integrity_envelope_entity on integrity_envelope (entity_kind, entity_id);
|
||||
|
||||
@@ -2,13 +2,9 @@ use arbiter_proto::{BOOTSTRAP_PATH, home_path};
|
||||
use diesel::QueryDsl;
|
||||
use diesel_async::RunQueryDsl;
|
||||
use kameo::{Actor, messages};
|
||||
use miette::Diagnostic;
|
||||
use rand::{
|
||||
RngExt,
|
||||
distr::{Alphanumeric},
|
||||
make_rng,
|
||||
rngs::StdRng,
|
||||
};
|
||||
|
||||
use rand::{RngExt, distr::Alphanumeric, make_rng, rngs::StdRng};
|
||||
use subtle::ConstantTimeEq as _;
|
||||
use thiserror::Error;
|
||||
|
||||
use crate::db::{self, DatabasePool, schema};
|
||||
@@ -30,18 +26,15 @@ pub async fn generate_token() -> Result<String, std::io::Error> {
|
||||
Ok(token)
|
||||
}
|
||||
|
||||
#[derive(Error, Debug, Diagnostic)]
|
||||
#[derive(Error, Debug)]
|
||||
pub enum Error {
|
||||
#[error("Database error: {0}")]
|
||||
#[diagnostic(code(arbiter_server::bootstrap::database))]
|
||||
Database(#[from] db::PoolError),
|
||||
|
||||
#[error("Database query error: {0}")]
|
||||
#[diagnostic(code(arbiter_server::bootstrap::database_query))]
|
||||
Query(#[from] diesel::result::Error),
|
||||
|
||||
#[error("I/O error: {0}")]
|
||||
#[diagnostic(code(arbiter_server::bootstrap::io))]
|
||||
Io(#[from] std::io::Error),
|
||||
}
|
||||
|
||||
@@ -52,15 +45,14 @@ pub struct Bootstrapper {
|
||||
|
||||
impl Bootstrapper {
|
||||
pub async fn new(db: &DatabasePool) -> Result<Self, Error> {
|
||||
let row_count: i64 = {
|
||||
let mut conn = db.get().await?;
|
||||
|
||||
let row_count: i64 = schema::useragent_client::table
|
||||
schema::useragent_client::table
|
||||
.count()
|
||||
.get_result(&mut conn)
|
||||
.await?;
|
||||
|
||||
drop(conn);
|
||||
|
||||
.await?
|
||||
};
|
||||
|
||||
let token = if row_count == 0 {
|
||||
let token = generate_token().await?;
|
||||
@@ -78,7 +70,13 @@ impl Bootstrapper {
|
||||
#[message]
|
||||
pub fn is_correct_token(&self, token: String) -> bool {
|
||||
match &self.token {
|
||||
Some(expected) => *expected == token,
|
||||
Some(expected) => {
|
||||
let expected_bytes = expected.as_bytes();
|
||||
let token_bytes = token.as_bytes();
|
||||
|
||||
let choice = expected_bytes.ct_eq(token_bytes);
|
||||
bool::from(choice)
|
||||
}
|
||||
None => false,
|
||||
}
|
||||
}
|
||||
|
||||
410
server/crates/arbiter-server/src/actors/client/auth.rs
Normal file
410
server/crates/arbiter-server/src/actors/client/auth.rs
Normal file
@@ -0,0 +1,410 @@
|
||||
use arbiter_proto::{
|
||||
ClientMetadata, format_challenge,
|
||||
transport::{Bi, expect_message},
|
||||
};
|
||||
use chrono::Utc;
|
||||
use diesel::{
|
||||
ExpressionMethods as _, OptionalExtension as _, QueryDsl as _, SelectableHelper as _,
|
||||
dsl::insert_into, update,
|
||||
};
|
||||
use diesel_async::RunQueryDsl as _;
|
||||
use ed25519_dalek::{Signature, VerifyingKey};
|
||||
use kameo::{actor::ActorRef, error::SendError};
|
||||
use tracing::error;
|
||||
|
||||
use crate::{
|
||||
actors::{
|
||||
client::{ClientConnection, ClientCredentials, ClientProfile},
|
||||
flow_coordinator::{self, RequestClientApproval},
|
||||
keyholder::KeyHolder,
|
||||
},
|
||||
crypto::integrity::{self},
|
||||
db::{
|
||||
self,
|
||||
models::{ProgramClientMetadata, SqliteTimestamp},
|
||||
schema::program_client,
|
||||
},
|
||||
};
|
||||
|
||||
#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)]
|
||||
pub enum Error {
|
||||
#[error("Database pool unavailable")]
|
||||
DatabasePoolUnavailable,
|
||||
#[error("Database operation failed")]
|
||||
DatabaseOperationFailed,
|
||||
#[error("Integrity check failed")]
|
||||
IntegrityCheckFailed,
|
||||
#[error("Invalid challenge solution")]
|
||||
InvalidChallengeSolution,
|
||||
#[error("Client approval request failed")]
|
||||
ApproveError(#[from] ApproveError),
|
||||
#[error("Transport error")]
|
||||
Transport,
|
||||
}
|
||||
|
||||
impl From<diesel::result::Error> for Error {
|
||||
fn from(e: diesel::result::Error) -> Self {
|
||||
error!(?e, "Database error");
|
||||
Self::DatabaseOperationFailed
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)]
|
||||
pub enum ApproveError {
|
||||
#[error("Internal error")]
|
||||
Internal,
|
||||
#[error("Client connection denied by user agents")]
|
||||
Denied,
|
||||
#[error("Upstream error: {0}")]
|
||||
Upstream(flow_coordinator::ApprovalError),
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum Inbound {
|
||||
AuthChallengeRequest {
|
||||
pubkey: VerifyingKey,
|
||||
metadata: ClientMetadata,
|
||||
},
|
||||
AuthChallengeSolution {
|
||||
signature: Signature,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum Outbound {
|
||||
AuthChallenge { pubkey: VerifyingKey, nonce: i32 },
|
||||
AuthSuccess,
|
||||
}
|
||||
|
||||
/// Returns the current nonce and client ID for a registered client.
|
||||
/// Returns `None` if the pubkey is not registered.
|
||||
async fn get_current_nonce_and_id(
|
||||
db: &db::DatabasePool,
|
||||
pubkey: &VerifyingKey,
|
||||
) -> Result<Option<(i32, i32)>, Error> {
|
||||
let pubkey_bytes = pubkey.as_bytes().to_vec();
|
||||
let mut conn = db.get().await.map_err(|e| {
|
||||
error!(error = ?e, "Database pool error");
|
||||
Error::DatabasePoolUnavailable
|
||||
})?;
|
||||
program_client::table
|
||||
.filter(program_client::public_key.eq(&pubkey_bytes))
|
||||
.select((program_client::id, program_client::nonce))
|
||||
.first::<(i32, i32)>(&mut conn)
|
||||
.await
|
||||
.optional()
|
||||
.map_err(|e| {
|
||||
error!(error = ?e, "Database error");
|
||||
Error::DatabaseOperationFailed
|
||||
})
|
||||
}
|
||||
|
||||
async fn verify_integrity(
|
||||
db: &db::DatabasePool,
|
||||
keyholder: &ActorRef<KeyHolder>,
|
||||
pubkey: &VerifyingKey,
|
||||
) -> Result<(), Error> {
|
||||
let mut db_conn = db.get().await.map_err(|e| {
|
||||
error!(error = ?e, "Database pool error");
|
||||
Error::DatabasePoolUnavailable
|
||||
})?;
|
||||
|
||||
let (id, nonce) = get_current_nonce_and_id(db, pubkey).await?.ok_or_else(|| {
|
||||
error!("Client not found during integrity verification");
|
||||
Error::DatabaseOperationFailed
|
||||
})?;
|
||||
|
||||
integrity::verify_entity(
|
||||
&mut db_conn,
|
||||
keyholder,
|
||||
&ClientCredentials {
|
||||
pubkey: *pubkey,
|
||||
nonce,
|
||||
},
|
||||
id,
|
||||
)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(?e, "Integrity verification failed");
|
||||
Error::IntegrityCheckFailed
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Atomically increments the nonce and re-signs the integrity envelope.
|
||||
/// Returns the new nonce, which is used as the challenge nonce.
|
||||
async fn create_nonce(
|
||||
db: &db::DatabasePool,
|
||||
keyholder: &ActorRef<KeyHolder>,
|
||||
pubkey: &VerifyingKey,
|
||||
) -> Result<i32, Error> {
|
||||
let pubkey_bytes = pubkey.as_bytes().to_vec();
|
||||
|
||||
let mut conn = db.get().await.map_err(|e| {
|
||||
error!(error = ?e, "Database pool error");
|
||||
Error::DatabasePoolUnavailable
|
||||
})?;
|
||||
|
||||
conn.exclusive_transaction(|conn| {
|
||||
let keyholder = keyholder.clone();
|
||||
Box::pin(async move {
|
||||
let (id, new_nonce): (i32, i32) = update(program_client::table)
|
||||
.filter(program_client::public_key.eq(&pubkey_bytes))
|
||||
.set(program_client::nonce.eq(program_client::nonce + 1))
|
||||
.returning((program_client::id, program_client::nonce))
|
||||
.get_result(conn)
|
||||
.await?;
|
||||
|
||||
integrity::sign_entity(
|
||||
conn,
|
||||
&keyholder,
|
||||
&ClientCredentials {
|
||||
pubkey: *pubkey,
|
||||
nonce: new_nonce,
|
||||
},
|
||||
id,
|
||||
)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(?e, "Integrity sign failed after nonce update");
|
||||
Error::DatabaseOperationFailed
|
||||
})?;
|
||||
|
||||
Ok(new_nonce)
|
||||
})
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
async fn approve_new_client(
|
||||
actors: &crate::actors::GlobalActors,
|
||||
profile: ClientProfile,
|
||||
) -> Result<(), Error> {
|
||||
let result = actors
|
||||
.flow_coordinator
|
||||
.ask(RequestClientApproval { client: profile })
|
||||
.await;
|
||||
|
||||
match result {
|
||||
Ok(true) => Ok(()),
|
||||
Ok(false) => Err(Error::ApproveError(ApproveError::Denied)),
|
||||
Err(SendError::HandlerError(e)) => {
|
||||
error!(error = ?e, "Approval upstream error");
|
||||
Err(Error::ApproveError(ApproveError::Upstream(e)))
|
||||
}
|
||||
Err(e) => {
|
||||
error!(error = ?e, "Approval request to flow coordinator failed");
|
||||
Err(Error::ApproveError(ApproveError::Internal))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn insert_client(
|
||||
db: &db::DatabasePool,
|
||||
keyholder: &ActorRef<KeyHolder>,
|
||||
pubkey: &VerifyingKey,
|
||||
metadata: &ClientMetadata,
|
||||
) -> Result<i32, Error> {
|
||||
use crate::db::schema::{client_metadata, program_client};
|
||||
let metadata = metadata.clone();
|
||||
|
||||
let mut conn = db.get().await.map_err(|e| {
|
||||
error!(error = ?e, "Database pool error");
|
||||
Error::DatabasePoolUnavailable
|
||||
})?;
|
||||
|
||||
conn.exclusive_transaction(|conn| {
|
||||
let keyholder = keyholder.clone();
|
||||
Box::pin(async move {
|
||||
const NONCE_START: i32 = 1;
|
||||
|
||||
let metadata_id = insert_into(client_metadata::table)
|
||||
.values((
|
||||
client_metadata::name.eq(&metadata.name),
|
||||
client_metadata::description.eq(&metadata.description),
|
||||
client_metadata::version.eq(&metadata.version),
|
||||
))
|
||||
.returning(client_metadata::id)
|
||||
.get_result::<i32>(conn)
|
||||
.await?;
|
||||
|
||||
let client_id = insert_into(program_client::table)
|
||||
.values((
|
||||
program_client::public_key.eq(pubkey.as_bytes().to_vec()),
|
||||
program_client::metadata_id.eq(metadata_id),
|
||||
program_client::nonce.eq(NONCE_START),
|
||||
))
|
||||
.on_conflict_do_nothing()
|
||||
.returning(program_client::id)
|
||||
.get_result::<i32>(conn)
|
||||
.await?;
|
||||
|
||||
integrity::sign_entity(
|
||||
conn,
|
||||
&keyholder,
|
||||
&ClientCredentials {
|
||||
pubkey: *pubkey,
|
||||
nonce: NONCE_START,
|
||||
},
|
||||
client_id,
|
||||
)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(error = ?e, "Failed to sign integrity tag for new client key");
|
||||
Error::DatabaseOperationFailed
|
||||
})?;
|
||||
|
||||
Ok(client_id)
|
||||
})
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
async fn sync_client_metadata(
|
||||
db: &db::DatabasePool,
|
||||
client_id: i32,
|
||||
metadata: &ClientMetadata,
|
||||
) -> Result<(), Error> {
|
||||
use crate::db::schema::{client_metadata, client_metadata_history};
|
||||
|
||||
let now = SqliteTimestamp(Utc::now());
|
||||
|
||||
let mut conn = db.get().await.map_err(|e| {
|
||||
error!(error = ?e, "Database pool error");
|
||||
Error::DatabasePoolUnavailable
|
||||
})?;
|
||||
|
||||
conn.exclusive_transaction(|conn| {
|
||||
let metadata = metadata.clone();
|
||||
Box::pin(async move {
|
||||
let (current_metadata_id, current): (i32, ProgramClientMetadata) =
|
||||
program_client::table
|
||||
.find(client_id)
|
||||
.inner_join(client_metadata::table)
|
||||
.select((
|
||||
program_client::metadata_id,
|
||||
ProgramClientMetadata::as_select(),
|
||||
))
|
||||
.first(conn)
|
||||
.await?;
|
||||
|
||||
let unchanged = current.name == metadata.name
|
||||
&& current.description == metadata.description
|
||||
&& current.version == metadata.version;
|
||||
if unchanged {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
insert_into(client_metadata_history::table)
|
||||
.values((
|
||||
client_metadata_history::metadata_id.eq(current_metadata_id),
|
||||
client_metadata_history::client_id.eq(client_id),
|
||||
))
|
||||
.execute(conn)
|
||||
.await?;
|
||||
|
||||
let metadata_id = insert_into(client_metadata::table)
|
||||
.values((
|
||||
client_metadata::name.eq(&metadata.name),
|
||||
client_metadata::description.eq(&metadata.description),
|
||||
client_metadata::version.eq(&metadata.version),
|
||||
))
|
||||
.returning(client_metadata::id)
|
||||
.get_result::<i32>(conn)
|
||||
.await?;
|
||||
|
||||
update(program_client::table.find(client_id))
|
||||
.set((
|
||||
program_client::metadata_id.eq(metadata_id),
|
||||
program_client::updated_at.eq(now),
|
||||
))
|
||||
.execute(conn)
|
||||
.await?;
|
||||
|
||||
Ok::<(), diesel::result::Error>(())
|
||||
})
|
||||
})
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(error = ?e, "Database error");
|
||||
Error::DatabaseOperationFailed
|
||||
})
|
||||
}
|
||||
|
||||
async fn challenge_client<T>(
|
||||
transport: &mut T,
|
||||
pubkey: VerifyingKey,
|
||||
nonce: i32,
|
||||
) -> Result<(), Error>
|
||||
where
|
||||
T: Bi<Inbound, Result<Outbound, Error>> + ?Sized,
|
||||
{
|
||||
transport
|
||||
.send(Ok(Outbound::AuthChallenge { pubkey, nonce }))
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(error = ?e, "Failed to send auth challenge");
|
||||
Error::Transport
|
||||
})?;
|
||||
|
||||
let signature = expect_message(transport, |req: Inbound| match req {
|
||||
Inbound::AuthChallengeSolution { signature } => Some(signature),
|
||||
_ => None,
|
||||
})
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(error = ?e, "Failed to receive challenge solution");
|
||||
Error::Transport
|
||||
})?;
|
||||
|
||||
let formatted = format_challenge(nonce, pubkey.as_bytes());
|
||||
|
||||
pubkey.verify_strict(&formatted, &signature).map_err(|_| {
|
||||
error!("Challenge solution verification failed");
|
||||
Error::InvalidChallengeSolution
|
||||
})?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn authenticate<T>(props: &mut ClientConnection, transport: &mut T) -> Result<i32, Error>
|
||||
where
|
||||
T: Bi<Inbound, Result<Outbound, Error>> + Send + ?Sized,
|
||||
{
|
||||
let Some(Inbound::AuthChallengeRequest { pubkey, metadata }) = transport.recv().await else {
|
||||
return Err(Error::Transport);
|
||||
};
|
||||
|
||||
let client_id = match get_current_nonce_and_id(&props.db, &pubkey).await? {
|
||||
Some((id, _)) => {
|
||||
verify_integrity(&props.db, &props.actors.key_holder, &pubkey).await?;
|
||||
id
|
||||
}
|
||||
None => {
|
||||
approve_new_client(
|
||||
&props.actors,
|
||||
ClientProfile {
|
||||
pubkey,
|
||||
metadata: metadata.clone(),
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
insert_client(&props.db, &props.actors.key_holder, &pubkey, &metadata).await?
|
||||
}
|
||||
};
|
||||
|
||||
sync_client_metadata(&props.db, client_id, &metadata).await?;
|
||||
let challenge_nonce = create_nonce(&props.db, &props.actors.key_holder, &pubkey).await?;
|
||||
challenge_client(transport, pubkey, challenge_nonce).await?;
|
||||
|
||||
transport
|
||||
.send(Ok(Outbound::AuthSuccess))
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(error = ?e, "Failed to send auth success");
|
||||
Error::Transport
|
||||
})?;
|
||||
|
||||
Ok(client_id)
|
||||
}
|
||||
@@ -1,102 +0,0 @@
|
||||
use arbiter_proto::proto::client::{
|
||||
AuthChallengeRequest, AuthChallengeSolution, ClientRequest,
|
||||
client_request::Payload as ClientRequestPayload,
|
||||
};
|
||||
use ed25519_dalek::VerifyingKey;
|
||||
use tracing::error;
|
||||
|
||||
use crate::actors::client::{
|
||||
ClientConnection,
|
||||
auth::state::{AuthContext, AuthStateMachine},
|
||||
session::ClientSession,
|
||||
};
|
||||
|
||||
#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)]
|
||||
pub enum Error {
|
||||
#[error("Unexpected message payload")]
|
||||
UnexpectedMessagePayload,
|
||||
#[error("Invalid client public key length")]
|
||||
InvalidClientPubkeyLength,
|
||||
#[error("Invalid client public key encoding")]
|
||||
InvalidAuthPubkeyEncoding,
|
||||
#[error("Database pool unavailable")]
|
||||
DatabasePoolUnavailable,
|
||||
#[error("Database operation failed")]
|
||||
DatabaseOperationFailed,
|
||||
#[error("Public key not registered")]
|
||||
PublicKeyNotRegistered,
|
||||
#[error("Invalid signature length")]
|
||||
InvalidSignatureLength,
|
||||
#[error("Invalid challenge solution")]
|
||||
InvalidChallengeSolution,
|
||||
#[error("Transport error")]
|
||||
Transport,
|
||||
}
|
||||
|
||||
mod state;
|
||||
use state::*;
|
||||
|
||||
fn parse_auth_event(payload: ClientRequestPayload) -> Result<AuthEvents, Error> {
|
||||
match payload {
|
||||
ClientRequestPayload::AuthChallengeRequest(AuthChallengeRequest { pubkey }) => {
|
||||
let pubkey_bytes = pubkey.as_array().ok_or(Error::InvalidClientPubkeyLength)?;
|
||||
let pubkey = VerifyingKey::from_bytes(pubkey_bytes)
|
||||
.map_err(|_| Error::InvalidAuthPubkeyEncoding)?;
|
||||
Ok(AuthEvents::AuthRequest(ChallengeRequest {
|
||||
pubkey: pubkey.into(),
|
||||
}))
|
||||
}
|
||||
ClientRequestPayload::AuthChallengeSolution(AuthChallengeSolution { signature }) => {
|
||||
Ok(AuthEvents::ReceivedSolution(ChallengeSolution {
|
||||
solution: signature,
|
||||
}))
|
||||
}
|
||||
_ => Err(Error::UnexpectedMessagePayload) ,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn authenticate(props: &mut ClientConnection) -> Result<VerifyingKey, Error> {
|
||||
let mut state = AuthStateMachine::new(AuthContext::new(props));
|
||||
|
||||
loop {
|
||||
let transport = state.context_mut().conn.transport.as_mut();
|
||||
let Some(ClientRequest {
|
||||
payload: Some(payload),
|
||||
}) = transport.recv().await
|
||||
else {
|
||||
return Err(Error::Transport);
|
||||
};
|
||||
|
||||
let event = parse_auth_event(payload)?;
|
||||
|
||||
match state.process_event(event).await {
|
||||
Ok(AuthStates::AuthOk(key)) => return Ok(key.clone()),
|
||||
Err(AuthError::ActionFailed(err)) => {
|
||||
error!(?err, "State machine action failed");
|
||||
return Err(err);
|
||||
}
|
||||
Err(AuthError::GuardFailed(err)) => {
|
||||
error!(?err, "State machine guard failed");
|
||||
return Err(err);
|
||||
}
|
||||
Err(AuthError::InvalidEvent) => {
|
||||
error!("Invalid event for current state");
|
||||
return Err(Error::InvalidChallengeSolution);
|
||||
}
|
||||
Err(AuthError::TransitionsFailed) => {
|
||||
error!("Invalid state transition");
|
||||
return Err(Error::InvalidChallengeSolution);
|
||||
}
|
||||
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn authenticate_and_create(
|
||||
mut props: ClientConnection,
|
||||
) -> Result<ClientSession, Error> {
|
||||
let key = authenticate(&mut props).await?;
|
||||
let session = ClientSession::new(props, key);
|
||||
Ok(session)
|
||||
}
|
||||
@@ -1,136 +0,0 @@
|
||||
use arbiter_proto::proto::client::{
|
||||
AuthChallenge, ClientResponse,
|
||||
client_response::Payload as ClientResponsePayload,
|
||||
};
|
||||
use diesel::{ExpressionMethods as _, OptionalExtension as _, QueryDsl, update};
|
||||
use diesel_async::RunQueryDsl;
|
||||
use ed25519_dalek::VerifyingKey;
|
||||
use tracing::error;
|
||||
|
||||
use super::Error;
|
||||
use crate::{actors::client::ClientConnection, db::schema};
|
||||
|
||||
pub struct ChallengeRequest {
|
||||
pub pubkey: VerifyingKey,
|
||||
}
|
||||
|
||||
pub struct ChallengeContext {
|
||||
pub challenge: AuthChallenge,
|
||||
pub key: VerifyingKey,
|
||||
}
|
||||
|
||||
pub struct ChallengeSolution {
|
||||
pub solution: Vec<u8>,
|
||||
}
|
||||
|
||||
smlang::statemachine!(
|
||||
name: Auth,
|
||||
custom_error: true,
|
||||
transitions: {
|
||||
*Init + AuthRequest(ChallengeRequest) / async prepare_challenge = SentChallenge(ChallengeContext),
|
||||
SentChallenge(ChallengeContext) + ReceivedSolution(ChallengeSolution) [async verify_solution] / provide_key = AuthOk(VerifyingKey),
|
||||
}
|
||||
);
|
||||
|
||||
async fn create_nonce(db: &crate::db::DatabasePool, pubkey_bytes: &[u8]) -> Result<i32, Error> {
|
||||
let mut db_conn = db.get().await.map_err(|e| {
|
||||
error!(error = ?e, "Database pool error");
|
||||
Error::DatabasePoolUnavailable
|
||||
})?;
|
||||
db_conn
|
||||
.exclusive_transaction(|conn| {
|
||||
Box::pin(async move {
|
||||
let current_nonce = schema::program_client::table
|
||||
.filter(schema::program_client::public_key.eq(pubkey_bytes.to_vec()))
|
||||
.select(schema::program_client::nonce)
|
||||
.first::<i32>(conn)
|
||||
.await?;
|
||||
|
||||
update(schema::program_client::table)
|
||||
.filter(schema::program_client::public_key.eq(pubkey_bytes.to_vec()))
|
||||
.set(schema::program_client::nonce.eq(current_nonce + 1))
|
||||
.execute(conn)
|
||||
.await?;
|
||||
|
||||
Result::<_, diesel::result::Error>::Ok(current_nonce)
|
||||
})
|
||||
})
|
||||
.await
|
||||
.optional()
|
||||
.map_err(|e| {
|
||||
error!(error = ?e, "Database error");
|
||||
Error::DatabaseOperationFailed
|
||||
})?
|
||||
.ok_or_else(|| {
|
||||
error!(?pubkey_bytes, "Public key not found in database");
|
||||
Error::PublicKeyNotRegistered
|
||||
})
|
||||
}
|
||||
|
||||
pub struct AuthContext<'a> {
|
||||
pub(super) conn: &'a mut ClientConnection,
|
||||
}
|
||||
|
||||
impl<'a> AuthContext<'a> {
|
||||
pub fn new(conn: &'a mut ClientConnection) -> Self {
|
||||
Self { conn }
|
||||
}
|
||||
}
|
||||
|
||||
impl AuthStateMachineContext for AuthContext<'_> {
|
||||
type Error = Error;
|
||||
|
||||
async fn verify_solution(
|
||||
&self,
|
||||
ChallengeContext { challenge, key }: &ChallengeContext,
|
||||
ChallengeSolution { solution }: &ChallengeSolution,
|
||||
) -> Result<bool, Self::Error> {
|
||||
let formatted_challenge =
|
||||
arbiter_proto::format_challenge(challenge.nonce, &challenge.pubkey);
|
||||
|
||||
let signature = solution.as_slice().try_into().map_err(|_| {
|
||||
error!(?solution, "Invalid signature length");
|
||||
Error::InvalidChallengeSolution
|
||||
})?;
|
||||
|
||||
let valid = key.verify_strict(&formatted_challenge, &signature).is_ok();
|
||||
|
||||
Ok(valid)
|
||||
}
|
||||
|
||||
async fn prepare_challenge(
|
||||
&mut self,
|
||||
ChallengeRequest { pubkey }: ChallengeRequest,
|
||||
) -> Result<ChallengeContext, Self::Error> {
|
||||
let nonce = create_nonce(&self.conn.db, pubkey.as_bytes()).await?;
|
||||
|
||||
let challenge = AuthChallenge {
|
||||
pubkey: pubkey.as_bytes().to_vec(),
|
||||
nonce,
|
||||
};
|
||||
|
||||
self.conn
|
||||
.transport
|
||||
.send(Ok(ClientResponse {
|
||||
payload: Some(ClientResponsePayload::AuthChallenge(challenge.clone())),
|
||||
}))
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(?e, "Failed to send auth challenge");
|
||||
Error::Transport
|
||||
})?;
|
||||
|
||||
Ok(ChallengeContext {
|
||||
challenge,
|
||||
key: pubkey,
|
||||
})
|
||||
}
|
||||
|
||||
fn provide_key(
|
||||
&mut self,
|
||||
state_data: &ChallengeContext,
|
||||
_: ChallengeSolution,
|
||||
) -> Result<VerifyingKey, Self::Error> {
|
||||
Ok(state_data.key)
|
||||
}
|
||||
}
|
||||
@@ -1,57 +1,60 @@
|
||||
use arbiter_proto::{
|
||||
proto::client::{ClientRequest, ClientResponse},
|
||||
transport::Bi,
|
||||
};
|
||||
use arbiter_proto::{ClientMetadata, transport::Bi};
|
||||
use kameo::actor::Spawn;
|
||||
use tracing::{error, info};
|
||||
|
||||
use crate::{
|
||||
actors::{GlobalActors, client::session::ClientSession},
|
||||
crypto::integrity::{Integrable, hashing::Hashable},
|
||||
db,
|
||||
};
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)]
|
||||
pub enum ClientError {
|
||||
#[error("Expected message with payload")]
|
||||
MissingRequestPayload,
|
||||
#[error("Unexpected request payload")]
|
||||
UnexpectedRequestPayload,
|
||||
#[error("State machine error")]
|
||||
StateTransitionFailed,
|
||||
#[error("Connection registration failed")]
|
||||
ConnectionRegistrationFailed,
|
||||
#[error(transparent)]
|
||||
Auth(#[from] auth::Error),
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ClientProfile {
|
||||
pub pubkey: ed25519_dalek::VerifyingKey,
|
||||
pub metadata: ClientMetadata,
|
||||
}
|
||||
|
||||
pub type Transport = Box<dyn Bi<ClientRequest, Result<ClientResponse, ClientError>> + Send>;
|
||||
pub struct ClientCredentials {
|
||||
pub pubkey: ed25519_dalek::VerifyingKey,
|
||||
pub nonce: i32,
|
||||
}
|
||||
|
||||
impl Integrable for ClientCredentials {
|
||||
const KIND: &'static str = "client_credentials";
|
||||
}
|
||||
|
||||
impl Hashable for ClientCredentials {
|
||||
fn hash<H: sha2::Digest>(&self, hasher: &mut H) {
|
||||
hasher.update(self.pubkey.as_bytes());
|
||||
self.nonce.hash(hasher);
|
||||
}
|
||||
}
|
||||
|
||||
pub struct ClientConnection {
|
||||
pub(crate) db: db::DatabasePool,
|
||||
pub(crate) transport: Transport,
|
||||
pub(crate) actors: GlobalActors,
|
||||
}
|
||||
|
||||
impl ClientConnection {
|
||||
pub fn new(db: db::DatabasePool, transport: Transport, actors: GlobalActors) -> Self {
|
||||
Self {
|
||||
db,
|
||||
transport,
|
||||
actors,
|
||||
}
|
||||
pub fn new(db: db::DatabasePool, actors: GlobalActors) -> Self {
|
||||
Self { db, actors }
|
||||
}
|
||||
}
|
||||
|
||||
pub mod auth;
|
||||
pub mod session;
|
||||
|
||||
pub async fn connect_client(props: ClientConnection) {
|
||||
match auth::authenticate_and_create(props).await {
|
||||
Ok(session) => {
|
||||
ClientSession::spawn(session);
|
||||
pub async fn connect_client<T>(mut props: ClientConnection, transport: &mut T)
|
||||
where
|
||||
T: Bi<auth::Inbound, Result<auth::Outbound, auth::Error>> + Send + ?Sized,
|
||||
{
|
||||
match auth::authenticate(&mut props, transport).await {
|
||||
Ok(client_id) => {
|
||||
ClientSession::spawn(ClientSession::new(props, client_id));
|
||||
info!("Client authenticated, session started");
|
||||
}
|
||||
Err(err) => {
|
||||
let _ = transport.send(Err(err.clone())).await;
|
||||
error!(?err, "Authentication failed, closing connection");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,41 +1,81 @@
|
||||
use arbiter_proto::proto::client::{ClientRequest, ClientResponse};
|
||||
use ed25519_dalek::VerifyingKey;
|
||||
use kameo::Actor;
|
||||
use tokio::select;
|
||||
use tracing::{error, info};
|
||||
use kameo::{Actor, messages};
|
||||
use tracing::error;
|
||||
|
||||
use crate::{actors::{
|
||||
GlobalActors, client::{ClientError, ClientConnection}, router::RegisterClient
|
||||
}, db};
|
||||
use alloy::{consensus::TxEip1559, primitives::Address, signers::Signature};
|
||||
|
||||
use crate::{
|
||||
actors::{
|
||||
GlobalActors,
|
||||
client::ClientConnection,
|
||||
evm::{ClientSignTransaction, SignTransactionError},
|
||||
flow_coordinator::RegisterClient,
|
||||
keyholder::KeyHolderState,
|
||||
},
|
||||
db,
|
||||
evm::VetError,
|
||||
};
|
||||
|
||||
pub struct ClientSession {
|
||||
props: ClientConnection,
|
||||
key: VerifyingKey,
|
||||
client_id: i32,
|
||||
}
|
||||
|
||||
impl ClientSession {
|
||||
pub(crate) fn new(props: ClientConnection, key: VerifyingKey) -> Self {
|
||||
Self { props, key }
|
||||
}
|
||||
|
||||
pub async fn process_transport_inbound(&mut self, req: ClientRequest) -> Output {
|
||||
let msg = req.payload.ok_or_else(|| {
|
||||
error!(actor = "client", "Received message with no payload");
|
||||
ClientError::MissingRequestPayload
|
||||
})?;
|
||||
|
||||
match msg {
|
||||
_ => Err(ClientError::UnexpectedRequestPayload),
|
||||
}
|
||||
pub(crate) fn new(props: ClientConnection, client_id: i32) -> Self {
|
||||
Self { props, client_id }
|
||||
}
|
||||
}
|
||||
|
||||
type Output = Result<ClientResponse, ClientError>;
|
||||
#[messages]
|
||||
impl ClientSession {
|
||||
#[message]
|
||||
pub(crate) async fn handle_query_vault_state(&mut self) -> Result<KeyHolderState, Error> {
|
||||
use crate::actors::keyholder::GetState;
|
||||
|
||||
let vault_state = match self.props.actors.key_holder.ask(GetState {}).await {
|
||||
Ok(state) => state,
|
||||
Err(err) => {
|
||||
error!(?err, actor = "client", "keyholder.query.failed");
|
||||
return Err(Error::Internal);
|
||||
}
|
||||
};
|
||||
|
||||
Ok(vault_state)
|
||||
}
|
||||
|
||||
#[message]
|
||||
pub(crate) async fn handle_sign_transaction(
|
||||
&mut self,
|
||||
wallet_address: Address,
|
||||
transaction: TxEip1559,
|
||||
) -> Result<Signature, SignTransactionRpcError> {
|
||||
match self
|
||||
.props
|
||||
.actors
|
||||
.evm
|
||||
.ask(ClientSignTransaction {
|
||||
client_id: self.client_id,
|
||||
wallet_address,
|
||||
transaction,
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(signature) => Ok(signature),
|
||||
Err(kameo::error::SendError::HandlerError(SignTransactionError::Vet(vet_error))) => {
|
||||
Err(SignTransactionRpcError::Vet(vet_error))
|
||||
}
|
||||
Err(err) => {
|
||||
error!(?err, "Failed to sign EVM transaction in client session");
|
||||
Err(SignTransactionRpcError::Internal)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Actor for ClientSession {
|
||||
type Args = Self;
|
||||
|
||||
type Error = ClientError;
|
||||
type Error = Error;
|
||||
|
||||
async fn on_start(
|
||||
args: Self::Args,
|
||||
@@ -43,56 +83,37 @@ impl Actor for ClientSession {
|
||||
) -> Result<Self, Self::Error> {
|
||||
args.props
|
||||
.actors
|
||||
.router
|
||||
.flow_coordinator
|
||||
.ask(RegisterClient { actor: this })
|
||||
.await
|
||||
.map_err(|_| ClientError::ConnectionRegistrationFailed)?;
|
||||
.map_err(|_| Error::ConnectionRegistrationFailed)?;
|
||||
Ok(args)
|
||||
}
|
||||
|
||||
async fn next(
|
||||
&mut self,
|
||||
_actor_ref: kameo::prelude::WeakActorRef<Self>,
|
||||
mailbox_rx: &mut kameo::prelude::MailboxReceiver<Self>,
|
||||
) -> Option<kameo::mailbox::Signal<Self>> {
|
||||
loop {
|
||||
select! {
|
||||
signal = mailbox_rx.recv() => {
|
||||
return signal;
|
||||
}
|
||||
msg = self.props.transport.recv() => {
|
||||
match msg {
|
||||
Some(request) => {
|
||||
match self.process_transport_inbound(request).await {
|
||||
Ok(resp) => {
|
||||
if self.props.transport.send(Ok(resp)).await.is_err() {
|
||||
error!(actor = "client", reason = "channel closed", "send.failed");
|
||||
return Some(kameo::mailbox::Signal::Stop);
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
let _ = self.props.transport.send(Err(err)).await;
|
||||
return Some(kameo::mailbox::Signal::Stop);
|
||||
}
|
||||
}
|
||||
}
|
||||
None => {
|
||||
info!(actor = "client", "transport.closed");
|
||||
return Some(kameo::mailbox::Signal::Stop);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl ClientSession {
|
||||
pub fn new_test(db: db::DatabasePool, actors: GlobalActors) -> Self {
|
||||
use arbiter_proto::transport::DummyTransport;
|
||||
let transport: super::Transport = Box::new(DummyTransport::new());
|
||||
let props = ClientConnection::new(db, transport, actors);
|
||||
let key = VerifyingKey::from_bytes(&[0u8; 32]).unwrap();
|
||||
Self { props, key }
|
||||
let props = ClientConnection::new(db, actors);
|
||||
Self {
|
||||
props,
|
||||
client_id: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum Error {
|
||||
#[error("Connection registration failed")]
|
||||
ConnectionRegistrationFailed,
|
||||
#[error("Internal error")]
|
||||
Internal,
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum SignTransactionRpcError {
|
||||
#[error("Policy evaluation failed")]
|
||||
Vet(#[from] VetError),
|
||||
|
||||
#[error("Internal error")]
|
||||
Internal,
|
||||
}
|
||||
|
||||
@@ -1,77 +1,65 @@
|
||||
use alloy::{consensus::TxEip1559, network::TxSigner, primitives::Address, signers::Signature};
|
||||
use diesel::{ExpressionMethods, OptionalExtension as _, QueryDsl, SelectableHelper as _, dsl::insert_into};
|
||||
use alloy::{consensus::TxEip1559, primitives::Address, signers::Signature};
|
||||
use diesel::{
|
||||
ExpressionMethods, OptionalExtension as _, QueryDsl, SelectableHelper as _, dsl::insert_into,
|
||||
};
|
||||
use diesel_async::RunQueryDsl;
|
||||
use kameo::{Actor, actor::ActorRef, messages};
|
||||
use memsafe::MemSafe;
|
||||
use rand::{SeedableRng, rng, rngs::StdRng};
|
||||
|
||||
use crate::{
|
||||
actors::keyholder::{CreateNew, Decrypt, KeyHolder},
|
||||
db::{self, DatabasePool, models::{self, EvmBasicGrant, SqliteTimestamp}, schema},
|
||||
crypto::integrity,
|
||||
db::{
|
||||
DatabaseError, DatabasePool,
|
||||
models::{self},
|
||||
schema,
|
||||
},
|
||||
evm::{
|
||||
self, RunKind,
|
||||
self, ListError, RunKind,
|
||||
policies::{
|
||||
FullGrant, SharedGrantSettings, SpecificGrant, SpecificMeaning,
|
||||
ether_transfer::EtherTransfer,
|
||||
token_transfers::TokenTransfer,
|
||||
CombinedSettings, Grant, SharedGrantSettings, SpecificGrant, SpecificMeaning,
|
||||
ether_transfer::EtherTransfer, token_transfers::TokenTransfer,
|
||||
},
|
||||
},
|
||||
safe_cell::{SafeCell, SafeCellHandle as _},
|
||||
};
|
||||
|
||||
pub use crate::evm::safe_signer;
|
||||
|
||||
#[derive(Debug, thiserror::Error, miette::Diagnostic)]
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum SignTransactionError {
|
||||
#[error("Wallet not found")]
|
||||
#[diagnostic(code(arbiter::evm::sign::wallet_not_found))]
|
||||
WalletNotFound,
|
||||
|
||||
#[error("Database error: {0}")]
|
||||
#[diagnostic(code(arbiter::evm::sign::database))]
|
||||
Database(#[from] diesel::result::Error),
|
||||
|
||||
#[error("Database pool error: {0}")]
|
||||
#[diagnostic(code(arbiter::evm::sign::pool))]
|
||||
Pool(#[from] db::PoolError),
|
||||
Database(#[from] DatabaseError),
|
||||
|
||||
#[error("Keyholder error: {0}")]
|
||||
#[diagnostic(code(arbiter::evm::sign::keyholder))]
|
||||
Keyholder(#[from] crate::actors::keyholder::Error),
|
||||
|
||||
#[error("Keyholder mailbox error")]
|
||||
#[diagnostic(code(arbiter::evm::sign::keyholder_send))]
|
||||
KeyholderSend,
|
||||
|
||||
#[error("Signing error: {0}")]
|
||||
#[diagnostic(code(arbiter::evm::sign::signing))]
|
||||
Signing(#[from] alloy::signers::Error),
|
||||
|
||||
#[error("Policy error: {0}")]
|
||||
#[diagnostic(code(arbiter::evm::sign::vet))]
|
||||
Vet(#[from] evm::VetError),
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error, miette::Diagnostic)]
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum Error {
|
||||
#[error("Keyholder error: {0}")]
|
||||
#[diagnostic(code(arbiter::evm::keyholder))]
|
||||
Keyholder(#[from] crate::actors::keyholder::Error),
|
||||
|
||||
#[error("Keyholder mailbox error")]
|
||||
#[diagnostic(code(arbiter::evm::keyholder_send))]
|
||||
KeyholderSend,
|
||||
|
||||
#[error("Database error: {0}")]
|
||||
#[diagnostic(code(arbiter::evm::database))]
|
||||
Database(#[from] diesel::result::Error),
|
||||
Database(#[from] DatabaseError),
|
||||
|
||||
#[error("Database pool error: {0}")]
|
||||
#[diagnostic(code(arbiter::evm::database_pool))]
|
||||
DatabasePool(#[from] db::PoolError),
|
||||
|
||||
#[error("Grant creation error: {0}")]
|
||||
#[diagnostic(code(arbiter::evm::creation))]
|
||||
Creation(#[from] evm::CreationError),
|
||||
#[error("Integrity violation: {0}")]
|
||||
Integrity(#[from] integrity::Error),
|
||||
}
|
||||
|
||||
#[derive(Actor)]
|
||||
@@ -87,22 +75,23 @@ impl EvmActor {
|
||||
// is it safe to seed rng from system once?
|
||||
// todo: audit
|
||||
let rng = StdRng::from_rng(&mut rng());
|
||||
let engine = evm::Engine::new(db.clone());
|
||||
Self { keyholder, db, rng, engine }
|
||||
let engine = evm::Engine::new(db.clone(), keyholder.clone());
|
||||
Self {
|
||||
keyholder,
|
||||
db,
|
||||
rng,
|
||||
engine,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[messages]
|
||||
impl EvmActor {
|
||||
#[message]
|
||||
pub async fn generate(&mut self) -> Result<Address, Error> {
|
||||
pub async fn generate(&mut self) -> Result<(i32, Address), Error> {
|
||||
let (mut key_cell, address) = safe_signer::generate(&mut self.rng);
|
||||
|
||||
// Move raw key bytes into a Vec<u8> MemSafe for KeyHolder
|
||||
let plaintext = {
|
||||
let reader = key_cell.read().expect("MemSafe read");
|
||||
MemSafe::new(reader.to_vec()).expect("MemSafe allocation")
|
||||
};
|
||||
let plaintext = key_cell.read_inline(|reader| SafeCell::new(reader.to_vec()));
|
||||
|
||||
let aead_id: i32 = self
|
||||
.keyholder
|
||||
@@ -110,29 +99,32 @@ impl EvmActor {
|
||||
.await
|
||||
.map_err(|_| Error::KeyholderSend)?;
|
||||
|
||||
let mut conn = self.db.get().await?;
|
||||
insert_into(schema::evm_wallet::table)
|
||||
let mut conn = self.db.get().await.map_err(DatabaseError::from)?;
|
||||
let wallet_id = insert_into(schema::evm_wallet::table)
|
||||
.values(&models::NewEvmWallet {
|
||||
address: address.as_slice().to_vec(),
|
||||
aead_encrypted_id: aead_id,
|
||||
})
|
||||
.execute(&mut conn)
|
||||
.await?;
|
||||
.returning(schema::evm_wallet::id)
|
||||
.get_result(&mut conn)
|
||||
.await
|
||||
.map_err(DatabaseError::from)?;
|
||||
|
||||
Ok(address)
|
||||
Ok((wallet_id, address))
|
||||
}
|
||||
|
||||
#[message]
|
||||
pub async fn list_wallets(&self) -> Result<Vec<Address>, Error> {
|
||||
let mut conn = self.db.get().await?;
|
||||
pub async fn list_wallets(&self) -> Result<Vec<(i32, Address)>, Error> {
|
||||
let mut conn = self.db.get().await.map_err(DatabaseError::from)?;
|
||||
let rows: Vec<models::EvmWallet> = schema::evm_wallet::table
|
||||
.select(models::EvmWallet::as_select())
|
||||
.load(&mut conn)
|
||||
.await?;
|
||||
.await
|
||||
.map_err(DatabaseError::from)?;
|
||||
|
||||
Ok(rows
|
||||
.into_iter()
|
||||
.map(|w| Address::from_slice(&w.address))
|
||||
.map(|w| (w.id, Address::from_slice(&w.address)))
|
||||
.collect())
|
||||
}
|
||||
}
|
||||
@@ -142,49 +134,61 @@ impl EvmActor {
|
||||
#[message]
|
||||
pub async fn useragent_create_grant(
|
||||
&mut self,
|
||||
client_id: i32,
|
||||
basic: SharedGrantSettings,
|
||||
grant: SpecificGrant,
|
||||
) -> Result<i32, evm::CreationError> {
|
||||
) -> Result<integrity::Verified<i32>, Error> {
|
||||
match grant {
|
||||
SpecificGrant::EtherTransfer(settings) => {
|
||||
self.engine
|
||||
.create_grant::<EtherTransfer>(client_id, FullGrant { basic, specific: settings })
|
||||
SpecificGrant::EtherTransfer(settings) => self
|
||||
.engine
|
||||
.create_grant::<EtherTransfer>(CombinedSettings {
|
||||
shared: basic,
|
||||
specific: settings,
|
||||
})
|
||||
.await
|
||||
}
|
||||
SpecificGrant::TokenTransfer(settings) => {
|
||||
self.engine
|
||||
.create_grant::<TokenTransfer>(client_id, FullGrant { basic, specific: settings })
|
||||
.map_err(Error::from),
|
||||
SpecificGrant::TokenTransfer(settings) => self
|
||||
.engine
|
||||
.create_grant::<TokenTransfer>(CombinedSettings {
|
||||
shared: basic,
|
||||
specific: settings,
|
||||
})
|
||||
.await
|
||||
}
|
||||
.map_err(Error::from),
|
||||
}
|
||||
}
|
||||
|
||||
#[message]
|
||||
pub async fn useragent_delete_grant(&mut self, grant_id: i32) -> Result<(), Error> {
|
||||
let mut conn = self.db.get().await?;
|
||||
diesel::update(schema::evm_basic_grant::table)
|
||||
.filter(schema::evm_basic_grant::id.eq(grant_id))
|
||||
.set(schema::evm_basic_grant::revoked_at.eq(SqliteTimestamp::now()))
|
||||
.execute(&mut conn)
|
||||
.await?;
|
||||
Ok(())
|
||||
pub async fn useragent_delete_grant(&mut self, _grant_id: i32) -> Result<(), Error> {
|
||||
// let mut conn = self.db.get().await.map_err(DatabaseError::from)?;
|
||||
// let keyholder = self.keyholder.clone();
|
||||
|
||||
// diesel_async::AsyncConnection::transaction(&mut conn, |conn| {
|
||||
// Box::pin(async move {
|
||||
// diesel::update(schema::evm_basic_grant::table)
|
||||
// .filter(schema::evm_basic_grant::id.eq(grant_id))
|
||||
// .set(schema::evm_basic_grant::revoked_at.eq(SqliteTimestamp::now()))
|
||||
// .execute(conn)
|
||||
// .await?;
|
||||
|
||||
// let signed = integrity::evm::load_signed_grant_by_basic_id(conn, grant_id).await?;
|
||||
|
||||
// diesel::result::QueryResult::Ok(())
|
||||
// })
|
||||
// })
|
||||
// .await
|
||||
// .map_err(DatabaseError::from)?;
|
||||
|
||||
// Ok(())
|
||||
todo!()
|
||||
}
|
||||
|
||||
#[message]
|
||||
pub async fn useragent_list_grants(
|
||||
&mut self,
|
||||
wallet_id: Option<i32>,
|
||||
) -> Result<Vec<EvmBasicGrant>, Error> {
|
||||
let mut conn = self.db.get().await?;
|
||||
let mut query = schema::evm_basic_grant::table
|
||||
.select(EvmBasicGrant::as_select())
|
||||
.filter(schema::evm_basic_grant::revoked_at.is_null())
|
||||
.into_boxed();
|
||||
if let Some(wid) = wallet_id {
|
||||
query = query.filter(schema::evm_basic_grant::wallet_id.eq(wid));
|
||||
pub async fn useragent_list_grants(&mut self) -> Result<Vec<Grant<SpecificGrant>>, Error> {
|
||||
match self.engine.list_all_grants().await {
|
||||
Ok(grants) => Ok(grants),
|
||||
Err(ListError::Database(db_err)) => Err(Error::Database(db_err)),
|
||||
Err(ListError::Integrity(integrity_err)) => Err(Error::Integrity(integrity_err)),
|
||||
}
|
||||
Ok(query.load(&mut conn).await?)
|
||||
}
|
||||
|
||||
#[message]
|
||||
@@ -194,18 +198,29 @@ impl EvmActor {
|
||||
wallet_address: Address,
|
||||
transaction: TxEip1559,
|
||||
) -> Result<SpecificMeaning, SignTransactionError> {
|
||||
let mut conn = self.db.get().await?;
|
||||
let mut conn = self.db.get().await.map_err(DatabaseError::from)?;
|
||||
let wallet = schema::evm_wallet::table
|
||||
.select(models::EvmWallet::as_select())
|
||||
.filter(schema::evm_wallet::address.eq(wallet_address.as_slice()))
|
||||
.first(&mut conn)
|
||||
.await
|
||||
.optional()?
|
||||
.optional()
|
||||
.map_err(DatabaseError::from)?
|
||||
.ok_or(SignTransactionError::WalletNotFound)?;
|
||||
let wallet_access = schema::evm_wallet_access::table
|
||||
.select(models::EvmWalletAccess::as_select())
|
||||
.filter(schema::evm_wallet_access::wallet_id.eq(wallet.id))
|
||||
.filter(schema::evm_wallet_access::client_id.eq(client_id))
|
||||
.first(&mut conn)
|
||||
.await
|
||||
.optional()
|
||||
.map_err(DatabaseError::from)?
|
||||
.ok_or(SignTransactionError::WalletNotFound)?;
|
||||
drop(conn);
|
||||
|
||||
let meaning = self.engine
|
||||
.evaluate_transaction(wallet.id, client_id, transaction.clone(), RunKind::Execution)
|
||||
let meaning = self
|
||||
.engine
|
||||
.evaluate_transaction(wallet_access, transaction.clone(), RunKind::Execution)
|
||||
.await?;
|
||||
|
||||
Ok(meaning)
|
||||
@@ -218,26 +233,38 @@ impl EvmActor {
|
||||
wallet_address: Address,
|
||||
mut transaction: TxEip1559,
|
||||
) -> Result<Signature, SignTransactionError> {
|
||||
let mut conn = self.db.get().await?;
|
||||
let mut conn = self.db.get().await.map_err(DatabaseError::from)?;
|
||||
let wallet = schema::evm_wallet::table
|
||||
.select(models::EvmWallet::as_select())
|
||||
.filter(schema::evm_wallet::address.eq(wallet_address.as_slice()))
|
||||
.first(&mut conn)
|
||||
.await
|
||||
.optional()?
|
||||
.optional()
|
||||
.map_err(DatabaseError::from)?
|
||||
.ok_or(SignTransactionError::WalletNotFound)?;
|
||||
let wallet_access = schema::evm_wallet_access::table
|
||||
.select(models::EvmWalletAccess::as_select())
|
||||
.filter(schema::evm_wallet_access::wallet_id.eq(wallet.id))
|
||||
.filter(schema::evm_wallet_access::client_id.eq(client_id))
|
||||
.first(&mut conn)
|
||||
.await
|
||||
.optional()
|
||||
.map_err(DatabaseError::from)?
|
||||
.ok_or(SignTransactionError::WalletNotFound)?;
|
||||
drop(conn);
|
||||
|
||||
let raw_key: MemSafe<Vec<u8>> = self
|
||||
let raw_key: SafeCell<Vec<u8>> = self
|
||||
.keyholder
|
||||
.ask(Decrypt { aead_id: wallet.aead_encrypted_id })
|
||||
.ask(Decrypt {
|
||||
aead_id: wallet.aead_encrypted_id,
|
||||
})
|
||||
.await
|
||||
.map_err(|_| SignTransactionError::KeyholderSend)?;
|
||||
|
||||
let signer = safe_signer::SafeSigner::from_memsafe(raw_key)?;
|
||||
let signer = safe_signer::SafeSigner::from_cell(raw_key)?;
|
||||
|
||||
self.engine
|
||||
.evaluate_transaction(wallet.id, client_id, transaction.clone(), RunKind::Execution)
|
||||
.evaluate_transaction(wallet_access, transaction.clone(), RunKind::Execution)
|
||||
.await?;
|
||||
|
||||
use alloy::network::TxSignerSync as _;
|
||||
|
||||
@@ -0,0 +1,105 @@
|
||||
use std::ops::ControlFlow;
|
||||
|
||||
use kameo::{
|
||||
Actor, messages,
|
||||
prelude::{ActorId, ActorRef, ActorStopReason, Context, WeakActorRef},
|
||||
reply::ReplySender,
|
||||
};
|
||||
|
||||
use crate::actors::{
|
||||
client::ClientProfile,
|
||||
flow_coordinator::ApprovalError,
|
||||
user_agent::{UserAgentSession, session::BeginNewClientApproval},
|
||||
};
|
||||
|
||||
pub struct Args {
|
||||
pub client: ClientProfile,
|
||||
pub user_agents: Vec<ActorRef<UserAgentSession>>,
|
||||
pub reply: ReplySender<Result<bool, ApprovalError>>,
|
||||
}
|
||||
|
||||
pub struct ClientApprovalController {
|
||||
/// Number of UAs that have not yet responded (approval or denial) or died.
|
||||
pending: usize,
|
||||
/// Number of approvals received so far.
|
||||
approved: usize,
|
||||
reply: Option<ReplySender<Result<bool, ApprovalError>>>,
|
||||
}
|
||||
|
||||
impl ClientApprovalController {
|
||||
fn send_reply(&mut self, result: Result<bool, ApprovalError>) {
|
||||
if let Some(reply) = self.reply.take() {
|
||||
reply.send(result);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Actor for ClientApprovalController {
|
||||
type Args = Args;
|
||||
type Error = ();
|
||||
|
||||
async fn on_start(
|
||||
Args {
|
||||
client,
|
||||
mut user_agents,
|
||||
reply,
|
||||
}: Self::Args,
|
||||
actor_ref: ActorRef<Self>,
|
||||
) -> Result<Self, Self::Error> {
|
||||
let this = Self {
|
||||
pending: user_agents.len(),
|
||||
approved: 0,
|
||||
reply: Some(reply),
|
||||
};
|
||||
|
||||
for user_agent in user_agents.drain(..) {
|
||||
actor_ref.link(&user_agent).await;
|
||||
let _ = user_agent
|
||||
.tell(BeginNewClientApproval {
|
||||
client: client.clone(),
|
||||
controller: actor_ref.clone(),
|
||||
})
|
||||
.await;
|
||||
}
|
||||
|
||||
Ok(this)
|
||||
}
|
||||
|
||||
async fn on_link_died(
|
||||
&mut self,
|
||||
_: WeakActorRef<Self>,
|
||||
_: ActorId,
|
||||
_: ActorStopReason,
|
||||
) -> Result<ControlFlow<ActorStopReason>, Self::Error> {
|
||||
// A linked UA died before responding — counts as a non-approval.
|
||||
self.pending = self.pending.saturating_sub(1);
|
||||
if self.pending == 0 {
|
||||
// At least one UA didn't approve: deny.
|
||||
self.send_reply(Ok(false));
|
||||
return Ok(ControlFlow::Break(ActorStopReason::Normal));
|
||||
}
|
||||
Ok(ControlFlow::Continue(()))
|
||||
}
|
||||
}
|
||||
|
||||
#[messages]
|
||||
impl ClientApprovalController {
|
||||
#[message(ctx)]
|
||||
pub async fn client_approval_answer(&mut self, approved: bool, ctx: &mut Context<Self, ()>) {
|
||||
if !approved {
|
||||
// Denial wins immediately regardless of other pending responses.
|
||||
self.send_reply(Ok(false));
|
||||
ctx.stop();
|
||||
return;
|
||||
}
|
||||
|
||||
self.approved += 1;
|
||||
self.pending = self.pending.saturating_sub(1);
|
||||
|
||||
if self.pending == 0 {
|
||||
// Every connected UA approved.
|
||||
self.send_reply(Ok(true));
|
||||
ctx.stop();
|
||||
}
|
||||
}
|
||||
}
|
||||
118
server/crates/arbiter-server/src/actors/flow_coordinator/mod.rs
Normal file
118
server/crates/arbiter-server/src/actors/flow_coordinator/mod.rs
Normal file
@@ -0,0 +1,118 @@
|
||||
use std::{collections::HashMap, ops::ControlFlow};
|
||||
|
||||
use kameo::{
|
||||
Actor,
|
||||
actor::{ActorId, ActorRef, Spawn},
|
||||
messages,
|
||||
prelude::{ActorStopReason, Context, WeakActorRef},
|
||||
reply::DelegatedReply,
|
||||
};
|
||||
use tracing::info;
|
||||
|
||||
use crate::actors::{
|
||||
client::{ClientProfile, session::ClientSession},
|
||||
flow_coordinator::client_connect_approval::ClientApprovalController,
|
||||
user_agent::session::UserAgentSession,
|
||||
};
|
||||
|
||||
pub mod client_connect_approval;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct FlowCoordinator {
|
||||
pub user_agents: HashMap<ActorId, ActorRef<UserAgentSession>>,
|
||||
pub clients: HashMap<ActorId, ActorRef<ClientSession>>,
|
||||
}
|
||||
|
||||
impl Actor for FlowCoordinator {
|
||||
type Args = Self;
|
||||
|
||||
type Error = ();
|
||||
|
||||
async fn on_start(args: Self::Args, _: ActorRef<Self>) -> Result<Self, Self::Error> {
|
||||
Ok(args)
|
||||
}
|
||||
|
||||
async fn on_link_died(
|
||||
&mut self,
|
||||
_: WeakActorRef<Self>,
|
||||
id: ActorId,
|
||||
_: ActorStopReason,
|
||||
) -> Result<ControlFlow<ActorStopReason>, Self::Error> {
|
||||
if self.user_agents.remove(&id).is_some() {
|
||||
info!(
|
||||
?id,
|
||||
actor = "FlowCoordinator",
|
||||
event = "useragent.disconnected"
|
||||
);
|
||||
} else if self.clients.remove(&id).is_some() {
|
||||
info!(
|
||||
?id,
|
||||
actor = "FlowCoordinator",
|
||||
event = "client.disconnected"
|
||||
);
|
||||
} else {
|
||||
info!(
|
||||
?id,
|
||||
actor = "FlowCoordinator",
|
||||
event = "unknown.actor.disconnected"
|
||||
);
|
||||
}
|
||||
Ok(ControlFlow::Continue(()))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error, Clone, PartialEq, Eq, Hash)]
|
||||
pub enum ApprovalError {
|
||||
#[error("No user agents connected")]
|
||||
NoUserAgentsConnected,
|
||||
}
|
||||
|
||||
#[messages]
|
||||
impl FlowCoordinator {
|
||||
#[message(ctx)]
|
||||
pub async fn register_user_agent(
|
||||
&mut self,
|
||||
actor: ActorRef<UserAgentSession>,
|
||||
ctx: &mut Context<Self, ()>,
|
||||
) {
|
||||
info!(id = %actor.id(), actor = "FlowCoordinator", event = "useragent.connected");
|
||||
ctx.actor_ref().link(&actor).await;
|
||||
self.user_agents.insert(actor.id(), actor);
|
||||
}
|
||||
|
||||
#[message(ctx)]
|
||||
pub async fn register_client(
|
||||
&mut self,
|
||||
actor: ActorRef<ClientSession>,
|
||||
ctx: &mut Context<Self, ()>,
|
||||
) {
|
||||
info!(id = %actor.id(), actor = "FlowCoordinator", event = "client.connected");
|
||||
ctx.actor_ref().link(&actor).await;
|
||||
self.clients.insert(actor.id(), actor);
|
||||
}
|
||||
|
||||
#[message(ctx)]
|
||||
pub async fn request_client_approval(
|
||||
&mut self,
|
||||
client: ClientProfile,
|
||||
ctx: &mut Context<Self, DelegatedReply<Result<bool, ApprovalError>>>,
|
||||
) -> DelegatedReply<Result<bool, ApprovalError>> {
|
||||
let (reply, Some(reply_sender)) = ctx.reply_sender() else {
|
||||
unreachable!("Expected `request_client_approval` to have callback channel");
|
||||
};
|
||||
|
||||
let refs: Vec<_> = self.user_agents.values().cloned().collect();
|
||||
if refs.is_empty() {
|
||||
reply_sender.send(Err(ApprovalError::NoUserAgentsConnected));
|
||||
return reply;
|
||||
}
|
||||
|
||||
ClientApprovalController::spawn(client_connect_approval::Args {
|
||||
client,
|
||||
user_agents: refs,
|
||||
reply: reply_sender,
|
||||
});
|
||||
|
||||
reply
|
||||
}
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
pub mod v1;
|
||||
@@ -1,237 +0,0 @@
|
||||
use std::ops::Deref as _;
|
||||
|
||||
use argon2::{Algorithm, Argon2, password_hash::Salt as ArgonSalt};
|
||||
use chacha20poly1305::{
|
||||
AeadInPlace, Key, KeyInit as _, XChaCha20Poly1305, XNonce,
|
||||
aead::{AeadMut, Error, Payload},
|
||||
};
|
||||
use memsafe::MemSafe;
|
||||
use rand::{
|
||||
Rng as _, SeedableRng,
|
||||
rngs::{StdRng, SysRng},
|
||||
};
|
||||
|
||||
pub const ROOT_KEY_TAG: &[u8] = "arbiter/seal/v1".as_bytes();
|
||||
pub const TAG: &[u8] = "arbiter/private-key/v1".as_bytes();
|
||||
|
||||
pub const NONCE_LENGTH: usize = 24;
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct Nonce([u8; NONCE_LENGTH]);
|
||||
impl Nonce {
|
||||
pub fn increment(&mut self) {
|
||||
for i in (0..self.0.len()).rev() {
|
||||
if self.0[i] == 0xFF {
|
||||
self.0[i] = 0;
|
||||
} else {
|
||||
self.0[i] += 1;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn to_vec(&self) -> Vec<u8> {
|
||||
self.0.to_vec()
|
||||
}
|
||||
}
|
||||
impl<'a> TryFrom<&'a [u8]> for Nonce {
|
||||
type Error = ();
|
||||
|
||||
fn try_from(value: &'a [u8]) -> Result<Self, Self::Error> {
|
||||
if value.len() != NONCE_LENGTH {
|
||||
return Err(());
|
||||
}
|
||||
let mut nonce = [0u8; NONCE_LENGTH];
|
||||
nonce.copy_from_slice(value);
|
||||
Ok(Self(nonce))
|
||||
}
|
||||
}
|
||||
|
||||
pub struct KeyCell(pub MemSafe<Key>);
|
||||
impl From<MemSafe<Key>> for KeyCell {
|
||||
fn from(value: MemSafe<Key>) -> Self {
|
||||
Self(value)
|
||||
}
|
||||
}
|
||||
impl TryFrom<MemSafe<Vec<u8>>> for KeyCell {
|
||||
type Error = ();
|
||||
|
||||
fn try_from(mut value: MemSafe<Vec<u8>>) -> Result<Self, Self::Error> {
|
||||
let value = value.read().unwrap();
|
||||
if value.len() != size_of::<Key>() {
|
||||
return Err(());
|
||||
}
|
||||
let mut cell = MemSafe::new(Key::default()).unwrap();
|
||||
{
|
||||
let mut cell_write = cell.write().unwrap();
|
||||
let cell_slice: &mut [u8] = cell_write.as_mut();
|
||||
cell_slice.copy_from_slice(&value);
|
||||
}
|
||||
Ok(Self(cell))
|
||||
}
|
||||
}
|
||||
|
||||
impl KeyCell {
|
||||
pub fn new_secure_random() -> Self {
|
||||
let mut key = MemSafe::new(Key::default()).unwrap();
|
||||
{
|
||||
let mut key_buffer = key.write().unwrap();
|
||||
let key_buffer: &mut [u8] = key_buffer.as_mut();
|
||||
|
||||
let mut rng = StdRng::try_from_rng(&mut SysRng).unwrap();
|
||||
rng.fill_bytes(key_buffer);
|
||||
}
|
||||
|
||||
key.into()
|
||||
}
|
||||
|
||||
pub fn encrypt_in_place(
|
||||
&mut self,
|
||||
nonce: &Nonce,
|
||||
associated_data: &[u8],
|
||||
mut buffer: impl AsMut<Vec<u8>>,
|
||||
) -> Result<(), Error> {
|
||||
let key_reader = self.0.read().unwrap();
|
||||
let key_ref = key_reader.deref();
|
||||
let cipher = XChaCha20Poly1305::new(key_ref);
|
||||
let nonce = XNonce::from_slice(nonce.0.as_ref());
|
||||
let buffer = buffer.as_mut();
|
||||
cipher.encrypt_in_place(nonce, associated_data, buffer)
|
||||
}
|
||||
pub fn decrypt_in_place(
|
||||
&mut self,
|
||||
nonce: &Nonce,
|
||||
associated_data: &[u8],
|
||||
buffer: &mut MemSafe<Vec<u8>>,
|
||||
) -> Result<(), Error> {
|
||||
let key_reader = self.0.read().unwrap();
|
||||
let key_ref = key_reader.deref();
|
||||
let cipher = XChaCha20Poly1305::new(key_ref);
|
||||
let nonce = XNonce::from_slice(nonce.0.as_ref());
|
||||
let mut buffer = buffer.write().unwrap();
|
||||
let buffer: &mut Vec<u8> = buffer.as_mut();
|
||||
cipher.decrypt_in_place(nonce, associated_data, buffer)
|
||||
}
|
||||
|
||||
pub fn encrypt(
|
||||
&mut self,
|
||||
nonce: &Nonce,
|
||||
associated_data: &[u8],
|
||||
plaintext: impl AsRef<[u8]>,
|
||||
) -> Result<Vec<u8>, Error> {
|
||||
let key_reader = self.0.read().unwrap();
|
||||
let key_ref = key_reader.deref();
|
||||
let mut cipher = XChaCha20Poly1305::new(key_ref);
|
||||
let nonce = XNonce::from_slice(nonce.0.as_ref());
|
||||
|
||||
let ciphertext = cipher.encrypt(
|
||||
nonce,
|
||||
Payload {
|
||||
msg: plaintext.as_ref(),
|
||||
aad: associated_data,
|
||||
},
|
||||
)?;
|
||||
Ok(ciphertext)
|
||||
}
|
||||
}
|
||||
|
||||
pub type Salt = [u8; ArgonSalt::RECOMMENDED_LENGTH];
|
||||
|
||||
pub fn generate_salt() -> Salt {
|
||||
let mut salt = Salt::default();
|
||||
let mut rng = StdRng::try_from_rng(&mut SysRng).unwrap();
|
||||
rng.fill_bytes(&mut salt);
|
||||
salt
|
||||
}
|
||||
|
||||
/// User password might be of different length, have not enough entropy, etc...
|
||||
/// Derive a fixed-length key from the password using Argon2id, which is designed for password hashing and key derivation.
|
||||
pub fn derive_seal_key(mut password: MemSafe<Vec<u8>>, salt: &Salt) -> KeyCell {
|
||||
let params = argon2::Params::new(262_144, 3, 4, None).unwrap();
|
||||
let hasher = Argon2::new(Algorithm::Argon2id, argon2::Version::V0x13, params);
|
||||
let mut key = MemSafe::new(Key::default()).unwrap();
|
||||
{
|
||||
let password_source = password.read().unwrap();
|
||||
let mut key_buffer = key.write().unwrap();
|
||||
let key_buffer: &mut [u8] = key_buffer.as_mut();
|
||||
|
||||
hasher
|
||||
.hash_password_into(password_source.deref(), salt, key_buffer)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
key.into()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use memsafe::MemSafe;
|
||||
|
||||
#[test]
|
||||
pub fn derive_seal_key_deterministic() {
|
||||
static PASSWORD: &[u8] = b"password";
|
||||
let password = MemSafe::new(PASSWORD.to_vec()).unwrap();
|
||||
let password2 = MemSafe::new(PASSWORD.to_vec()).unwrap();
|
||||
let salt = generate_salt();
|
||||
|
||||
let mut key1 = derive_seal_key(password, &salt);
|
||||
let mut key2 = derive_seal_key(password2, &salt);
|
||||
|
||||
let key1_reader = key1.0.read().unwrap();
|
||||
let key2_reader = key2.0.read().unwrap();
|
||||
|
||||
assert_eq!(key1_reader.deref(), key2_reader.deref());
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn successful_derive() {
|
||||
static PASSWORD: &[u8] = b"password";
|
||||
let password = MemSafe::new(PASSWORD.to_vec()).unwrap();
|
||||
let salt = generate_salt();
|
||||
|
||||
let mut key = derive_seal_key(password, &salt);
|
||||
let key_reader = key.0.read().unwrap();
|
||||
let key_ref = key_reader.deref();
|
||||
|
||||
assert_ne!(key_ref.as_slice(), &[0u8; 32][..]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
pub fn encrypt_decrypt() {
|
||||
static PASSWORD: &[u8] = b"password";
|
||||
let password = MemSafe::new(PASSWORD.to_vec()).unwrap();
|
||||
let salt = generate_salt();
|
||||
|
||||
let mut key = derive_seal_key(password, &salt);
|
||||
let nonce = Nonce(*b"unique nonce 123 1231233"); // 24 bytes for XChaCha20Poly1305
|
||||
let associated_data = b"associated data";
|
||||
let mut buffer = b"secret data".to_vec();
|
||||
|
||||
key.encrypt_in_place(&nonce, associated_data, &mut buffer)
|
||||
.unwrap();
|
||||
assert_ne!(buffer, b"secret data");
|
||||
|
||||
let mut buffer = MemSafe::new(buffer).unwrap();
|
||||
|
||||
key.decrypt_in_place(&nonce, associated_data, &mut buffer)
|
||||
.unwrap();
|
||||
|
||||
let buffer = buffer.read().unwrap();
|
||||
assert_eq!(*buffer, b"secret data");
|
||||
}
|
||||
|
||||
#[test]
|
||||
// We should fuzz this
|
||||
pub fn test_nonce_increment() {
|
||||
let mut nonce = Nonce([0u8; NONCE_LENGTH]);
|
||||
nonce.increment();
|
||||
|
||||
assert_eq!(
|
||||
nonce.0,
|
||||
[
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1
|
||||
]
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -4,22 +4,30 @@ use diesel::{
|
||||
dsl::{insert_into, update},
|
||||
};
|
||||
use diesel_async::{AsyncConnection, RunQueryDsl};
|
||||
use hmac::Mac as _;
|
||||
use kameo::{Actor, Reply, messages};
|
||||
use memsafe::MemSafe;
|
||||
use strum::{EnumDiscriminants, IntoDiscriminant};
|
||||
use tracing::{error, info};
|
||||
|
||||
use crate::db::{
|
||||
use crate::{
|
||||
crypto::{
|
||||
KeyCell, derive_key,
|
||||
encryption::v1::{self, Nonce},
|
||||
integrity::v1::HmacSha256,
|
||||
},
|
||||
safe_cell::SafeCell,
|
||||
};
|
||||
use crate::{
|
||||
db::{
|
||||
self,
|
||||
models::{self, RootKeyHistory},
|
||||
schema::{self},
|
||||
},
|
||||
safe_cell::SafeCellHandle as _,
|
||||
};
|
||||
use encryption::v1::{self, KeyCell, Nonce};
|
||||
|
||||
pub mod encryption;
|
||||
|
||||
#[derive(Default, EnumDiscriminants)]
|
||||
#[strum_discriminants(derive(Reply), vis(pub))]
|
||||
#[strum_discriminants(derive(Reply), vis(pub), name(KeyHolderState))]
|
||||
enum State {
|
||||
#[default]
|
||||
Unbootstrapped,
|
||||
@@ -32,36 +40,28 @@ enum State {
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error, miette::Diagnostic)]
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum Error {
|
||||
#[error("Keyholder is already bootstrapped")]
|
||||
#[diagnostic(code(arbiter::keyholder::already_bootstrapped))]
|
||||
AlreadyBootstrapped,
|
||||
#[error("Keyholder is not bootstrapped")]
|
||||
#[diagnostic(code(arbiter::keyholder::not_bootstrapped))]
|
||||
NotBootstrapped,
|
||||
#[error("Invalid key provided")]
|
||||
#[diagnostic(code(arbiter::keyholder::invalid_key))]
|
||||
InvalidKey,
|
||||
|
||||
#[error("Requested aead entry not found")]
|
||||
#[diagnostic(code(arbiter::keyholder::aead_not_found))]
|
||||
NotFound,
|
||||
|
||||
#[error("Encryption error: {0}")]
|
||||
#[diagnostic(code(arbiter::keyholder::encryption_error))]
|
||||
Encryption(#[from] chacha20poly1305::aead::Error),
|
||||
|
||||
#[error("Database error: {0}")]
|
||||
#[diagnostic(code(arbiter::keyholder::database_error))]
|
||||
DatabaseConnection(#[from] db::PoolError),
|
||||
|
||||
#[error("Database transaction error: {0}")]
|
||||
#[diagnostic(code(arbiter::keyholder::database_transaction_error))]
|
||||
DatabaseTransaction(#[from] diesel::result::Error),
|
||||
|
||||
#[error("Broken database")]
|
||||
#[diagnostic(code(arbiter::keyholder::broken_database))]
|
||||
BrokenDatabase,
|
||||
}
|
||||
|
||||
@@ -111,8 +111,7 @@ impl KeyHolder {
|
||||
.first(conn)
|
||||
.await?;
|
||||
|
||||
let mut nonce =
|
||||
v1::Nonce::try_from(current_nonce.as_slice()).map_err(|_| {
|
||||
let mut nonce = Nonce::try_from(current_nonce.as_slice()).map_err(|_| {
|
||||
error!(
|
||||
"Broken database: invalid nonce for root key history id={}",
|
||||
root_key_id
|
||||
@@ -136,28 +135,27 @@ impl KeyHolder {
|
||||
}
|
||||
|
||||
#[message]
|
||||
pub async fn bootstrap(&mut self, seal_key_raw: MemSafe<Vec<u8>>) -> Result<(), Error> {
|
||||
pub async fn bootstrap(&mut self, seal_key_raw: SafeCell<Vec<u8>>) -> Result<(), Error> {
|
||||
if !matches!(self.state, State::Unbootstrapped) {
|
||||
return Err(Error::AlreadyBootstrapped);
|
||||
}
|
||||
let salt = v1::generate_salt();
|
||||
let mut seal_key = v1::derive_seal_key(seal_key_raw, &salt);
|
||||
let mut seal_key = derive_key(seal_key_raw, &salt);
|
||||
let mut root_key = KeyCell::new_secure_random();
|
||||
|
||||
// Zero nonces are fine because they are one-time
|
||||
let root_key_nonce = v1::Nonce::default();
|
||||
let data_encryption_nonce = v1::Nonce::default();
|
||||
let root_key_nonce = Nonce::default();
|
||||
let data_encryption_nonce = Nonce::default();
|
||||
|
||||
let root_key_ciphertext: Vec<u8> = {
|
||||
let root_key_reader = root_key.0.read().unwrap();
|
||||
let root_key_reader = root_key_reader.as_slice();
|
||||
let root_key_ciphertext: Vec<u8> = root_key.0.read_inline(|reader| {
|
||||
let root_key_reader = reader.as_slice();
|
||||
seal_key
|
||||
.encrypt(&root_key_nonce, v1::ROOT_KEY_TAG, root_key_reader)
|
||||
.map_err(|err| {
|
||||
error!(?err, "Fatal bootstrap error");
|
||||
Error::Encryption(err)
|
||||
})?
|
||||
};
|
||||
})
|
||||
})?;
|
||||
|
||||
let mut conn = self.db.get().await?;
|
||||
|
||||
@@ -199,7 +197,7 @@ impl KeyHolder {
|
||||
}
|
||||
|
||||
#[message]
|
||||
pub async fn try_unseal(&mut self, seal_key_raw: MemSafe<Vec<u8>>) -> Result<(), Error> {
|
||||
pub async fn try_unseal(&mut self, seal_key_raw: SafeCell<Vec<u8>>) -> Result<(), Error> {
|
||||
let State::Sealed {
|
||||
root_key_history_id,
|
||||
} = &self.state
|
||||
@@ -212,7 +210,6 @@ impl KeyHolder {
|
||||
let mut conn = self.db.get().await?;
|
||||
schema::root_key_history::table
|
||||
.filter(schema::root_key_history::id.eq(*root_key_history_id))
|
||||
.select(schema::root_key_history::data_encryption_nonce)
|
||||
.select(RootKeyHistory::as_select())
|
||||
.first(&mut conn)
|
||||
.await?
|
||||
@@ -223,9 +220,9 @@ impl KeyHolder {
|
||||
error!("Broken database: invalid salt for root key");
|
||||
Error::BrokenDatabase
|
||||
})?;
|
||||
let mut seal_key = v1::derive_seal_key(seal_key_raw, &salt);
|
||||
let mut seal_key = derive_key(seal_key_raw, &salt);
|
||||
|
||||
let mut root_key = MemSafe::new(current_key.ciphertext.clone()).unwrap();
|
||||
let mut root_key = SafeCell::new(current_key.ciphertext.clone());
|
||||
|
||||
let nonce = v1::Nonce::try_from(current_key.root_key_encryption_nonce.as_slice()).map_err(
|
||||
|_| {
|
||||
@@ -243,7 +240,7 @@ impl KeyHolder {
|
||||
|
||||
self.state = State::Unsealed {
|
||||
root_key_history_id: current_key.id,
|
||||
root_key: v1::KeyCell::try_from(root_key).map_err(|err| {
|
||||
root_key: KeyCell::try_from(root_key).map_err(|err| {
|
||||
error!(?err, "Broken database: invalid encryption key size");
|
||||
Error::BrokenDatabase
|
||||
})?,
|
||||
@@ -254,9 +251,8 @@ impl KeyHolder {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Decrypts the `aead_encrypted` entry with the given ID and returns the plaintext
|
||||
#[message]
|
||||
pub async fn decrypt(&mut self, aead_id: i32) -> Result<MemSafe<Vec<u8>>, Error> {
|
||||
pub async fn decrypt(&mut self, aead_id: i32) -> Result<SafeCell<Vec<u8>>, Error> {
|
||||
let State::Unsealed { root_key, .. } = &mut self.state else {
|
||||
return Err(Error::NotBootstrapped);
|
||||
};
|
||||
@@ -279,17 +275,18 @@ impl KeyHolder {
|
||||
);
|
||||
Error::BrokenDatabase
|
||||
})?;
|
||||
let mut output = MemSafe::new(row.ciphertext).unwrap();
|
||||
let mut output = SafeCell::new(row.ciphertext);
|
||||
root_key.decrypt_in_place(&nonce, v1::TAG, &mut output)?;
|
||||
Ok(output)
|
||||
}
|
||||
|
||||
// Creates new `aead_encrypted` entry in the database and returns it's ID
|
||||
#[message]
|
||||
pub async fn create_new(&mut self, mut plaintext: MemSafe<Vec<u8>>) -> Result<i32, Error> {
|
||||
pub async fn create_new(&mut self, mut plaintext: SafeCell<Vec<u8>>) -> Result<i32, Error> {
|
||||
let State::Unsealed {
|
||||
root_key,
|
||||
root_key_history_id,
|
||||
..
|
||||
} = &mut self.state
|
||||
else {
|
||||
return Err(Error::NotBootstrapped);
|
||||
@@ -299,7 +296,7 @@ impl KeyHolder {
|
||||
// Borrow checker note: &mut borrow a few lines above is disjoint from this field
|
||||
let nonce = Self::get_new_nonce(&self.db, *root_key_history_id).await?;
|
||||
|
||||
let mut ciphertext_buffer = plaintext.write().unwrap();
|
||||
let mut ciphertext_buffer = plaintext.write();
|
||||
let ciphertext_buffer: &mut Vec<u8> = ciphertext_buffer.as_mut();
|
||||
root_key.encrypt_in_place(&nonce, v1::TAG, &mut *ciphertext_buffer)?;
|
||||
|
||||
@@ -313,7 +310,7 @@ impl KeyHolder {
|
||||
current_nonce: nonce.to_vec(),
|
||||
schema_version: 1,
|
||||
associated_root_key_id: *root_key_history_id,
|
||||
created_at: Utc::now().into()
|
||||
created_at: Utc::now().into(),
|
||||
})
|
||||
.returning(schema::aead_encrypted::id)
|
||||
.get_result(&mut conn)
|
||||
@@ -323,10 +320,64 @@ impl KeyHolder {
|
||||
}
|
||||
|
||||
#[message]
|
||||
pub fn get_state(&self) -> StateDiscriminants {
|
||||
pub fn get_state(&self) -> KeyHolderState {
|
||||
self.state.discriminant()
|
||||
}
|
||||
|
||||
#[message]
|
||||
pub fn sign_integrity(&mut self, mac_input: Vec<u8>) -> Result<(i32, Vec<u8>), Error> {
|
||||
let State::Unsealed {
|
||||
root_key,
|
||||
root_key_history_id,
|
||||
} = &mut self.state
|
||||
else {
|
||||
return Err(Error::NotBootstrapped);
|
||||
};
|
||||
|
||||
let mut hmac = root_key
|
||||
.0
|
||||
.read_inline(|k| match HmacSha256::new_from_slice(k) {
|
||||
Ok(v) => v,
|
||||
Err(_) => unreachable!("HMAC accepts keys of any size"),
|
||||
});
|
||||
hmac.update(&root_key_history_id.to_be_bytes());
|
||||
hmac.update(&mac_input);
|
||||
|
||||
let mac = hmac.finalize().into_bytes().to_vec();
|
||||
Ok((*root_key_history_id, mac))
|
||||
}
|
||||
|
||||
#[message]
|
||||
pub fn verify_integrity(
|
||||
&mut self,
|
||||
mac_input: Vec<u8>,
|
||||
expected_mac: Vec<u8>,
|
||||
key_version: i32,
|
||||
) -> Result<bool, Error> {
|
||||
let State::Unsealed {
|
||||
root_key,
|
||||
root_key_history_id,
|
||||
} = &mut self.state
|
||||
else {
|
||||
return Err(Error::NotBootstrapped);
|
||||
};
|
||||
|
||||
if *root_key_history_id != key_version {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
let mut hmac = root_key
|
||||
.0
|
||||
.read_inline(|k| match HmacSha256::new_from_slice(k) {
|
||||
Ok(v) => v,
|
||||
Err(_) => unreachable!("HMAC accepts keys of any size"),
|
||||
});
|
||||
hmac.update(&key_version.to_be_bytes());
|
||||
hmac.update(&mac_input);
|
||||
|
||||
Ok(hmac.verify_slice(&expected_mac).is_ok())
|
||||
}
|
||||
|
||||
#[message]
|
||||
pub fn seal(&mut self) -> Result<(), Error> {
|
||||
let State::Unsealed {
|
||||
@@ -348,15 +399,17 @@ mod tests {
|
||||
use diesel::SelectableHelper;
|
||||
|
||||
use diesel_async::RunQueryDsl;
|
||||
use memsafe::MemSafe;
|
||||
|
||||
use crate::db::{self};
|
||||
use crate::{
|
||||
db::{self},
|
||||
safe_cell::SafeCell,
|
||||
};
|
||||
|
||||
use super::*;
|
||||
|
||||
async fn bootstrapped_actor(db: &db::DatabasePool) -> KeyHolder {
|
||||
let mut actor = KeyHolder::new(db.clone()).await.unwrap();
|
||||
let seal_key = MemSafe::new(b"test-seal-key".to_vec()).unwrap();
|
||||
let seal_key = SafeCell::new(b"test-seal-key".to_vec());
|
||||
actor.bootstrap(seal_key).await.unwrap();
|
||||
actor
|
||||
}
|
||||
@@ -391,7 +444,7 @@ mod tests {
|
||||
assert_eq!(root_row.data_encryption_nonce, n2.to_vec());
|
||||
|
||||
let id = actor
|
||||
.create_new(MemSafe::new(b"post-interleave".to_vec()).unwrap())
|
||||
.create_new(SafeCell::new(b"post-interleave".to_vec()))
|
||||
.await
|
||||
.unwrap();
|
||||
let row: models::AeadEncrypted = schema::aead_encrypted::table
|
||||
|
||||
@@ -1,27 +1,27 @@
|
||||
use kameo::actor::{ActorRef, Spawn};
|
||||
use miette::Diagnostic;
|
||||
use thiserror::Error;
|
||||
|
||||
use crate::{
|
||||
actors::{bootstrap::Bootstrapper, evm::EvmActor, keyholder::KeyHolder, router::MessageRouter},
|
||||
actors::{
|
||||
bootstrap::Bootstrapper, evm::EvmActor, flow_coordinator::FlowCoordinator,
|
||||
keyholder::KeyHolder,
|
||||
},
|
||||
db,
|
||||
};
|
||||
|
||||
pub mod bootstrap;
|
||||
pub mod client;
|
||||
mod evm;
|
||||
pub mod flow_coordinator;
|
||||
pub mod keyholder;
|
||||
pub mod router;
|
||||
pub mod user_agent;
|
||||
|
||||
#[derive(Error, Debug, Diagnostic)]
|
||||
#[derive(Error, Debug)]
|
||||
pub enum SpawnError {
|
||||
#[error("Failed to spawn Bootstrapper actor")]
|
||||
#[diagnostic(code(SpawnError::Bootstrapper))]
|
||||
Bootstrapper(#[from] bootstrap::Error),
|
||||
|
||||
#[error("Failed to spawn KeyHolder actor")]
|
||||
#[diagnostic(code(SpawnError::KeyHolder))]
|
||||
KeyHolder(#[from] keyholder::Error),
|
||||
}
|
||||
|
||||
@@ -30,7 +30,7 @@ pub enum SpawnError {
|
||||
pub struct GlobalActors {
|
||||
pub key_holder: ActorRef<KeyHolder>,
|
||||
pub bootstrapper: ActorRef<Bootstrapper>,
|
||||
pub router: ActorRef<MessageRouter>,
|
||||
pub flow_coordinator: ActorRef<FlowCoordinator>,
|
||||
pub evm: ActorRef<EvmActor>,
|
||||
}
|
||||
|
||||
@@ -41,7 +41,7 @@ impl GlobalActors {
|
||||
bootstrapper: Bootstrapper::spawn(Bootstrapper::new(&db).await?),
|
||||
evm: EvmActor::spawn(EvmActor::new(key_holder.clone(), db)),
|
||||
key_holder,
|
||||
router: MessageRouter::spawn(MessageRouter::default()),
|
||||
flow_coordinator: FlowCoordinator::spawn(FlowCoordinator::default()),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,79 +0,0 @@
|
||||
use std::{
|
||||
collections::{HashMap},
|
||||
ops::ControlFlow,
|
||||
};
|
||||
|
||||
use kameo::{
|
||||
Actor,
|
||||
actor::{ActorId, ActorRef},
|
||||
messages,
|
||||
prelude::{ActorStopReason, Context, WeakActorRef},
|
||||
};
|
||||
use tracing::info;
|
||||
|
||||
use crate::actors::{client::session::ClientSession, user_agent::session::UserAgentSession};
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct MessageRouter {
|
||||
pub user_agents: HashMap<ActorId, ActorRef<UserAgentSession>>,
|
||||
pub clients: HashMap<ActorId, ActorRef<ClientSession>>,
|
||||
}
|
||||
|
||||
impl Actor for MessageRouter {
|
||||
type Args = Self;
|
||||
|
||||
type Error = ();
|
||||
|
||||
async fn on_start(args: Self::Args, _: ActorRef<Self>) -> Result<Self, Self::Error> {
|
||||
Ok(args)
|
||||
}
|
||||
|
||||
async fn on_link_died(
|
||||
&mut self,
|
||||
_: WeakActorRef<Self>,
|
||||
id: ActorId,
|
||||
_: ActorStopReason,
|
||||
) -> Result<ControlFlow<ActorStopReason>, Self::Error> {
|
||||
if self.user_agents.remove(&id).is_some() {
|
||||
info!(
|
||||
?id,
|
||||
actor = "MessageRouter",
|
||||
event = "useragent.disconnected"
|
||||
);
|
||||
} else if self.clients.remove(&id).is_some() {
|
||||
info!(?id, actor = "MessageRouter", event = "client.disconnected");
|
||||
} else {
|
||||
info!(
|
||||
?id,
|
||||
actor = "MessageRouter",
|
||||
event = "unknown.actor.disconnected"
|
||||
);
|
||||
}
|
||||
Ok(ControlFlow::Continue(()))
|
||||
}
|
||||
}
|
||||
|
||||
#[messages]
|
||||
impl MessageRouter {
|
||||
#[message(ctx)]
|
||||
pub async fn register_user_agent(
|
||||
&mut self,
|
||||
actor: ActorRef<UserAgentSession>,
|
||||
ctx: &mut Context<Self, ()>,
|
||||
) {
|
||||
info!(id = %actor.id(), actor = "MessageRouter", event = "useragent.connected");
|
||||
ctx.actor_ref().link(&actor).await;
|
||||
self.user_agents.insert(actor.id(), actor);
|
||||
}
|
||||
|
||||
#[message(ctx)]
|
||||
pub async fn register_client(
|
||||
&mut self,
|
||||
actor: ActorRef<ClientSession>,
|
||||
ctx: &mut Context<Self, ()>,
|
||||
) {
|
||||
info!(id = %actor.id(), actor = "MessageRouter", event = "client.connected");
|
||||
ctx.actor_ref().link(&actor).await;
|
||||
self.clients.insert(actor.id(), actor);
|
||||
}
|
||||
}
|
||||
@@ -1,92 +1,98 @@
|
||||
use arbiter_proto::proto::user_agent::{
|
||||
AuthChallengeRequest, AuthChallengeSolution, UserAgentRequest,
|
||||
user_agent_request::Payload as UserAgentRequestPayload,
|
||||
};
|
||||
use ed25519_dalek::VerifyingKey;
|
||||
use arbiter_proto::transport::Bi;
|
||||
use tracing::error;
|
||||
|
||||
use crate::actors::user_agent::{
|
||||
UserAgentConnection,
|
||||
auth::state::{AuthContext, AuthStateMachine}, session::UserAgentSession,
|
||||
AuthPublicKey, UserAgentConnection,
|
||||
auth::state::{AuthContext, AuthStateMachine},
|
||||
};
|
||||
|
||||
#[derive(thiserror::Error, Debug, PartialEq)]
|
||||
pub enum Error {
|
||||
#[error("Unexpected message payload")]
|
||||
UnexpectedMessagePayload,
|
||||
#[error("Invalid client public key length")]
|
||||
InvalidClientPubkeyLength,
|
||||
#[error("Invalid client public key encoding")]
|
||||
InvalidAuthPubkeyEncoding,
|
||||
#[error("Database pool unavailable")]
|
||||
DatabasePoolUnavailable,
|
||||
#[error("Database operation failed")]
|
||||
DatabaseOperationFailed,
|
||||
#[error("Public key not registered")]
|
||||
PublicKeyNotRegistered,
|
||||
#[error("Transport error")]
|
||||
Transport,
|
||||
#[error("Invalid bootstrap token")]
|
||||
InvalidBootstrapToken,
|
||||
#[error("Bootstrapper actor unreachable")]
|
||||
BootstrapperActorUnreachable,
|
||||
#[error("Invalid challenge solution")]
|
||||
InvalidChallengeSolution,
|
||||
}
|
||||
|
||||
mod state;
|
||||
use state::*;
|
||||
|
||||
fn parse_auth_event(payload: UserAgentRequestPayload) -> Result<AuthEvents, Error> {
|
||||
match payload {
|
||||
UserAgentRequestPayload::AuthChallengeRequest(AuthChallengeRequest {
|
||||
pubkey,
|
||||
bootstrap_token: None,
|
||||
}) => {
|
||||
let pubkey_bytes = pubkey.as_array().ok_or(Error::InvalidClientPubkeyLength)?;
|
||||
let pubkey = VerifyingKey::from_bytes(pubkey_bytes)
|
||||
.map_err(|_| Error::InvalidAuthPubkeyEncoding)?;
|
||||
Ok(AuthEvents::AuthRequest(ChallengeRequest {
|
||||
pubkey: pubkey.into(),
|
||||
}))
|
||||
}
|
||||
UserAgentRequestPayload::AuthChallengeRequest(AuthChallengeRequest {
|
||||
pubkey,
|
||||
bootstrap_token: Some(token),
|
||||
}) => {
|
||||
let pubkey_bytes = pubkey.as_array().ok_or(Error::InvalidClientPubkeyLength)?;
|
||||
let pubkey = VerifyingKey::from_bytes(pubkey_bytes)
|
||||
.map_err(|_| Error::InvalidAuthPubkeyEncoding)?;
|
||||
Ok(AuthEvents::BootstrapAuthRequest(BootstrapAuthRequest {
|
||||
pubkey: pubkey.into(),
|
||||
token,
|
||||
}))
|
||||
}
|
||||
UserAgentRequestPayload::AuthChallengeSolution(AuthChallengeSolution { signature }) => {
|
||||
Ok(AuthEvents::ReceivedSolution(ChallengeSolution {
|
||||
solution: signature,
|
||||
}))
|
||||
}
|
||||
_ => Err(Error::UnexpectedMessagePayload),
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum Inbound {
|
||||
AuthChallengeRequest {
|
||||
pubkey: AuthPublicKey,
|
||||
bootstrap_token: Option<String>,
|
||||
},
|
||||
AuthChallengeSolution {
|
||||
signature: Vec<u8>,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum Error {
|
||||
UnregisteredPublicKey,
|
||||
InvalidChallengeSolution,
|
||||
InvalidBootstrapToken,
|
||||
Internal { details: String },
|
||||
Transport,
|
||||
}
|
||||
|
||||
impl Error {
|
||||
#[track_caller]
|
||||
pub(super) fn internal(details: impl Into<String>, err: &impl std::fmt::Debug) -> Self {
|
||||
let details = details.into();
|
||||
let caller = std::panic::Location::caller();
|
||||
error!(
|
||||
caller_file = %caller.file(),
|
||||
caller_line = caller.line(),
|
||||
caller_column = caller.column(),
|
||||
details = %details,
|
||||
error = ?err,
|
||||
"Internal error"
|
||||
);
|
||||
|
||||
Self::Internal { details }
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn authenticate(props: &mut UserAgentConnection) -> Result<VerifyingKey, Error> {
|
||||
let mut state = AuthStateMachine::new(AuthContext::new(props));
|
||||
impl From<diesel::result::Error> for Error {
|
||||
fn from(e: diesel::result::Error) -> Self {
|
||||
Self::internal("Database error", &e)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum Outbound {
|
||||
AuthChallenge { nonce: i32 },
|
||||
AuthSuccess,
|
||||
}
|
||||
|
||||
fn parse_auth_event(payload: Inbound) -> AuthEvents {
|
||||
match payload {
|
||||
Inbound::AuthChallengeRequest {
|
||||
pubkey,
|
||||
bootstrap_token: None,
|
||||
} => AuthEvents::AuthRequest(ChallengeRequest { pubkey }),
|
||||
Inbound::AuthChallengeRequest {
|
||||
pubkey,
|
||||
bootstrap_token: Some(token),
|
||||
} => AuthEvents::BootstrapAuthRequest(BootstrapAuthRequest { pubkey, token }),
|
||||
Inbound::AuthChallengeSolution { signature } => {
|
||||
AuthEvents::ReceivedSolution(ChallengeSolution {
|
||||
solution: signature,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn authenticate<T>(
|
||||
props: &mut UserAgentConnection,
|
||||
transport: T,
|
||||
) -> Result<AuthPublicKey, Error>
|
||||
where
|
||||
T: Bi<Inbound, Result<Outbound, Error>> + Send,
|
||||
{
|
||||
let mut state = AuthStateMachine::new(AuthContext::new(props, transport));
|
||||
|
||||
loop {
|
||||
// This is needed because `state` now holds mutable reference to `ConnectionProps`, so we can't directly access `props` here
|
||||
let transport = state.context_mut().conn.transport.as_mut();
|
||||
let Some(UserAgentRequest {
|
||||
payload: Some(payload),
|
||||
}) = transport.recv().await
|
||||
else {
|
||||
// `state` holds a mutable reference to `props` so we can't access it directly here
|
||||
let Some(payload) = state.context_mut().transport.recv().await else {
|
||||
return Err(Error::Transport);
|
||||
};
|
||||
|
||||
let event = parse_auth_event(payload)?;
|
||||
|
||||
match state.process_event(event).await {
|
||||
match state.process_event(parse_auth_event(payload)).await {
|
||||
Ok(AuthStates::AuthOk(key)) => return Ok(key.clone()),
|
||||
Err(AuthError::ActionFailed(err)) => {
|
||||
error!(?err, "State machine action failed");
|
||||
@@ -109,10 +115,3 @@ pub async fn authenticate(props: &mut UserAgentConnection) -> Result<VerifyingKe
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
pub async fn authenticate_and_create(mut props: UserAgentConnection) -> Result<UserAgentSession, Error> {
|
||||
let key = authenticate(&mut props).await?;
|
||||
let session = UserAgentSession::new(props, key.clone());
|
||||
Ok(session)
|
||||
}
|
||||
|
||||
@@ -1,30 +1,32 @@
|
||||
use arbiter_proto::proto::user_agent::{
|
||||
AuthChallenge, UserAgentResponse,
|
||||
user_agent_response::Payload as UserAgentResponsePayload,
|
||||
};
|
||||
use arbiter_proto::transport::Bi;
|
||||
use diesel::{ExpressionMethods as _, OptionalExtension as _, QueryDsl, update};
|
||||
use diesel_async::RunQueryDsl;
|
||||
use ed25519_dalek::VerifyingKey;
|
||||
use diesel_async::{AsyncConnection, RunQueryDsl};
|
||||
use kameo::actor::ActorRef;
|
||||
use tracing::error;
|
||||
|
||||
use super::Error;
|
||||
use crate::{
|
||||
actors::{bootstrap::ConsumeToken, user_agent::UserAgentConnection},
|
||||
db::schema,
|
||||
actors::{
|
||||
bootstrap::ConsumeToken,
|
||||
keyholder::KeyHolder,
|
||||
user_agent::{AuthPublicKey, UserAgentConnection, UserAgentCredentials, auth::Outbound},
|
||||
},
|
||||
crypto::integrity,
|
||||
db::{DatabasePool, schema::useragent_client},
|
||||
};
|
||||
|
||||
pub struct ChallengeRequest {
|
||||
pub pubkey: VerifyingKey,
|
||||
pub pubkey: AuthPublicKey,
|
||||
}
|
||||
|
||||
pub struct BootstrapAuthRequest {
|
||||
pub pubkey: VerifyingKey,
|
||||
pub pubkey: AuthPublicKey,
|
||||
pub token: String,
|
||||
}
|
||||
|
||||
pub struct ChallengeContext {
|
||||
pub challenge: AuthChallenge,
|
||||
pub key: VerifyingKey,
|
||||
pub challenge_nonce: i32,
|
||||
pub key: AuthPublicKey,
|
||||
}
|
||||
|
||||
pub struct ChallengeSolution {
|
||||
@@ -36,114 +38,202 @@ smlang::statemachine!(
|
||||
custom_error: true,
|
||||
transitions: {
|
||||
*Init + AuthRequest(ChallengeRequest) / async prepare_challenge = SentChallenge(ChallengeContext),
|
||||
Init + BootstrapAuthRequest(BootstrapAuthRequest) [async verify_bootstrap_token] / provide_key_bootstrap = AuthOk(VerifyingKey),
|
||||
SentChallenge(ChallengeContext) + ReceivedSolution(ChallengeSolution) [async verify_solution] / provide_key = AuthOk(VerifyingKey),
|
||||
Init + BootstrapAuthRequest(BootstrapAuthRequest) / async verify_bootstrap_token = AuthOk(AuthPublicKey),
|
||||
SentChallenge(ChallengeContext) + ReceivedSolution(ChallengeSolution) / async verify_solution = AuthOk(AuthPublicKey),
|
||||
}
|
||||
);
|
||||
|
||||
async fn create_nonce(db: &crate::db::DatabasePool, pubkey_bytes: &[u8]) -> Result<i32, Error> {
|
||||
let mut db_conn = db.get().await.map_err(|e| {
|
||||
error!(error = ?e, "Database pool error");
|
||||
Error::DatabasePoolUnavailable
|
||||
})?;
|
||||
/// Returns the current nonce, ready to use for the challenge nonce.
|
||||
async fn get_current_nonce_and_id(
|
||||
db: &DatabasePool,
|
||||
key: &AuthPublicKey,
|
||||
) -> Result<(i32, i32), Error> {
|
||||
let mut db_conn = db
|
||||
.get()
|
||||
.await
|
||||
.map_err(|e| Error::internal("Database unavailable", &e))?;
|
||||
db_conn
|
||||
.exclusive_transaction(|conn| {
|
||||
Box::pin(async move {
|
||||
let current_nonce = schema::useragent_client::table
|
||||
.filter(schema::useragent_client::public_key.eq(pubkey_bytes.to_vec()))
|
||||
.select(schema::useragent_client::nonce)
|
||||
.first::<i32>(conn)
|
||||
.await?;
|
||||
|
||||
update(schema::useragent_client::table)
|
||||
.filter(schema::useragent_client::public_key.eq(pubkey_bytes.to_vec()))
|
||||
.set(schema::useragent_client::nonce.eq(current_nonce + 1))
|
||||
.execute(conn)
|
||||
.await?;
|
||||
|
||||
Result::<_, diesel::result::Error>::Ok(current_nonce)
|
||||
useragent_client::table
|
||||
.filter(useragent_client::public_key.eq(key.to_stored_bytes()))
|
||||
.filter(useragent_client::key_type.eq(key.key_type()))
|
||||
.select((useragent_client::id, useragent_client::nonce))
|
||||
.first::<(i32, i32)>(conn)
|
||||
.await
|
||||
})
|
||||
})
|
||||
.await
|
||||
.optional()
|
||||
.map_err(|e| {
|
||||
error!(error = ?e, "Database error");
|
||||
Error::DatabaseOperationFailed
|
||||
})?
|
||||
.map_err(|e| Error::internal("Database operation failed", &e))?
|
||||
.ok_or_else(|| {
|
||||
error!(?pubkey_bytes, "Public key not found in database");
|
||||
Error::PublicKeyNotRegistered
|
||||
error!(?key, "Public key not found in database");
|
||||
Error::UnregisteredPublicKey
|
||||
})
|
||||
}
|
||||
|
||||
async fn register_key(db: &crate::db::DatabasePool, pubkey_bytes: &[u8]) -> Result<(), Error> {
|
||||
let mut conn = db.get().await.map_err(|e| {
|
||||
error!(error = ?e, "Database pool error");
|
||||
Error::DatabasePoolUnavailable
|
||||
})?;
|
||||
|
||||
diesel::insert_into(schema::useragent_client::table)
|
||||
.values((
|
||||
schema::useragent_client::public_key.eq(pubkey_bytes.to_vec()),
|
||||
schema::useragent_client::nonce.eq(1),
|
||||
))
|
||||
.execute(&mut conn)
|
||||
async fn verify_integrity(
|
||||
db: &DatabasePool,
|
||||
keyholder: &ActorRef<KeyHolder>,
|
||||
pubkey: &AuthPublicKey,
|
||||
) -> Result<(), Error> {
|
||||
let mut db_conn = db
|
||||
.get()
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(error = ?e, "Database error");
|
||||
Error::DatabaseOperationFailed
|
||||
})?;
|
||||
.map_err(|e| Error::internal("Database unavailable", &e))?;
|
||||
|
||||
let (id, nonce) = get_current_nonce_and_id(db, pubkey).await?;
|
||||
|
||||
let attestation_status = integrity::check_entity_attestation(
|
||||
&mut db_conn,
|
||||
keyholder,
|
||||
&UserAgentCredentials {
|
||||
pubkey: pubkey.clone(),
|
||||
nonce,
|
||||
},
|
||||
id,
|
||||
)
|
||||
.await
|
||||
.map_err(|e| Error::internal("Integrity verification failed", &e))?;
|
||||
|
||||
use integrity::AttestationStatus as AS;
|
||||
// SAFETY (policy): challenge auth must work in both vault states.
|
||||
// While sealed, integrity checks can only report `Unavailable` because key material is not
|
||||
// accessible. While unsealed, the same check can report `Attested`.
|
||||
// This path intentionally accepts both outcomes to keep challenge auth available across state
|
||||
// transitions; stricter verification is enforced in sensitive post-auth flows.
|
||||
match attestation_status {
|
||||
AS::Attested | AS::Unavailable => Ok(()),
|
||||
}
|
||||
}
|
||||
|
||||
async fn create_nonce(
|
||||
db: &DatabasePool,
|
||||
keyholder: &ActorRef<KeyHolder>,
|
||||
pubkey: &AuthPublicKey,
|
||||
) -> Result<i32, Error> {
|
||||
let mut db_conn = db
|
||||
.get()
|
||||
.await
|
||||
.map_err(|e| Error::internal("Database unavailable", &e))?;
|
||||
let new_nonce = db_conn
|
||||
.exclusive_transaction(|conn| {
|
||||
Box::pin(async move {
|
||||
let (id, new_nonce): (i32, i32) = update(useragent_client::table)
|
||||
.filter(useragent_client::public_key.eq(pubkey.to_stored_bytes()))
|
||||
.filter(useragent_client::key_type.eq(pubkey.key_type()))
|
||||
.set(useragent_client::nonce.eq(useragent_client::nonce + 1))
|
||||
.returning((useragent_client::id, useragent_client::nonce))
|
||||
.get_result(conn)
|
||||
.await
|
||||
.map_err(|e| Error::internal("Database operation failed", &e))?;
|
||||
|
||||
integrity::sign_entity(
|
||||
conn,
|
||||
keyholder,
|
||||
&UserAgentCredentials {
|
||||
pubkey: pubkey.clone(),
|
||||
nonce: new_nonce,
|
||||
},
|
||||
id,
|
||||
)
|
||||
.await
|
||||
.map_err(|e| Error::internal("Database error", &e))?;
|
||||
|
||||
Result::<_, Error>::Ok(new_nonce)
|
||||
})
|
||||
})
|
||||
.await?;
|
||||
Ok(new_nonce)
|
||||
}
|
||||
|
||||
async fn register_key(
|
||||
db: &DatabasePool,
|
||||
keyholder: &ActorRef<KeyHolder>,
|
||||
pubkey: &AuthPublicKey,
|
||||
) -> Result<(), Error> {
|
||||
let pubkey_bytes = pubkey.to_stored_bytes();
|
||||
let key_type = pubkey.key_type();
|
||||
let mut conn = db
|
||||
.get()
|
||||
.await
|
||||
.map_err(|e| Error::internal("Database unavailable", &e))?;
|
||||
|
||||
conn.transaction(|conn| {
|
||||
Box::pin(async move {
|
||||
const NONCE_START: i32 = 1;
|
||||
|
||||
let id: i32 = diesel::insert_into(useragent_client::table)
|
||||
.values((
|
||||
useragent_client::public_key.eq(pubkey_bytes),
|
||||
useragent_client::nonce.eq(NONCE_START),
|
||||
useragent_client::key_type.eq(key_type),
|
||||
))
|
||||
.returning(useragent_client::id)
|
||||
.get_result(conn)
|
||||
.await
|
||||
.map_err(|e| Error::internal("Database operation failed", &e))?;
|
||||
|
||||
if let Err(e) = integrity::sign_entity(
|
||||
conn,
|
||||
keyholder,
|
||||
&UserAgentCredentials {
|
||||
pubkey: pubkey.clone(),
|
||||
nonce: NONCE_START,
|
||||
},
|
||||
id,
|
||||
)
|
||||
.await
|
||||
{
|
||||
match e {
|
||||
integrity::Error::Keyholder(
|
||||
crate::actors::keyholder::Error::NotBootstrapped,
|
||||
) => {
|
||||
// IMPORTANT: bootstrap-token auth must work before the vault has a root key.
|
||||
// We intentionally allow creating the DB row first and backfill envelopes
|
||||
// after bootstrap/unseal to keep the bootstrap flow possible.
|
||||
}
|
||||
other => {
|
||||
return Err(Error::internal("Failed to register public key", &other));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Result::<_, Error>::Ok(())
|
||||
})
|
||||
})
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub struct AuthContext<'a> {
|
||||
pub struct AuthContext<'a, T> {
|
||||
pub(super) conn: &'a mut UserAgentConnection,
|
||||
pub(super) transport: T,
|
||||
}
|
||||
|
||||
impl<'a> AuthContext<'a> {
|
||||
pub fn new(conn: &'a mut UserAgentConnection) -> Self {
|
||||
Self { conn }
|
||||
impl<'a, T> AuthContext<'a, T> {
|
||||
pub fn new(conn: &'a mut UserAgentConnection, transport: T) -> Self {
|
||||
Self { conn, transport }
|
||||
}
|
||||
}
|
||||
|
||||
impl AuthStateMachineContext for AuthContext<'_> {
|
||||
impl<T> AuthStateMachineContext for AuthContext<'_, T>
|
||||
where
|
||||
T: Bi<super::Inbound, Result<super::Outbound, Error>> + Send,
|
||||
{
|
||||
type Error = Error;
|
||||
|
||||
async fn verify_solution(
|
||||
&self,
|
||||
ChallengeContext { challenge, key }: &ChallengeContext,
|
||||
ChallengeSolution { solution }: &ChallengeSolution,
|
||||
) -> Result<bool, Self::Error> {
|
||||
let formatted_challenge =
|
||||
arbiter_proto::format_challenge(challenge.nonce, &challenge.pubkey);
|
||||
|
||||
let signature = solution.as_slice().try_into().map_err(|_| {
|
||||
error!(?solution, "Invalid signature length");
|
||||
Error::InvalidChallengeSolution
|
||||
})?;
|
||||
|
||||
let valid = key.verify_strict(&formatted_challenge, &signature).is_ok();
|
||||
|
||||
Ok(valid)
|
||||
}
|
||||
|
||||
async fn prepare_challenge(
|
||||
&mut self,
|
||||
ChallengeRequest { pubkey }: ChallengeRequest,
|
||||
) -> Result<ChallengeContext, Self::Error> {
|
||||
let nonce = create_nonce(&self.conn.db, pubkey.as_bytes()).await?;
|
||||
verify_integrity(&self.conn.db, &self.conn.actors.key_holder, &pubkey).await?;
|
||||
|
||||
let challenge = AuthChallenge {
|
||||
pubkey: pubkey.as_bytes().to_vec(),
|
||||
nonce,
|
||||
};
|
||||
let nonce = create_nonce(&self.conn.db, &self.conn.actors.key_holder, &pubkey).await?;
|
||||
|
||||
self.conn
|
||||
.transport
|
||||
.send(Ok(UserAgentResponse {
|
||||
payload: Some(UserAgentResponsePayload::AuthChallenge(challenge.clone())),
|
||||
}))
|
||||
self.transport
|
||||
.send(Ok(Outbound::AuthChallenge { nonce }))
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(?e, "Failed to send auth challenge");
|
||||
@@ -151,7 +241,7 @@ impl AuthStateMachineContext for AuthContext<'_> {
|
||||
})?;
|
||||
|
||||
Ok(ChallengeContext {
|
||||
challenge,
|
||||
challenge_nonce: nonce,
|
||||
key: pubkey,
|
||||
})
|
||||
}
|
||||
@@ -159,9 +249,9 @@ impl AuthStateMachineContext for AuthContext<'_> {
|
||||
#[allow(missing_docs)]
|
||||
#[allow(clippy::result_unit_err)]
|
||||
async fn verify_bootstrap_token(
|
||||
&self,
|
||||
BootstrapAuthRequest { pubkey, token }: &BootstrapAuthRequest,
|
||||
) -> Result<bool, Self::Error> {
|
||||
&mut self,
|
||||
BootstrapAuthRequest { pubkey, token }: BootstrapAuthRequest,
|
||||
) -> Result<AuthPublicKey, Self::Error> {
|
||||
let token_ok: bool = self
|
||||
.conn
|
||||
.actors
|
||||
@@ -170,33 +260,87 @@ impl AuthStateMachineContext for AuthContext<'_> {
|
||||
token: token.clone(),
|
||||
})
|
||||
.await
|
||||
.map_err(|e| {
|
||||
error!(?pubkey, "Failed to consume bootstrap token: {e}");
|
||||
Error::BootstrapperActorUnreachable
|
||||
})?;
|
||||
.map_err(|e| Error::internal("Failed to consume bootstrap token", &e))?;
|
||||
|
||||
if !token_ok {
|
||||
error!(?pubkey, "Invalid bootstrap token provided");
|
||||
error!("Invalid bootstrap token provided");
|
||||
return Err(Error::InvalidBootstrapToken);
|
||||
}
|
||||
|
||||
register_key(&self.conn.db, pubkey.as_bytes()).await?;
|
||||
|
||||
Ok(true)
|
||||
match token_ok {
|
||||
true => {
|
||||
register_key(&self.conn.db, &self.conn.actors.key_holder, &pubkey).await?;
|
||||
self.transport
|
||||
.send(Ok(Outbound::AuthSuccess))
|
||||
.await
|
||||
.map_err(|_| Error::Transport)?;
|
||||
Ok(pubkey)
|
||||
}
|
||||
false => {
|
||||
error!("Invalid bootstrap token provided");
|
||||
self.transport
|
||||
.send(Err(Error::InvalidBootstrapToken))
|
||||
.await
|
||||
.map_err(|_| Error::Transport)?;
|
||||
Err(Error::InvalidBootstrapToken)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn provide_key_bootstrap(
|
||||
#[allow(missing_docs)]
|
||||
#[allow(clippy::unused_unit)]
|
||||
async fn verify_solution(
|
||||
&mut self,
|
||||
event_data: BootstrapAuthRequest,
|
||||
) -> Result<VerifyingKey, Self::Error> {
|
||||
Ok(event_data.pubkey)
|
||||
}
|
||||
ChallengeContext {
|
||||
challenge_nonce,
|
||||
key,
|
||||
}: &ChallengeContext,
|
||||
ChallengeSolution { solution }: ChallengeSolution,
|
||||
) -> Result<AuthPublicKey, Self::Error> {
|
||||
let formatted = arbiter_proto::format_challenge(*challenge_nonce, &key.to_stored_bytes());
|
||||
|
||||
fn provide_key(
|
||||
&mut self,
|
||||
state_data: &ChallengeContext,
|
||||
_: ChallengeSolution,
|
||||
) -> Result<VerifyingKey, Self::Error> {
|
||||
Ok(state_data.key)
|
||||
let valid = match key {
|
||||
AuthPublicKey::Ed25519(vk) => {
|
||||
let sig = solution.as_slice().try_into().map_err(|_| {
|
||||
error!(?solution, "Invalid Ed25519 signature length");
|
||||
Error::InvalidChallengeSolution
|
||||
})?;
|
||||
vk.verify_strict(&formatted, &sig).is_ok()
|
||||
}
|
||||
AuthPublicKey::EcdsaSecp256k1(vk) => {
|
||||
use k256::ecdsa::signature::Verifier as _;
|
||||
let sig = k256::ecdsa::Signature::try_from(solution.as_slice()).map_err(|_| {
|
||||
error!(?solution, "Invalid ECDSA signature bytes");
|
||||
Error::InvalidChallengeSolution
|
||||
})?;
|
||||
vk.verify(&formatted, &sig).is_ok()
|
||||
}
|
||||
AuthPublicKey::Rsa(pk) => {
|
||||
use rsa::signature::Verifier as _;
|
||||
let verifying_key = rsa::pss::VerifyingKey::<sha2::Sha256>::new(pk.clone());
|
||||
let sig = rsa::pss::Signature::try_from(solution.as_slice()).map_err(|_| {
|
||||
error!(?solution, "Invalid RSA signature bytes");
|
||||
Error::InvalidChallengeSolution
|
||||
})?;
|
||||
verifying_key.verify(&formatted, &sig).is_ok()
|
||||
}
|
||||
};
|
||||
|
||||
match valid {
|
||||
true => {
|
||||
self.transport
|
||||
.send(Ok(Outbound::AuthSuccess))
|
||||
.await
|
||||
.map_err(|_| Error::Transport)?;
|
||||
Ok(key.clone())
|
||||
}
|
||||
false => {
|
||||
self.transport
|
||||
.send(Err(Error::InvalidChallengeSolution))
|
||||
.await
|
||||
.map_err(|_| Error::Transport)?;
|
||||
Err(Error::InvalidChallengeSolution)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,65 +1,120 @@
|
||||
use arbiter_proto::{
|
||||
proto::user_agent::{UserAgentRequest, UserAgentResponse},
|
||||
transport::Bi,
|
||||
};
|
||||
use kameo::actor::Spawn as _;
|
||||
use tracing::{error, info};
|
||||
|
||||
use crate::{
|
||||
actors::{GlobalActors, user_agent::session::UserAgentSession},
|
||||
db::{self},
|
||||
actors::{GlobalActors, client::ClientProfile},
|
||||
crypto::integrity::Integrable,
|
||||
db::{self, models::KeyType},
|
||||
};
|
||||
|
||||
#[derive(Debug, thiserror::Error, PartialEq)]
|
||||
pub enum UserAgentError {
|
||||
#[error("Expected message with payload")]
|
||||
MissingRequestPayload,
|
||||
#[error("Unexpected request payload")]
|
||||
UnexpectedRequestPayload,
|
||||
#[error("Invalid state for unseal encrypted key")]
|
||||
InvalidStateForUnsealEncryptedKey,
|
||||
#[error("client_pubkey must be 32 bytes")]
|
||||
InvalidClientPubkeyLength,
|
||||
#[error("State machine error")]
|
||||
StateTransitionFailed,
|
||||
#[error("Vault is not available")]
|
||||
KeyHolderActorUnreachable,
|
||||
#[error(transparent)]
|
||||
Auth(#[from] auth::Error),
|
||||
#[error("Failed registering connection")]
|
||||
ConnectionRegistrationFailed,
|
||||
/// Abstraction over Ed25519 / ECDSA-secp256k1 / RSA public keys used during the auth handshake.
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum AuthPublicKey {
|
||||
Ed25519(ed25519_dalek::VerifyingKey),
|
||||
/// Compressed SEC1 public key; signature bytes are raw 64-byte (r||s).
|
||||
EcdsaSecp256k1(k256::ecdsa::VerifyingKey),
|
||||
/// RSA-2048+ public key (Windows Hello / KeyCredentialManager); signature bytes are PSS+SHA-256.
|
||||
Rsa(rsa::RsaPublicKey),
|
||||
}
|
||||
|
||||
pub type Transport =
|
||||
Box<dyn Bi<UserAgentRequest, Result<UserAgentResponse, UserAgentError>> + Send>;
|
||||
#[derive(Debug)]
|
||||
pub struct UserAgentCredentials {
|
||||
pub pubkey: AuthPublicKey,
|
||||
pub nonce: i32,
|
||||
}
|
||||
|
||||
impl Integrable for UserAgentCredentials {
|
||||
const KIND: &'static str = "useragent_credentials";
|
||||
}
|
||||
|
||||
impl AuthPublicKey {
|
||||
/// Canonical bytes stored in DB and echoed back in the challenge.
|
||||
/// Ed25519: raw 32 bytes. ECDSA: SEC1 compressed 33 bytes. RSA: DER-encoded SPKI.
|
||||
pub fn to_stored_bytes(&self) -> Vec<u8> {
|
||||
match self {
|
||||
AuthPublicKey::Ed25519(k) => k.to_bytes().to_vec(),
|
||||
// SEC1 compressed (33 bytes) is the natural compact format for secp256k1
|
||||
AuthPublicKey::EcdsaSecp256k1(k) => k.to_encoded_point(true).as_bytes().to_vec(),
|
||||
AuthPublicKey::Rsa(k) => {
|
||||
use rsa::pkcs8::EncodePublicKey as _;
|
||||
#[allow(clippy::expect_used)]
|
||||
k.to_public_key_der()
|
||||
.expect("rsa SPKI encoding is infallible")
|
||||
.to_vec()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn key_type(&self) -> KeyType {
|
||||
match self {
|
||||
AuthPublicKey::Ed25519(_) => KeyType::Ed25519,
|
||||
AuthPublicKey::EcdsaSecp256k1(_) => KeyType::EcdsaSecp256k1,
|
||||
AuthPublicKey::Rsa(_) => KeyType::Rsa,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl TryFrom<(KeyType, Vec<u8>)> for AuthPublicKey {
|
||||
type Error = &'static str;
|
||||
|
||||
fn try_from(value: (KeyType, Vec<u8>)) -> Result<Self, Self::Error> {
|
||||
let (key_type, bytes) = value;
|
||||
match key_type {
|
||||
KeyType::Ed25519 => {
|
||||
let bytes: [u8; 32] = bytes.try_into().map_err(|_| "invalid Ed25519 key length")?;
|
||||
let key = ed25519_dalek::VerifyingKey::from_bytes(&bytes)
|
||||
.map_err(|_e| "invalid Ed25519 key")?;
|
||||
Ok(AuthPublicKey::Ed25519(key))
|
||||
}
|
||||
KeyType::EcdsaSecp256k1 => {
|
||||
let point =
|
||||
k256::EncodedPoint::from_bytes(&bytes).map_err(|_e| "invalid ECDSA key")?;
|
||||
let key = k256::ecdsa::VerifyingKey::from_encoded_point(&point)
|
||||
.map_err(|_e| "invalid ECDSA key")?;
|
||||
Ok(AuthPublicKey::EcdsaSecp256k1(key))
|
||||
}
|
||||
KeyType::Rsa => {
|
||||
use rsa::pkcs8::DecodePublicKey as _;
|
||||
let key = rsa::RsaPublicKey::from_public_key_der(&bytes)
|
||||
.map_err(|_e| "invalid RSA key")?;
|
||||
Ok(AuthPublicKey::Rsa(key))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Messages, sent by user agent to connection client without having a request
|
||||
#[derive(Debug)]
|
||||
pub enum OutOfBand {
|
||||
ClientConnectionRequest { profile: ClientProfile },
|
||||
ClientConnectionCancel { pubkey: ed25519_dalek::VerifyingKey },
|
||||
}
|
||||
|
||||
pub struct UserAgentConnection {
|
||||
db: db::DatabasePool,
|
||||
actors: GlobalActors,
|
||||
transport: Transport,
|
||||
pub(crate) db: db::DatabasePool,
|
||||
pub(crate) actors: GlobalActors,
|
||||
}
|
||||
|
||||
impl UserAgentConnection {
|
||||
pub fn new(db: db::DatabasePool, actors: GlobalActors, transport: Transport) -> Self {
|
||||
Self {
|
||||
db,
|
||||
actors,
|
||||
transport,
|
||||
}
|
||||
pub fn new(db: db::DatabasePool, actors: GlobalActors) -> Self {
|
||||
Self { db, actors }
|
||||
}
|
||||
}
|
||||
|
||||
pub mod auth;
|
||||
pub mod session;
|
||||
|
||||
pub async fn connect_user_agent(props: UserAgentConnection) {
|
||||
match auth::authenticate_and_create(props).await {
|
||||
Ok(session) => {
|
||||
UserAgentSession::spawn(session);
|
||||
info!("User authenticated, session started");
|
||||
}
|
||||
Err(err) => {
|
||||
error!(?err, "Authentication failed, closing connection");
|
||||
}
|
||||
pub use auth::authenticate;
|
||||
pub use session::UserAgentSession;
|
||||
|
||||
use crate::crypto::integrity::hashing::Hashable;
|
||||
|
||||
impl Hashable for AuthPublicKey {
|
||||
fn hash<H: sha2::Digest>(&self, hasher: &mut H) {
|
||||
hasher.update(self.to_stored_bytes());
|
||||
}
|
||||
}
|
||||
|
||||
impl Hashable for UserAgentCredentials {
|
||||
fn hash<H: sha2::Digest>(&self, hasher: &mut H) {
|
||||
self.pubkey.hash(hasher);
|
||||
self.nonce.hash(hasher);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,254 +1,132 @@
|
||||
use std::{ops::DerefMut, sync::Mutex};
|
||||
use std::{borrow::Cow, collections::HashMap};
|
||||
|
||||
use arbiter_proto::proto::{
|
||||
evm as evm_proto,
|
||||
user_agent::{
|
||||
UnsealEncryptedKey, UnsealResult, UnsealStart, UnsealStartResponse, UserAgentRequest,
|
||||
UserAgentResponse, user_agent_request::Payload as UserAgentRequestPayload,
|
||||
user_agent_response::Payload as UserAgentResponsePayload,
|
||||
},
|
||||
};
|
||||
use chacha20poly1305::{AeadInPlace, XChaCha20Poly1305, XNonce, aead::KeyInit};
|
||||
use arbiter_proto::transport::Sender;
|
||||
use async_trait::async_trait;
|
||||
use ed25519_dalek::VerifyingKey;
|
||||
use kameo::{
|
||||
Actor,
|
||||
error::SendError,
|
||||
};
|
||||
use memsafe::MemSafe;
|
||||
use tokio::select;
|
||||
use tracing::{error, info};
|
||||
use x25519_dalek::{EphemeralSecret, PublicKey};
|
||||
use kameo::{Actor, actor::ActorRef, messages};
|
||||
use thiserror::Error;
|
||||
use tracing::error;
|
||||
|
||||
use crate::actors::{
|
||||
evm::{Generate, ListWallets},
|
||||
keyholder::{self, TryUnseal},
|
||||
router::RegisterUserAgent,
|
||||
user_agent::{UserAgentConnection, UserAgentError},
|
||||
client::ClientProfile,
|
||||
flow_coordinator::{RegisterUserAgent, client_connect_approval::ClientApprovalController},
|
||||
user_agent::{OutOfBand, UserAgentConnection},
|
||||
};
|
||||
|
||||
mod state;
|
||||
use state::{DummyContext, UnsealContext, UserAgentEvents, UserAgentStateMachine, UserAgentStates};
|
||||
use state::{DummyContext, UserAgentEvents, UserAgentStateMachine};
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
pub enum Error {
|
||||
#[error("State transition failed")]
|
||||
State,
|
||||
|
||||
#[error("Internal error: {message}")]
|
||||
Internal { message: Cow<'static, str> },
|
||||
}
|
||||
|
||||
impl From<crate::db::PoolError> for Error {
|
||||
fn from(err: crate::db::PoolError) -> Self {
|
||||
error!(?err, "Database pool error");
|
||||
Self::internal("Database pool error")
|
||||
}
|
||||
}
|
||||
impl From<diesel::result::Error> for Error {
|
||||
fn from(err: diesel::result::Error) -> Self {
|
||||
error!(?err, "Database error");
|
||||
Self::internal("Database error")
|
||||
}
|
||||
}
|
||||
|
||||
impl Error {
|
||||
pub fn internal(message: impl Into<Cow<'static, str>>) -> Self {
|
||||
Self::Internal {
|
||||
message: message.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct PendingClientApproval {
|
||||
controller: ActorRef<ClientApprovalController>,
|
||||
}
|
||||
|
||||
pub struct UserAgentSession {
|
||||
props: UserAgentConnection,
|
||||
key: VerifyingKey,
|
||||
state: UserAgentStateMachine<DummyContext>,
|
||||
sender: Box<dyn Sender<OutOfBand>>,
|
||||
|
||||
pending_client_approvals: HashMap<VerifyingKey, PendingClientApproval>,
|
||||
}
|
||||
|
||||
pub mod connection;
|
||||
|
||||
impl UserAgentSession {
|
||||
pub(crate) fn new(props: UserAgentConnection, key: VerifyingKey) -> Self {
|
||||
pub(crate) fn new(props: UserAgentConnection, sender: Box<dyn Sender<OutOfBand>>) -> Self {
|
||||
Self {
|
||||
props,
|
||||
key,
|
||||
state: UserAgentStateMachine::new(DummyContext),
|
||||
sender,
|
||||
pending_client_approvals: Default::default(),
|
||||
}
|
||||
}
|
||||
|
||||
fn transition(&mut self, event: UserAgentEvents) -> Result<(), UserAgentError> {
|
||||
pub fn new_test(db: crate::db::DatabasePool, actors: crate::actors::GlobalActors) -> Self {
|
||||
struct DummySender;
|
||||
|
||||
#[async_trait]
|
||||
impl Sender<OutOfBand> for DummySender {
|
||||
async fn send(
|
||||
&mut self,
|
||||
_item: OutOfBand,
|
||||
) -> Result<(), arbiter_proto::transport::Error> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
Self::new(UserAgentConnection::new(db, actors), Box::new(DummySender))
|
||||
}
|
||||
|
||||
fn transition(&mut self, event: UserAgentEvents) -> Result<(), Error> {
|
||||
self.state.process_event(event).map_err(|e| {
|
||||
error!(?e, "State transition failed");
|
||||
UserAgentError::StateTransitionFailed
|
||||
Error::State
|
||||
})?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn process_transport_inbound(&mut self, req: UserAgentRequest) -> Output {
|
||||
let msg = req.payload.ok_or_else(|| {
|
||||
error!(actor = "useragent", "Received message with no payload");
|
||||
UserAgentError::MissingRequestPayload
|
||||
})?;
|
||||
|
||||
match msg {
|
||||
UserAgentRequestPayload::UnsealStart(unseal_start) => {
|
||||
self.handle_unseal_request(unseal_start).await
|
||||
}
|
||||
UserAgentRequestPayload::UnsealEncryptedKey(unseal_encrypted_key) => {
|
||||
self.handle_unseal_encrypted_key(unseal_encrypted_key).await
|
||||
}
|
||||
UserAgentRequestPayload::EvmWalletCreate(_) => self.handle_evm_wallet_create().await,
|
||||
UserAgentRequestPayload::EvmWalletList(_) => self.handle_evm_wallet_list().await,
|
||||
_ => Err(UserAgentError::UnexpectedRequestPayload),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type Output = Result<UserAgentResponse, UserAgentError>;
|
||||
|
||||
fn response(payload: UserAgentResponsePayload) -> UserAgentResponse {
|
||||
UserAgentResponse {
|
||||
payload: Some(payload),
|
||||
}
|
||||
}
|
||||
|
||||
#[messages]
|
||||
impl UserAgentSession {
|
||||
async fn handle_unseal_request(&mut self, req: UnsealStart) -> Output {
|
||||
let secret = EphemeralSecret::random();
|
||||
let public_key = PublicKey::from(&secret);
|
||||
|
||||
let client_pubkey_bytes: [u8; 32] = req
|
||||
.client_pubkey
|
||||
.try_into()
|
||||
.map_err(|_| UserAgentError::InvalidClientPubkeyLength)?;
|
||||
|
||||
let client_public_key = PublicKey::from(client_pubkey_bytes);
|
||||
|
||||
self.transition(UserAgentEvents::UnsealRequest(UnsealContext {
|
||||
secret: Mutex::new(Some(secret)),
|
||||
client_public_key,
|
||||
}))?;
|
||||
|
||||
Ok(response(UserAgentResponsePayload::UnsealStartResponse(
|
||||
UnsealStartResponse {
|
||||
server_pubkey: public_key.as_bytes().to_vec(),
|
||||
},
|
||||
)))
|
||||
}
|
||||
|
||||
async fn handle_unseal_encrypted_key(&mut self, req: UnsealEncryptedKey) -> Output {
|
||||
let UserAgentStates::WaitingForUnsealKey(unseal_context) = self.state.state() else {
|
||||
error!("Received unseal encrypted key in invalid state");
|
||||
return Err(UserAgentError::InvalidStateForUnsealEncryptedKey);
|
||||
};
|
||||
let ephemeral_secret = {
|
||||
let mut secret_lock = unseal_context.secret.lock().unwrap();
|
||||
let secret = secret_lock.take();
|
||||
match secret {
|
||||
Some(secret) => secret,
|
||||
None => {
|
||||
drop(secret_lock);
|
||||
error!("Ephemeral secret already taken");
|
||||
self.transition(UserAgentEvents::ReceivedInvalidKey)?;
|
||||
return Ok(response(UserAgentResponsePayload::UnsealResult(
|
||||
UnsealResult::InvalidKey.into(),
|
||||
)));
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let nonce = XNonce::from_slice(&req.nonce);
|
||||
|
||||
let shared_secret = ephemeral_secret.diffie_hellman(&unseal_context.client_public_key);
|
||||
let cipher = XChaCha20Poly1305::new(shared_secret.as_bytes().into());
|
||||
|
||||
let mut seal_key_buffer = MemSafe::new(req.ciphertext.clone()).unwrap();
|
||||
|
||||
let decryption_result = {
|
||||
let mut write_handle = seal_key_buffer.write().unwrap();
|
||||
let write_handle = write_handle.deref_mut();
|
||||
cipher.decrypt_in_place(nonce, &req.associated_data, write_handle)
|
||||
};
|
||||
|
||||
match decryption_result {
|
||||
Ok(_) => {
|
||||
match self
|
||||
.props
|
||||
.actors
|
||||
.key_holder
|
||||
.ask(TryUnseal {
|
||||
seal_key_raw: seal_key_buffer,
|
||||
#[message]
|
||||
pub async fn begin_new_client_approval(
|
||||
&mut self,
|
||||
client: ClientProfile,
|
||||
controller: ActorRef<ClientApprovalController>,
|
||||
) {
|
||||
if let Err(e) = self
|
||||
.sender
|
||||
.send(OutOfBand::ClientConnectionRequest {
|
||||
profile: client.clone(),
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(_) => {
|
||||
info!("Successfully unsealed key with client-provided key");
|
||||
self.transition(UserAgentEvents::ReceivedValidKey)?;
|
||||
Ok(response(UserAgentResponsePayload::UnsealResult(
|
||||
UnsealResult::Success.into(),
|
||||
)))
|
||||
}
|
||||
Err(SendError::HandlerError(keyholder::Error::InvalidKey)) => {
|
||||
self.transition(UserAgentEvents::ReceivedInvalidKey)?;
|
||||
Ok(response(UserAgentResponsePayload::UnsealResult(
|
||||
UnsealResult::InvalidKey.into(),
|
||||
)))
|
||||
}
|
||||
Err(SendError::HandlerError(err)) => {
|
||||
error!(?err, "Keyholder failed to unseal key");
|
||||
self.transition(UserAgentEvents::ReceivedInvalidKey)?;
|
||||
Ok(response(UserAgentResponsePayload::UnsealResult(
|
||||
UnsealResult::InvalidKey.into(),
|
||||
)))
|
||||
}
|
||||
Err(err) => {
|
||||
error!(?err, "Failed to send unseal request to keyholder");
|
||||
self.transition(UserAgentEvents::ReceivedInvalidKey)?;
|
||||
Err(UserAgentError::KeyHolderActorUnreachable)
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
error!(?err, "Failed to decrypt unseal key");
|
||||
self.transition(UserAgentEvents::ReceivedInvalidKey)?;
|
||||
Ok(response(UserAgentResponsePayload::UnsealResult(
|
||||
UnsealResult::InvalidKey.into(),
|
||||
)))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl UserAgentSession {
|
||||
async fn handle_evm_wallet_create(&mut self) -> Output {
|
||||
use evm_proto::wallet_create_response::Result as CreateResult;
|
||||
|
||||
let result = match self.props.actors.evm.ask(Generate {}).await {
|
||||
Ok(address) => CreateResult::Wallet(evm_proto::WalletEntry {
|
||||
address: address.as_slice().to_vec(),
|
||||
}),
|
||||
Err(err) => CreateResult::Error(map_evm_error("wallet create", err).into()),
|
||||
};
|
||||
|
||||
Ok(response(UserAgentResponsePayload::EvmWalletCreate(
|
||||
evm_proto::WalletCreateResponse {
|
||||
result: Some(result),
|
||||
},
|
||||
)))
|
||||
error!(
|
||||
?e,
|
||||
actor = "user_agent",
|
||||
event = "failed to announce new client connection"
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
async fn handle_evm_wallet_list(&mut self) -> Output {
|
||||
use evm_proto::wallet_list_response::Result as ListResult;
|
||||
|
||||
let result = match self.props.actors.evm.ask(ListWallets {}).await {
|
||||
Ok(wallets) => ListResult::Wallets(evm_proto::WalletList {
|
||||
wallets: wallets
|
||||
.into_iter()
|
||||
.map(|addr| evm_proto::WalletEntry {
|
||||
address: addr.as_slice().to_vec(),
|
||||
})
|
||||
.collect(),
|
||||
}),
|
||||
Err(err) => ListResult::Error(map_evm_error("wallet list", err).into()),
|
||||
};
|
||||
|
||||
Ok(response(UserAgentResponsePayload::EvmWalletList(
|
||||
evm_proto::WalletListResponse {
|
||||
result: Some(result),
|
||||
},
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
fn map_evm_error<M>(op: &str, err: SendError<M, crate::actors::evm::Error>) -> evm_proto::EvmError {
|
||||
use crate::actors::{evm::Error as EvmError, keyholder::Error as KhError};
|
||||
match err {
|
||||
SendError::HandlerError(EvmError::Keyholder(KhError::NotBootstrapped)) => {
|
||||
evm_proto::EvmError::VaultSealed
|
||||
}
|
||||
SendError::HandlerError(err) => {
|
||||
error!(?err, "EVM {op} failed");
|
||||
evm_proto::EvmError::Internal
|
||||
}
|
||||
_ => {
|
||||
error!("EVM actor unreachable during {op}");
|
||||
evm_proto::EvmError::Internal
|
||||
}
|
||||
self.pending_client_approvals
|
||||
.insert(client.pubkey, PendingClientApproval { controller });
|
||||
}
|
||||
}
|
||||
|
||||
impl Actor for UserAgentSession {
|
||||
type Args = Self;
|
||||
|
||||
type Error = UserAgentError;
|
||||
type Error = Error;
|
||||
|
||||
async fn on_start(
|
||||
args: Self::Args,
|
||||
@@ -256,65 +134,48 @@ impl Actor for UserAgentSession {
|
||||
) -> Result<Self, Self::Error> {
|
||||
args.props
|
||||
.actors
|
||||
.router
|
||||
.flow_coordinator
|
||||
.ask(RegisterUserAgent {
|
||||
actor: this.clone(),
|
||||
})
|
||||
.await
|
||||
.map_err(|err| {
|
||||
error!(?err, "Failed to register user agent connection with router");
|
||||
UserAgentError::ConnectionRegistrationFailed
|
||||
error!(
|
||||
?err,
|
||||
"Failed to register user agent connection with flow coordinator"
|
||||
);
|
||||
Error::internal("Failed to register user agent connection with flow coordinator")
|
||||
})?;
|
||||
Ok(args)
|
||||
}
|
||||
|
||||
async fn next(
|
||||
async fn on_link_died(
|
||||
&mut self,
|
||||
_actor_ref: kameo::prelude::WeakActorRef<Self>,
|
||||
mailbox_rx: &mut kameo::prelude::MailboxReceiver<Self>,
|
||||
) -> Option<kameo::mailbox::Signal<Self>> {
|
||||
loop {
|
||||
select! {
|
||||
signal = mailbox_rx.recv() => {
|
||||
return signal;
|
||||
}
|
||||
msg = self.props.transport.recv() => {
|
||||
match msg {
|
||||
Some(request) => {
|
||||
match self.process_transport_inbound(request).await {
|
||||
Ok(response) => {
|
||||
if self.props.transport.send(Ok(response)).await.is_err() {
|
||||
error!(actor = "useragent", reason = "channel closed", "send.failed");
|
||||
return Some(kameo::mailbox::Signal::Stop);
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
let _ = self.props.transport.send(Err(err)).await;
|
||||
return Some(kameo::mailbox::Signal::Stop);
|
||||
}
|
||||
}
|
||||
}
|
||||
None => {
|
||||
info!(actor = "useragent", "transport.closed");
|
||||
return Some(kameo::mailbox::Signal::Stop);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
_: kameo::prelude::WeakActorRef<Self>,
|
||||
id: kameo::prelude::ActorId,
|
||||
_: kameo::prelude::ActorStopReason,
|
||||
) -> Result<std::ops::ControlFlow<kameo::prelude::ActorStopReason>, Self::Error> {
|
||||
let cancelled_pubkey = self
|
||||
.pending_client_approvals
|
||||
.iter()
|
||||
.find_map(|(k, v)| (v.controller.id() == id).then_some(*k));
|
||||
|
||||
impl UserAgentSession {
|
||||
pub fn new_test(db: crate::db::DatabasePool, actors: crate::actors::GlobalActors) -> Self {
|
||||
use arbiter_proto::transport::DummyTransport;
|
||||
let transport: super::Transport = Box::new(DummyTransport::new());
|
||||
let props = UserAgentConnection::new(db, actors, transport);
|
||||
let key = VerifyingKey::from_bytes(&[0u8; 32]).unwrap();
|
||||
Self {
|
||||
props,
|
||||
key,
|
||||
state: UserAgentStateMachine::new(DummyContext),
|
||||
if let Some(pubkey) = cancelled_pubkey {
|
||||
self.pending_client_approvals.remove(&pubkey);
|
||||
|
||||
if let Err(e) = self
|
||||
.sender
|
||||
.send(OutOfBand::ClientConnectionCancel { pubkey })
|
||||
.await
|
||||
{
|
||||
error!(
|
||||
?e,
|
||||
actor = "user_agent",
|
||||
event = "failed to announce client connection cancellation"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(std::ops::ControlFlow::Continue(()))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,589 @@
|
||||
use std::sync::Mutex;
|
||||
|
||||
use alloy::{consensus::TxEip1559, primitives::Address, signers::Signature};
|
||||
use chacha20poly1305::{AeadInPlace, XChaCha20Poly1305, XNonce, aead::KeyInit};
|
||||
use diesel::{ExpressionMethods as _, QueryDsl as _, SelectableHelper};
|
||||
use diesel_async::{AsyncConnection, RunQueryDsl};
|
||||
use kameo::error::SendError;
|
||||
use kameo::messages;
|
||||
use kameo::prelude::Context;
|
||||
use tracing::{error, info};
|
||||
use x25519_dalek::{EphemeralSecret, PublicKey};
|
||||
|
||||
use crate::actors::keyholder::KeyHolderState;
|
||||
use crate::actors::user_agent::session::Error;
|
||||
use crate::db::models::{
|
||||
EvmWalletAccess, NewEvmWalletAccess, ProgramClient, ProgramClientMetadata,
|
||||
};
|
||||
use crate::evm::policies::{Grant, SpecificGrant};
|
||||
use crate::safe_cell::SafeCell;
|
||||
use crate::{
|
||||
actors::flow_coordinator::client_connect_approval::ClientApprovalAnswer,
|
||||
crypto::integrity::{self, Verified},
|
||||
};
|
||||
use crate::{
|
||||
actors::{
|
||||
evm::{
|
||||
ClientSignTransaction, Generate, ListWallets, SignTransactionError as EvmSignError,
|
||||
UseragentCreateGrant, UseragentDeleteGrant, UseragentListGrants,
|
||||
},
|
||||
keyholder::{self, Bootstrap, TryUnseal},
|
||||
user_agent::session::{
|
||||
UserAgentSession,
|
||||
state::{UnsealContext, UserAgentEvents, UserAgentStates},
|
||||
},
|
||||
user_agent::{AuthPublicKey, UserAgentCredentials},
|
||||
},
|
||||
db::schema::useragent_client,
|
||||
safe_cell::SafeCellHandle as _,
|
||||
};
|
||||
|
||||
fn is_vault_sealed_from_evm<M>(err: &SendError<M, crate::actors::evm::Error>) -> bool {
|
||||
matches!(
|
||||
err,
|
||||
SendError::HandlerError(crate::actors::evm::Error::Keyholder(
|
||||
keyholder::Error::NotBootstrapped
|
||||
)) | SendError::HandlerError(crate::actors::evm::Error::Integrity(
|
||||
crate::crypto::integrity::Error::Keyholder(keyholder::Error::NotBootstrapped)
|
||||
))
|
||||
)
|
||||
}
|
||||
|
||||
impl UserAgentSession {
|
||||
async fn backfill_useragent_integrity(&self) -> Result<(), Error> {
|
||||
let mut conn = self.props.db.get().await?;
|
||||
let keyholder = self.props.actors.key_holder.clone();
|
||||
|
||||
conn.transaction(|conn| {
|
||||
Box::pin(async move {
|
||||
let rows: Vec<(i32, i32, Vec<u8>, crate::db::models::KeyType)> =
|
||||
useragent_client::table
|
||||
.select((
|
||||
useragent_client::id,
|
||||
useragent_client::nonce,
|
||||
useragent_client::public_key,
|
||||
useragent_client::key_type,
|
||||
))
|
||||
.load(conn)
|
||||
.await?;
|
||||
|
||||
for (id, nonce, public_key, key_type) in rows {
|
||||
let pubkey = AuthPublicKey::try_from((key_type, public_key)).map_err(|e| {
|
||||
Error::internal(format!("Invalid user-agent key in db: {e}"))
|
||||
})?;
|
||||
|
||||
integrity::sign_entity(
|
||||
conn,
|
||||
&keyholder,
|
||||
&UserAgentCredentials { pubkey, nonce },
|
||||
id,
|
||||
)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
Error::internal(format!("Failed to backfill user-agent integrity: {e}"))
|
||||
})?;
|
||||
}
|
||||
|
||||
Result::<_, Error>::Ok(())
|
||||
})
|
||||
})
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn take_unseal_secret(&mut self) -> Result<(EphemeralSecret, PublicKey), Error> {
|
||||
let UserAgentStates::WaitingForUnsealKey(unseal_context) = self.state.state() else {
|
||||
error!("Received encrypted key in invalid state");
|
||||
return Err(Error::internal("Invalid state for unseal encrypted key"));
|
||||
};
|
||||
|
||||
let ephemeral_secret = {
|
||||
#[allow(
|
||||
clippy::unwrap_used,
|
||||
reason = "Mutex poison is unrecoverable and should panic"
|
||||
)]
|
||||
let mut secret_lock = unseal_context.secret.lock().unwrap();
|
||||
let secret = secret_lock.take();
|
||||
match secret {
|
||||
Some(secret) => secret,
|
||||
None => {
|
||||
drop(secret_lock);
|
||||
error!("Ephemeral secret already taken");
|
||||
return Err(Error::internal("Ephemeral secret already taken"));
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
Ok((ephemeral_secret, unseal_context.client_public_key))
|
||||
}
|
||||
|
||||
fn decrypt_client_key_material(
|
||||
ephemeral_secret: EphemeralSecret,
|
||||
client_public_key: PublicKey,
|
||||
nonce: &[u8],
|
||||
ciphertext: &[u8],
|
||||
associated_data: &[u8],
|
||||
) -> Result<SafeCell<Vec<u8>>, ()> {
|
||||
let nonce = XNonce::from_slice(nonce);
|
||||
|
||||
let shared_secret = ephemeral_secret.diffie_hellman(&client_public_key);
|
||||
let cipher = XChaCha20Poly1305::new(shared_secret.as_bytes().into());
|
||||
|
||||
let mut key_buffer = SafeCell::new(ciphertext.to_vec());
|
||||
|
||||
let decryption_result = key_buffer.write_inline(|write_handle| {
|
||||
cipher.decrypt_in_place(nonce, associated_data, write_handle)
|
||||
});
|
||||
|
||||
match decryption_result {
|
||||
Ok(_) => Ok(key_buffer),
|
||||
Err(err) => {
|
||||
error!(?err, "Failed to decrypt encrypted key material");
|
||||
Err(())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub struct UnsealStartResponse {
|
||||
pub server_pubkey: PublicKey,
|
||||
}
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
pub enum UnsealError {
|
||||
#[error("Invalid key provided for unsealing")]
|
||||
InvalidKey,
|
||||
#[error("Internal error during unsealing process")]
|
||||
General(#[from] super::Error),
|
||||
}
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
pub enum BootstrapError {
|
||||
#[error("Invalid key provided for bootstrapping")]
|
||||
InvalidKey,
|
||||
#[error("Vault is already bootstrapped")]
|
||||
AlreadyBootstrapped,
|
||||
|
||||
#[error("Internal error during bootstrapping process")]
|
||||
General(#[from] super::Error),
|
||||
}
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
pub enum SignTransactionError {
|
||||
#[error("Policy evaluation failed")]
|
||||
Vet(#[from] crate::evm::VetError),
|
||||
|
||||
#[error("Internal signing error")]
|
||||
Internal,
|
||||
}
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
pub enum GrantMutationError {
|
||||
#[error("Vault is sealed")]
|
||||
VaultSealed,
|
||||
|
||||
#[error("Internal grant mutation error")]
|
||||
Internal,
|
||||
}
|
||||
|
||||
#[messages]
impl UserAgentSession {
    /// Starts the unseal handshake: generates a fresh ephemeral key pair,
    /// records it (together with the client's public key) in the session
    /// state machine, and returns the server public key to the client.
    #[message]
    pub async fn handle_unseal_request(
        &mut self,
        client_pubkey: x25519_dalek::PublicKey,
    ) -> Result<UnsealStartResponse, Error> {
        let secret = EphemeralSecret::random();
        let public_key = PublicKey::from(&secret);

        // Store the ephemeral secret so the follow-up encrypted-key message
        // can complete the DH exchange; the Mutex<Option<_>> makes it a
        // take-once value.
        self.transition(UserAgentEvents::UnsealRequest(UnsealContext {
            secret: Mutex::new(Some(secret)),
            client_public_key: client_pubkey,
        }))?;

        Ok(UnsealStartResponse {
            server_pubkey: public_key,
        })
    }

    /// Second half of the unseal handshake: decrypts the client-provided
    /// key material with the stored ephemeral secret and asks the keyholder
    /// to unseal with it.
    #[message]
    pub async fn handle_unseal_encrypted_key(
        &mut self,
        nonce: Vec<u8>,
        ciphertext: Vec<u8>,
        associated_data: Vec<u8>,
    ) -> Result<(), UnsealError> {
        // Error::State means there is no pending handshake (no unseal-request
        // was made, or the secret was already consumed) — treated as an
        // invalid key rather than an internal error.
        let (ephemeral_secret, client_public_key) = match self.take_unseal_secret() {
            Ok(values) => values,
            Err(Error::State) => {
                self.transition(UserAgentEvents::ReceivedInvalidKey)?;
                return Err(UnsealError::InvalidKey);
            }
            Err(_err) => {
                return Err(Error::internal("Failed to take unseal secret").into());
            }
        };

        // AEAD decryption failure is indistinguishable from a wrong key by
        // design; both become InvalidKey.
        let seal_key_buffer = match Self::decrypt_client_key_material(
            ephemeral_secret,
            client_public_key,
            &nonce,
            &ciphertext,
            &associated_data,
        ) {
            Ok(buffer) => buffer,
            Err(()) => {
                self.transition(UserAgentEvents::ReceivedInvalidKey)?;
                return Err(UnsealError::InvalidKey);
            }
        };

        match self
            .props
            .actors
            .key_holder
            .ask(TryUnseal {
                seal_key_raw: seal_key_buffer,
            })
            .await
        {
            Ok(_) => {
                self.backfill_useragent_integrity().await?;
                info!("Successfully unsealed key with client-provided key");
                self.transition(UserAgentEvents::ReceivedValidKey)?;
                Ok(())
            }
            Err(SendError::HandlerError(keyholder::Error::InvalidKey)) => {
                self.transition(UserAgentEvents::ReceivedInvalidKey)?;
                Err(UnsealError::InvalidKey)
            }
            // NOTE(review): other keyholder handler errors are also mapped to
            // InvalidKey (after logging) — confirm this flattening is intended.
            Err(SendError::HandlerError(err)) => {
                error!(?err, "Keyholder failed to unseal key");
                self.transition(UserAgentEvents::ReceivedInvalidKey)?;
                Err(UnsealError::InvalidKey)
            }
            // Mailbox-level failures are internal errors, but the state
            // machine still transitions so the handshake does not hang.
            Err(err) => {
                error!(?err, "Failed to send unseal request to keyholder");
                self.transition(UserAgentEvents::ReceivedInvalidKey)?;
                Err(Error::internal("Vault actor error").into())
            }
        }
    }

    /// Bootstrap counterpart of `handle_unseal_encrypted_key`: same
    /// handshake, but installs initial key material instead of unsealing.
    #[message]
    pub(crate) async fn handle_bootstrap_encrypted_key(
        &mut self,
        nonce: Vec<u8>,
        ciphertext: Vec<u8>,
        associated_data: Vec<u8>,
    ) -> Result<(), BootstrapError> {
        // Same take-once semantics as the unseal path.
        let (ephemeral_secret, client_public_key) = match self.take_unseal_secret() {
            Ok(values) => values,
            Err(Error::State) => {
                self.transition(UserAgentEvents::ReceivedInvalidKey)?;
                return Err(BootstrapError::InvalidKey);
            }
            Err(err) => return Err(err.into()),
        };

        let seal_key_buffer = match Self::decrypt_client_key_material(
            ephemeral_secret,
            client_public_key,
            &nonce,
            &ciphertext,
            &associated_data,
        ) {
            Ok(buffer) => buffer,
            Err(()) => {
                self.transition(UserAgentEvents::ReceivedInvalidKey)?;
                return Err(BootstrapError::InvalidKey);
            }
        };

        match self
            .props
            .actors
            .key_holder
            .ask(Bootstrap {
                seal_key_raw: seal_key_buffer,
            })
            .await
        {
            Ok(_) => {
                self.backfill_useragent_integrity().await?;
                info!("Successfully bootstrapped vault with client-provided key");
                self.transition(UserAgentEvents::ReceivedValidKey)?;
                Ok(())
            }
            Err(SendError::HandlerError(keyholder::Error::AlreadyBootstrapped)) => {
                self.transition(UserAgentEvents::ReceivedInvalidKey)?;
                Err(BootstrapError::AlreadyBootstrapped)
            }
            // NOTE(review): other keyholder handler errors surface as
            // InvalidKey — confirm this flattening is intended.
            Err(SendError::HandlerError(err)) => {
                error!(?err, "Keyholder failed to bootstrap vault");
                self.transition(UserAgentEvents::ReceivedInvalidKey)?;
                Err(BootstrapError::InvalidKey)
            }
            Err(err) => {
                error!(?err, "Failed to send bootstrap request to keyholder");
                self.transition(UserAgentEvents::ReceivedInvalidKey)?;
                Err(BootstrapError::General(Error::internal(
                    "Vault actor error",
                )))
            }
        }
    }
}
|
||||
|
||||
#[messages]
|
||||
impl UserAgentSession {
|
||||
#[message]
|
||||
pub(crate) async fn handle_query_vault_state(&mut self) -> Result<KeyHolderState, Error> {
|
||||
use crate::actors::keyholder::GetState;
|
||||
|
||||
let vault_state = match self.props.actors.key_holder.ask(GetState {}).await {
|
||||
Ok(state) => state,
|
||||
Err(err) => {
|
||||
error!(?err, actor = "useragent", "keyholder.query.failed");
|
||||
return Err(Error::internal("Vault is in broken state"));
|
||||
}
|
||||
};
|
||||
|
||||
Ok(vault_state)
|
||||
}
|
||||
}
|
||||
|
||||
#[messages]
|
||||
impl UserAgentSession {
|
||||
#[message]
|
||||
pub(crate) async fn handle_evm_wallet_create(&mut self) -> Result<(i32, Address), Error> {
|
||||
match self.props.actors.evm.ask(Generate {}).await {
|
||||
Ok(address) => Ok(address),
|
||||
Err(SendError::HandlerError(err)) => Err(Error::internal(format!(
|
||||
"EVM wallet generation failed: {err}"
|
||||
))),
|
||||
Err(err) => {
|
||||
error!(?err, "EVM actor unreachable during wallet create");
|
||||
Err(Error::internal("EVM actor unreachable"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[message]
|
||||
pub(crate) async fn handle_evm_wallet_list(&mut self) -> Result<Vec<(i32, Address)>, Error> {
|
||||
match self.props.actors.evm.ask(ListWallets {}).await {
|
||||
Ok(wallets) => Ok(wallets),
|
||||
Err(err) => {
|
||||
error!(?err, "EVM wallet list failed");
|
||||
Err(Error::internal("Failed to list EVM wallets"))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[messages]
impl UserAgentSession {
    /// Lists all grants via the EVM actor.
    ///
    /// A sealed vault is surfaced distinctly; all other failures are logged
    /// and flattened to `Internal`.
    #[message]
    pub(crate) async fn handle_grant_list(
        &mut self,
    ) -> Result<Vec<Grant<SpecificGrant>>, GrantMutationError> {
        match self.props.actors.evm.ask(UseragentListGrants {}).await {
            Ok(grants) => Ok(grants),
            Err(err) if is_vault_sealed_from_evm(&err) => Err(GrantMutationError::VaultSealed),
            Err(err) => {
                error!(?err, "EVM grant list failed");
                Err(GrantMutationError::Internal)
            }
        }
    }

    /// Creates a grant and returns its verified id.
    #[message]
    pub(crate) async fn handle_grant_create(
        &mut self,
        basic: crate::evm::policies::SharedGrantSettings,
        grant: crate::evm::policies::SpecificGrant,
    ) -> Result<Verified<i32>, GrantMutationError> {
        match self
            .props
            .actors
            .evm
            .ask(UseragentCreateGrant { basic, grant })
            .await
        {
            Ok(grant_id) => Ok(grant_id),
            Err(err) if is_vault_sealed_from_evm(&err) => Err(GrantMutationError::VaultSealed),
            Err(err) => {
                error!(?err, "EVM grant create failed");
                Err(GrantMutationError::Internal)
            }
        }
    }

    /// Deletes the grant with the given id.
    #[message]
    pub(crate) async fn handle_grant_delete(
        &mut self,
        grant_id: i32,
    ) -> Result<(), GrantMutationError> {
        match self
            .props
            .actors
            .evm
            .ask(UseragentDeleteGrant {
                _grant_id: grant_id,
            })
            .await
        {
            Ok(()) => Ok(()),
            Err(err) if is_vault_sealed_from_evm(&err) => Err(GrantMutationError::VaultSealed),
            Err(err) => {
                error!(?err, "EVM grant delete failed");
                Err(GrantMutationError::Internal)
            }
        }
    }

    /// Signs an EIP-1559 transaction on behalf of a client; policy ("vet")
    /// rejections are surfaced with their cause, everything else as Internal.
    #[message]
    pub(crate) async fn handle_sign_transaction(
        &mut self,
        client_id: i32,
        wallet_address: Address,
        transaction: TxEip1559,
    ) -> Result<Signature, SignTransactionError> {
        match self
            .props
            .actors
            .evm
            .ask(ClientSignTransaction {
                client_id,
                wallet_address,
                transaction,
            })
            .await
        {
            Ok(signature) => Ok(signature),
            Err(SendError::HandlerError(EvmSignError::Vet(vet_error))) => {
                Err(SignTransactionError::Vet(vet_error))
            }
            Err(err) => {
                error!(?err, "EVM sign transaction failed in user-agent session");
                Err(SignTransactionError::Internal)
            }
        }
    }

    /// Inserts wallet-access rows in one transaction. Duplicate entries are
    /// silently ignored (`on_conflict_do_nothing`), making the call idempotent.
    #[message]
    pub(crate) async fn handle_grant_evm_wallet_access(
        &mut self,
        entries: Vec<NewEvmWalletAccess>,
    ) -> Result<(), Error> {
        let mut conn = self.props.db.get().await?;
        conn.transaction(|conn| {
            Box::pin(async move {
                use crate::db::schema::evm_wallet_access;

                for entry in entries {
                    diesel::insert_into(evm_wallet_access::table)
                        .values(&entry)
                        .on_conflict_do_nothing()
                        .execute(conn)
                        .await?;
                }

                Result::<_, Error>::Ok(())
            })
        })
        .await?;
        Ok(())
    }

    /// Deletes wallet-access rows by wallet id in one transaction.
    ///
    /// NOTE(review): this filters on `wallet_id`, so one entry may remove
    /// multiple access rows — confirm that is the intended granularity.
    #[message]
    pub(crate) async fn handle_revoke_evm_wallet_access(
        &mut self,
        entries: Vec<i32>,
    ) -> Result<(), Error> {
        let mut conn = self.props.db.get().await?;
        conn.transaction(|conn| {
            Box::pin(async move {
                use crate::db::schema::evm_wallet_access;
                for entry in entries {
                    diesel::delete(evm_wallet_access::table)
                        .filter(evm_wallet_access::wallet_id.eq(entry))
                        .execute(conn)
                        .await?;
                }

                Result::<_, Error>::Ok(())
            })
        })
        .await?;
        Ok(())
    }

    /// Returns every wallet-access row in the table (no filtering).
    #[message]
    pub(crate) async fn handle_list_wallet_access(
        &mut self,
    ) -> Result<Vec<EvmWalletAccess>, Error> {
        let mut conn = self.props.db.get().await?;
        use crate::db::schema::evm_wallet_access;
        let access_entries = evm_wallet_access::table
            .select(EvmWalletAccess::as_select())
            .load::<_>(&mut conn)
            .await?;
        Ok(access_entries)
    }
}
|
||||
|
||||
#[messages]
impl UserAgentSession {
    /// Resolves a pending client-connection approval: forwards the decision
    /// to the waiting controller actor and unlinks from it.
    #[message(ctx)]
    pub(crate) async fn handle_new_client_approve(
        &mut self,
        approved: bool,
        pubkey: ed25519_dalek::VerifyingKey,
        ctx: &mut Context<Self, Result<(), Error>>,
    ) -> Result<(), Error> {
        // The approval is removed (not just looked up) so each pending
        // request can be answered at most once.
        let pending_approval = match self.pending_client_approvals.remove(&pubkey) {
            Some(approval) => approval,
            None => {
                error!("Received client connection response for unknown client");
                return Err(Error::internal("Unknown client in connection response"));
            }
        };

        pending_approval
            .controller
            .tell(ClientApprovalAnswer { approved })
            .await
            .map_err(|err| {
                error!(
                    ?err,
                    "Failed to send client approval response to controller"
                );
                Error::internal("Failed to send client approval response to controller")
            })?;

        // The link existed only to observe the controller while the approval
        // was pending; drop it now that the answer has been delivered.
        ctx.actor_ref().unlink(&pending_approval.controller).await;

        Ok(())
    }

    /// Lists all SDK program clients joined with their metadata.
    #[message]
    pub(crate) async fn handle_sdk_client_list(
        &mut self,
    ) -> Result<Vec<(ProgramClient, ProgramClientMetadata)>, Error> {
        use crate::db::schema::{client_metadata, program_client};
        let mut conn = self.props.db.get().await?;

        // Inner join: clients without metadata rows are omitted.
        let clients = program_client::table
            .inner_join(client_metadata::table)
            .select((
                ProgramClient::as_select(),
                ProgramClientMetadata::as_select(),
            ))
            .load::<(ProgramClient, ProgramClientMetadata)>(&mut conn)
            .await?;

        Ok(clients)
    }
}
|
||||
@@ -1,6 +1,5 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use miette::Diagnostic;
|
||||
use thiserror::Error;
|
||||
|
||||
use crate::{
|
||||
@@ -11,30 +10,24 @@ use crate::{
|
||||
|
||||
pub mod tls;
|
||||
|
||||
#[derive(Error, Debug, Diagnostic)]
|
||||
#[derive(Error, Debug)]
|
||||
pub enum InitError {
|
||||
#[error("Database setup failed: {0}")]
|
||||
#[diagnostic(code(arbiter_server::init::database_setup))]
|
||||
DatabaseSetup(#[from] db::DatabaseSetupError),
|
||||
|
||||
#[error("Connection acquire failed: {0}")]
|
||||
#[diagnostic(code(arbiter_server::init::database_pool))]
|
||||
DatabasePool(#[from] db::PoolError),
|
||||
|
||||
#[error("Database query error: {0}")]
|
||||
#[diagnostic(code(arbiter_server::init::database_query))]
|
||||
DatabaseQuery(#[from] diesel::result::Error),
|
||||
|
||||
#[error("TLS initialization failed: {0}")]
|
||||
#[diagnostic(code(arbiter_server::init::tls_init))]
|
||||
Tls(#[from] tls::InitError),
|
||||
|
||||
#[error("Actor spawn failed: {0}")]
|
||||
#[diagnostic(code(arbiter_server::init::actor_spawn))]
|
||||
ActorSpawn(#[from] crate::actors::SpawnError),
|
||||
|
||||
#[error("I/O Error: {0}")]
|
||||
#[diagnostic(code(arbiter_server::init::io))]
|
||||
Io(#[from] std::io::Error),
|
||||
}
|
||||
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
use std::string::FromUtf8Error;
|
||||
use std::{net::Ipv4Addr, string::FromUtf8Error};
|
||||
|
||||
use diesel::{ExpressionMethods as _, QueryDsl, SelectableHelper as _};
|
||||
use diesel_async::{AsyncConnection, RunQueryDsl};
|
||||
use miette::Diagnostic;
|
||||
|
||||
use pem::Pem;
|
||||
use rcgen::{
|
||||
BasicConstraints, Certificate, CertificateParams, CertifiedIssuer, DistinguishedName, DnType,
|
||||
IsCa, Issuer, KeyPair, KeyUsagePurpose,
|
||||
IsCa, Issuer, KeyPair, KeyUsagePurpose, SanType,
|
||||
};
|
||||
use rustls::pki_types::{pem::PemObject};
|
||||
use rustls::pki_types::pem::PemObject;
|
||||
use thiserror::Error;
|
||||
use tonic::transport::CertificateDer;
|
||||
|
||||
@@ -29,40 +29,31 @@ const ENCODE_CONFIG: pem::EncodeConfig = {
|
||||
pem::EncodeConfig::new().set_line_ending(line_ending)
|
||||
};
|
||||
|
||||
#[derive(Error, Debug, Diagnostic)]
|
||||
#[derive(Error, Debug)]
|
||||
pub enum InitError {
|
||||
#[error("Key generation error during TLS initialization: {0}")]
|
||||
#[diagnostic(code(arbiter_server::tls_init::key_generation))]
|
||||
KeyGeneration(#[from] rcgen::Error),
|
||||
|
||||
#[error("Key invalid format: {0}")]
|
||||
#[diagnostic(code(arbiter_server::tls_init::key_invalid_format))]
|
||||
KeyInvalidFormat(#[from] FromUtf8Error),
|
||||
|
||||
#[error("Key deserialization error: {0}")]
|
||||
#[diagnostic(code(arbiter_server::tls_init::key_deserialization))]
|
||||
KeyDeserializationError(rcgen::Error),
|
||||
|
||||
#[error("Database error during TLS initialization: {0}")]
|
||||
#[diagnostic(code(arbiter_server::tls_init::database_error))]
|
||||
DatabaseError(#[from] diesel::result::Error),
|
||||
|
||||
#[error("Pem deserialization error during TLS initialization: {0}")]
|
||||
#[diagnostic(code(arbiter_server::tls_init::pem_deserialization))]
|
||||
PemDeserializationError(#[from] rustls::pki_types::pem::Error),
|
||||
|
||||
#[error("Database pool acquire error during TLS initialization: {0}")]
|
||||
#[diagnostic(code(arbiter_server::tls_init::database_pool_acquire))]
|
||||
DatabasePoolAcquire(#[from] db::PoolError),
|
||||
}
|
||||
|
||||
pub type PemCert = String;
|
||||
|
||||
pub fn encode_cert_to_pem(cert: &CertificateDer) -> PemCert {
|
||||
pem::encode_config(
|
||||
&Pem::new("CERTIFICATE", cert.to_vec()),
|
||||
ENCODE_CONFIG,
|
||||
)
|
||||
pem::encode_config(&Pem::new("CERTIFICATE", cert.to_vec()), ENCODE_CONFIG)
|
||||
}
|
||||
|
||||
#[allow(unused)]
|
||||
@@ -94,6 +85,10 @@ impl TlsCa {
|
||||
|
||||
let cert_key_pem = certified_issuer.key().serialize_pem();
|
||||
|
||||
#[allow(
|
||||
clippy::unwrap_used,
|
||||
reason = "Broken cert couldn't bootstrap server anyway"
|
||||
)]
|
||||
let issuer = Issuer::from_ca_cert_pem(
|
||||
&certified_issuer.pem(),
|
||||
KeyPair::from_pem(cert_key_pem.as_ref()).unwrap(),
|
||||
@@ -113,6 +108,9 @@ impl TlsCa {
|
||||
KeyUsagePurpose::DigitalSignature,
|
||||
KeyUsagePurpose::KeyEncipherment,
|
||||
];
|
||||
params
|
||||
.subject_alt_names
|
||||
.push(SanType::IpAddress(Ipv4Addr::LOCALHOST.into()));
|
||||
|
||||
let mut dn = DistinguishedName::new();
|
||||
dn.push(DnType::CommonName, "Arbiter Instance Leaf");
|
||||
|
||||
@@ -0,0 +1,3 @@
|
||||
pub mod v1;
|
||||
|
||||
pub use v1::*;
|
||||
109
server/crates/arbiter-server/src/crypto/encryption/v1.rs
Normal file
109
server/crates/arbiter-server/src/crypto/encryption/v1.rs
Normal file
@@ -0,0 +1,109 @@
|
||||
use argon2::password_hash::Salt as ArgonSalt;
|
||||
|
||||
use rand::{
|
||||
Rng as _, SeedableRng,
|
||||
rngs::{StdRng, SysRng},
|
||||
};
|
||||
|
||||
/// Domain-separation tag for the v1 root/seal key format.
pub const ROOT_KEY_TAG: &[u8] = b"arbiter/seal/v1";
/// Domain-separation tag for v1 private-key encryption.
// Byte-string literals instead of `"…".as_bytes()` (clippy::string_lit_as_bytes),
// matching INTEGRITY_SUBKEY_TAG elsewhere in the crate.
pub const TAG: &[u8] = b"arbiter/private-key/v1";
|
||||
|
||||
/// Byte length of the v1 nonce (matches the XChaCha20 extended-nonce size).
pub const NONCE_LENGTH: usize = 24;

/// A fixed-width, big-endian counter nonce for the v1 encryption format.
#[derive(Default)]
pub struct Nonce(pub [u8; NONCE_LENGTH]);
impl Nonce {
    /// Increments the nonce as a big-endian integer; the all-0xFF value
    /// wraps around to all zeroes (fixed-width carry semantics).
    pub fn increment(&mut self) {
        for byte in self.0.iter_mut().rev() {
            let (bumped, carried) = byte.overflowing_add(1);
            *byte = bumped;
            if !carried {
                break;
            }
        }
    }

    /// Copies the nonce bytes into a freshly allocated vector.
    pub fn to_vec(&self) -> Vec<u8> {
        self.0.to_vec()
    }
}
impl<'a> TryFrom<&'a [u8]> for Nonce {
    type Error = ();

    /// Accepts exactly `NONCE_LENGTH` bytes; any other length is rejected.
    fn try_from(value: &'a [u8]) -> Result<Self, Self::Error> {
        let bytes: [u8; NONCE_LENGTH] = value.try_into().map_err(|_| ())?;
        Ok(Self(bytes))
    }
}
|
||||
|
||||
/// KDF salt, sized per Argon2's recommended salt length.
pub type Salt = [u8; ArgonSalt::RECOMMENDED_LENGTH];

/// Generates a fresh random salt seeded from the system entropy source.
pub fn generate_salt() -> Salt {
    let mut salt = Salt::default();
    #[allow(
        clippy::unwrap_used,
        reason = "Rng failure is unrecoverable and should panic"
    )]
    // NOTE(review): a userspace StdRng seeded once from the system RNG is
    // used here; drawing directly from the system RNG would also work —
    // confirm the indirection is intentional.
    let mut rng = StdRng::try_from_rng(&mut SysRng).unwrap();
    rng.fill_bytes(&mut salt);
    salt
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use std::ops::Deref as _;

    use super::*;
    use crate::{
        crypto::derive_key,
        safe_cell::{SafeCell, SafeCellHandle as _},
    };

    /// Same password + same salt must derive identical key material.
    #[test]
    pub fn derive_seal_key_deterministic() {
        static PASSWORD: &[u8] = b"password";
        let password = SafeCell::new(PASSWORD.to_vec());
        let password2 = SafeCell::new(PASSWORD.to_vec());
        let salt = generate_salt();

        let mut key1 = derive_key(password, &salt);
        let mut key2 = derive_key(password2, &salt);

        let key1_reader = key1.0.read();
        let key2_reader = key2.0.read();

        assert_eq!(key1_reader.deref(), key2_reader.deref());
    }

    /// Derivation must yield non-trivial output (not all zeroes).
    #[test]
    pub fn successful_derive() {
        static PASSWORD: &[u8] = b"password";
        let password = SafeCell::new(PASSWORD.to_vec());
        let salt = generate_salt();

        let mut key = derive_key(password, &salt);
        let key_reader = key.0.read();
        let key_ref = key_reader.deref();

        assert_ne!(key_ref.as_slice(), &[0u8; 32][..]);
    }

    /// Incrementing a zero nonce sets only the least-significant byte.
    #[test]
    // We should fuzz this
    pub fn test_nonce_increment() {
        let mut nonce = Nonce([0u8; NONCE_LENGTH]);
        nonce.increment();

        assert_eq!(
            nonce.0,
            [
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1
            ]
        );
    }
}
|
||||
3
server/crates/arbiter-server/src/crypto/integrity/mod.rs
Normal file
3
server/crates/arbiter-server/src/crypto/integrity/mod.rs
Normal file
@@ -0,0 +1,3 @@
|
||||
pub mod v1;
|
||||
|
||||
pub use v1::*;
|
||||
681
server/crates/arbiter-server/src/crypto/integrity/v1.rs
Normal file
681
server/crates/arbiter-server/src/crypto/integrity/v1.rs
Normal file
@@ -0,0 +1,681 @@
|
||||
use crate::actors::keyholder;
|
||||
use hmac::Hmac;
|
||||
use sha2::Sha256;
|
||||
use std::future::Future;
|
||||
use std::ops::Deref;
|
||||
use std::pin::Pin;
|
||||
|
||||
use diesel::{ExpressionMethods as _, QueryDsl, dsl::insert_into, sqlite::Sqlite};
|
||||
use diesel_async::{AsyncConnection, RunQueryDsl};
|
||||
use kameo::{actor::ActorRef, error::SendError};
|
||||
use sha2::Digest as _;
|
||||
|
||||
pub mod hashing;
|
||||
use self::hashing::Hashable;
|
||||
|
||||
use crate::{
|
||||
actors::keyholder::{KeyHolder, SignIntegrity, VerifyIntegrity},
|
||||
db::{
|
||||
self,
|
||||
models::{IntegrityEnvelope as IntegrityEnvelopeRow, NewIntegrityEnvelope},
|
||||
schema::integrity_envelope,
|
||||
},
|
||||
};
|
||||
|
||||
/// Errors produced while signing or verifying database integrity envelopes.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    #[error("Database error: {0}")]
    Database(#[from] db::DatabaseError),

    /// The keyholder handler itself reported a failure.
    #[error("KeyHolder error: {0}")]
    Keyholder(#[from] keyholder::Error),

    /// The keyholder actor's mailbox was unreachable (transport failure).
    #[error("KeyHolder mailbox error")]
    KeyholderSend,

    /// No envelope row exists for the entity being verified.
    #[error("Integrity envelope is missing for entity {entity_kind}")]
    MissingEnvelope { entity_kind: &'static str },

    /// The stored envelope was written under a different payload scheme
    /// version than the one the code expects.
    #[error(
        "Integrity payload version mismatch for entity {entity_kind}: expected {expected}, found {found}"
    )]
    PayloadVersionMismatch {
        entity_kind: &'static str,
        expected: i32,
        found: i32,
    },

    /// The stored MAC does not match the recomputed one — possible tampering.
    #[error("Integrity MAC mismatch for entity {entity_kind}")]
    MacMismatch { entity_kind: &'static str },
}
||||
|
||||
/// Outcome of an attestation check.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[must_use]
pub enum AttestationStatus {
    /// The MAC was verified successfully against vault key material.
    Attested,
    /// Vault key material was unavailable (not bootstrapped), so the MAC
    /// could not be checked at all.
    Unavailable,
}
||||
|
||||
/// Marker wrapper proving its payload passed (or was explicitly allowed to
/// skip) an integrity check; only this module can construct it.
#[derive(Debug)]
pub struct Verified<T>(T);

impl<T> AsRef<T> for Verified<T> {
    fn as_ref(&self) -> &T {
        let Self(inner) = self;
        inner
    }
}

impl<T> Verified<T> {
    /// Unwraps the payload, discarding the verification marker.
    pub fn into_inner(self) -> T {
        let Self(inner) = self;
        inner
    }
}

impl<T> Deref for Verified<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        self.as_ref()
    }
}
|
||||
|
||||
/// Version stamp written into newly created integrity envelopes.
pub const CURRENT_PAYLOAD_VERSION: i32 = 1;
/// Domain-separation tag for deriving the DB-integrity MAC subkey.
pub const INTEGRITY_SUBKEY_TAG: &[u8] = b"arbiter/db-integrity-key/v1";

/// The MAC algorithm used for envelope attestation.
pub type HmacSha256 = Hmac<Sha256>;

/// Entities that can be covered by an integrity envelope.
/// `KIND` namespaces the envelope rows; `VERSION` is the payload hashing
/// scheme version (bump when `Hashable::hash` output changes).
pub trait Integrable: Hashable {
    const KIND: &'static str;
    const VERSION: i32 = 1;
}
|
||||
|
||||
/// SHA-256 of an entity's canonical (Hashable-defined) payload encoding.
fn payload_hash(payload: &impl Hashable) -> [u8; 32] {
    let mut hasher = Sha256::new();
    payload.hash(&mut hasher);
    hasher.finalize().into()
}
|
||||
|
||||
/// Appends `bytes` to `out`, prefixed with its length as a big-endian u32.
fn push_len_prefixed(out: &mut Vec<u8>, bytes: &[u8]) {
    // NOTE(review): `as u32` silently truncates lengths > u32::MAX; the
    // inputs here (entity kinds/ids) are expected to be tiny — confirm
    // upstream bounds.
    out.extend_from_slice(&(bytes.len() as u32).to_be_bytes());
    out.extend_from_slice(bytes);
}

/// Builds the canonical byte string MACed for an integrity envelope.
///
/// Layout: len-prefixed `entity_kind` ‖ len-prefixed `entity_id` ‖
/// `payload_version` (big-endian i32) ‖ 32-byte payload hash. The length
/// prefixes make the encoding injective (no field-boundary ambiguity).
fn build_mac_input(
    entity_kind: &str,
    entity_id: &[u8],
    payload_version: i32,
    payload_hash: &[u8; 32],
) -> Vec<u8> {
    // 4-byte prefix per variable field + 4 bytes of version + 32 bytes of
    // hash. (The previous estimate omitted the version's 4 bytes, forcing a
    // reallocation on every call.)
    let mut out = Vec::with_capacity(4 + entity_kind.len() + 4 + entity_id.len() + 4 + 32);
    push_len_prefixed(&mut out, entity_kind.as_bytes());
    push_len_prefixed(&mut out, entity_id);
    out.extend_from_slice(&payload_version.to_be_bytes());
    out.extend_from_slice(payload_hash);
    out
}
|
||||
|
||||
/// Opaque byte identifier keying an integrity-envelope row.
#[derive(Debug, Clone)]
pub struct EntityId(Vec<u8>);

impl Deref for EntityId {
    type Target = [u8];

    fn deref(&self) -> &Self::Target {
        self.0.as_slice()
    }
}

impl From<&'_ [u8]> for EntityId {
    /// Copies raw bytes into an id.
    fn from(bytes: &'_ [u8]) -> Self {
        Self(Vec::from(bytes))
    }
}

impl From<i32> for EntityId {
    /// Encodes an integer row id as its big-endian byte representation.
    fn from(value: i32) -> Self {
        Self(Vec::from(value.to_be_bytes()))
    }
}
|
||||
|
||||
/// Loads an entity via `load`, then verifies its integrity envelope.
///
/// Strict variant: fails when vault key material is unavailable. See
/// `lookup_verified_allow_unavailable` for the permissive variant.
pub async fn lookup_verified<E, C, F, Fut>(
    conn: &mut C,
    keyholder: &ActorRef<KeyHolder>,
    entity_id: impl Into<EntityId>,
    load: F,
) -> Result<Verified<E>, Error>
where
    C: AsyncConnection<Backend = Sqlite>,
    E: Integrable,
    F: FnOnce(&mut C) -> Fut,
    Fut: Future<Output = Result<E, db::DatabaseError>>,
{
    let entity = load(conn).await?;
    verify_entity(conn, keyholder, &entity, entity_id).await?;
    Ok(Verified(entity))
}
|
||||
|
||||
/// Like `lookup_verified`, but succeeds even when the vault key material is
/// unavailable (sealed / not bootstrapped) and the MAC cannot be checked.
pub async fn lookup_verified_allow_unavailable<E, C, F, Fut>(
    conn: &mut C,
    keyholder: &ActorRef<KeyHolder>,
    entity_id: impl Into<EntityId>,
    load: F,
) -> Result<Verified<E>, Error>
where
    C: AsyncConnection<Backend = Sqlite>,
    E: Integrable + 'static,
    F: FnOnce(&mut C) -> Fut,
    Fut: Future<Output = Result<E, db::DatabaseError>>,
{
    let entity = load(conn).await?;
    match check_entity_attestation(conn, keyholder, &entity, entity_id.into()).await? {
        // IMPORTANT: allow_unavailable mode must succeed with an unattested result when vault key
        // material is unavailable, otherwise integrity checks can be silently bypassed while sealed.
        // A MAC mismatch or missing envelope still fails via the `?` above.
        AttestationStatus::Attested | AttestationStatus::Unavailable => Ok(Verified(entity)),
    }
}
|
||||
|
||||
/// Loads an `(id, entity)` pair from a boxed query future and verifies the
/// entity's integrity envelope under that id. Useful when the entity id is
/// only known once the query has run.
pub async fn lookup_verified_from_query<E, Id, C, F>(
    conn: &mut C,
    keyholder: &ActorRef<KeyHolder>,
    load: F,
) -> Result<Verified<E>, Error>
where
    C: AsyncConnection<Backend = Sqlite> + Send,
    E: Integrable,
    Id: Into<EntityId>,
    F: for<'a> FnOnce(
        &'a mut C,
    ) -> Pin<
        Box<dyn Future<Output = Result<(Id, E), db::DatabaseError>> + Send + 'a>,
    >,
{
    let (entity_id, entity) = load(conn).await?;
    verify_entity(conn, keyholder, &entity, entity_id).await?;
    Ok(Verified(entity))
}
|
||||
|
||||
/// MACs `entity` under the current integrity key and upserts its envelope
/// row. Returns the entity id wrapped in `Verified` as proof the envelope
/// was written.
pub async fn sign_entity<E: Integrable, Id: Into<EntityId> + Clone>(
    conn: &mut impl AsyncConnection<Backend = Sqlite>,
    keyholder: &ActorRef<KeyHolder>,
    entity: &E,
    as_entity_id: Id,
) -> Result<Verified<Id>, Error> {
    let payload_hash = payload_hash(entity);

    let entity_id = as_entity_id.clone().into();

    let mac_input = build_mac_input(E::KIND, &entity_id, E::VERSION, &payload_hash);

    // The keyholder returns the key version it signed with, so envelopes
    // survive key rotation; mailbox failures are kept distinct from
    // handler-level errors.
    let (key_version, mac) = keyholder
        .ask(SignIntegrity { mac_input })
        .await
        .map_err(|err| match err {
            kameo::error::SendError::HandlerError(inner) => Error::Keyholder(inner),
            _ => Error::KeyholderSend,
        })?;

    // Upsert: one envelope per (entity_id, entity_kind); re-signing replaces
    // the stored MAC, key version, and payload version.
    insert_into(integrity_envelope::table)
        .values(NewIntegrityEnvelope {
            entity_kind: E::KIND.to_owned(),
            entity_id: entity_id.to_vec(),
            payload_version: E::VERSION,
            key_version,
            mac: mac.to_vec(),
        })
        .on_conflict((
            integrity_envelope::entity_id,
            integrity_envelope::entity_kind,
        ))
        .do_update()
        .set((
            integrity_envelope::payload_version.eq(E::VERSION),
            integrity_envelope::key_version.eq(key_version),
            integrity_envelope::mac.eq(mac),
        ))
        .execute(conn)
        .await
        .map_err(db::DatabaseError::from)?;

    Ok(Verified(as_entity_id))
}
|
||||
|
||||
/// Checks the stored envelope for `entity`: loads the row, compares payload
/// versions, recomputes the MAC input, and asks the keyholder to verify.
///
/// Only an un-bootstrapped vault yields `Unavailable`; a missing envelope,
/// version mismatch, or MAC mismatch is always a hard error.
pub async fn check_entity_attestation<E: Integrable>(
    conn: &mut impl AsyncConnection<Backend = Sqlite>,
    keyholder: &ActorRef<KeyHolder>,
    entity: &E,
    entity_id: impl Into<EntityId>,
) -> Result<AttestationStatus, Error> {
    let entity_id = entity_id.into();
    let envelope: IntegrityEnvelopeRow = integrity_envelope::table
        .filter(integrity_envelope::entity_kind.eq(E::KIND))
        .filter(integrity_envelope::entity_id.eq(&*entity_id))
        .first(conn)
        .await
        .map_err(|err| match err {
            // A missing row is a distinct, actionable condition for callers.
            diesel::result::Error::NotFound => Error::MissingEnvelope {
                entity_kind: E::KIND,
            },
            other => Error::Database(db::DatabaseError::from(other)),
        })?;

    if envelope.payload_version != E::VERSION {
        return Err(Error::PayloadVersionMismatch {
            entity_kind: E::KIND,
            expected: E::VERSION,
            found: envelope.payload_version,
        });
    }

    // Recompute the MAC input from the live entity; only the keyholder holds
    // the MAC key, so verification goes through the actor.
    let payload_hash = payload_hash(entity);
    let mac_input = build_mac_input(E::KIND, &entity_id, envelope.payload_version, &payload_hash);

    let result = keyholder
        .ask(VerifyIntegrity {
            mac_input,
            expected_mac: envelope.mac,
            key_version: envelope.key_version,
        })
        .await;

    match result {
        Ok(true) => Ok(AttestationStatus::Attested),
        Ok(false) => Err(Error::MacMismatch {
            entity_kind: E::KIND,
        }),
        // Sealed / not-bootstrapped vault: attestation cannot run at all.
        Err(SendError::HandlerError(keyholder::Error::NotBootstrapped)) => {
            Ok(AttestationStatus::Unavailable)
        }
        Err(_) => Err(Error::KeyholderSend),
    }
}
|
||||
|
||||
/// Strict verification: requires an `Attested` result. An unavailable vault
/// key is surfaced as a `NotBootstrapped` keyholder error rather than being
/// silently accepted.
pub async fn verify_entity<'a, E: Integrable>(
    conn: &mut impl AsyncConnection<Backend = Sqlite>,
    keyholder: &ActorRef<KeyHolder>,
    entity: &'a E,
    entity_id: impl Into<EntityId>,
) -> Result<Verified<&'a E>, Error> {
    match check_entity_attestation::<E>(conn, keyholder, entity, entity_id).await? {
        AttestationStatus::Attested => Ok(Verified(entity)),
        AttestationStatus::Unavailable => Err(Error::Keyholder(keyholder::Error::NotBootstrapped)),
    }
}
|
||||
|
||||
/// Deletes the envelope row for the given entity; returns the number of
/// rows removed (0 when no envelope existed).
pub async fn delete_envelope<E: Integrable>(
    conn: &mut impl AsyncConnection<Backend = Sqlite>,
    entity_id: impl Into<EntityId>,
) -> Result<usize, Error> {
    let entity_id = entity_id.into();

    let affected = diesel::delete(
        integrity_envelope::table
            .filter(integrity_envelope::entity_kind.eq(E::KIND))
            .filter(integrity_envelope::entity_id.eq(&*entity_id)),
    )
    .execute(conn)
    .await
    .map_err(db::DatabaseError::from)?;

    Ok(affected)
}
|
||||
|
||||
/// Integration-style tests for the integrity (MAC envelope) module: signing,
/// verification, tamper detection, and sealed-keyholder (fail-closed vs.
/// allow-unavailable) behavior, all against a throwaway SQLite test pool.
#[cfg(test)]
mod tests {
    use diesel::{ExpressionMethods as _, QueryDsl};
    use diesel_async::RunQueryDsl;
    use kameo::{actor::ActorRef, prelude::Spawn};
    use sha2::Digest;

    use crate::{
        actors::keyholder::{Bootstrap, KeyHolder},
        db::{self, schema},
        safe_cell::{SafeCell, SafeCellHandle as _},
    };

    use super::hashing::Hashable;
    use super::{
        check_entity_attestation, AttestationStatus, Error, Integrable, lookup_verified,
        lookup_verified_allow_unavailable, lookup_verified_from_query, sign_entity, verify_entity,
    };

    /// Minimal `Integrable` entity so the helpers can be exercised without a
    /// real domain model.
    #[derive(Clone, Debug)]
    struct DummyEntity {
        payload_version: i32,
        payload: Vec<u8>,
    }

    impl Hashable for DummyEntity {
        fn hash<H: Digest>(&self, hasher: &mut H) {
            // Field order matters: the MAC is computed over this exact byte stream.
            self.payload_version.hash(hasher);
            self.payload.hash(hasher);
        }
    }
    impl Integrable for DummyEntity {
        // Stored in the integrity_envelope.entity_kind column; must stay stable.
        const KIND: &'static str = "dummy_entity";
    }

    /// Spawn a `KeyHolder` actor backed by `db` and bootstrap (unseal) it with
    /// a fixed test seal key so signing operations become available.
    async fn bootstrapped_keyholder(db: &db::DatabasePool) -> ActorRef<KeyHolder> {
        let actor = KeyHolder::spawn(KeyHolder::new(db.clone()).await.unwrap());
        actor
            .ask(Bootstrap {
                seal_key_raw: SafeCell::new(b"integrity-test-seal-key".to_vec()),
            })
            .await
            .unwrap();
        actor
    }

    /// Signing must create exactly one envelope row, and verification of the
    /// unmodified entity must succeed.
    #[tokio::test]
    async fn sign_writes_envelope_and_verify_passes() {
        let db = db::create_test_pool().await;
        let keyholder = bootstrapped_keyholder(&db).await;
        let mut conn = db.get().await.unwrap();

        const ENTITY_ID: &[u8] = b"entity-id-7";

        let entity = DummyEntity {
            payload_version: 1,
            payload: b"payload-v1".to_vec(),
        };

        sign_entity(&mut conn, &keyholder, &entity, ENTITY_ID)
            .await
            .unwrap();

        let count: i64 = schema::integrity_envelope::table
            .filter(schema::integrity_envelope::entity_kind.eq("dummy_entity"))
            .filter(schema::integrity_envelope::entity_id.eq(ENTITY_ID))
            .count()
            .get_result(&mut conn)
            .await
            .unwrap();

        assert_eq!(count, 1, "envelope row must be created exactly once");
        let _ = check_entity_attestation(&mut conn, &keyholder, &entity, ENTITY_ID)
            .await
            .unwrap();
    }

    /// Overwriting the stored MAC with garbage must surface as `MacMismatch`.
    #[tokio::test]
    async fn tampered_mac_fails_verification() {
        let db = db::create_test_pool().await;
        let keyholder = bootstrapped_keyholder(&db).await;
        let mut conn = db.get().await.unwrap();

        const ENTITY_ID: &[u8] = b"entity-id-11";

        let entity = DummyEntity {
            payload_version: 1,
            payload: b"payload-v1".to_vec(),
        };

        sign_entity(&mut conn, &keyholder, &entity, ENTITY_ID)
            .await
            .unwrap();

        // Simulate an attacker tampering with the envelope row directly.
        diesel::update(schema::integrity_envelope::table)
            .filter(schema::integrity_envelope::entity_kind.eq("dummy_entity"))
            .filter(schema::integrity_envelope::entity_id.eq(ENTITY_ID))
            .set(schema::integrity_envelope::mac.eq(vec![0u8; 32]))
            .execute(&mut conn)
            .await
            .unwrap();

        let err = check_entity_attestation(&mut conn, &keyholder, &entity, ENTITY_ID)
            .await
            .unwrap_err();
        assert!(matches!(err, Error::MacMismatch { .. }));
    }

    /// Mutating the entity (rather than the MAC) must also fail verification.
    #[tokio::test]
    async fn changed_payload_fails_verification() {
        let db = db::create_test_pool().await;
        let keyholder = bootstrapped_keyholder(&db).await;
        let mut conn = db.get().await.unwrap();

        const ENTITY_ID: &[u8] = b"entity-id-21";

        let entity = DummyEntity {
            payload_version: 1,
            payload: b"payload-v1".to_vec(),
        };

        sign_entity(&mut conn, &keyholder, &entity, ENTITY_ID)
            .await
            .unwrap();

        // Same entity id, different payload bytes.
        let tampered = DummyEntity {
            payload: b"payload-v1-but-tampered".to_vec(),
            ..entity
        };

        let err = check_entity_attestation(&mut conn, &keyholder, &tampered, ENTITY_ID)
            .await
            .unwrap_err();
        assert!(matches!(err, Error::MacMismatch { .. }));
    }

    /// With a sealed (not bootstrapped) keyholder, attestation reports
    /// `Unavailable` and the allow-unavailable lookup still returns the entity.
    #[tokio::test]
    async fn allow_unavailable_lookup_passes_while_sealed() {
        let db = db::create_test_pool().await;
        let keyholder = bootstrapped_keyholder(&db).await;
        let mut conn = db.get().await.unwrap();

        const ENTITY_ID: &[u8] = b"entity-id-31";

        let entity = DummyEntity {
            payload_version: 1,
            payload: b"payload-v1".to_vec(),
        };

        sign_entity(&mut conn, &keyholder, &entity, ENTITY_ID)
            .await
            .unwrap();
        drop(keyholder);

        // A freshly spawned keyholder is sealed until Bootstrap is sent.
        let sealed_keyholder = KeyHolder::spawn(KeyHolder::new(db.clone()).await.unwrap());
        let status = check_entity_attestation(&mut conn, &sealed_keyholder, &entity, ENTITY_ID)
            .await
            .unwrap();
        assert_eq!(status, AttestationStatus::Unavailable);

        #[expect(clippy::disallowed_methods, reason = "test only")]
        lookup_verified_allow_unavailable(&mut conn, &sealed_keyholder, ENTITY_ID, |_| async {
            Ok::<_, db::DatabaseError>(DummyEntity {
                payload_version: 1,
                payload: b"payload-v1".to_vec(),
            })
        })
        .await
        .unwrap();
    }

    /// The strict paths (`verify_entity`, `lookup_verified`) must fail closed
    /// with `NotBootstrapped` while the keyholder is sealed.
    #[tokio::test]
    async fn strict_verify_fails_closed_while_sealed() {
        let db = db::create_test_pool().await;
        let keyholder = bootstrapped_keyholder(&db).await;
        let mut conn = db.get().await.unwrap();

        const ENTITY_ID: &[u8] = b"entity-id-41";

        let entity = DummyEntity {
            payload_version: 1,
            payload: b"payload-v1".to_vec(),
        };

        sign_entity(&mut conn, &keyholder, &entity, ENTITY_ID)
            .await
            .unwrap();
        drop(keyholder);

        let sealed_keyholder = KeyHolder::spawn(KeyHolder::new(db.clone()).await.unwrap());

        let err = verify_entity(&mut conn, &sealed_keyholder, &entity, ENTITY_ID)
            .await
            .unwrap_err();
        assert!(matches!(
            err,
            Error::Keyholder(crate::actors::keyholder::Error::NotBootstrapped)
        ));

        let err = lookup_verified(&mut conn, &sealed_keyholder, ENTITY_ID, |_| async {
            Ok::<_, db::DatabaseError>(DummyEntity {
                payload_version: 1,
                payload: b"payload-v1".to_vec(),
            })
        })
        .await
        .unwrap_err();
        assert!(matches!(
            err,
            Error::Keyholder(crate::actors::keyholder::Error::NotBootstrapped)
        ));
    }

    /// `lookup_verified` works with a non-byte entity id (here an i32),
    /// exercising the generic id path.
    #[tokio::test]
    async fn lookup_verified_supports_loaded_aggregate() {
        let db = db::create_test_pool().await;
        let keyholder = bootstrapped_keyholder(&db).await;
        let mut conn = db.get().await.unwrap();

        const ENTITY_ID: i32 = 77;

        let entity = DummyEntity {
            payload_version: 1,
            payload: b"payload-v1".to_vec(),
        };

        sign_entity(&mut conn, &keyholder, &entity, ENTITY_ID)
            .await
            .unwrap();

        let verified = lookup_verified(&mut conn, &keyholder, ENTITY_ID, |_| async {
            Ok::<_, db::DatabaseError>(DummyEntity {
                payload_version: 1,
                payload: b"payload-v1".to_vec(),
            })
        })
        .await
        .unwrap();

        assert_eq!(verified.payload, b"payload-v1".to_vec());
    }

    /// The allow-unavailable lookup must succeed (not error) while sealed,
    /// with a non-byte entity id.
    #[tokio::test]
    async fn lookup_verified_allow_unavailable_works_while_sealed() {
        let db = db::create_test_pool().await;
        let keyholder = bootstrapped_keyholder(&db).await;
        let mut conn = db.get().await.unwrap();

        const ENTITY_ID: i32 = 78;

        let entity = DummyEntity {
            payload_version: 1,
            payload: b"payload-v1".to_vec(),
        };

        sign_entity(&mut conn, &keyholder, &entity, ENTITY_ID)
            .await
            .unwrap();
        drop(keyholder);

        let sealed_keyholder = KeyHolder::spawn(KeyHolder::new(db.clone()).await.unwrap());

        #[expect(clippy::disallowed_methods, reason = "test only")]
        lookup_verified_allow_unavailable(&mut conn, &sealed_keyholder, ENTITY_ID, |_| async {
            Ok::<_, db::DatabaseError>(DummyEntity {
                payload_version: 1,
                payload: b"payload-v1".to_vec(),
            })
        })
        .await
        .unwrap();
    }

    /// Loader closures returning a boxed future (instead of an async block)
    /// must also be accepted.
    #[tokio::test]
    async fn extension_trait_lookup_verified_required_works() {
        let db = db::create_test_pool().await;
        let keyholder = bootstrapped_keyholder(&db).await;
        let mut conn = db.get().await.unwrap();

        const ENTITY_ID: i32 = 79;

        let entity = DummyEntity {
            payload_version: 1,
            payload: b"payload-v1".to_vec(),
        };

        sign_entity(&mut conn, &keyholder, &entity, ENTITY_ID)
            .await
            .unwrap();

        let verified = lookup_verified(&mut conn, &keyholder, ENTITY_ID, |_| {
            Box::pin(async {
                Ok::<_, db::DatabaseError>(DummyEntity {
                    payload_version: 1,
                    payload: b"payload-v1".to_vec(),
                })
            })
        })
        .await
        .unwrap();

        assert_eq!(verified.payload, b"payload-v1".to_vec());
    }

    /// `lookup_verified_from_query` (loader yields `(id, entity)`) succeeds
    /// when unsealed and fails closed with `NotBootstrapped` when sealed.
    #[tokio::test]
    async fn lookup_verified_from_query_helpers_work() {
        let db = db::create_test_pool().await;
        let keyholder = bootstrapped_keyholder(&db).await;
        let mut conn = db.get().await.unwrap();

        const ENTITY_ID: i32 = 80;

        let entity = DummyEntity {
            payload_version: 1,
            payload: b"payload-v1".to_vec(),
        };

        sign_entity(&mut conn, &keyholder, &entity, ENTITY_ID)
            .await
            .unwrap();

        let verified = lookup_verified_from_query(&mut conn, &keyholder, |_| {
            Box::pin(async {
                Ok::<_, db::DatabaseError>((
                    ENTITY_ID,
                    DummyEntity {
                        payload_version: 1,
                        payload: b"payload-v1".to_vec(),
                    },
                ))
            })
        })
        .await
        .unwrap();

        assert_eq!(verified.payload, b"payload-v1".to_vec());

        drop(keyholder);
        let sealed_keyholder = KeyHolder::spawn(KeyHolder::new(db.clone()).await.unwrap());

        let err = lookup_verified_from_query(&mut conn, &sealed_keyholder, |_| {
            Box::pin(async {
                Ok::<_, db::DatabaseError>((
                    ENTITY_ID,
                    DummyEntity {
                        payload_version: 1,
                        payload: b"payload-v1".to_vec(),
                    },
                ))
            })
        })
        .await
        .unwrap_err();

        assert!(matches!(
            err,
            Error::Keyholder(crate::actors::keyholder::Error::NotBootstrapped)
        ));
    }
}
|
||||
107
server/crates/arbiter-server/src/crypto/integrity/v1/hashing.rs
Normal file
107
server/crates/arbiter-server/src/crypto/integrity/v1/hashing.rs
Normal file
@@ -0,0 +1,107 @@
|
||||
use hmac::digest::Digest;
|
||||
use std::collections::HashSet;
|
||||
|
||||
/// Deterministically hash a value by feeding its fields into the hasher in a consistent order.
///
/// Unlike `std::hash::Hash`, this targets cryptographic digests
/// (`hmac::digest::Digest`) so the byte stream can feed a MAC. Implementations
/// must stay stable across releases: previously stored MACs only verify if the
/// encoding never changes.
pub trait Hashable {
    /// Feed this value's canonical byte encoding into `hasher`.
    fn hash<H: Digest>(&self, hasher: &mut H);
}
|
||||
|
||||
/// Implement [`Hashable`] for fixed-width integer types by hashing their
/// big-endian byte representation.
macro_rules! impl_numeric {
    ($($t:ty),*) => {
        $(
            impl Hashable for $t {
                fn hash<H: Digest>(&self, hasher: &mut H) {
                    // Big-endian keeps the encoding platform-independent.
                    hasher.update(&self.to_be_bytes());
                }
            }
        )*
    };
}

impl_numeric!(u8, u16, u32, u64, i8, i16, i32, i64);
|
||||
|
||||
impl Hashable for &[u8] {
    fn hash<H: Digest>(&self, hasher: &mut H) {
        // Raw bytes are fed through unchanged. NOTE(review): there is no
        // length prefix, so two adjacent variable-length byte fields could in
        // principle produce colliding streams — confirm callers' field layouts
        // make this unambiguous.
        hasher.update(self);
    }
}
|
||||
|
||||
impl Hashable for String {
    fn hash<H: Digest>(&self, hasher: &mut H) {
        // Hash the UTF-8 bytes directly (no length prefix; same caveat as
        // the &[u8] impl regarding adjacent variable-length fields).
        hasher.update(self.as_bytes());
    }
}
|
||||
|
||||
impl<T: Hashable + PartialOrd> Hashable for Vec<T> {
    fn hash<H: Digest>(&self, hasher: &mut H) {
        // Elements are hashed in sorted order, making the digest
        // order-insensitive. NOTE(review): vec![a, b] and vec![b, a] therefore
        // hash identically — confirm element order is never semantically
        // meaningful for signed entities. Changing this now would invalidate
        // all previously stored MACs.
        let ref_sorted = {
            let mut sorted = self.iter().collect::<Vec<_>>();
            // Panics if two elements are incomparable; current Hashable impls
            // appear totally ordered, so partial_cmp should always be Some —
            // TODO confirm no float-like T is ever used here.
            sorted.sort_by(|a, b| a.partial_cmp(b).unwrap());
            sorted
        };
        for item in ref_sorted {
            item.hash(hasher);
        }
    }
}
|
||||
|
||||
impl<T: Hashable + PartialOrd> Hashable for HashSet<T> {
    fn hash<H: Digest>(&self, hasher: &mut H) {
        // HashSet iteration order is nondeterministic, so sorting is required
        // to make the digest reproducible.
        let ref_sorted = {
            let mut sorted = self.iter().collect::<Vec<_>>();
            // Panics if two elements are incomparable; see the Vec<T> impl's
            // note on partial_cmp — TODO confirm only totally ordered T here.
            sorted.sort_by(|a, b| a.partial_cmp(b).unwrap());
            sorted
        };
        for item in ref_sorted {
            item.hash(hasher);
        }
    }
}
|
||||
|
||||
impl<T: Hashable> Hashable for Option<T> {
|
||||
fn hash<H: Digest>(&self, hasher: &mut H) {
|
||||
match self {
|
||||
Some(value) => {
|
||||
hasher.update([1]);
|
||||
value.hash(hasher);
|
||||
}
|
||||
None => hasher.update([0]),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Hashable> Hashable for Box<T> {
|
||||
fn hash<H: Digest>(&self, hasher: &mut H) {
|
||||
self.as_ref().hash(hasher);
|
||||
}
|
||||
}
|
||||
|
||||
// A reference hashes exactly like the value it points to.
impl<T: Hashable> Hashable for &T {
    fn hash<H: Digest>(&self, hasher: &mut H) {
        // self is &&T here; one explicit deref reaches T's impl.
        (*self).hash(hasher);
    }
}
|
||||
|
||||
impl Hashable for alloy::primitives::Address {
    fn hash<H: Digest>(&self, hasher: &mut H) {
        // Fixed-width address bytes; no length prefix needed.
        hasher.update(self.as_slice());
    }
}
|
||||
|
||||
impl Hashable for alloy::primitives::U256 {
    fn hash<H: Digest>(&self, hasher: &mut H) {
        // Canonical 32-byte big-endian encoding, matching the integer impls.
        hasher.update(self.to_be_bytes::<32>());
    }
}
|
||||
|
||||
impl Hashable for chrono::Duration {
    fn hash<H: Digest>(&self, hasher: &mut H) {
        // NOTE(review): whole-second granularity — sub-second components are
        // not covered by the MAC, and this differs from the DateTime impl's
        // millisecond granularity. Confirm durations are always whole seconds;
        // changing the encoding now would invalidate stored MACs.
        hasher.update(self.num_seconds().to_be_bytes());
    }
}
|
||||
|
||||
impl Hashable for chrono::DateTime<chrono::Utc> {
    fn hash<H: Digest>(&self, hasher: &mut H) {
        // Millisecond-precision UTC timestamp; sub-millisecond detail is not
        // covered by the MAC.
        hasher.update(self.timestamp_millis().to_be_bytes());
    }
}
|
||||
169
server/crates/arbiter-server/src/crypto/mod.rs
Normal file
169
server/crates/arbiter-server/src/crypto/mod.rs
Normal file
@@ -0,0 +1,169 @@
|
||||
use std::ops::Deref as _;
|
||||
|
||||
use argon2::{Algorithm, Argon2};
|
||||
use chacha20poly1305::{
|
||||
AeadInPlace, Key, KeyInit as _, XChaCha20Poly1305, XNonce,
|
||||
aead::{AeadMut, Error, Payload},
|
||||
};
|
||||
use rand::{
|
||||
Rng as _, SeedableRng as _,
|
||||
rngs::{StdRng, SysRng},
|
||||
};
|
||||
|
||||
use crate::safe_cell::{SafeCell, SafeCellHandle as _};
|
||||
|
||||
pub mod encryption;
|
||||
pub mod integrity;
|
||||
|
||||
use encryption::v1::{Nonce, Salt};
|
||||
|
||||
/// A symmetric cipher key wrapped in a [`SafeCell`] so the raw key material is
/// only accessible through the cell's guarded read/write handles
/// (see `crate::safe_cell` for the exact protection semantics).
pub struct KeyCell(pub SafeCell<Key>);

impl From<SafeCell<Key>> for KeyCell {
    fn from(value: SafeCell<Key>) -> Self {
        Self(value)
    }
}
|
||||
impl TryFrom<SafeCell<Vec<u8>>> for KeyCell {
    // NOTE(review): a unit error gives callers no diagnostics ("wrong length"
    // is the only failure); a named error type would be clearer, but changing
    // it now would break the public conversion API.
    type Error = ();

    fn try_from(mut value: SafeCell<Vec<u8>>) -> Result<Self, Self::Error> {
        let value = value.read();
        // Reject anything that is not exactly one key's worth of bytes.
        if value.len() != size_of::<Key>() {
            return Err(());
        }
        // Copy into a fresh Key-sized cell so the bytes never leave
        // SafeCell-managed storage.
        let cell = SafeCell::new_inline(|cell_write: &mut Key| {
            cell_write.copy_from_slice(&value);
        });
        Ok(Self(cell))
    }
}
|
||||
|
||||
impl KeyCell {
    /// Generate a fresh key from the operating system's entropy source.
    ///
    /// # Panics
    /// Panics if the system RNG cannot be initialized — an unrecoverable
    /// state in which continuing would risk a weak key.
    pub fn new_secure_random() -> Self {
        let key = SafeCell::new_inline(|key_buffer: &mut Key| {
            #[allow(
                clippy::unwrap_used,
                reason = "Rng failure is unrecoverable and should panic"
            )]
            let mut rng = StdRng::try_from_rng(&mut SysRng).unwrap();
            rng.fill_bytes(key_buffer);
        });

        key.into()
    }

    /// Encrypt `buffer` in place with XChaCha20-Poly1305, appending the
    /// authentication tag to the buffer. `associated_data` is authenticated
    /// but not encrypted, and must be supplied identically on decryption.
    ///
    /// `&mut self` is required because reading the key through the SafeCell
    /// takes a mutable handle.
    ///
    /// # Errors
    /// Returns an AEAD [`Error`] if encryption fails.
    pub fn encrypt_in_place(
        &mut self,
        nonce: &Nonce,
        associated_data: &[u8],
        mut buffer: impl AsMut<Vec<u8>>,
    ) -> Result<(), Error> {
        let key_reader = self.0.read();
        let key_ref = key_reader.deref();
        let cipher = XChaCha20Poly1305::new(key_ref);
        let nonce = XNonce::from_slice(nonce.0.as_ref());
        let buffer = buffer.as_mut();
        cipher.encrypt_in_place(nonce, associated_data, buffer)
    }

    /// Decrypt `buffer` in place, verifying the appended Poly1305 tag.
    /// The plaintext is written back into the SafeCell-wrapped buffer so it
    /// never leaves protected storage.
    ///
    /// # Errors
    /// Returns an AEAD [`Error`] if the tag does not verify (tampered
    /// ciphertext, wrong key, wrong nonce, or wrong associated data).
    pub fn decrypt_in_place(
        &mut self,
        nonce: &Nonce,
        associated_data: &[u8],
        buffer: &mut SafeCell<Vec<u8>>,
    ) -> Result<(), Error> {
        let key_reader = self.0.read();
        let key_ref = key_reader.deref();
        let cipher = XChaCha20Poly1305::new(key_ref);
        let nonce = XNonce::from_slice(nonce.0.as_ref());
        let mut buffer = buffer.write();
        let buffer: &mut Vec<u8> = buffer.as_mut();
        cipher.decrypt_in_place(nonce, associated_data, buffer)
    }

    /// Encrypt `plaintext` into a newly allocated ciphertext
    /// (ciphertext || tag), leaving the input untouched.
    ///
    /// # Errors
    /// Returns an AEAD [`Error`] if encryption fails.
    pub fn encrypt(
        &mut self,
        nonce: &Nonce,
        associated_data: &[u8],
        plaintext: impl AsRef<[u8]>,
    ) -> Result<Vec<u8>, Error> {
        let key_reader = self.0.read();
        let key_ref = key_reader.deref();
        // `mut` because this path uses the AeadMut::encrypt entry point.
        let mut cipher = XChaCha20Poly1305::new(key_ref);
        let nonce = XNonce::from_slice(nonce.0.as_ref());

        let ciphertext = cipher.encrypt(
            nonce,
            Payload {
                msg: plaintext.as_ref(),
                aad: associated_data,
            },
        )?;
        Ok(ciphertext)
    }
}
|
||||
|
||||
/// Derive a fixed-length key from the password using Argon2id, which is designed for password hashing and key derivation.
///
/// Debug builds use deliberately tiny Argon2 parameters so tests stay fast;
/// release builds use hardened parameters (m_cost 262144 KiB ≈ 256 MiB,
/// 3 passes, 4 lanes).
///
/// # Panics
/// Panics if the Argon2 parameters are rejected or hashing fails — better to
/// abort than to return a weak key.
pub fn derive_key(mut password: SafeCell<Vec<u8>>, salt: &Salt) -> KeyCell {
    let params = {
        #[cfg(debug_assertions)]
        {
            // Fast, weak parameters: debug/test builds only.
            argon2::Params::new(8, 1, 1, None).unwrap()
        }

        #[cfg(not(debug_assertions))]
        {
            argon2::Params::new(262_144, 3, 4, None).unwrap()
        }
    };

    // NOTE(review): Argon2::new is infallible, so this allow looks vestigial;
    // the unwraps that need it sit on Params::new and hash_password_into.
    #[allow(clippy::unwrap_used)]
    let hasher = Argon2::new(Algorithm::Argon2id, argon2::Version::V0x13, params);
    let mut key = SafeCell::new(Key::default());
    // Run the KDF entirely inside SafeCell read/write handles so neither the
    // password nor the derived key is copied into unprotected memory.
    password.read_inline(|password_source| {
        let mut key_buffer = key.write();
        let key_buffer: &mut [u8] = key_buffer.as_mut();

        #[allow(
            clippy::unwrap_used,
            reason = "Better fail completely than return a weak key"
        )]
        hasher
            .hash_password_into(password_source.deref(), salt, key_buffer)
            .unwrap();
    });

    key.into()
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::{
        derive_key,
        encryption::v1::{Nonce, generate_salt},
    };
    use crate::safe_cell::{SafeCell, SafeCellHandle as _};

    /// Round trip: derive a key from a password, encrypt a buffer in place,
    /// then decrypt it and confirm the original plaintext is recovered.
    #[test]
    pub fn encrypt_decrypt() {
        static PASSWORD: &[u8] = b"password";
        let password = SafeCell::new(PASSWORD.to_vec());
        let salt = generate_salt();

        let mut key = derive_key(password, &salt);
        let nonce = Nonce(*b"unique nonce 123 1231233"); // 24 bytes for XChaCha20Poly1305
        let associated_data = b"associated data";
        let mut buffer = b"secret data".to_vec();

        key.encrypt_in_place(&nonce, associated_data, &mut buffer)
            .unwrap();
        // The ciphertext must not equal the plaintext.
        assert_ne!(buffer, b"secret data");

        // Decryption writes the plaintext back inside a SafeCell.
        let mut buffer = SafeCell::new(buffer);

        key.decrypt_in_place(&nonce, associated_data, &mut buffer)
            .unwrap();

        let buffer = buffer.read();
        assert_eq!(*buffer, b"secret data");
    }
}
|
||||
@@ -5,7 +5,7 @@ use diesel_async::{
|
||||
sync_connection_wrapper::SyncConnectionWrapper,
|
||||
};
|
||||
use diesel_migrations::{EmbeddedMigrations, MigrationHarness, embed_migrations};
|
||||
use miette::Diagnostic;
|
||||
|
||||
use thiserror::Error;
|
||||
use tracing::info;
|
||||
|
||||
@@ -21,29 +21,32 @@ static DB_FILE: &str = "arbiter.sqlite";
|
||||
|
||||
const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations");
|
||||
|
||||
#[derive(Error, Diagnostic, Debug)]
|
||||
#[derive(Error, Debug)]
|
||||
pub enum DatabaseSetupError {
|
||||
#[error("Failed to determine home directory")]
|
||||
#[diagnostic(code(arbiter::db::home_dir))]
|
||||
HomeDir(std::io::Error),
|
||||
|
||||
#[error(transparent)]
|
||||
#[diagnostic(code(arbiter::db::connection))]
|
||||
Connection(diesel::ConnectionError),
|
||||
|
||||
#[error(transparent)]
|
||||
#[diagnostic(code(arbiter::db::concurrency))]
|
||||
ConcurrencySetup(diesel::result::Error),
|
||||
|
||||
#[error(transparent)]
|
||||
#[diagnostic(code(arbiter::db::migration))]
|
||||
Migration(Box<dyn std::error::Error + Send + Sync>),
|
||||
|
||||
#[error(transparent)]
|
||||
#[diagnostic(code(arbiter::db::pool))]
|
||||
Pool(#[from] PoolInitError),
|
||||
}
|
||||
|
||||
#[derive(Error, Debug)]
|
||||
pub enum DatabaseError {
|
||||
#[error("Database connection error")]
|
||||
Pool(#[from] PoolError),
|
||||
#[error("Database query error")]
|
||||
Connection(#[from] diesel::result::Error),
|
||||
}
|
||||
|
||||
#[tracing::instrument(level = "info")]
|
||||
fn database_path() -> Result<std::path::PathBuf, DatabaseSetupError> {
|
||||
let arbiter_home = arbiter_proto::home_path().map_err(DatabaseSetupError::HomeDir)?;
|
||||
@@ -92,6 +95,7 @@ fn initialize_database(url: &str) -> Result<(), DatabaseSetupError> {
|
||||
#[tracing::instrument(level = "info")]
|
||||
pub async fn create_pool(url: Option<&str>) -> Result<DatabasePool, DatabaseSetupError> {
|
||||
let database_url = url.map(String::from).unwrap_or(
|
||||
#[allow(clippy::expect_used)]
|
||||
database_path()?
|
||||
.to_str()
|
||||
.expect("database path is not valid UTF-8")
|
||||
@@ -129,17 +133,20 @@ pub async fn create_pool(url: Option<&str>) -> Result<DatabasePool, DatabaseSetu
|
||||
Ok(pool)
|
||||
}
|
||||
|
||||
#[mutants::skip]
|
||||
pub async fn create_test_pool() -> DatabasePool {
|
||||
use rand::distr::{Alphanumeric, SampleString as _};
|
||||
|
||||
let tempfile_name = Alphanumeric.sample_string(&mut rand::rng(), 16);
|
||||
|
||||
let file = std::env::temp_dir().join(tempfile_name);
|
||||
let url = format!(
|
||||
"{}?mode=rwc",
|
||||
file.to_str().expect("temp file path is not valid UTF-8")
|
||||
);
|
||||
#[allow(clippy::expect_used)]
|
||||
let url = file
|
||||
.to_str()
|
||||
.expect("temp file path is not valid UTF-8")
|
||||
.to_string();
|
||||
|
||||
#[allow(clippy::expect_used)]
|
||||
create_pool(Some(&url))
|
||||
.await
|
||||
.expect("Failed to create test database pool")
|
||||
|
||||
@@ -2,15 +2,16 @@
|
||||
#![allow(clippy::all)]
|
||||
|
||||
use crate::db::schema::{
|
||||
self, aead_encrypted, arbiter_settings, evm_basic_grant, evm_ether_transfer_grant, evm_ether_transfer_grant_target, evm_ether_transfer_limit, evm_token_transfer_grant, evm_token_transfer_log, evm_token_transfer_volume_limit, evm_transaction_log, evm_wallet, root_key_history, tls_history
|
||||
self, aead_encrypted, arbiter_settings, evm_basic_grant, evm_ether_transfer_grant,
|
||||
evm_ether_transfer_grant_target, evm_ether_transfer_limit, evm_token_transfer_grant,
|
||||
evm_token_transfer_log, evm_token_transfer_volume_limit, evm_transaction_log, evm_wallet,
|
||||
integrity_envelope, root_key_history, tls_history,
|
||||
};
|
||||
use chrono::{DateTime, Utc};
|
||||
use diesel::{prelude::*, sqlite::Sqlite};
|
||||
use restructed::Models;
|
||||
|
||||
pub mod types {
|
||||
use std::os::unix;
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use diesel::{
|
||||
deserialize::{FromSql, FromSqlRow},
|
||||
@@ -20,7 +21,7 @@ pub mod types {
|
||||
sqlite::{Sqlite, SqliteType},
|
||||
};
|
||||
|
||||
#[derive(Debug, FromSqlRow, AsExpression)]
|
||||
#[derive(Debug, FromSqlRow, AsExpression, Clone)]
|
||||
#[diesel(sql_type = Integer)]
|
||||
#[repr(transparent)] // hint compiler to optimize the wrapper struct away
|
||||
pub struct SqliteTimestamp(pub DateTime<Utc>);
|
||||
@@ -35,9 +36,9 @@ pub mod types {
|
||||
SqliteTimestamp(dt)
|
||||
}
|
||||
}
|
||||
impl Into<chrono::DateTime<Utc>> for SqliteTimestamp {
|
||||
fn into(self) -> chrono::DateTime<Utc> {
|
||||
self.0
|
||||
impl From<SqliteTimestamp> for chrono::DateTime<Utc> {
|
||||
fn from(ts: SqliteTimestamp) -> Self {
|
||||
ts.0
|
||||
}
|
||||
}
|
||||
|
||||
@@ -65,12 +66,46 @@ pub mod types {
|
||||
};
|
||||
|
||||
let unix_timestamp = bytes.read_long();
|
||||
let datetime = DateTime::from_timestamp(unix_timestamp, 0)
|
||||
.ok_or("Timestamp is out of bounds")?;
|
||||
let datetime =
|
||||
DateTime::from_timestamp(unix_timestamp, 0).ok_or("Timestamp is out of bounds")?;
|
||||
|
||||
Ok(SqliteTimestamp(datetime))
|
||||
}
|
||||
}
|
||||
|
||||
/// Key algorithm stored in the `useragent_client.key_type` column.
|
||||
/// Values must stay stable — they are persisted in the database.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, FromSqlRow, AsExpression, strum::FromRepr)]
|
||||
#[diesel(sql_type = Integer)]
|
||||
#[repr(i32)]
|
||||
pub enum KeyType {
|
||||
Ed25519 = 1,
|
||||
EcdsaSecp256k1 = 2,
|
||||
Rsa = 3,
|
||||
}
|
||||
|
||||
impl ToSql<Integer, Sqlite> for KeyType {
|
||||
fn to_sql<'b>(
|
||||
&'b self,
|
||||
out: &mut diesel::serialize::Output<'b, '_, Sqlite>,
|
||||
) -> diesel::serialize::Result {
|
||||
out.set_value(*self as i32);
|
||||
Ok(IsNull::No)
|
||||
}
|
||||
}
|
||||
|
||||
impl FromSql<Integer, Sqlite> for KeyType {
|
||||
fn from_sql(
|
||||
mut bytes: <Sqlite as diesel::backend::Backend>::RawValue<'_>,
|
||||
) -> diesel::deserialize::Result<Self> {
|
||||
let Some(SqliteType::Long) = bytes.value_type() else {
|
||||
return Err("Expected Integer for KeyType".into());
|
||||
};
|
||||
let discriminant = bytes.read_long();
|
||||
KeyType::from_repr(discriminant as i32)
|
||||
.ok_or_else(|| format!("Unknown KeyType discriminant: {discriminant}").into())
|
||||
}
|
||||
}
|
||||
}
|
||||
pub use types::*;
|
||||
|
||||
@@ -150,12 +185,53 @@ pub struct EvmWallet {
|
||||
pub created_at: SqliteTimestamp,
|
||||
}
|
||||
|
||||
#[derive(Queryable, Debug)]
|
||||
#[derive(Models, Queryable, Debug, Insertable, Selectable, Clone)]
|
||||
#[diesel(table_name = schema::evm_wallet_access, check_for_backend(Sqlite))]
|
||||
#[view(
|
||||
NewEvmWalletAccess,
|
||||
derive(Insertable),
|
||||
omit(id, created_at),
|
||||
attributes_with = "deriveless"
|
||||
)]
|
||||
#[view(
|
||||
CoreEvmWalletAccess,
|
||||
derive(Insertable),
|
||||
omit(created_at),
|
||||
attributes_with = "deriveless"
|
||||
)]
|
||||
pub struct EvmWalletAccess {
|
||||
pub id: i32,
|
||||
pub wallet_id: i32,
|
||||
pub client_id: i32,
|
||||
pub created_at: SqliteTimestamp,
|
||||
}
|
||||
|
||||
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||
#[diesel(table_name = schema::client_metadata, check_for_backend(Sqlite))]
|
||||
pub struct ProgramClientMetadata {
|
||||
pub id: i32,
|
||||
pub name: String,
|
||||
pub description: Option<String>,
|
||||
pub version: Option<String>,
|
||||
pub created_at: SqliteTimestamp,
|
||||
}
|
||||
|
||||
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||
#[diesel(table_name = schema::client_metadata_history, check_for_backend(Sqlite))]
|
||||
pub struct ProgramClientMetadataHistory {
|
||||
pub id: i32,
|
||||
pub metadata_id: i32,
|
||||
pub client_id: i32,
|
||||
pub created_at: SqliteTimestamp,
|
||||
}
|
||||
|
||||
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||
#[diesel(table_name = schema::program_client, check_for_backend(Sqlite))]
|
||||
pub struct ProgramClient {
|
||||
pub id: i32,
|
||||
pub nonce: i32,
|
||||
pub public_key: Vec<u8>,
|
||||
pub metadata_id: i32,
|
||||
pub created_at: SqliteTimestamp,
|
||||
pub updated_at: SqliteTimestamp,
|
||||
}
|
||||
@@ -168,6 +244,7 @@ pub struct UseragentClient {
|
||||
pub public_key: Vec<u8>,
|
||||
pub created_at: SqliteTimestamp,
|
||||
pub updated_at: SqliteTimestamp,
|
||||
pub key_type: KeyType,
|
||||
}
|
||||
|
||||
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||
@@ -194,8 +271,7 @@ pub struct EvmEtherTransferLimit {
|
||||
)]
|
||||
pub struct EvmBasicGrant {
|
||||
pub id: i32,
|
||||
pub wallet_id: i32, // references evm_wallet.id
|
||||
pub client_id: i32, // references program_client.id
|
||||
pub wallet_access_id: i32, // references evm_wallet_access.id
|
||||
pub chain_id: i32,
|
||||
pub valid_from: Option<SqliteTimestamp>,
|
||||
pub valid_until: Option<SqliteTimestamp>,
|
||||
@@ -218,8 +294,7 @@ pub struct EvmBasicGrant {
|
||||
pub struct EvmTransactionLog {
|
||||
pub id: i32,
|
||||
pub grant_id: i32,
|
||||
pub client_id: i32,
|
||||
pub wallet_id: i32,
|
||||
pub wallet_access_id: i32,
|
||||
pub chain_id: i32,
|
||||
pub eth_value: Vec<u8>,
|
||||
pub signed_at: SqliteTimestamp,
|
||||
@@ -253,7 +328,6 @@ pub struct EvmEtherTransferGrantTarget {
|
||||
pub address: Vec<u8>,
|
||||
}
|
||||
|
||||
|
||||
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||
#[diesel(table_name = evm_token_transfer_grant, check_for_backend(Sqlite))]
|
||||
#[view(
|
||||
@@ -302,3 +376,22 @@ pub struct EvmTokenTransferLog {
|
||||
pub value: Vec<u8>,
|
||||
pub created_at: SqliteTimestamp,
|
||||
}
|
||||
|
||||
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||
#[diesel(table_name = integrity_envelope, check_for_backend(Sqlite))]
|
||||
#[view(
|
||||
NewIntegrityEnvelope,
|
||||
derive(Insertable),
|
||||
omit(id, signed_at, created_at),
|
||||
attributes_with = "deriveless"
|
||||
)]
|
||||
pub struct IntegrityEnvelope {
|
||||
pub id: i32,
|
||||
pub entity_kind: String,
|
||||
pub entity_id: Vec<u8>,
|
||||
pub payload_version: i32,
|
||||
pub key_version: i32,
|
||||
pub mac: Vec<u8>,
|
||||
pub signed_at: SqliteTimestamp,
|
||||
pub created_at: SqliteTimestamp,
|
||||
}
|
||||
|
||||
@@ -20,11 +20,29 @@ diesel::table! {
|
||||
}
|
||||
}
|
||||
|
||||
diesel::table! {
|
||||
client_metadata (id) {
|
||||
id -> Integer,
|
||||
name -> Text,
|
||||
description -> Nullable<Text>,
|
||||
version -> Nullable<Text>,
|
||||
created_at -> Integer,
|
||||
}
|
||||
}
|
||||
|
||||
diesel::table! {
|
||||
client_metadata_history (id) {
|
||||
id -> Integer,
|
||||
metadata_id -> Integer,
|
||||
client_id -> Integer,
|
||||
created_at -> Integer,
|
||||
}
|
||||
}
|
||||
|
||||
diesel::table! {
|
||||
evm_basic_grant (id) {
|
||||
id -> Integer,
|
||||
wallet_id -> Integer,
|
||||
client_id -> Integer,
|
||||
wallet_access_id -> Integer,
|
||||
chain_id -> Integer,
|
||||
valid_from -> Nullable<Integer>,
|
||||
valid_until -> Nullable<Integer>,
|
||||
@@ -95,9 +113,8 @@ diesel::table! {
|
||||
diesel::table! {
|
||||
evm_transaction_log (id) {
|
||||
id -> Integer,
|
||||
wallet_access_id -> Integer,
|
||||
grant_id -> Integer,
|
||||
client_id -> Integer,
|
||||
wallet_id -> Integer,
|
||||
chain_id -> Integer,
|
||||
eth_value -> Binary,
|
||||
signed_at -> Integer,
|
||||
@@ -113,11 +130,34 @@ diesel::table! {
|
||||
}
|
||||
}
|
||||
|
||||
diesel::table! {
|
||||
evm_wallet_access (id) {
|
||||
id -> Integer,
|
||||
wallet_id -> Integer,
|
||||
client_id -> Integer,
|
||||
created_at -> Integer,
|
||||
}
|
||||
}
|
||||
|
||||
diesel::table! {
|
||||
integrity_envelope (id) {
|
||||
id -> Integer,
|
||||
entity_kind -> Text,
|
||||
entity_id -> Binary,
|
||||
payload_version -> Integer,
|
||||
key_version -> Integer,
|
||||
mac -> Binary,
|
||||
signed_at -> Integer,
|
||||
created_at -> Integer,
|
||||
}
|
||||
}
|
||||
|
||||
diesel::table! {
|
||||
program_client (id) {
|
||||
id -> Integer,
|
||||
nonce -> Integer,
|
||||
public_key -> Binary,
|
||||
metadata_id -> Integer,
|
||||
created_at -> Integer,
|
||||
updated_at -> Integer,
|
||||
}
|
||||
@@ -151,6 +191,7 @@ diesel::table! {
|
||||
id -> Integer,
|
||||
nonce -> Integer,
|
||||
public_key -> Binary,
|
||||
key_type -> Integer,
|
||||
created_at -> Integer,
|
||||
updated_at -> Integer,
|
||||
}
|
||||
@@ -159,8 +200,9 @@ diesel::table! {
|
||||
diesel::joinable!(aead_encrypted -> root_key_history (associated_root_key_id));
|
||||
diesel::joinable!(arbiter_settings -> root_key_history (root_key_id));
|
||||
diesel::joinable!(arbiter_settings -> tls_history (tls_id));
|
||||
diesel::joinable!(evm_basic_grant -> evm_wallet (wallet_id));
|
||||
diesel::joinable!(evm_basic_grant -> program_client (client_id));
|
||||
diesel::joinable!(client_metadata_history -> client_metadata (metadata_id));
|
||||
diesel::joinable!(client_metadata_history -> program_client (client_id));
|
||||
diesel::joinable!(evm_basic_grant -> evm_wallet_access (wallet_access_id));
|
||||
diesel::joinable!(evm_ether_transfer_grant -> evm_basic_grant (basic_grant_id));
|
||||
diesel::joinable!(evm_ether_transfer_grant -> evm_ether_transfer_limit (limit_id));
|
||||
diesel::joinable!(evm_ether_transfer_grant_target -> evm_ether_transfer_grant (grant_id));
|
||||
@@ -168,11 +210,18 @@ diesel::joinable!(evm_token_transfer_grant -> evm_basic_grant (basic_grant_id));
|
||||
diesel::joinable!(evm_token_transfer_log -> evm_token_transfer_grant (grant_id));
|
||||
diesel::joinable!(evm_token_transfer_log -> evm_transaction_log (log_id));
|
||||
diesel::joinable!(evm_token_transfer_volume_limit -> evm_token_transfer_grant (grant_id));
|
||||
diesel::joinable!(evm_transaction_log -> evm_basic_grant (grant_id));
|
||||
diesel::joinable!(evm_transaction_log -> evm_wallet_access (wallet_access_id));
|
||||
diesel::joinable!(evm_wallet -> aead_encrypted (aead_encrypted_id));
|
||||
diesel::joinable!(evm_wallet_access -> evm_wallet (wallet_id));
|
||||
diesel::joinable!(evm_wallet_access -> program_client (client_id));
|
||||
diesel::joinable!(program_client -> client_metadata (metadata_id));
|
||||
|
||||
diesel::allow_tables_to_appear_in_same_query!(
|
||||
aead_encrypted,
|
||||
arbiter_settings,
|
||||
client_metadata,
|
||||
client_metadata_history,
|
||||
evm_basic_grant,
|
||||
evm_ether_transfer_grant,
|
||||
evm_ether_transfer_grant_target,
|
||||
@@ -182,6 +231,8 @@ diesel::allow_tables_to_appear_in_same_query!(
|
||||
evm_token_transfer_volume_limit,
|
||||
evm_transaction_log,
|
||||
evm_wallet,
|
||||
evm_wallet_access,
|
||||
integrity_envelope,
|
||||
program_client,
|
||||
root_key_history,
|
||||
tls_history,
|
||||
|
||||
@@ -6,18 +6,23 @@ use alloy::{
|
||||
primitives::{TxKind, U256},
|
||||
};
|
||||
use chrono::Utc;
|
||||
use diesel::{ExpressionMethods as _, QueryDsl, QueryResult, insert_into, sqlite::Sqlite};
|
||||
use diesel::{ExpressionMethods as _, QueryDsl as _, QueryResult, insert_into, sqlite::Sqlite};
|
||||
use diesel_async::{AsyncConnection, RunQueryDsl};
|
||||
use kameo::actor::ActorRef;
|
||||
|
||||
use crate::{
|
||||
actors::keyholder::KeyHolder,
|
||||
crypto::integrity::{self, Verified},
|
||||
db::{
|
||||
self,
|
||||
models::{EvmBasicGrant, NewEvmBasicGrant, NewEvmTransactionLog, SqliteTimestamp},
|
||||
self, DatabaseError,
|
||||
models::{
|
||||
EvmBasicGrant, EvmWalletAccess, NewEvmBasicGrant, NewEvmTransactionLog, SqliteTimestamp,
|
||||
},
|
||||
schema::{self, evm_transaction_log},
|
||||
},
|
||||
evm::policies::{
|
||||
DatabaseID, EvalContext, EvalViolation, FullGrant, Grant, Policy, SharedGrantSettings,
|
||||
SpecificGrant, SpecificMeaning, ether_transfer::EtherTransfer,
|
||||
CombinedSettings, DatabaseID, EvalContext, EvalViolation, Grant, Policy,
|
||||
SharedGrantSettings, SpecificGrant, SpecificMeaning, ether_transfer::EtherTransfer,
|
||||
token_transfers::TokenTransfer,
|
||||
},
|
||||
};
|
||||
@@ -26,76 +31,45 @@ pub mod policies;
|
||||
mod utils;
|
||||
|
||||
/// Errors that can only occur once the transaction meaning is known (during policy evaluation)
|
||||
#[derive(Debug, thiserror::Error, miette::Diagnostic)]
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum PolicyError {
|
||||
#[error("Database connection pool error")]
|
||||
#[diagnostic(code(arbiter_server::evm::policy_error::pool))]
|
||||
Pool(#[from] db::PoolError),
|
||||
#[error("Database returned error")]
|
||||
#[diagnostic(code(arbiter_server::evm::policy_error::database))]
|
||||
Database(#[from] diesel::result::Error),
|
||||
#[error("Database error")]
|
||||
Database(#[from] crate::db::DatabaseError),
|
||||
#[error("Transaction violates policy: {0:?}")]
|
||||
#[diagnostic(code(arbiter_server::evm::policy_error::violation))]
|
||||
Violations(Vec<EvalViolation>),
|
||||
#[error("No matching grant found")]
|
||||
#[diagnostic(code(arbiter_server::evm::policy_error::no_matching_grant))]
|
||||
NoMatchingGrant,
|
||||
|
||||
#[error("Integrity error: {0}")]
|
||||
Integrity(#[from] integrity::Error),
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error, miette::Diagnostic)]
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum VetError {
|
||||
#[error("Contract creation transactions are not supported")]
|
||||
#[diagnostic(code(arbiter_server::evm::vet_error::contract_creation_unsupported))]
|
||||
ContractCreationNotSupported,
|
||||
#[error("Engine can't classify this transaction")]
|
||||
#[diagnostic(code(arbiter_server::evm::vet_error::unsupported))]
|
||||
UnsupportedTransactionType,
|
||||
#[error("Policy evaluation failed: {1}")]
|
||||
#[diagnostic(code(arbiter_server::evm::vet_error::evaluated))]
|
||||
Evaluated(SpecificMeaning, #[source] PolicyError),
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error, miette::Diagnostic)]
|
||||
pub enum SignError {
|
||||
#[error("Database connection pool error")]
|
||||
#[diagnostic(code(arbiter_server::evm::database_error))]
|
||||
Pool(#[from] db::PoolError),
|
||||
#[error("Database returned error")]
|
||||
#[diagnostic(code(arbiter_server::evm::database_error))]
|
||||
Database(#[from] diesel::result::Error),
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error, miette::Diagnostic)]
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum AnalyzeError {
|
||||
#[error("Engine doesn't support granting permissions for contract creation")]
|
||||
#[diagnostic(code(arbiter_server::evm::analyze_error::contract_creation_not_supported))]
|
||||
ContractCreationNotSupported,
|
||||
|
||||
#[error("Unsupported transaction type")]
|
||||
#[diagnostic(code(arbiter_server::evm::analyze_error::unsupported_transaction_type))]
|
||||
UnsupportedTransactionType,
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error, miette::Diagnostic)]
|
||||
pub enum CreationError {
|
||||
#[error("Database connection pool error")]
|
||||
#[diagnostic(code(arbiter_server::evm::creation_error::database_error))]
|
||||
Pool(#[from] db::PoolError),
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum ListError {
|
||||
#[error("Database error")]
|
||||
Database(#[from] crate::db::DatabaseError),
|
||||
|
||||
#[error("Database returned error")]
|
||||
#[diagnostic(code(arbiter_server::evm::creation_error::database_error))]
|
||||
Database(#[from] diesel::result::Error),
|
||||
}
|
||||
|
||||
#[derive(Debug, thiserror::Error, miette::Diagnostic)]
|
||||
pub enum ListGrantsError {
|
||||
#[error("Database connection pool error")]
|
||||
#[diagnostic(code(arbiter_server::evm::list_grants_error::pool))]
|
||||
Pool(#[from] db::PoolError),
|
||||
|
||||
#[error("Database returned error")]
|
||||
#[diagnostic(code(arbiter_server::evm::list_grants_error::database))]
|
||||
Database(#[from] diesel::result::Error),
|
||||
#[error("Integrity verification failed for grant")]
|
||||
Integrity(#[from] integrity::Error),
|
||||
}
|
||||
|
||||
/// Controls whether a transaction should be executed or only validated
|
||||
@@ -116,19 +90,26 @@ async fn check_shared_constraints(
|
||||
let mut violations = Vec::new();
|
||||
let now = Utc::now();
|
||||
|
||||
if shared.chain != context.chain {
|
||||
violations.push(EvalViolation::MismatchingChainId {
|
||||
expected: shared.chain,
|
||||
actual: context.chain,
|
||||
});
|
||||
return Ok(violations);
|
||||
}
|
||||
|
||||
// Validity window
|
||||
if shared.valid_from.map_or(false, |t| now < t) || shared.valid_until.map_or(false, |t| now > t)
|
||||
{
|
||||
if shared.valid_from.is_some_and(|t| now < t) || shared.valid_until.is_some_and(|t| now > t) {
|
||||
violations.push(EvalViolation::InvalidTime);
|
||||
}
|
||||
|
||||
// Gas fee caps
|
||||
let fee_exceeded = shared
|
||||
.max_gas_fee_per_gas
|
||||
.map_or(false, |cap| U256::from(context.max_fee_per_gas) > cap);
|
||||
let priority_exceeded = shared.max_priority_fee_per_gas.map_or(false, |cap| {
|
||||
U256::from(context.max_priority_fee_per_gas) > cap
|
||||
});
|
||||
.is_some_and(|cap| U256::from(context.max_fee_per_gas) > cap);
|
||||
let priority_exceeded = shared
|
||||
.max_priority_fee_per_gas
|
||||
.is_some_and(|cap| U256::from(context.max_priority_fee_per_gas) > cap);
|
||||
if fee_exceeded || priority_exceeded {
|
||||
violations.push(EvalViolation::GasLimitExceeded {
|
||||
max_gas_fee_per_gas: shared.max_gas_fee_per_gas,
|
||||
@@ -157,6 +138,7 @@ async fn check_shared_constraints(
|
||||
// Supporting only EIP-1559 transactions for now, but we can easily extend this to support legacy transactions if needed
|
||||
pub struct Engine {
|
||||
db: db::DatabasePool,
|
||||
keyholder: ActorRef<KeyHolder>,
|
||||
}
|
||||
|
||||
impl Engine {
|
||||
@@ -165,28 +147,68 @@ impl Engine {
|
||||
context: EvalContext,
|
||||
meaning: &P::Meaning,
|
||||
run_kind: RunKind,
|
||||
) -> Result<(), PolicyError> {
|
||||
let mut conn = self.db.get().await?;
|
||||
) -> Result<(), PolicyError>
|
||||
where
|
||||
P::Settings: Clone,
|
||||
{
|
||||
let mut conn = self.db.get().await.map_err(DatabaseError::from)?;
|
||||
|
||||
let grant = P::try_find_grant(&context, &mut conn)
|
||||
.await?
|
||||
let verified_settings =
|
||||
match integrity::lookup_verified_from_query(&mut conn, &self.keyholder, |conn| {
|
||||
let context = context.clone();
|
||||
Box::pin(async move {
|
||||
let grant = P::try_find_grant(&context, conn)
|
||||
.await
|
||||
.map_err(DatabaseError::from)?
|
||||
.ok_or_else(|| DatabaseError::from(diesel::result::Error::NotFound))?;
|
||||
|
||||
Ok::<_, DatabaseError>((grant.common_settings_id, grant.settings))
|
||||
})
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(verified) => verified,
|
||||
Err(integrity::Error::Database(DatabaseError::Connection(
|
||||
diesel::result::Error::NotFound,
|
||||
))) => return Err(PolicyError::NoMatchingGrant),
|
||||
Err(err) => return Err(PolicyError::Integrity(err)),
|
||||
};
|
||||
|
||||
let mut grant = P::try_find_grant(&context, &mut conn)
|
||||
.await
|
||||
.map_err(DatabaseError::from)?
|
||||
.ok_or(PolicyError::NoMatchingGrant)?;
|
||||
|
||||
let mut violations =
|
||||
check_shared_constraints(&context, &grant.shared, grant.shared_grant_id, &mut conn)
|
||||
.await?;
|
||||
violations.extend(P::evaluate(&context, meaning, &grant, &mut conn).await?);
|
||||
// IMPORTANT: policy evaluation uses extra non-integrity fields from Grant
|
||||
// (e.g., per-policy ids), so we currently reload Grant after the query-native
|
||||
// integrity check over canonicalized settings.
|
||||
grant.settings = verified_settings.into_inner();
|
||||
|
||||
let mut violations = check_shared_constraints(
|
||||
&context,
|
||||
&grant.settings.shared,
|
||||
grant.common_settings_id,
|
||||
&mut conn,
|
||||
)
|
||||
.await
|
||||
.map_err(DatabaseError::from)?;
|
||||
violations.extend(
|
||||
P::evaluate(&context, meaning, &grant, &mut conn)
|
||||
.await
|
||||
.map_err(DatabaseError::from)?,
|
||||
);
|
||||
|
||||
if !violations.is_empty() {
|
||||
return Err(PolicyError::Violations(violations));
|
||||
} else if run_kind == RunKind::Execution {
|
||||
}
|
||||
|
||||
if run_kind == RunKind::Execution {
|
||||
conn.transaction(|conn| {
|
||||
Box::pin(async move {
|
||||
let log_id: i32 = insert_into(evm_transaction_log::table)
|
||||
.values(&NewEvmTransactionLog {
|
||||
grant_id: grant.shared_grant_id,
|
||||
client_id: context.client_id,
|
||||
wallet_id: context.wallet_id,
|
||||
grant_id: grant.common_settings_id,
|
||||
wallet_access_id: context.target.id,
|
||||
chain_id: context.chain as i32,
|
||||
eth_value: utils::u256_to_bytes(context.value).to_vec(),
|
||||
signed_at: Utc::now().into(),
|
||||
@@ -200,7 +222,8 @@ impl Engine {
|
||||
QueryResult::Ok(())
|
||||
})
|
||||
})
|
||||
.await?;
|
||||
.await
|
||||
.map_err(DatabaseError::from)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
@@ -208,16 +231,19 @@ impl Engine {
|
||||
}
|
||||
|
||||
impl Engine {
|
||||
pub fn new(db: db::DatabasePool) -> Self {
|
||||
Self { db }
|
||||
pub fn new(db: db::DatabasePool, keyholder: ActorRef<KeyHolder>) -> Self {
|
||||
Self { db, keyholder }
|
||||
}
|
||||
|
||||
pub async fn create_grant<P: Policy>(
|
||||
&self,
|
||||
client_id: i32,
|
||||
full_grant: FullGrant<P::Settings>,
|
||||
) -> Result<i32, CreationError> {
|
||||
full_grant: CombinedSettings<P::Settings>,
|
||||
) -> Result<Verified<i32>, DatabaseError>
|
||||
where
|
||||
P::Settings: Clone,
|
||||
{
|
||||
let mut conn = self.db.get().await?;
|
||||
let keyholder = self.keyholder.clone();
|
||||
|
||||
let id = conn
|
||||
.transaction(|conn| {
|
||||
@@ -226,26 +252,25 @@ impl Engine {
|
||||
|
||||
let basic_grant: EvmBasicGrant = insert_into(evm_basic_grant::table)
|
||||
.values(&NewEvmBasicGrant {
|
||||
wallet_id: full_grant.basic.wallet_id,
|
||||
chain_id: full_grant.basic.chain as i32,
|
||||
client_id: client_id,
|
||||
valid_from: full_grant.basic.valid_from.map(SqliteTimestamp),
|
||||
valid_until: full_grant.basic.valid_until.map(SqliteTimestamp),
|
||||
chain_id: full_grant.shared.chain as i32,
|
||||
wallet_access_id: full_grant.shared.wallet_access_id,
|
||||
valid_from: full_grant.shared.valid_from.map(SqliteTimestamp),
|
||||
valid_until: full_grant.shared.valid_until.map(SqliteTimestamp),
|
||||
max_gas_fee_per_gas: full_grant
|
||||
.basic
|
||||
.shared
|
||||
.max_gas_fee_per_gas
|
||||
.map(|fee| utils::u256_to_bytes(fee).to_vec()),
|
||||
max_priority_fee_per_gas: full_grant
|
||||
.basic
|
||||
.shared
|
||||
.max_priority_fee_per_gas
|
||||
.map(|fee| utils::u256_to_bytes(fee).to_vec()),
|
||||
rate_limit_count: full_grant
|
||||
.basic
|
||||
.shared
|
||||
.rate_limit
|
||||
.as_ref()
|
||||
.map(|rl| rl.count as i32),
|
||||
rate_limit_window_secs: full_grant
|
||||
.basic
|
||||
.shared
|
||||
.rate_limit
|
||||
.as_ref()
|
||||
.map(|rl| rl.window.num_seconds() as i32),
|
||||
@@ -255,7 +280,14 @@ impl Engine {
|
||||
.get_result(conn)
|
||||
.await?;
|
||||
|
||||
P::create_grant(&basic_grant, &full_grant.specific, conn).await
|
||||
P::create_grant(&basic_grant, &full_grant.specific, conn).await?;
|
||||
|
||||
let verified_entity_id =
|
||||
integrity::sign_entity(conn, &keyholder, &full_grant, basic_grant.id)
|
||||
.await
|
||||
.map_err(|_| diesel::result::Error::RollbackTransaction)?;
|
||||
|
||||
QueryResult::Ok(verified_entity_id)
|
||||
})
|
||||
})
|
||||
.await?;
|
||||
@@ -263,41 +295,53 @@ impl Engine {
|
||||
Ok(id)
|
||||
}
|
||||
|
||||
pub async fn list_all_grants(&self) -> Result<Vec<Grant<SpecificGrant>>, ListGrantsError> {
|
||||
let mut conn = self.db.get().await?;
|
||||
async fn list_one_kind<Kind: Policy, Y>(
|
||||
&self,
|
||||
conn: &mut impl AsyncConnection<Backend = Sqlite>,
|
||||
) -> Result<Vec<Grant<Y>>, ListError>
|
||||
where
|
||||
Y: From<Kind::Settings>,
|
||||
{
|
||||
let all_grants = Kind::find_all_grants(conn)
|
||||
.await
|
||||
.map_err(DatabaseError::from)?;
|
||||
|
||||
let mut verified_grants = Vec::with_capacity(all_grants.len());
|
||||
|
||||
// Verify integrity of all grants before returning any results.
|
||||
for grant in all_grants {
|
||||
integrity::verify_entity(
|
||||
conn,
|
||||
&self.keyholder,
|
||||
&grant.settings,
|
||||
grant.common_settings_id,
|
||||
)
|
||||
.await?;
|
||||
|
||||
verified_grants.push(Grant {
|
||||
id: grant.id,
|
||||
common_settings_id: grant.common_settings_id,
|
||||
settings: grant.settings.generalize(),
|
||||
});
|
||||
}
|
||||
|
||||
Ok(verified_grants)
|
||||
}
|
||||
|
||||
pub async fn list_all_grants(&self) -> Result<Vec<Grant<SpecificGrant>>, ListError> {
|
||||
let mut conn = self.db.get().await.map_err(DatabaseError::from)?;
|
||||
|
||||
let mut grants: Vec<Grant<SpecificGrant>> = Vec::new();
|
||||
|
||||
grants.extend(
|
||||
EtherTransfer::find_all_grants(&mut conn)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|g| Grant {
|
||||
id: g.id,
|
||||
shared_grant_id: g.shared_grant_id,
|
||||
shared: g.shared,
|
||||
settings: SpecificGrant::EtherTransfer(g.settings),
|
||||
}),
|
||||
);
|
||||
grants.extend(
|
||||
TokenTransfer::find_all_grants(&mut conn)
|
||||
.await?
|
||||
.into_iter()
|
||||
.map(|g| Grant {
|
||||
id: g.id,
|
||||
shared_grant_id: g.shared_grant_id,
|
||||
shared: g.shared,
|
||||
settings: SpecificGrant::TokenTransfer(g.settings),
|
||||
}),
|
||||
);
|
||||
grants.extend(self.list_one_kind::<EtherTransfer, _>(&mut conn).await?);
|
||||
grants.extend(self.list_one_kind::<TokenTransfer, _>(&mut conn).await?);
|
||||
|
||||
Ok(grants)
|
||||
}
|
||||
|
||||
pub async fn evaluate_transaction(
|
||||
&self,
|
||||
wallet_id: i32,
|
||||
client_id: i32,
|
||||
target: EvmWalletAccess,
|
||||
transaction: TxEip1559,
|
||||
run_kind: RunKind,
|
||||
) -> Result<SpecificMeaning, VetError> {
|
||||
@@ -305,8 +349,7 @@ impl Engine {
|
||||
return Err(VetError::ContractCreationNotSupported);
|
||||
};
|
||||
let context = policies::EvalContext {
|
||||
wallet_id,
|
||||
client_id,
|
||||
target,
|
||||
chain: transaction.chain_id,
|
||||
to,
|
||||
value: transaction.value,
|
||||
@@ -337,3 +380,255 @@ impl Engine {
|
||||
Err(VetError::UnsupportedTransactionType)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use alloy::primitives::{Address, Bytes, U256, address};
|
||||
use chrono::{Duration, Utc};
|
||||
use diesel::{SelectableHelper, insert_into};
|
||||
use diesel_async::RunQueryDsl;
|
||||
use rstest::rstest;
|
||||
|
||||
use crate::db::{
|
||||
self, DatabaseConnection,
|
||||
models::{
|
||||
EvmBasicGrant, EvmWalletAccess, NewEvmBasicGrant, NewEvmTransactionLog, SqliteTimestamp,
|
||||
},
|
||||
schema::{evm_basic_grant, evm_transaction_log},
|
||||
};
|
||||
use crate::evm::policies::{
|
||||
EvalContext, EvalViolation, SharedGrantSettings, TransactionRateLimit,
|
||||
};
|
||||
|
||||
use super::check_shared_constraints;
|
||||
|
||||
const WALLET_ACCESS_ID: i32 = 1;
|
||||
const CHAIN_ID: u64 = 1;
|
||||
const RECIPIENT: Address = address!("1111111111111111111111111111111111111111");
|
||||
|
||||
fn context() -> EvalContext {
|
||||
EvalContext {
|
||||
target: EvmWalletAccess {
|
||||
id: WALLET_ACCESS_ID,
|
||||
wallet_id: 10,
|
||||
client_id: 20,
|
||||
created_at: SqliteTimestamp(Utc::now()),
|
||||
},
|
||||
chain: CHAIN_ID,
|
||||
to: RECIPIENT,
|
||||
value: U256::ZERO,
|
||||
calldata: Bytes::new(),
|
||||
max_fee_per_gas: 100,
|
||||
max_priority_fee_per_gas: 10,
|
||||
}
|
||||
}
|
||||
|
||||
fn shared_settings() -> SharedGrantSettings {
|
||||
SharedGrantSettings {
|
||||
wallet_access_id: WALLET_ACCESS_ID,
|
||||
chain: CHAIN_ID,
|
||||
valid_from: None,
|
||||
valid_until: None,
|
||||
max_gas_fee_per_gas: None,
|
||||
max_priority_fee_per_gas: None,
|
||||
rate_limit: None,
|
||||
}
|
||||
}
|
||||
|
||||
async fn insert_basic_grant(
|
||||
conn: &mut DatabaseConnection,
|
||||
shared: &SharedGrantSettings,
|
||||
) -> EvmBasicGrant {
|
||||
insert_into(evm_basic_grant::table)
|
||||
.values(NewEvmBasicGrant {
|
||||
wallet_access_id: shared.wallet_access_id,
|
||||
chain_id: shared.chain as i32,
|
||||
valid_from: shared.valid_from.map(SqliteTimestamp),
|
||||
valid_until: shared.valid_until.map(SqliteTimestamp),
|
||||
max_gas_fee_per_gas: shared
|
||||
.max_gas_fee_per_gas
|
||||
.map(|fee| super::utils::u256_to_bytes(fee).to_vec()),
|
||||
max_priority_fee_per_gas: shared
|
||||
.max_priority_fee_per_gas
|
||||
.map(|fee| super::utils::u256_to_bytes(fee).to_vec()),
|
||||
rate_limit_count: shared.rate_limit.as_ref().map(|limit| limit.count as i32),
|
||||
rate_limit_window_secs: shared
|
||||
.rate_limit
|
||||
.as_ref()
|
||||
.map(|limit| limit.window.num_seconds() as i32),
|
||||
revoked_at: None,
|
||||
})
|
||||
.returning(EvmBasicGrant::as_select())
|
||||
.get_result(conn)
|
||||
.await
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[case::matching_chain(CHAIN_ID, false)]
|
||||
#[case::mismatching_chain(CHAIN_ID + 1, true)]
|
||||
#[tokio::test]
|
||||
async fn check_shared_constraints_enforces_chain_id(
|
||||
#[case] context_chain: u64,
|
||||
#[case] expect_mismatch: bool,
|
||||
) {
|
||||
let db = db::create_test_pool().await;
|
||||
let mut conn = db.get().await.unwrap();
|
||||
|
||||
let context = EvalContext {
|
||||
chain: context_chain,
|
||||
..context()
|
||||
};
|
||||
|
||||
let violations = check_shared_constraints(&context, &shared_settings(), 999, &mut *conn)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(
|
||||
violations
|
||||
.iter()
|
||||
.any(|violation| matches!(violation, EvalViolation::MismatchingChainId { .. })),
|
||||
expect_mismatch
|
||||
);
|
||||
|
||||
if expect_mismatch {
|
||||
assert_eq!(violations.len(), 1);
|
||||
} else {
|
||||
assert!(violations.is_empty());
|
||||
}
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[case::valid_from_in_bounds(Some(Utc::now() - Duration::hours(1)), None, false)]
|
||||
#[case::valid_from_out_of_bounds(Some(Utc::now() + Duration::hours(1)), None, true)]
|
||||
#[case::valid_until_in_bounds(None, Some(Utc::now() + Duration::hours(1)), false)]
|
||||
#[case::valid_until_out_of_bounds(None, Some(Utc::now() - Duration::hours(1)), true)]
|
||||
#[tokio::test]
|
||||
async fn check_shared_constraints_enforces_validity_window(
|
||||
#[case] valid_from: Option<chrono::DateTime<Utc>>,
|
||||
#[case] valid_until: Option<chrono::DateTime<Utc>>,
|
||||
#[case] expect_invalid_time: bool,
|
||||
) {
|
||||
let db = db::create_test_pool().await;
|
||||
let mut conn = db.get().await.unwrap();
|
||||
|
||||
let shared = SharedGrantSettings {
|
||||
valid_from,
|
||||
valid_until,
|
||||
..shared_settings()
|
||||
};
|
||||
|
||||
let violations = check_shared_constraints(&context(), &shared, 999, &mut *conn)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(
|
||||
violations
|
||||
.iter()
|
||||
.any(|violation| matches!(violation, EvalViolation::InvalidTime)),
|
||||
expect_invalid_time
|
||||
);
|
||||
|
||||
if expect_invalid_time {
|
||||
assert_eq!(violations.len(), 1);
|
||||
} else {
|
||||
assert!(violations.is_empty());
|
||||
}
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[case::max_fee_within_limit(Some(U256::from(100u64)), None, 100, 10, false)]
|
||||
#[case::max_fee_exceeded(Some(U256::from(99u64)), None, 100, 10, true)]
|
||||
#[case::priority_fee_within_limit(None, Some(U256::from(10u64)), 100, 10, false)]
|
||||
#[case::priority_fee_exceeded(None, Some(U256::from(9u64)), 100, 10, true)]
|
||||
#[tokio::test]
|
||||
async fn check_shared_constraints_enforces_gas_fee_caps(
|
||||
#[case] max_gas_fee_per_gas: Option<U256>,
|
||||
#[case] max_priority_fee_per_gas: Option<U256>,
|
||||
#[case] actual_max_fee_per_gas: u128,
|
||||
#[case] actual_max_priority_fee_per_gas: u128,
|
||||
#[case] expect_gas_limit_violation: bool,
|
||||
) {
|
||||
let db = db::create_test_pool().await;
|
||||
let mut conn = db.get().await.unwrap();
|
||||
|
||||
let context = EvalContext {
|
||||
max_fee_per_gas: actual_max_fee_per_gas,
|
||||
max_priority_fee_per_gas: actual_max_priority_fee_per_gas,
|
||||
..context()
|
||||
};
|
||||
|
||||
let shared = SharedGrantSettings {
|
||||
max_gas_fee_per_gas,
|
||||
max_priority_fee_per_gas,
|
||||
..shared_settings()
|
||||
};
|
||||
let violations = check_shared_constraints(&context, &shared, 999, &mut *conn)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(
|
||||
violations
|
||||
.iter()
|
||||
.any(|violation| matches!(violation, EvalViolation::GasLimitExceeded { .. })),
|
||||
expect_gas_limit_violation
|
||||
);
|
||||
|
||||
if expect_gas_limit_violation {
|
||||
assert_eq!(violations.len(), 1);
|
||||
} else {
|
||||
assert!(violations.is_empty());
|
||||
}
|
||||
}
|
||||
|
||||
#[rstest]
|
||||
#[case::under_rate_limit(2, false)]
|
||||
#[case::at_rate_limit(1, true)]
|
||||
#[tokio::test]
|
||||
async fn check_shared_constraints_enforces_rate_limit(
|
||||
#[case] rate_limit_count: u32,
|
||||
#[case] expect_rate_limit_violation: bool,
|
||||
) {
|
||||
let db = db::create_test_pool().await;
|
||||
let mut conn = db.get().await.unwrap();
|
||||
|
||||
let shared = SharedGrantSettings {
|
||||
rate_limit: Some(TransactionRateLimit {
|
||||
count: rate_limit_count,
|
||||
window: Duration::hours(1),
|
||||
}),
|
||||
..shared_settings()
|
||||
};
|
||||
|
||||
let basic_grant = insert_basic_grant(&mut conn, &shared).await;
|
||||
|
||||
insert_into(evm_transaction_log::table)
|
||||
.values(NewEvmTransactionLog {
|
||||
grant_id: basic_grant.id,
|
||||
wallet_access_id: WALLET_ACCESS_ID,
|
||||
chain_id: CHAIN_ID as i32,
|
||||
eth_value: super::utils::u256_to_bytes(U256::ZERO).to_vec(),
|
||||
signed_at: SqliteTimestamp(Utc::now()),
|
||||
})
|
||||
.execute(&mut *conn)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let violations = check_shared_constraints(&context(), &shared, basic_grant.id, &mut *conn)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(
|
||||
violations
|
||||
.iter()
|
||||
.any(|violation| matches!(violation, EvalViolation::RateLimitExceeded)),
|
||||
expect_rate_limit_violation
|
||||
);
|
||||
|
||||
if expect_rate_limit_violation {
|
||||
assert_eq!(violations.len(), 1);
|
||||
} else {
|
||||
assert!(violations.is_empty());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,11 +6,12 @@ use diesel::{
|
||||
ExpressionMethods as _, QueryDsl, SelectableHelper, result::QueryResult, sqlite::Sqlite,
|
||||
};
|
||||
use diesel_async::{AsyncConnection, RunQueryDsl};
|
||||
use miette::Diagnostic;
|
||||
|
||||
use thiserror::Error;
|
||||
|
||||
use crate::{
|
||||
db::models::{self, EvmBasicGrant},
|
||||
crypto::integrity::v1::Integrable,
|
||||
db::models::{self, EvmBasicGrant, EvmWalletAccess},
|
||||
evm::utils,
|
||||
};
|
||||
|
||||
@@ -19,9 +20,8 @@ pub mod token_transfers;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct EvalContext {
|
||||
// Which wallet is this transaction for
|
||||
pub client_id: i32,
|
||||
pub wallet_id: i32,
|
||||
// Which wallet is this transaction for and who requested it
|
||||
pub target: EvmWalletAccess,
|
||||
|
||||
// The transaction data
|
||||
pub chain: ChainId,
|
||||
@@ -34,48 +34,44 @@ pub struct EvalContext {
|
||||
pub max_priority_fee_per_gas: u128,
|
||||
}
|
||||
|
||||
#[derive(Debug, Error, Diagnostic)]
|
||||
#[derive(Debug, Error)]
|
||||
pub enum EvalViolation {
|
||||
#[error("This grant doesn't allow transactions to the target address {target}")]
|
||||
#[diagnostic(code(arbiter_server::evm::eval_violation::invalid_target))]
|
||||
InvalidTarget { target: Address },
|
||||
|
||||
#[error("Gas limit exceeded for this grant")]
|
||||
#[diagnostic(code(arbiter_server::evm::eval_violation::gas_limit_exceeded))]
|
||||
GasLimitExceeded {
|
||||
max_gas_fee_per_gas: Option<U256>,
|
||||
max_priority_fee_per_gas: Option<U256>,
|
||||
},
|
||||
|
||||
#[error("Rate limit exceeded for this grant")]
|
||||
#[diagnostic(code(arbiter_server::evm::eval_violation::rate_limit_exceeded))]
|
||||
RateLimitExceeded,
|
||||
|
||||
#[error("Transaction exceeds volumetric limits of the grant")]
|
||||
#[diagnostic(code(arbiter_server::evm::eval_violation::volumetric_limit_exceeded))]
|
||||
VolumetricLimitExceeded,
|
||||
|
||||
#[error("Transaction is outside of the grant's validity period")]
|
||||
#[diagnostic(code(arbiter_server::evm::eval_violation::invalid_time))]
|
||||
InvalidTime,
|
||||
|
||||
#[error("Transaction type is not allowed by this grant")]
|
||||
#[diagnostic(code(arbiter_server::evm::eval_violation::invalid_transaction_type))]
|
||||
InvalidTransactionType,
|
||||
|
||||
#[error("Mismatching chain ID")]
|
||||
MismatchingChainId { expected: ChainId, actual: ChainId },
|
||||
}
|
||||
|
||||
pub type DatabaseID = i32;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct Grant<PolicySettings> {
|
||||
pub id: DatabaseID,
|
||||
pub shared_grant_id: DatabaseID, // ID of the basic grant for shared-logic checks like rate limits and validity periods
|
||||
pub shared: SharedGrantSettings,
|
||||
pub settings: PolicySettings,
|
||||
pub common_settings_id: DatabaseID, // ID of the basic grant for shared-logic checks like rate limits and validity periods
|
||||
pub settings: CombinedSettings<PolicySettings>,
|
||||
}
|
||||
|
||||
|
||||
pub trait Policy: Sized {
|
||||
type Settings: Send + Sync + 'static + Into<SpecificGrant>;
|
||||
type Settings: Send + Sync + 'static + Into<SpecificGrant> + Integrable;
|
||||
type Meaning: Display + std::fmt::Debug + Send + Sync + 'static + Into<SpecificMeaning>;
|
||||
|
||||
fn analyze(context: &EvalContext) -> Option<Self::Meaning>;
|
||||
@@ -131,13 +127,13 @@ pub enum SpecificMeaning {
|
||||
TokenTransfer(token_transfers::Meaning),
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
|
||||
pub struct TransactionRateLimit {
|
||||
pub count: u32,
|
||||
pub window: Duration,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
|
||||
pub struct VolumeRateLimit {
|
||||
pub max_volume: U256,
|
||||
pub window: Duration,
|
||||
@@ -145,7 +141,7 @@ pub struct VolumeRateLimit {
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
|
||||
pub struct SharedGrantSettings {
|
||||
pub wallet_id: i32,
|
||||
pub wallet_access_id: i32,
|
||||
pub chain: ChainId,
|
||||
|
||||
pub valid_from: Option<DateTime<Utc>>,
|
||||
@@ -158,9 +154,9 @@ pub struct SharedGrantSettings {
|
||||
}
|
||||
|
||||
impl SharedGrantSettings {
|
||||
fn try_from_model(model: EvmBasicGrant) -> QueryResult<Self> {
|
||||
pub(crate) fn try_from_model(model: EvmBasicGrant) -> QueryResult<Self> {
|
||||
Ok(Self {
|
||||
wallet_id: model.wallet_id,
|
||||
wallet_access_id: model.wallet_access_id,
|
||||
chain: model.chain_id as u64, // safe because chain_id is stored as i32 but is guaranteed to be a valid ChainId by the API when creating grants
|
||||
valid_from: model.valid_from.map(Into::into),
|
||||
valid_until: model.valid_until.map(Into::into),
|
||||
@@ -198,12 +194,63 @@ impl SharedGrantSettings {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum SpecificGrant {
|
||||
EtherTransfer(ether_transfer::Settings),
|
||||
TokenTransfer(token_transfers::Settings),
|
||||
}
|
||||
|
||||
pub struct FullGrant<PolicyGrant> {
|
||||
pub basic: SharedGrantSettings,
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CombinedSettings<PolicyGrant> {
|
||||
pub shared: SharedGrantSettings,
|
||||
pub specific: PolicyGrant,
|
||||
}
|
||||
|
||||
impl<P> CombinedSettings<P> {
|
||||
pub fn generalize<Y: From<P>>(self) -> CombinedSettings<Y> {
|
||||
CombinedSettings {
|
||||
shared: self.shared,
|
||||
specific: self.specific.into(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<P: Integrable> Integrable for CombinedSettings<P> {
|
||||
const KIND: &'static str = P::KIND;
|
||||
const VERSION: i32 = P::VERSION;
|
||||
}
|
||||
|
||||
use crate::crypto::integrity::hashing::Hashable;
|
||||
|
||||
impl Hashable for TransactionRateLimit {
|
||||
fn hash<H: sha2::Digest>(&self, hasher: &mut H) {
|
||||
self.count.hash(hasher);
|
||||
self.window.hash(hasher);
|
||||
}
|
||||
}
|
||||
|
||||
impl Hashable for VolumeRateLimit {
|
||||
fn hash<H: sha2::Digest>(&self, hasher: &mut H) {
|
||||
self.max_volume.hash(hasher);
|
||||
self.window.hash(hasher);
|
||||
}
|
||||
}
|
||||
|
||||
impl Hashable for SharedGrantSettings {
|
||||
fn hash<H: sha2::Digest>(&self, hasher: &mut H) {
|
||||
self.wallet_access_id.hash(hasher);
|
||||
self.chain.hash(hasher);
|
||||
self.valid_from.hash(hasher);
|
||||
self.valid_until.hash(hasher);
|
||||
self.max_gas_fee_per_gas.hash(hasher);
|
||||
self.max_priority_fee_per_gas.hash(hasher);
|
||||
self.rate_limit.hash(hasher);
|
||||
}
|
||||
}
|
||||
|
||||
impl<P: Hashable> Hashable for CombinedSettings<P> {
|
||||
fn hash<H: sha2::Digest>(&self, hasher: &mut H) {
|
||||
self.shared.hash(hasher);
|
||||
self.specific.hash(hasher);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,13 +8,14 @@ use diesel::sqlite::Sqlite;
|
||||
use diesel::{ExpressionMethods, JoinOnDsl, prelude::*};
|
||||
use diesel_async::{AsyncConnection, RunQueryDsl};
|
||||
|
||||
use crate::crypto::integrity::v1::Integrable;
|
||||
use crate::db::models::{
|
||||
EvmBasicGrant, EvmEtherTransferGrant, EvmEtherTransferGrantTarget, EvmEtherTransferLimit,
|
||||
NewEvmEtherTransferLimit, SqliteTimestamp,
|
||||
};
|
||||
use crate::db::schema::{evm_basic_grant, evm_ether_transfer_limit, evm_transaction_log};
|
||||
use crate::evm::policies::{
|
||||
Grant, SharedGrantSettings, SpecificGrant, SpecificMeaning, VolumeRateLimit,
|
||||
CombinedSettings, Grant, SharedGrantSettings, SpecificGrant, SpecificMeaning, VolumeRateLimit,
|
||||
};
|
||||
use crate::{
|
||||
db::{
|
||||
@@ -36,34 +37,42 @@ use super::{DatabaseID, EvalContext, EvalViolation};
|
||||
// Plain ether transfer
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
|
||||
pub struct Meaning {
|
||||
to: Address,
|
||||
value: U256,
|
||||
pub(crate) to: Address,
|
||||
pub(crate) value: U256,
|
||||
}
|
||||
impl Display for Meaning {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(
|
||||
f,
|
||||
"Ether transfer of {} to {}",
|
||||
self.value,
|
||||
self.to.to_string()
|
||||
)
|
||||
write!(f, "Ether transfer of {} to {}", self.value, self.to)
|
||||
}
|
||||
}
|
||||
impl Into<SpecificMeaning> for Meaning {
|
||||
fn into(self) -> SpecificMeaning {
|
||||
SpecificMeaning::EtherTransfer(self)
|
||||
impl From<Meaning> for SpecificMeaning {
|
||||
fn from(val: Meaning) -> SpecificMeaning {
|
||||
SpecificMeaning::EtherTransfer(val)
|
||||
}
|
||||
}
|
||||
|
||||
// A grant for ether transfers, which can be scoped to specific target addresses and volume limits
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Settings {
|
||||
target: Vec<Address>,
|
||||
limit: VolumeRateLimit,
|
||||
pub target: Vec<Address>,
|
||||
pub limit: VolumeRateLimit,
|
||||
}
|
||||
impl Integrable for Settings {
|
||||
const KIND: &'static str = "EtherTransfer";
|
||||
}
|
||||
|
||||
impl Into<SpecificGrant> for Settings {
|
||||
fn into(self) -> SpecificGrant {
|
||||
SpecificGrant::EtherTransfer(self)
|
||||
use crate::crypto::integrity::hashing::Hashable;
|
||||
|
||||
impl Hashable for Settings {
|
||||
fn hash<H: sha2::Digest>(&self, hasher: &mut H) {
|
||||
self.target.hash(hasher);
|
||||
self.limit.hash(hasher);
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Settings> for SpecificGrant {
|
||||
fn from(val: Settings) -> SpecificGrant {
|
||||
SpecificGrant::EtherTransfer(val)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -95,20 +104,22 @@ async fn query_relevant_past_transaction(
|
||||
|
||||
async fn check_rate_limits(
|
||||
grant: &Grant<Settings>,
|
||||
current_transfer_value: U256,
|
||||
db: &mut impl AsyncConnection<Backend = Sqlite>,
|
||||
) -> QueryResult<Vec<EvalViolation>> {
|
||||
let mut violations = Vec::new();
|
||||
let window = grant.settings.limit.window;
|
||||
let window = grant.settings.specific.limit.window;
|
||||
|
||||
let past_transaction = query_relevant_past_transaction(grant.id, window, db).await?;
|
||||
let past_transaction =
|
||||
query_relevant_past_transaction(grant.common_settings_id, window, db).await?;
|
||||
|
||||
let window_start = chrono::Utc::now() - grant.settings.limit.window;
|
||||
let cumulative_volume: U256 = past_transaction
|
||||
let window_start = chrono::Utc::now() - grant.settings.specific.limit.window;
|
||||
let prospective_cumulative_volume: U256 = past_transaction
|
||||
.iter()
|
||||
.filter(|(_, timestamp)| timestamp >= &window_start)
|
||||
.fold(U256::default(), |acc, (value, _)| acc + *value);
|
||||
.fold(current_transfer_value, |acc, (value, _)| acc + *value);
|
||||
|
||||
if cumulative_volume > grant.settings.limit.max_volume {
|
||||
if prospective_cumulative_volume > grant.settings.specific.limit.max_volume {
|
||||
violations.push(EvalViolation::VolumetricLimitExceeded);
|
||||
}
|
||||
|
||||
@@ -141,11 +152,11 @@ impl Policy for EtherTransfer {
|
||||
let mut violations = Vec::new();
|
||||
|
||||
// Check if the target address is within the grant's allowed targets
|
||||
if !grant.settings.target.contains(&meaning.to) {
|
||||
if !grant.settings.specific.target.contains(&meaning.to) {
|
||||
violations.push(EvalViolation::InvalidTarget { target: meaning.to });
|
||||
}
|
||||
|
||||
let rate_violations = check_rate_limits(grant, db).await?;
|
||||
let rate_violations = check_rate_limits(grant, meaning.value, db).await?;
|
||||
violations.extend(rate_violations);
|
||||
|
||||
Ok(violations)
|
||||
@@ -200,9 +211,8 @@ impl Policy for EtherTransfer {
|
||||
.inner_join(evm_basic_grant::table)
|
||||
.inner_join(evm_ether_transfer_grant_target::table)
|
||||
.filter(
|
||||
evm_basic_grant::wallet_id
|
||||
.eq(context.wallet_id)
|
||||
.and(evm_basic_grant::client_id.eq(context.client_id))
|
||||
evm_basic_grant::wallet_access_id
|
||||
.eq(context.target.id)
|
||||
.and(evm_basic_grant::revoked_at.is_null())
|
||||
.and(evm_ether_transfer_grant_target::address.eq(&target_bytes)),
|
||||
)
|
||||
@@ -240,20 +250,21 @@ impl Policy for EtherTransfer {
|
||||
})
|
||||
.collect();
|
||||
|
||||
let settings = Settings {
|
||||
target: targets,
|
||||
limit: VolumeRateLimit {
|
||||
max_volume: utils::try_bytes_to_u256(&limit.max_volume)
|
||||
.map_err(|err| diesel::result::Error::DeserializationError(Box::new(err)))?,
|
||||
window: chrono::Duration::seconds(limit.window_secs as i64),
|
||||
},
|
||||
};
|
||||
|
||||
Ok(Some(Grant {
|
||||
id: grant.id,
|
||||
shared_grant_id: grant.basic_grant_id,
|
||||
common_settings_id: grant.basic_grant_id,
|
||||
settings: CombinedSettings {
|
||||
shared: SharedGrantSettings::try_from_model(basic_grant)?,
|
||||
settings,
|
||||
specific: Settings {
|
||||
target: targets,
|
||||
limit: VolumeRateLimit {
|
||||
max_volume: utils::try_bytes_to_u256(&limit.max_volume).map_err(|err| {
|
||||
diesel::result::Error::DeserializationError(Box::new(err))
|
||||
})?,
|
||||
window: chrono::Duration::seconds(limit.window_secs as i64),
|
||||
},
|
||||
},
|
||||
},
|
||||
}))
|
||||
}
|
||||
|
||||
@@ -331,9 +342,10 @@ impl Policy for EtherTransfer {
|
||||
|
||||
Ok(Grant {
|
||||
id: specific.id,
|
||||
shared_grant_id: specific.basic_grant_id,
|
||||
common_settings_id: specific.basic_grant_id,
|
||||
settings: CombinedSettings {
|
||||
shared: SharedGrantSettings::try_from_model(basic)?,
|
||||
settings: Settings {
|
||||
specific: Settings {
|
||||
target: targets,
|
||||
limit: VolumeRateLimit {
|
||||
max_volume: utils::try_bytes_to_u256(&limit.max_volume).map_err(
|
||||
@@ -342,6 +354,7 @@ impl Policy for EtherTransfer {
|
||||
window: Duration::seconds(limit.window_secs as i64),
|
||||
},
|
||||
},
|
||||
},
|
||||
})
|
||||
})
|
||||
.collect()
|
||||
|
||||
@@ -5,20 +5,22 @@ use diesel_async::RunQueryDsl;
|
||||
|
||||
use crate::db::{
|
||||
self, DatabaseConnection,
|
||||
models::{EvmBasicGrant, NewEvmBasicGrant, NewEvmTransactionLog, SqliteTimestamp},
|
||||
models::{
|
||||
EvmBasicGrant, EvmWalletAccess, NewEvmBasicGrant, NewEvmTransactionLog, SqliteTimestamp,
|
||||
},
|
||||
schema::{evm_basic_grant, evm_transaction_log},
|
||||
};
|
||||
use crate::evm::{
|
||||
policies::{
|
||||
EvalContext, EvalViolation, Grant, Policy, SharedGrantSettings, VolumeRateLimit,
|
||||
CombinedSettings, EvalContext, EvalViolation, Grant, Policy, SharedGrantSettings,
|
||||
VolumeRateLimit,
|
||||
},
|
||||
utils,
|
||||
};
|
||||
|
||||
use super::{EtherTransfer, Settings};
|
||||
|
||||
const WALLET_ID: i32 = 1;
|
||||
const CLIENT_ID: i32 = 2;
|
||||
const WALLET_ACCESS_ID: i32 = 1;
|
||||
const CHAIN_ID: u64 = 1;
|
||||
|
||||
const ALLOWED: Address = address!("1111111111111111111111111111111111111111");
|
||||
@@ -26,8 +28,12 @@ const OTHER: Address = address!("2222222222222222222222222222222222222222");
|
||||
|
||||
fn ctx(to: Address, value: U256) -> EvalContext {
|
||||
EvalContext {
|
||||
wallet_id: WALLET_ID,
|
||||
client_id: CLIENT_ID,
|
||||
target: EvmWalletAccess {
|
||||
id: WALLET_ACCESS_ID,
|
||||
wallet_id: 10,
|
||||
client_id: 20,
|
||||
created_at: SqliteTimestamp(Utc::now()),
|
||||
},
|
||||
chain: CHAIN_ID,
|
||||
to,
|
||||
value,
|
||||
@@ -40,8 +46,7 @@ fn ctx(to: Address, value: U256) -> EvalContext {
|
||||
async fn insert_basic(conn: &mut DatabaseConnection, revoked: bool) -> EvmBasicGrant {
|
||||
insert_into(evm_basic_grant::table)
|
||||
.values(NewEvmBasicGrant {
|
||||
wallet_id: WALLET_ID,
|
||||
client_id: CLIENT_ID,
|
||||
wallet_access_id: WALLET_ACCESS_ID,
|
||||
chain_id: CHAIN_ID as i32,
|
||||
valid_from: None,
|
||||
valid_until: None,
|
||||
@@ -69,7 +74,7 @@ fn make_settings(targets: Vec<Address>, max_volume: u64) -> Settings {
|
||||
|
||||
fn shared() -> SharedGrantSettings {
|
||||
SharedGrantSettings {
|
||||
wallet_id: WALLET_ID,
|
||||
wallet_access_id: WALLET_ACCESS_ID,
|
||||
chain: CHAIN_ID,
|
||||
valid_from: None,
|
||||
valid_until: None,
|
||||
@@ -79,8 +84,6 @@ fn shared() -> SharedGrantSettings {
|
||||
}
|
||||
}
|
||||
|
||||
// ── analyze ─────────────────────────────────────────────────────────────
|
||||
|
||||
#[test]
|
||||
fn analyze_matches_empty_calldata() {
|
||||
let m = EtherTransfer::analyze(&ctx(ALLOWED, U256::from(1_000u64))).unwrap();
|
||||
@@ -97,8 +100,6 @@ fn analyze_rejects_nonempty_calldata() {
|
||||
assert!(EtherTransfer::analyze(&context).is_none());
|
||||
}
|
||||
|
||||
// ── evaluate ────────────────────────────────────────────────────────────
|
||||
|
||||
#[tokio::test]
|
||||
async fn evaluate_passes_for_allowed_target() {
|
||||
let db = db::create_test_pool().await;
|
||||
@@ -106,9 +107,11 @@ async fn evaluate_passes_for_allowed_target() {
|
||||
|
||||
let grant = Grant {
|
||||
id: 999,
|
||||
shared_grant_id: 999,
|
||||
common_settings_id: 999,
|
||||
settings: CombinedSettings {
|
||||
shared: shared(),
|
||||
settings: make_settings(vec![ALLOWED], 1_000_000),
|
||||
specific: make_settings(vec![ALLOWED], 1_000_000),
|
||||
},
|
||||
};
|
||||
let context = ctx(ALLOWED, U256::from(100u64));
|
||||
let m = EtherTransfer::analyze(&context).unwrap();
|
||||
@@ -125,9 +128,11 @@ async fn evaluate_rejects_disallowed_target() {
|
||||
|
||||
let grant = Grant {
|
||||
id: 999,
|
||||
shared_grant_id: 999,
|
||||
common_settings_id: 999,
|
||||
settings: CombinedSettings {
|
||||
shared: shared(),
|
||||
settings: make_settings(vec![ALLOWED], 1_000_000),
|
||||
specific: make_settings(vec![ALLOWED], 1_000_000),
|
||||
},
|
||||
};
|
||||
let context = ctx(OTHER, U256::from(100u64));
|
||||
let m = EtherTransfer::analyze(&context).unwrap();
|
||||
@@ -154,8 +159,7 @@ async fn evaluate_passes_when_volume_within_limit() {
|
||||
insert_into(evm_transaction_log::table)
|
||||
.values(NewEvmTransactionLog {
|
||||
grant_id,
|
||||
client_id: CLIENT_ID,
|
||||
wallet_id: WALLET_ID,
|
||||
wallet_access_id: WALLET_ACCESS_ID,
|
||||
chain_id: CHAIN_ID as i32,
|
||||
eth_value: utils::u256_to_bytes(U256::from(500u64)).to_vec(),
|
||||
signed_at: SqliteTimestamp(Utc::now()),
|
||||
@@ -166,9 +170,11 @@ async fn evaluate_passes_when_volume_within_limit() {
|
||||
|
||||
let grant = Grant {
|
||||
id: grant_id,
|
||||
shared_grant_id: basic.id,
|
||||
common_settings_id: basic.id,
|
||||
settings: CombinedSettings {
|
||||
shared: shared(),
|
||||
settings,
|
||||
specific: settings,
|
||||
},
|
||||
};
|
||||
let context = ctx(ALLOWED, U256::from(100u64));
|
||||
let m = EtherTransfer::analyze(&context).unwrap();
|
||||
@@ -195,10 +201,9 @@ async fn evaluate_rejects_volume_over_limit() {
|
||||
insert_into(evm_transaction_log::table)
|
||||
.values(NewEvmTransactionLog {
|
||||
grant_id,
|
||||
client_id: CLIENT_ID,
|
||||
wallet_id: WALLET_ID,
|
||||
wallet_access_id: WALLET_ACCESS_ID,
|
||||
chain_id: CHAIN_ID as i32,
|
||||
eth_value: utils::u256_to_bytes(U256::from(1_001u64)).to_vec(),
|
||||
eth_value: utils::u256_to_bytes(U256::from(1_000u64)).to_vec(),
|
||||
signed_at: SqliteTimestamp(Utc::now()),
|
||||
})
|
||||
.execute(&mut *conn)
|
||||
@@ -207,11 +212,13 @@ async fn evaluate_rejects_volume_over_limit() {
|
||||
|
||||
let grant = Grant {
|
||||
id: grant_id,
|
||||
shared_grant_id: basic.id,
|
||||
common_settings_id: basic.id,
|
||||
settings: CombinedSettings {
|
||||
shared: shared(),
|
||||
settings,
|
||||
specific: settings,
|
||||
},
|
||||
};
|
||||
let context = ctx(ALLOWED, U256::from(100u64));
|
||||
let context = ctx(ALLOWED, U256::from(1u64));
|
||||
let m = EtherTransfer::analyze(&context).unwrap();
|
||||
let v = EtherTransfer::evaluate(&context, &m, &grant, &mut *conn)
|
||||
.await
|
||||
@@ -233,14 +240,13 @@ async fn evaluate_passes_at_exactly_volume_limit() {
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Exactly at the limit — the check is `>`, so this should not violate
|
||||
// Exactly at the limit including current transfer — check is `>`, so this should not violate
|
||||
insert_into(evm_transaction_log::table)
|
||||
.values(NewEvmTransactionLog {
|
||||
grant_id,
|
||||
client_id: CLIENT_ID,
|
||||
wallet_id: WALLET_ID,
|
||||
wallet_access_id: WALLET_ACCESS_ID,
|
||||
chain_id: CHAIN_ID as i32,
|
||||
eth_value: utils::u256_to_bytes(U256::from(1_000u64)).to_vec(),
|
||||
eth_value: utils::u256_to_bytes(U256::from(900u64)).to_vec(),
|
||||
signed_at: SqliteTimestamp(Utc::now()),
|
||||
})
|
||||
.execute(&mut *conn)
|
||||
@@ -249,9 +255,11 @@ async fn evaluate_passes_at_exactly_volume_limit() {
|
||||
|
||||
let grant = Grant {
|
||||
id: grant_id,
|
||||
shared_grant_id: basic.id,
|
||||
common_settings_id: basic.id,
|
||||
settings: CombinedSettings {
|
||||
shared: shared(),
|
||||
settings,
|
||||
specific: settings,
|
||||
},
|
||||
};
|
||||
let context = ctx(ALLOWED, U256::from(100u64));
|
||||
let m = EtherTransfer::analyze(&context).unwrap();
|
||||
@@ -264,8 +272,6 @@ async fn evaluate_passes_at_exactly_volume_limit() {
|
||||
);
|
||||
}
|
||||
|
||||
// ── try_find_grant ───────────────────────────────────────────────────────
|
||||
|
||||
#[tokio::test]
|
||||
async fn try_find_grant_roundtrip() {
|
||||
let db = db::create_test_pool().await;
|
||||
@@ -283,8 +289,11 @@ async fn try_find_grant_roundtrip() {
|
||||
|
||||
assert!(found.is_some());
|
||||
let g = found.unwrap();
|
||||
assert_eq!(g.settings.target, vec![ALLOWED]);
|
||||
assert_eq!(g.settings.limit.max_volume, U256::from(1_000_000u64));
|
||||
assert_eq!(g.settings.specific.target, vec![ALLOWED]);
|
||||
assert_eq!(
|
||||
g.settings.specific.limit.max_volume,
|
||||
U256::from(1_000_000u64)
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -321,7 +330,36 @@ async fn try_find_grant_wrong_target_returns_none() {
|
||||
assert!(found.is_none());
|
||||
}
|
||||
|
||||
// ── find_all_grants ──────────────────────────────────────────────────────
|
||||
proptest::proptest! {
|
||||
#[test]
|
||||
fn target_order_does_not_affect_hash(
|
||||
raw_addrs in proptest::collection::vec(proptest::prelude::any::<[u8; 20]>(), 0..8),
|
||||
seed in proptest::prelude::any::<u64>(),
|
||||
max_volume in proptest::prelude::any::<u64>(),
|
||||
window_secs in 1i64..=86400,
|
||||
) {
|
||||
use rand::{SeedableRng, seq::SliceRandom};
|
||||
use sha2::Digest;
|
||||
use crate::crypto::integrity::hashing::Hashable;
|
||||
|
||||
let addrs: Vec<Address> = raw_addrs.iter().map(|b| Address::from(*b)).collect();
|
||||
let mut shuffled = addrs.clone();
|
||||
shuffled.shuffle(&mut rand::rngs::StdRng::seed_from_u64(seed));
|
||||
|
||||
let limit = VolumeRateLimit {
|
||||
max_volume: U256::from(max_volume),
|
||||
window: Duration::seconds(window_secs),
|
||||
};
|
||||
|
||||
let mut h1 = sha2::Sha256::new();
|
||||
Settings { target: addrs, limit: limit.clone() }.hash(&mut h1);
|
||||
|
||||
let mut h2 = sha2::Sha256::new();
|
||||
Settings { target: shuffled, limit }.hash(&mut h2);
|
||||
|
||||
proptest::prop_assert_eq!(h1.finalize(), h2.finalize());
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn find_all_grants_empty_db() {
|
||||
@@ -348,7 +386,7 @@ async fn find_all_grants_excludes_revoked() {
|
||||
|
||||
let all = EtherTransfer::find_all_grants(&mut *conn).await.unwrap();
|
||||
assert_eq!(all.len(), 1);
|
||||
assert_eq!(all[0].settings.target, vec![ALLOWED]);
|
||||
assert_eq!(all[0].settings.specific.target, vec![ALLOWED]);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -364,8 +402,11 @@ async fn find_all_grants_multiple_targets() {
|
||||
|
||||
let all = EtherTransfer::find_all_grants(&mut *conn).await.unwrap();
|
||||
assert_eq!(all.len(), 1);
|
||||
assert_eq!(all[0].settings.target.len(), 2);
|
||||
assert_eq!(all[0].settings.limit.max_volume, U256::from(1_000_000u64));
|
||||
assert_eq!(all[0].settings.specific.target.len(), 2);
|
||||
assert_eq!(
|
||||
all[0].settings.specific.limit.max_volume,
|
||||
U256::from(1_000_000u64)
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
|
||||
@@ -1,20 +1,5 @@
|
||||
use std::collections::HashMap;
|
||||
|
||||
use alloy::{
|
||||
primitives::{Address, U256},
|
||||
sol_types::SolCall,
|
||||
};
|
||||
use arbiter_tokens_registry::evm::nonfungible::{self, TokenInfo};
|
||||
use chrono::{DateTime, Duration, Utc};
|
||||
use diesel::dsl::{auto_type, insert_into};
|
||||
use diesel::sqlite::Sqlite;
|
||||
use diesel::{ExpressionMethods, prelude::*};
|
||||
use diesel_async::{AsyncConnection, RunQueryDsl};
|
||||
|
||||
use crate::db::models::{
|
||||
EvmBasicGrant, EvmTokenTransferGrant, EvmTokenTransferVolumeLimit, NewEvmTokenTransferGrant,
|
||||
NewEvmTokenTransferLog, NewEvmTokenTransferVolumeLimit, SqliteTimestamp,
|
||||
};
|
||||
use crate::db::schema::{
|
||||
evm_basic_grant, evm_token_transfer_grant, evm_token_transfer_log,
|
||||
evm_token_transfer_volume_limit,
|
||||
@@ -26,6 +11,25 @@ use crate::evm::{
|
||||
},
|
||||
utils,
|
||||
};
|
||||
use crate::{
|
||||
crypto::integrity::Integrable,
|
||||
db::models::{
|
||||
EvmBasicGrant, EvmTokenTransferGrant, EvmTokenTransferVolumeLimit,
|
||||
NewEvmTokenTransferGrant, NewEvmTokenTransferLog, NewEvmTokenTransferVolumeLimit,
|
||||
SqliteTimestamp,
|
||||
},
|
||||
evm::policies::CombinedSettings,
|
||||
};
|
||||
use alloy::{
|
||||
primitives::{Address, U256},
|
||||
sol_types::SolCall,
|
||||
};
|
||||
use arbiter_tokens_registry::evm::nonfungible::{self, TokenInfo};
|
||||
use chrono::{DateTime, Duration, Utc};
|
||||
use diesel::dsl::{auto_type, insert_into};
|
||||
use diesel::sqlite::Sqlite;
|
||||
use diesel::{ExpressionMethods, prelude::*};
|
||||
use diesel_async::{AsyncConnection, RunQueryDsl};
|
||||
|
||||
use super::{DatabaseID, EvalContext, EvalViolation};
|
||||
|
||||
@@ -38,9 +42,9 @@ fn grant_join() -> _ {
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
|
||||
pub struct Meaning {
|
||||
token: &'static TokenInfo,
|
||||
to: Address,
|
||||
value: U256,
|
||||
pub token: &'static TokenInfo,
|
||||
pub to: Address,
|
||||
pub value: U256,
|
||||
}
|
||||
impl std::fmt::Display for Meaning {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
@@ -51,21 +55,36 @@ impl std::fmt::Display for Meaning {
|
||||
)
|
||||
}
|
||||
}
|
||||
impl Into<SpecificMeaning> for Meaning {
|
||||
fn into(self) -> SpecificMeaning {
|
||||
SpecificMeaning::TokenTransfer(self)
|
||||
impl From<Meaning> for SpecificMeaning {
|
||||
fn from(val: Meaning) -> SpecificMeaning {
|
||||
SpecificMeaning::TokenTransfer(val)
|
||||
}
|
||||
}
|
||||
|
||||
// A grant for token transfers, which can be scoped to specific target addresses and volume limits
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Settings {
|
||||
token_contract: Address,
|
||||
target: Option<Address>,
|
||||
volume_limits: Vec<VolumeRateLimit>,
|
||||
pub token_contract: Address,
|
||||
pub target: Option<Address>,
|
||||
pub volume_limits: Vec<VolumeRateLimit>,
|
||||
}
|
||||
impl Into<SpecificGrant> for Settings {
|
||||
fn into(self) -> SpecificGrant {
|
||||
SpecificGrant::TokenTransfer(self)
|
||||
impl Integrable for Settings {
|
||||
const KIND: &'static str = "TokenTransfer";
|
||||
}
|
||||
|
||||
use crate::crypto::integrity::hashing::Hashable;
|
||||
|
||||
impl Hashable for Settings {
|
||||
fn hash<H: sha2::Digest>(&self, hasher: &mut H) {
|
||||
self.token_contract.hash(hasher);
|
||||
self.target.hash(hasher);
|
||||
self.volume_limits.hash(hasher);
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Settings> for SpecificGrant {
|
||||
fn from(val: Settings) -> SpecificGrant {
|
||||
SpecificGrant::TokenTransfer(val)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -100,24 +119,32 @@ async fn query_relevant_past_transfers(
|
||||
|
||||
async fn check_volume_rate_limits(
|
||||
grant: &Grant<Settings>,
|
||||
current_transfer_value: U256,
|
||||
db: &mut impl AsyncConnection<Backend = Sqlite>,
|
||||
) -> QueryResult<Vec<EvalViolation>> {
|
||||
let mut violations = Vec::new();
|
||||
|
||||
let Some(longest_window) = grant.settings.volume_limits.iter().map(|l| l.window).max() else {
|
||||
let Some(longest_window) = grant
|
||||
.settings
|
||||
.specific
|
||||
.volume_limits
|
||||
.iter()
|
||||
.map(|l| l.window)
|
||||
.max()
|
||||
else {
|
||||
return Ok(violations);
|
||||
};
|
||||
|
||||
let past_transfers = query_relevant_past_transfers(grant.id, longest_window, db).await?;
|
||||
|
||||
for limit in &grant.settings.volume_limits {
|
||||
for limit in &grant.settings.specific.volume_limits {
|
||||
let window_start = chrono::Utc::now() - limit.window;
|
||||
let cumulative_volume: U256 = past_transfers
|
||||
let prospective_cumulative_volume: U256 = past_transfers
|
||||
.iter()
|
||||
.filter(|(_, timestamp)| timestamp >= &window_start)
|
||||
.fold(U256::default(), |acc, (value, _)| acc + *value);
|
||||
.fold(current_transfer_value, |acc, (value, _)| acc + *value);
|
||||
|
||||
if cumulative_volume > limit.max_volume {
|
||||
if prospective_cumulative_volume > limit.max_volume {
|
||||
violations.push(EvalViolation::VolumetricLimitExceeded);
|
||||
break;
|
||||
}
|
||||
@@ -156,13 +183,13 @@ impl Policy for TokenTransfer {
|
||||
return Ok(violations);
|
||||
}
|
||||
|
||||
if let Some(allowed) = grant.settings.target {
|
||||
if allowed != meaning.to {
|
||||
if let Some(allowed) = grant.settings.specific.target
|
||||
&& allowed != meaning.to
|
||||
{
|
||||
violations.push(EvalViolation::InvalidTarget { target: meaning.to });
|
||||
}
|
||||
}
|
||||
|
||||
let rate_violations = check_volume_rate_limits(grant, db).await?;
|
||||
let rate_violations = check_volume_rate_limits(grant, meaning.value, db).await?;
|
||||
violations.extend(rate_violations);
|
||||
|
||||
Ok(violations)
|
||||
@@ -208,8 +235,7 @@ impl Policy for TokenTransfer {
|
||||
|
||||
let grant: Option<(EvmBasicGrant, EvmTokenTransferGrant)> = grant_join()
|
||||
.filter(evm_basic_grant::revoked_at.is_null())
|
||||
.filter(evm_basic_grant::wallet_id.eq(context.wallet_id))
|
||||
.filter(evm_basic_grant::client_id.eq(context.client_id))
|
||||
.filter(evm_basic_grant::wallet_access_id.eq(context.target.id))
|
||||
.filter(evm_token_transfer_grant::token_contract.eq(&token_contract_bytes))
|
||||
.select((
|
||||
EvmBasicGrant::as_select(),
|
||||
@@ -260,17 +286,17 @@ impl Policy for TokenTransfer {
|
||||
}
|
||||
};
|
||||
|
||||
let settings = Settings {
|
||||
Ok(Some(Grant {
|
||||
id: token_grant.id,
|
||||
common_settings_id: token_grant.basic_grant_id,
|
||||
settings: CombinedSettings {
|
||||
shared: SharedGrantSettings::try_from_model(basic_grant)?,
|
||||
specific: Settings {
|
||||
token_contract: Address::from(token_contract),
|
||||
target,
|
||||
volume_limits,
|
||||
};
|
||||
|
||||
Ok(Some(Grant {
|
||||
id: token_grant.id,
|
||||
shared_grant_id: token_grant.basic_grant_id,
|
||||
shared: SharedGrantSettings::try_from_model(basic_grant)?,
|
||||
settings,
|
||||
},
|
||||
},
|
||||
}))
|
||||
}
|
||||
|
||||
@@ -368,13 +394,15 @@ impl Policy for TokenTransfer {
|
||||
|
||||
Ok(Grant {
|
||||
id: specific.id,
|
||||
shared_grant_id: specific.basic_grant_id,
|
||||
common_settings_id: specific.basic_grant_id,
|
||||
settings: CombinedSettings {
|
||||
shared: SharedGrantSettings::try_from_model(basic)?,
|
||||
settings: Settings {
|
||||
specific: Settings {
|
||||
token_contract: Address::from(token_contract),
|
||||
target,
|
||||
volume_limits,
|
||||
},
|
||||
},
|
||||
})
|
||||
})
|
||||
.collect()
|
||||
|
||||
@@ -6,12 +6,15 @@ use diesel_async::RunQueryDsl;
|
||||
|
||||
use crate::db::{
|
||||
self, DatabaseConnection,
|
||||
models::{EvmBasicGrant, NewEvmBasicGrant, SqliteTimestamp},
|
||||
models::{EvmBasicGrant, EvmWalletAccess, NewEvmBasicGrant, SqliteTimestamp},
|
||||
schema::evm_basic_grant,
|
||||
};
|
||||
use crate::evm::{
|
||||
abi::IERC20::transferCall,
|
||||
policies::{EvalContext, EvalViolation, Grant, Policy, SharedGrantSettings, VolumeRateLimit},
|
||||
policies::{
|
||||
CombinedSettings, EvalContext, EvalViolation, Grant, Policy, SharedGrantSettings,
|
||||
VolumeRateLimit,
|
||||
},
|
||||
utils,
|
||||
};
|
||||
|
||||
@@ -21,8 +24,7 @@ use super::{Settings, TokenTransfer};
|
||||
const CHAIN_ID: u64 = 1;
|
||||
const DAI: Address = address!("6B175474E89094C44Da98b954EedeAC495271d0F");
|
||||
|
||||
const WALLET_ID: i32 = 1;
|
||||
const CLIENT_ID: i32 = 2;
|
||||
const WALLET_ACCESS_ID: i32 = 1;
|
||||
|
||||
const RECIPIENT: Address = address!("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa");
|
||||
const OTHER: Address = address!("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb");
|
||||
@@ -38,8 +40,12 @@ fn transfer_calldata(to: Address, value: U256) -> Bytes {
|
||||
|
||||
fn ctx(to: Address, calldata: Bytes) -> EvalContext {
|
||||
EvalContext {
|
||||
wallet_id: WALLET_ID,
|
||||
client_id: CLIENT_ID,
|
||||
target: EvmWalletAccess {
|
||||
id: WALLET_ACCESS_ID,
|
||||
wallet_id: 10,
|
||||
client_id: 20,
|
||||
created_at: SqliteTimestamp(Utc::now()),
|
||||
},
|
||||
chain: CHAIN_ID,
|
||||
to,
|
||||
value: U256::ZERO,
|
||||
@@ -52,8 +58,7 @@ fn ctx(to: Address, calldata: Bytes) -> EvalContext {
|
||||
async fn insert_basic(conn: &mut DatabaseConnection, revoked: bool) -> EvmBasicGrant {
|
||||
insert_into(evm_basic_grant::table)
|
||||
.values(NewEvmBasicGrant {
|
||||
wallet_id: WALLET_ID,
|
||||
client_id: CLIENT_ID,
|
||||
wallet_access_id: WALLET_ACCESS_ID,
|
||||
chain_id: CHAIN_ID as i32,
|
||||
valid_from: None,
|
||||
valid_until: None,
|
||||
@@ -86,7 +91,7 @@ fn make_settings(target: Option<Address>, max_volume: Option<u64>) -> Settings {
|
||||
|
||||
fn shared() -> SharedGrantSettings {
|
||||
SharedGrantSettings {
|
||||
wallet_id: WALLET_ID,
|
||||
wallet_access_id: WALLET_ACCESS_ID,
|
||||
chain: CHAIN_ID,
|
||||
valid_from: None,
|
||||
valid_until: None,
|
||||
@@ -96,8 +101,6 @@ fn shared() -> SharedGrantSettings {
|
||||
}
|
||||
}
|
||||
|
||||
// ── analyze ─────────────────────────────────────────────────────────────
|
||||
|
||||
#[test]
|
||||
fn analyze_known_token_valid_calldata() {
|
||||
let calldata = transfer_calldata(RECIPIENT, U256::from(100u64));
|
||||
@@ -123,8 +126,6 @@ fn analyze_empty_calldata_returns_none() {
|
||||
assert!(TokenTransfer::analyze(&ctx(DAI, Bytes::new())).is_none());
|
||||
}
|
||||
|
||||
// ── evaluate ────────────────────────────────────────────────────────────
|
||||
|
||||
#[tokio::test]
|
||||
async fn evaluate_rejects_nonzero_eth_value() {
|
||||
let db = db::create_test_pool().await;
|
||||
@@ -132,18 +133,28 @@ async fn evaluate_rejects_nonzero_eth_value() {
|
||||
|
||||
let grant = Grant {
|
||||
id: 999,
|
||||
shared_grant_id: 999,
|
||||
common_settings_id: 999,
|
||||
settings: CombinedSettings {
|
||||
shared: shared(),
|
||||
settings: make_settings(None, None),
|
||||
specific: make_settings(None, None),
|
||||
},
|
||||
};
|
||||
let calldata = transfer_calldata(RECIPIENT, U256::from(100u64));
|
||||
let mut context = ctx(DAI, calldata);
|
||||
context.value = U256::from(1u64); // ETH attached to an ERC-20 call
|
||||
|
||||
let m = TokenTransfer::analyze(&EvalContext { value: U256::ZERO, ..context.clone() })
|
||||
let m = TokenTransfer::analyze(&EvalContext {
|
||||
value: U256::ZERO,
|
||||
..context.clone()
|
||||
})
|
||||
.unwrap();
|
||||
let v = TokenTransfer::evaluate(&context, &m, &grant, &mut *conn).await.unwrap();
|
||||
assert!(v.iter().any(|e| matches!(e, EvalViolation::InvalidTransactionType)));
|
||||
let v = TokenTransfer::evaluate(&context, &m, &grant, &mut *conn)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(
|
||||
v.iter()
|
||||
.any(|e| matches!(e, EvalViolation::InvalidTransactionType))
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -153,14 +164,18 @@ async fn evaluate_passes_any_recipient_when_no_restriction() {
|
||||
|
||||
let grant = Grant {
|
||||
id: 999,
|
||||
shared_grant_id: 999,
|
||||
common_settings_id: 999,
|
||||
settings: CombinedSettings {
|
||||
shared: shared(),
|
||||
settings: make_settings(None, None),
|
||||
specific: make_settings(None, None),
|
||||
},
|
||||
};
|
||||
let calldata = transfer_calldata(RECIPIENT, U256::from(100u64));
|
||||
let context = ctx(DAI, calldata);
|
||||
let m = TokenTransfer::analyze(&context).unwrap();
|
||||
let v = TokenTransfer::evaluate(&context, &m, &grant, &mut *conn).await.unwrap();
|
||||
let v = TokenTransfer::evaluate(&context, &m, &grant, &mut *conn)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(v.is_empty());
|
||||
}
|
||||
|
||||
@@ -171,14 +186,18 @@ async fn evaluate_passes_matching_restricted_recipient() {
|
||||
|
||||
let grant = Grant {
|
||||
id: 999,
|
||||
shared_grant_id: 999,
|
||||
common_settings_id: 999,
|
||||
settings: CombinedSettings {
|
||||
shared: shared(),
|
||||
settings: make_settings(Some(RECIPIENT), None),
|
||||
specific: make_settings(Some(RECIPIENT), None),
|
||||
},
|
||||
};
|
||||
let calldata = transfer_calldata(RECIPIENT, U256::from(100u64));
|
||||
let context = ctx(DAI, calldata);
|
||||
let m = TokenTransfer::analyze(&context).unwrap();
|
||||
let v = TokenTransfer::evaluate(&context, &m, &grant, &mut *conn).await.unwrap();
|
||||
let v = TokenTransfer::evaluate(&context, &m, &grant, &mut *conn)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(v.is_empty());
|
||||
}
|
||||
|
||||
@@ -189,27 +208,36 @@ async fn evaluate_rejects_wrong_restricted_recipient() {
|
||||
|
||||
let grant = Grant {
|
||||
id: 999,
|
||||
shared_grant_id: 999,
|
||||
common_settings_id: 999,
|
||||
settings: CombinedSettings {
|
||||
shared: shared(),
|
||||
settings: make_settings(Some(RECIPIENT), None),
|
||||
specific: make_settings(Some(RECIPIENT), None),
|
||||
},
|
||||
};
|
||||
let calldata = transfer_calldata(OTHER, U256::from(100u64));
|
||||
let context = ctx(DAI, calldata);
|
||||
let m = TokenTransfer::analyze(&context).unwrap();
|
||||
let v = TokenTransfer::evaluate(&context, &m, &grant, &mut *conn).await.unwrap();
|
||||
assert!(v.iter().any(|e| matches!(e, EvalViolation::InvalidTarget { .. })));
|
||||
let v = TokenTransfer::evaluate(&context, &m, &grant, &mut *conn)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(
|
||||
v.iter()
|
||||
.any(|e| matches!(e, EvalViolation::InvalidTarget { .. }))
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn evaluate_passes_volume_within_limit() {
|
||||
async fn evaluate_passes_volume_at_exact_limit() {
|
||||
let db = db::create_test_pool().await;
|
||||
let mut conn = db.get().await.unwrap();
|
||||
|
||||
let basic = insert_basic(&mut conn, false).await;
|
||||
let settings = make_settings(None, Some(1_000));
|
||||
let grant_id = TokenTransfer::create_grant(&basic, &settings, &mut *conn).await.unwrap();
|
||||
let grant_id = TokenTransfer::create_grant(&basic, &settings, &mut *conn)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Record a past transfer of 500 (within 1000 limit)
|
||||
// Record a past transfer of 900, with current transfer 100 => exactly 1000 limit
|
||||
use crate::db::{models::NewEvmTokenTransferLog, schema::evm_token_transfer_log};
|
||||
insert_into(evm_token_transfer_log::table)
|
||||
.values(NewEvmTokenTransferLog {
|
||||
@@ -218,18 +246,30 @@ async fn evaluate_passes_volume_within_limit() {
|
||||
chain_id: CHAIN_ID as i32,
|
||||
token_contract: DAI.to_vec(),
|
||||
recipient_address: RECIPIENT.to_vec(),
|
||||
value: utils::u256_to_bytes(U256::from(500u64)).to_vec(),
|
||||
value: utils::u256_to_bytes(U256::from(900u64)).to_vec(),
|
||||
})
|
||||
.execute(&mut *conn)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let grant = Grant { id: grant_id, shared_grant_id: basic.id, shared: shared(), settings };
|
||||
let grant = Grant {
|
||||
id: grant_id,
|
||||
common_settings_id: basic.id,
|
||||
settings: CombinedSettings {
|
||||
shared: shared(),
|
||||
specific: settings,
|
||||
},
|
||||
};
|
||||
let calldata = transfer_calldata(RECIPIENT, U256::from(100u64));
|
||||
let context = ctx(DAI, calldata);
|
||||
let m = TokenTransfer::analyze(&context).unwrap();
|
||||
let v = TokenTransfer::evaluate(&context, &m, &grant, &mut *conn).await.unwrap();
|
||||
assert!(!v.iter().any(|e| matches!(e, EvalViolation::VolumetricLimitExceeded)));
|
||||
let v = TokenTransfer::evaluate(&context, &m, &grant, &mut *conn)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(
|
||||
!v.iter()
|
||||
.any(|e| matches!(e, EvalViolation::VolumetricLimitExceeded))
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -239,7 +279,9 @@ async fn evaluate_rejects_volume_over_limit() {
|
||||
|
||||
let basic = insert_basic(&mut conn, false).await;
|
||||
let settings = make_settings(None, Some(1_000));
|
||||
let grant_id = TokenTransfer::create_grant(&basic, &settings, &mut *conn).await.unwrap();
|
||||
let grant_id = TokenTransfer::create_grant(&basic, &settings, &mut *conn)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
use crate::db::{models::NewEvmTokenTransferLog, schema::evm_token_transfer_log};
|
||||
insert_into(evm_token_transfer_log::table)
|
||||
@@ -249,18 +291,30 @@ async fn evaluate_rejects_volume_over_limit() {
|
||||
chain_id: CHAIN_ID as i32,
|
||||
token_contract: DAI.to_vec(),
|
||||
recipient_address: RECIPIENT.to_vec(),
|
||||
value: utils::u256_to_bytes(U256::from(1_001u64)).to_vec(),
|
||||
value: utils::u256_to_bytes(U256::from(1_000u64)).to_vec(),
|
||||
})
|
||||
.execute(&mut *conn)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let grant = Grant { id: grant_id, shared_grant_id: basic.id, shared: shared(), settings };
|
||||
let calldata = transfer_calldata(RECIPIENT, U256::from(100u64));
|
||||
let grant = Grant {
|
||||
id: grant_id,
|
||||
common_settings_id: basic.id,
|
||||
settings: CombinedSettings {
|
||||
shared: shared(),
|
||||
specific: settings,
|
||||
},
|
||||
};
|
||||
let calldata = transfer_calldata(RECIPIENT, U256::from(1u64));
|
||||
let context = ctx(DAI, calldata);
|
||||
let m = TokenTransfer::analyze(&context).unwrap();
|
||||
let v = TokenTransfer::evaluate(&context, &m, &grant, &mut *conn).await.unwrap();
|
||||
assert!(v.iter().any(|e| matches!(e, EvalViolation::VolumetricLimitExceeded)));
|
||||
let v = TokenTransfer::evaluate(&context, &m, &grant, &mut *conn)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(
|
||||
v.iter()
|
||||
.any(|e| matches!(e, EvalViolation::VolumetricLimitExceeded))
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -270,15 +324,22 @@ async fn evaluate_no_volume_limits_always_passes() {
|
||||
|
||||
let grant = Grant {
|
||||
id: 999,
|
||||
shared_grant_id: 999,
|
||||
common_settings_id: 999,
|
||||
settings: CombinedSettings {
|
||||
shared: shared(),
|
||||
settings: make_settings(None, None), // no volume limits
|
||||
specific: make_settings(None, None), // no volume limits
|
||||
},
|
||||
};
|
||||
let calldata = transfer_calldata(RECIPIENT, U256::from(u64::MAX));
|
||||
let context = ctx(DAI, calldata);
|
||||
let m = TokenTransfer::analyze(&context).unwrap();
|
||||
let v = TokenTransfer::evaluate(&context, &m, &grant, &mut *conn).await.unwrap();
|
||||
assert!(!v.iter().any(|e| matches!(e, EvalViolation::VolumetricLimitExceeded)));
|
||||
let v = TokenTransfer::evaluate(&context, &m, &grant, &mut *conn)
|
||||
.await
|
||||
.unwrap();
|
||||
assert!(
|
||||
!v.iter()
|
||||
.any(|e| matches!(e, EvalViolation::VolumetricLimitExceeded))
|
||||
);
|
||||
}
|
||||
|
||||
// ── try_find_grant ───────────────────────────────────────────────────────
|
||||
@@ -290,7 +351,9 @@ async fn try_find_grant_roundtrip() {
|
||||
|
||||
let basic = insert_basic(&mut conn, false).await;
|
||||
let settings = make_settings(Some(RECIPIENT), Some(5_000));
|
||||
TokenTransfer::create_grant(&basic, &settings, &mut *conn).await.unwrap();
|
||||
TokenTransfer::create_grant(&basic, &settings, &mut *conn)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let calldata = transfer_calldata(RECIPIENT, U256::from(100u64));
|
||||
let found = TokenTransfer::try_find_grant(&ctx(DAI, calldata), &mut *conn)
|
||||
@@ -299,10 +362,13 @@ async fn try_find_grant_roundtrip() {
|
||||
|
||||
assert!(found.is_some());
|
||||
let g = found.unwrap();
|
||||
assert_eq!(g.settings.token_contract, DAI);
|
||||
assert_eq!(g.settings.target, Some(RECIPIENT));
|
||||
assert_eq!(g.settings.volume_limits.len(), 1);
|
||||
assert_eq!(g.settings.volume_limits[0].max_volume, U256::from(5_000u64));
|
||||
assert_eq!(g.settings.specific.token_contract, DAI);
|
||||
assert_eq!(g.settings.specific.target, Some(RECIPIENT));
|
||||
assert_eq!(g.settings.specific.volume_limits.len(), 1);
|
||||
assert_eq!(
|
||||
g.settings.specific.volume_limits[0].max_volume,
|
||||
U256::from(5_000u64)
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -312,7 +378,9 @@ async fn try_find_grant_revoked_returns_none() {
|
||||
|
||||
let basic = insert_basic(&mut conn, true).await;
|
||||
let settings = make_settings(None, None);
|
||||
TokenTransfer::create_grant(&basic, &settings, &mut *conn).await.unwrap();
|
||||
TokenTransfer::create_grant(&basic, &settings, &mut *conn)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let calldata = transfer_calldata(RECIPIENT, U256::from(1u64));
|
||||
let found = TokenTransfer::try_find_grant(&ctx(DAI, calldata), &mut *conn)
|
||||
@@ -328,7 +396,9 @@ async fn try_find_grant_unknown_token_returns_none() {
|
||||
|
||||
let basic = insert_basic(&mut conn, false).await;
|
||||
let settings = make_settings(None, None);
|
||||
TokenTransfer::create_grant(&basic, &settings, &mut *conn).await.unwrap();
|
||||
TokenTransfer::create_grant(&basic, &settings, &mut *conn)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// Query with a different token contract
|
||||
let calldata = transfer_calldata(RECIPIENT, U256::from(1u64));
|
||||
@@ -338,7 +408,39 @@ async fn try_find_grant_unknown_token_returns_none() {
|
||||
assert!(found.is_none());
|
||||
}
|
||||
|
||||
// ── find_all_grants ──────────────────────────────────────────────────────
|
||||
proptest::proptest! {
|
||||
#[test]
|
||||
fn volume_limits_order_does_not_affect_hash(
|
||||
raw_limits in proptest::collection::vec(
|
||||
(proptest::prelude::any::<u64>(), 1i64..=86400),
|
||||
0..8,
|
||||
),
|
||||
seed in proptest::prelude::any::<u64>(),
|
||||
) {
|
||||
use rand::{SeedableRng, seq::SliceRandom};
|
||||
use sha2::Digest;
|
||||
use crate::crypto::integrity::hashing::Hashable;
|
||||
|
||||
let limits: Vec<VolumeRateLimit> = raw_limits
|
||||
.iter()
|
||||
.map(|(max_vol, window_secs)| VolumeRateLimit {
|
||||
max_volume: U256::from(*max_vol),
|
||||
window: Duration::seconds(*window_secs),
|
||||
})
|
||||
.collect();
|
||||
|
||||
let mut shuffled = limits.clone();
|
||||
shuffled.shuffle(&mut rand::rngs::StdRng::seed_from_u64(seed));
|
||||
|
||||
let mut h1 = sha2::Sha256::new();
|
||||
Settings { token_contract: DAI, target: None, volume_limits: limits }.hash(&mut h1);
|
||||
|
||||
let mut h2 = sha2::Sha256::new();
|
||||
Settings { token_contract: DAI, target: None, volume_limits: shuffled }.hash(&mut h2);
|
||||
|
||||
proptest::prop_assert_eq!(h1.finalize(), h2.finalize());
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn find_all_grants_empty_db() {
|
||||
@@ -355,9 +457,13 @@ async fn find_all_grants_excludes_revoked() {
|
||||
|
||||
let settings = make_settings(None, Some(1_000));
|
||||
let active = insert_basic(&mut conn, false).await;
|
||||
TokenTransfer::create_grant(&active, &settings, &mut *conn).await.unwrap();
|
||||
TokenTransfer::create_grant(&active, &settings, &mut *conn)
|
||||
.await
|
||||
.unwrap();
|
||||
let revoked = insert_basic(&mut conn, true).await;
|
||||
TokenTransfer::create_grant(&revoked, &settings, &mut *conn).await.unwrap();
|
||||
TokenTransfer::create_grant(&revoked, &settings, &mut *conn)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let all = TokenTransfer::find_all_grants(&mut *conn).await.unwrap();
|
||||
assert_eq!(all.len(), 1);
|
||||
@@ -370,12 +476,17 @@ async fn find_all_grants_loads_volume_limits() {
|
||||
|
||||
let basic = insert_basic(&mut conn, false).await;
|
||||
let settings = make_settings(None, Some(9_999));
|
||||
TokenTransfer::create_grant(&basic, &settings, &mut *conn).await.unwrap();
|
||||
TokenTransfer::create_grant(&basic, &settings, &mut *conn)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let all = TokenTransfer::find_all_grants(&mut *conn).await.unwrap();
|
||||
assert_eq!(all.len(), 1);
|
||||
assert_eq!(all[0].settings.volume_limits.len(), 1);
|
||||
assert_eq!(all[0].settings.volume_limits[0].max_volume, U256::from(9_999u64));
|
||||
assert_eq!(all[0].settings.specific.volume_limits.len(), 1);
|
||||
assert_eq!(
|
||||
all[0].settings.specific.volume_limits[0].max_volume,
|
||||
U256::from(9_999u64)
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -388,7 +499,11 @@ async fn find_all_grants_multiple_grants_batch_loaded() {
|
||||
.await
|
||||
.unwrap();
|
||||
let b2 = insert_basic(&mut conn, false).await;
|
||||
TokenTransfer::create_grant(&b2, &make_settings(Some(RECIPIENT), Some(2_000)), &mut *conn)
|
||||
TokenTransfer::create_grant(
|
||||
&b2,
|
||||
&make_settings(Some(RECIPIENT), Some(2_000)),
|
||||
&mut *conn,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
|
||||
@@ -1,14 +1,14 @@
|
||||
use std::sync::Mutex;
|
||||
|
||||
use crate::safe_cell::{SafeCell, SafeCellHandle as _};
|
||||
use alloy::{
|
||||
consensus::SignableTransaction,
|
||||
network::{TxSigner, TxSignerSync},
|
||||
primitives::{Address, ChainId, Signature, B256},
|
||||
primitives::{Address, B256, ChainId, Signature},
|
||||
signers::{Error, Result, Signer, SignerSync, utils::secret_key_to_address},
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use k256::ecdsa::{self, signature::hazmat::PrehashSigner, RecoveryId, SigningKey};
|
||||
use memsafe::MemSafe;
|
||||
use k256::ecdsa::{self, RecoveryId, SigningKey, signature::hazmat::PrehashSigner};
|
||||
|
||||
/// An Ethereum signer that stores its secp256k1 secret key inside a
|
||||
/// hardware-protected [`MemSafe`] cell.
|
||||
@@ -20,7 +20,7 @@ use memsafe::MemSafe;
|
||||
/// Because [`MemSafe::read`] requires `&mut self` while the [`Signer`] trait
|
||||
/// requires `&self`, the cell is wrapped in a [`Mutex`].
|
||||
pub struct SafeSigner {
|
||||
key: Mutex<MemSafe<SigningKey>>,
|
||||
key: Mutex<SafeCell<SigningKey>>,
|
||||
address: Address,
|
||||
chain_id: Option<ChainId>,
|
||||
}
|
||||
@@ -42,14 +42,13 @@ impl std::fmt::Debug for SafeSigner {
|
||||
/// rejection, but we retry to be correct).
|
||||
///
|
||||
/// Returns the protected key bytes and the derived Ethereum address.
|
||||
pub fn generate(rng: &mut impl rand::Rng) -> (MemSafe<[u8; 32]>, Address) {
|
||||
pub fn generate(rng: &mut impl rand::Rng) -> (SafeCell<[u8; 32]>, Address) {
|
||||
loop {
|
||||
let mut cell = MemSafe::new([0u8; 32]).expect("MemSafe allocation");
|
||||
{
|
||||
let mut w = cell.write().expect("MemSafe write");
|
||||
rng.fill_bytes(w.as_mut());
|
||||
}
|
||||
let reader = cell.read().expect("MemSafe read");
|
||||
let mut cell = SafeCell::new_inline(|w: &mut [u8; 32]| {
|
||||
rng.fill_bytes(w);
|
||||
});
|
||||
|
||||
let reader = cell.read();
|
||||
if let Ok(sk) = SigningKey::from_slice(reader.as_ref()) {
|
||||
let address = secret_key_to_address(&sk);
|
||||
drop(reader);
|
||||
@@ -64,8 +63,8 @@ impl SafeSigner {
|
||||
/// The key bytes are read from protected memory, parsed as a secp256k1
|
||||
/// scalar, and immediately moved into a new [`MemSafe`] cell. The raw
|
||||
/// bytes are never exposed outside this function.
|
||||
pub fn from_memsafe(mut cell: MemSafe<Vec<u8>>) -> Result<Self> {
|
||||
let reader = cell.read().map_err(Error::other)?;
|
||||
pub fn from_cell(mut cell: SafeCell<Vec<u8>>) -> Result<Self> {
|
||||
let reader = cell.read();
|
||||
let sk = SigningKey::from_slice(reader.as_slice()).map_err(Error::other)?;
|
||||
drop(reader);
|
||||
Self::new(sk)
|
||||
@@ -75,7 +74,7 @@ impl SafeSigner {
|
||||
/// memory region.
|
||||
pub fn new(key: SigningKey) -> Result<Self> {
|
||||
let address = secret_key_to_address(&key);
|
||||
let cell = MemSafe::new(key).map_err(Error::other)?;
|
||||
let cell = SafeCell::new(key);
|
||||
Ok(Self {
|
||||
key: Mutex::new(cell),
|
||||
address,
|
||||
@@ -84,25 +83,25 @@ impl SafeSigner {
|
||||
}
|
||||
|
||||
fn sign_hash_inner(&self, hash: &B256) -> Result<Signature> {
|
||||
#[allow(clippy::expect_used)]
|
||||
let mut cell = self.key.lock().expect("SafeSigner mutex poisoned");
|
||||
let reader = cell.read().map_err(Error::other)?;
|
||||
let reader = cell.read();
|
||||
let sig: (ecdsa::Signature, RecoveryId) = reader.sign_prehash(hash.as_ref())?;
|
||||
Ok(sig.into())
|
||||
}
|
||||
|
||||
fn sign_tx_inner(
|
||||
&self,
|
||||
tx: &mut dyn SignableTransaction<Signature>,
|
||||
) -> Result<Signature> {
|
||||
if let Some(chain_id) = self.chain_id {
|
||||
if !tx.set_chain_id_checked(chain_id) {
|
||||
fn sign_tx_inner(&self, tx: &mut dyn SignableTransaction<Signature>) -> Result<Signature> {
|
||||
if let Some(chain_id) = self.chain_id
|
||||
&& !tx.set_chain_id_checked(chain_id)
|
||||
{
|
||||
return Err(Error::TransactionChainIdMismatch {
|
||||
signer: chain_id,
|
||||
tx: tx.chain_id().unwrap(),
|
||||
#[allow(clippy::expect_used)]
|
||||
tx: tx.chain_id().expect("Chain ID is guaranteed to be set"),
|
||||
});
|
||||
}
|
||||
}
|
||||
self.sign_hash_inner(&tx.signature_hash()).map_err(Error::other)
|
||||
self.sign_hash_inner(&tx.signature_hash())
|
||||
.map_err(Error::other)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
116
server/crates/arbiter-server/src/grpc/client.rs
Normal file
116
server/crates/arbiter-server/src/grpc/client.rs
Normal file
@@ -0,0 +1,116 @@
|
||||
use arbiter_proto::{
|
||||
proto::client::{
|
||||
ClientRequest, ClientResponse, client_request::Payload as ClientRequestPayload,
|
||||
client_response::Payload as ClientResponsePayload,
|
||||
},
|
||||
transport::{Receiver, Sender, grpc::GrpcBi},
|
||||
};
|
||||
use kameo::actor::{ActorRef, Spawn as _};
|
||||
use tonic::Status;
|
||||
use tracing::{info, warn};
|
||||
|
||||
use crate::{
|
||||
actors::client::{ClientConnection, session::ClientSession},
|
||||
grpc::request_tracker::RequestTracker,
|
||||
};
|
||||
|
||||
mod auth;
|
||||
mod evm;
|
||||
mod inbound;
|
||||
mod outbound;
|
||||
mod vault;
|
||||
|
||||
async fn dispatch_loop(
|
||||
mut bi: GrpcBi<ClientRequest, ClientResponse>,
|
||||
actor: ActorRef<ClientSession>,
|
||||
mut request_tracker: RequestTracker,
|
||||
) {
|
||||
loop {
|
||||
let Some(message) = bi.recv().await else {
|
||||
return;
|
||||
};
|
||||
|
||||
let conn = match message {
|
||||
Ok(conn) => conn,
|
||||
Err(err) => {
|
||||
warn!(error = ?err, "Failed to receive client request");
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
let request_id = match request_tracker.request(conn.request_id) {
|
||||
Ok(id) => id,
|
||||
Err(err) => {
|
||||
let _ = bi.send(Err(err)).await;
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
let Some(payload) = conn.payload else {
|
||||
let _ = bi
|
||||
.send(Err(Status::invalid_argument(
|
||||
"Missing client request payload",
|
||||
)))
|
||||
.await;
|
||||
return;
|
||||
};
|
||||
|
||||
match dispatch_inner(&actor, payload).await {
|
||||
Ok(response) => {
|
||||
if bi
|
||||
.send(Ok(ClientResponse {
|
||||
request_id: Some(request_id),
|
||||
payload: Some(response),
|
||||
}))
|
||||
.await
|
||||
.is_err()
|
||||
{
|
||||
return;
|
||||
}
|
||||
}
|
||||
Err(status) => {
|
||||
let _ = bi.send(Err(status)).await;
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn dispatch_inner(
|
||||
actor: &ActorRef<ClientSession>,
|
||||
payload: ClientRequestPayload,
|
||||
) -> Result<ClientResponsePayload, Status> {
|
||||
match payload {
|
||||
ClientRequestPayload::Vault(req) => vault::dispatch(actor, req).await,
|
||||
ClientRequestPayload::Evm(req) => evm::dispatch(actor, req).await,
|
||||
ClientRequestPayload::Auth(..) => {
|
||||
warn!("Unsupported post-auth client auth request");
|
||||
Err(Status::invalid_argument("Unsupported client request"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn start(mut conn: ClientConnection, mut bi: GrpcBi<ClientRequest, ClientResponse>) {
|
||||
let mut request_tracker = RequestTracker::default();
|
||||
|
||||
let client_id = match auth::start(&mut conn, &mut bi, &mut request_tracker).await {
|
||||
Ok(id) => id,
|
||||
Err(err) => {
|
||||
let _ = bi
|
||||
.send(Err(Status::unauthenticated(format!(
|
||||
"Authentication failed: {}",
|
||||
err
|
||||
))))
|
||||
.await;
|
||||
warn!(error = ?err, "Client authentication failed");
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
let actor = ClientSession::spawn(ClientSession::new(conn, client_id));
|
||||
let actor_for_cleanup = actor.clone();
|
||||
|
||||
info!("Client authenticated successfully");
|
||||
dispatch_loop(bi, actor, request_tracker).await;
|
||||
actor_for_cleanup.kill();
|
||||
}
|
||||
206
server/crates/arbiter-server/src/grpc/client/auth.rs
Normal file
206
server/crates/arbiter-server/src/grpc/client/auth.rs
Normal file
@@ -0,0 +1,206 @@
|
||||
use arbiter_proto::{
|
||||
ClientMetadata,
|
||||
proto::{
|
||||
client::{
|
||||
ClientRequest, ClientResponse,
|
||||
auth::{
|
||||
self as proto_auth, AuthChallenge as ProtoAuthChallenge,
|
||||
AuthChallengeRequest as ProtoAuthChallengeRequest,
|
||||
AuthChallengeSolution as ProtoAuthChallengeSolution, AuthResult as ProtoAuthResult,
|
||||
request::Payload as AuthRequestPayload, response::Payload as AuthResponsePayload,
|
||||
},
|
||||
client_request::Payload as ClientRequestPayload,
|
||||
client_response::Payload as ClientResponsePayload,
|
||||
},
|
||||
shared::ClientInfo as ProtoClientInfo,
|
||||
},
|
||||
transport::{Bi, Error as TransportError, Receiver, Sender, grpc::GrpcBi},
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use tonic::Status;
|
||||
use tracing::warn;
|
||||
|
||||
use crate::{
|
||||
actors::client::{self, ClientConnection, auth},
|
||||
grpc::request_tracker::RequestTracker,
|
||||
};
|
||||
|
||||
pub struct AuthTransportAdapter<'a> {
|
||||
bi: &'a mut GrpcBi<ClientRequest, ClientResponse>,
|
||||
request_tracker: &'a mut RequestTracker,
|
||||
}
|
||||
|
||||
impl<'a> AuthTransportAdapter<'a> {
|
||||
pub fn new(
|
||||
bi: &'a mut GrpcBi<ClientRequest, ClientResponse>,
|
||||
request_tracker: &'a mut RequestTracker,
|
||||
) -> Self {
|
||||
Self {
|
||||
bi,
|
||||
request_tracker,
|
||||
}
|
||||
}
|
||||
|
||||
fn response_to_proto(response: auth::Outbound) -> AuthResponsePayload {
|
||||
match response {
|
||||
auth::Outbound::AuthChallenge { pubkey, nonce } => {
|
||||
AuthResponsePayload::Challenge(ProtoAuthChallenge {
|
||||
pubkey: pubkey.to_bytes().to_vec(),
|
||||
nonce,
|
||||
})
|
||||
}
|
||||
auth::Outbound::AuthSuccess => {
|
||||
AuthResponsePayload::Result(ProtoAuthResult::Success.into())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn error_to_proto(error: auth::Error) -> AuthResponsePayload {
|
||||
AuthResponsePayload::Result(
|
||||
match error {
|
||||
auth::Error::InvalidChallengeSolution => ProtoAuthResult::InvalidSignature,
|
||||
auth::Error::ApproveError(auth::ApproveError::Denied) => {
|
||||
ProtoAuthResult::ApprovalDenied
|
||||
}
|
||||
auth::Error::ApproveError(auth::ApproveError::Upstream(
|
||||
crate::actors::flow_coordinator::ApprovalError::NoUserAgentsConnected,
|
||||
)) => ProtoAuthResult::NoUserAgentsOnline,
|
||||
auth::Error::ApproveError(auth::ApproveError::Internal)
|
||||
| auth::Error::DatabasePoolUnavailable
|
||||
| auth::Error::DatabaseOperationFailed
|
||||
| auth::Error::IntegrityCheckFailed
|
||||
| auth::Error::Transport => ProtoAuthResult::Internal,
|
||||
}
|
||||
.into(),
|
||||
)
|
||||
}
|
||||
|
||||
async fn send_client_response(
|
||||
&mut self,
|
||||
payload: AuthResponsePayload,
|
||||
) -> Result<(), TransportError> {
|
||||
self.bi
|
||||
.send(Ok(ClientResponse {
|
||||
request_id: Some(self.request_tracker.current_request_id()),
|
||||
payload: Some(ClientResponsePayload::Auth(proto_auth::Response {
|
||||
payload: Some(payload),
|
||||
})),
|
||||
}))
|
||||
.await
|
||||
}
|
||||
|
||||
async fn send_auth_result(&mut self, result: ProtoAuthResult) -> Result<(), TransportError> {
|
||||
self.send_client_response(AuthResponsePayload::Result(result.into()))
|
||||
.await
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Sender<Result<auth::Outbound, auth::Error>> for AuthTransportAdapter<'_> {
|
||||
async fn send(
|
||||
&mut self,
|
||||
item: Result<auth::Outbound, auth::Error>,
|
||||
) -> Result<(), TransportError> {
|
||||
let payload = match item {
|
||||
Ok(message) => AuthTransportAdapter::response_to_proto(message),
|
||||
Err(err) => AuthTransportAdapter::error_to_proto(err),
|
||||
};
|
||||
|
||||
self.send_client_response(payload).await
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Receiver<auth::Inbound> for AuthTransportAdapter<'_> {
|
||||
async fn recv(&mut self) -> Option<auth::Inbound> {
|
||||
let request = match self.bi.recv().await? {
|
||||
Ok(request) => request,
|
||||
Err(error) => {
|
||||
warn!(error = ?error, "grpc client recv failed; closing stream");
|
||||
return None;
|
||||
}
|
||||
};
|
||||
|
||||
match self.request_tracker.request(request.request_id) {
|
||||
Ok(request_id) => request_id,
|
||||
Err(error) => {
|
||||
let _ = self.bi.send(Err(error)).await;
|
||||
return None;
|
||||
}
|
||||
};
|
||||
let payload = request.payload?;
|
||||
let ClientRequestPayload::Auth(auth_request) = payload else {
|
||||
let _ = self
|
||||
.bi
|
||||
.send(Err(Status::invalid_argument(
|
||||
"Unsupported client auth request",
|
||||
)))
|
||||
.await;
|
||||
return None;
|
||||
};
|
||||
let Some(payload) = auth_request.payload else {
|
||||
let _ = self
|
||||
.bi
|
||||
.send(Err(Status::invalid_argument(
|
||||
"Missing client auth request payload",
|
||||
)))
|
||||
.await;
|
||||
return None;
|
||||
};
|
||||
|
||||
match payload {
|
||||
AuthRequestPayload::ChallengeRequest(ProtoAuthChallengeRequest {
|
||||
pubkey,
|
||||
client_info,
|
||||
}) => {
|
||||
let Some(client_info) = client_info else {
|
||||
let _ = self
|
||||
.bi
|
||||
.send(Err(Status::invalid_argument("Missing client info")))
|
||||
.await;
|
||||
return None;
|
||||
};
|
||||
let Ok(pubkey) = <[u8; 32]>::try_from(pubkey) else {
|
||||
let _ = self.send_auth_result(ProtoAuthResult::InvalidKey).await;
|
||||
return None;
|
||||
};
|
||||
let Ok(pubkey) = ed25519_dalek::VerifyingKey::from_bytes(&pubkey) else {
|
||||
let _ = self.send_auth_result(ProtoAuthResult::InvalidKey).await;
|
||||
return None;
|
||||
};
|
||||
Some(auth::Inbound::AuthChallengeRequest {
|
||||
pubkey,
|
||||
metadata: client_metadata_from_proto(client_info),
|
||||
})
|
||||
}
|
||||
AuthRequestPayload::ChallengeSolution(ProtoAuthChallengeSolution { signature }) => {
|
||||
let Ok(signature) = ed25519_dalek::Signature::try_from(signature.as_slice()) else {
|
||||
let _ = self
|
||||
.send_auth_result(ProtoAuthResult::InvalidSignature)
|
||||
.await;
|
||||
return None;
|
||||
};
|
||||
Some(auth::Inbound::AuthChallengeSolution { signature })
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Bi<auth::Inbound, Result<auth::Outbound, auth::Error>> for AuthTransportAdapter<'_> {}
|
||||
|
||||
fn client_metadata_from_proto(metadata: ProtoClientInfo) -> ClientMetadata {
|
||||
ClientMetadata {
|
||||
name: metadata.name,
|
||||
description: metadata.description,
|
||||
version: metadata.version,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn start(
|
||||
conn: &mut ClientConnection,
|
||||
bi: &mut GrpcBi<ClientRequest, ClientResponse>,
|
||||
request_tracker: &mut RequestTracker,
|
||||
) -> Result<i32, auth::Error> {
|
||||
let mut transport = AuthTransportAdapter::new(bi, request_tracker);
|
||||
client::auth::authenticate(conn, &mut transport).await
|
||||
}
|
||||
87
server/crates/arbiter-server/src/grpc/client/evm.rs
Normal file
87
server/crates/arbiter-server/src/grpc/client/evm.rs
Normal file
@@ -0,0 +1,87 @@
|
||||
use arbiter_proto::proto::{
|
||||
client::{
|
||||
client_response::Payload as ClientResponsePayload,
|
||||
evm::{
|
||||
self as proto_evm, request::Payload as EvmRequestPayload,
|
||||
response::Payload as EvmResponsePayload,
|
||||
},
|
||||
},
|
||||
evm::{
|
||||
EvmError as ProtoEvmError, EvmSignTransactionResponse,
|
||||
evm_sign_transaction_response::Result as EvmSignTransactionResult,
|
||||
},
|
||||
};
|
||||
use kameo::actor::ActorRef;
|
||||
use tonic::Status;
|
||||
use tracing::warn;
|
||||
|
||||
use crate::{
|
||||
actors::client::session::{ClientSession, HandleSignTransaction, SignTransactionRpcError},
|
||||
grpc::{
|
||||
Convert, TryConvert,
|
||||
common::inbound::{RawEvmAddress, RawEvmTransaction},
|
||||
},
|
||||
};
|
||||
|
||||
fn wrap_response(payload: EvmResponsePayload) -> ClientResponsePayload {
|
||||
ClientResponsePayload::Evm(proto_evm::Response {
|
||||
payload: Some(payload),
|
||||
})
|
||||
}
|
||||
|
||||
pub(super) async fn dispatch(
|
||||
actor: &ActorRef<ClientSession>,
|
||||
req: proto_evm::Request,
|
||||
) -> Result<ClientResponsePayload, Status> {
|
||||
let Some(payload) = req.payload else {
|
||||
return Err(Status::invalid_argument(
|
||||
"Missing client EVM request payload",
|
||||
));
|
||||
};
|
||||
|
||||
match payload {
|
||||
EvmRequestPayload::SignTransaction(request) => {
|
||||
let address = RawEvmAddress(request.wallet_address).try_convert()?;
|
||||
let transaction = RawEvmTransaction(request.rlp_transaction).try_convert()?;
|
||||
|
||||
let response = match actor
|
||||
.ask(HandleSignTransaction {
|
||||
wallet_address: address,
|
||||
transaction,
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(signature) => EvmSignTransactionResponse {
|
||||
result: Some(EvmSignTransactionResult::Signature(
|
||||
signature.as_bytes().to_vec(),
|
||||
)),
|
||||
},
|
||||
Err(kameo::error::SendError::HandlerError(SignTransactionRpcError::Vet(
|
||||
vet_error,
|
||||
))) => EvmSignTransactionResponse {
|
||||
result: Some(vet_error.convert()),
|
||||
},
|
||||
Err(kameo::error::SendError::HandlerError(SignTransactionRpcError::Internal)) => {
|
||||
EvmSignTransactionResponse {
|
||||
result: Some(EvmSignTransactionResult::Error(
|
||||
ProtoEvmError::Internal.into(),
|
||||
)),
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
warn!(error = ?err, "Failed to sign EVM transaction");
|
||||
EvmSignTransactionResponse {
|
||||
result: Some(EvmSignTransactionResult::Error(
|
||||
ProtoEvmError::Internal.into(),
|
||||
)),
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
Ok(wrap_response(EvmResponsePayload::SignTransaction(response)))
|
||||
}
|
||||
EvmRequestPayload::AnalyzeTransaction(_) => Err(Status::unimplemented(
|
||||
"EVM transaction analysis is not yet implemented",
|
||||
)),
|
||||
}
|
||||
}
|
||||
1
server/crates/arbiter-server/src/grpc/client/inbound.rs
Normal file
1
server/crates/arbiter-server/src/grpc/client/inbound.rs
Normal file
@@ -0,0 +1 @@
|
||||
|
||||
1
server/crates/arbiter-server/src/grpc/client/outbound.rs
Normal file
1
server/crates/arbiter-server/src/grpc/client/outbound.rs
Normal file
@@ -0,0 +1 @@
|
||||
|
||||
47
server/crates/arbiter-server/src/grpc/client/vault.rs
Normal file
47
server/crates/arbiter-server/src/grpc/client/vault.rs
Normal file
@@ -0,0 +1,47 @@
|
||||
use arbiter_proto::proto::{
|
||||
client::{
|
||||
client_response::Payload as ClientResponsePayload,
|
||||
vault::{
|
||||
self as proto_vault, request::Payload as VaultRequestPayload,
|
||||
response::Payload as VaultResponsePayload,
|
||||
},
|
||||
},
|
||||
shared::VaultState as ProtoVaultState,
|
||||
};
|
||||
use kameo::{actor::ActorRef, error::SendError};
|
||||
use tonic::Status;
|
||||
use tracing::warn;
|
||||
|
||||
use crate::actors::{
|
||||
client::session::{ClientSession, Error, HandleQueryVaultState},
|
||||
keyholder::KeyHolderState,
|
||||
};
|
||||
|
||||
pub(super) async fn dispatch(
|
||||
actor: &ActorRef<ClientSession>,
|
||||
req: proto_vault::Request,
|
||||
) -> Result<ClientResponsePayload, Status> {
|
||||
let Some(payload) = req.payload else {
|
||||
return Err(Status::invalid_argument(
|
||||
"Missing client vault request payload",
|
||||
));
|
||||
};
|
||||
|
||||
match payload {
|
||||
VaultRequestPayload::QueryState(_) => {
|
||||
let state = match actor.ask(HandleQueryVaultState {}).await {
|
||||
Ok(KeyHolderState::Unbootstrapped) => ProtoVaultState::Unbootstrapped,
|
||||
Ok(KeyHolderState::Sealed) => ProtoVaultState::Sealed,
|
||||
Ok(KeyHolderState::Unsealed) => ProtoVaultState::Unsealed,
|
||||
Err(SendError::HandlerError(Error::Internal)) => ProtoVaultState::Error,
|
||||
Err(err) => {
|
||||
warn!(error = ?err, "Failed to query vault state");
|
||||
ProtoVaultState::Error
|
||||
}
|
||||
};
|
||||
Ok(ClientResponsePayload::Vault(proto_vault::Response {
|
||||
payload: Some(VaultResponsePayload::State(state.into())),
|
||||
}))
|
||||
}
|
||||
}
|
||||
}
|
||||
2
server/crates/arbiter-server/src/grpc/common.rs
Normal file
2
server/crates/arbiter-server/src/grpc/common.rs
Normal file
@@ -0,0 +1,2 @@
|
||||
pub mod inbound;
|
||||
pub mod outbound;
|
||||
35
server/crates/arbiter-server/src/grpc/common/inbound.rs
Normal file
35
server/crates/arbiter-server/src/grpc/common/inbound.rs
Normal file
@@ -0,0 +1,35 @@
|
||||
use alloy::{consensus::TxEip1559, primitives::Address, rlp::Decodable as _};
|
||||
|
||||
use crate::grpc::TryConvert;
|
||||
|
||||
pub struct RawEvmAddress(pub Vec<u8>);
|
||||
impl TryConvert for RawEvmAddress {
|
||||
type Output = Address;
|
||||
|
||||
type Error = tonic::Status;
|
||||
|
||||
fn try_convert(self) -> Result<Self::Output, Self::Error> {
|
||||
let wallet_address = match <[u8; 20]>::try_from(self.0.as_slice()) {
|
||||
Ok(address) => Address::from(address),
|
||||
Err(_) => {
|
||||
return Err(tonic::Status::invalid_argument(
|
||||
"Invalid EVM wallet address",
|
||||
));
|
||||
}
|
||||
};
|
||||
Ok(wallet_address)
|
||||
}
|
||||
}
|
||||
|
||||
pub struct RawEvmTransaction(pub Vec<u8>);
|
||||
impl TryConvert for RawEvmTransaction {
|
||||
type Output = TxEip1559;
|
||||
|
||||
type Error = tonic::Status;
|
||||
|
||||
fn try_convert(self) -> Result<Self::Output, Self::Error> {
|
||||
let tx = TxEip1559::decode(&mut self.0.as_slice())
|
||||
.map_err(|_| tonic::Status::invalid_argument("Invalid EVM transaction format"))?;
|
||||
Ok(tx)
|
||||
}
|
||||
}
|
||||
125
server/crates/arbiter-server/src/grpc/common/outbound.rs
Normal file
125
server/crates/arbiter-server/src/grpc/common/outbound.rs
Normal file
@@ -0,0 +1,125 @@
|
||||
use alloy::primitives::U256;
|
||||
use arbiter_proto::proto::{
|
||||
evm::{
|
||||
EvmError as ProtoEvmError,
|
||||
evm_sign_transaction_response::Result as EvmSignTransactionResult,
|
||||
},
|
||||
shared::evm::{
|
||||
EvalViolation as ProtoEvalViolation, GasLimitExceededViolation, NoMatchingGrantError,
|
||||
PolicyViolationsError, SpecificMeaning as ProtoSpecificMeaning,
|
||||
TokenInfo as ProtoTokenInfo, TransactionEvalError as ProtoTransactionEvalError,
|
||||
eval_violation as proto_eval_violation, eval_violation::Kind as ProtoEvalViolationKind,
|
||||
specific_meaning::Meaning as ProtoSpecificMeaningKind,
|
||||
transaction_eval_error::Kind as ProtoTransactionEvalErrorKind,
|
||||
},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
evm::{
|
||||
PolicyError, VetError,
|
||||
policies::{EvalViolation, SpecificMeaning},
|
||||
},
|
||||
grpc::Convert,
|
||||
};
|
||||
|
||||
fn u256_to_proto_bytes(value: U256) -> Vec<u8> {
|
||||
value.to_be_bytes::<32>().to_vec()
|
||||
}
|
||||
|
||||
impl Convert for SpecificMeaning {
|
||||
type Output = ProtoSpecificMeaning;
|
||||
|
||||
fn convert(self) -> Self::Output {
|
||||
let kind = match self {
|
||||
SpecificMeaning::EtherTransfer(meaning) => ProtoSpecificMeaningKind::EtherTransfer(
|
||||
arbiter_proto::proto::shared::evm::EtherTransferMeaning {
|
||||
to: meaning.to.to_vec(),
|
||||
value: u256_to_proto_bytes(meaning.value),
|
||||
},
|
||||
),
|
||||
SpecificMeaning::TokenTransfer(meaning) => ProtoSpecificMeaningKind::TokenTransfer(
|
||||
arbiter_proto::proto::shared::evm::TokenTransferMeaning {
|
||||
token: Some(ProtoTokenInfo {
|
||||
symbol: meaning.token.symbol.to_string(),
|
||||
address: meaning.token.contract.to_vec(),
|
||||
chain_id: meaning.token.chain,
|
||||
}),
|
||||
to: meaning.to.to_vec(),
|
||||
value: u256_to_proto_bytes(meaning.value),
|
||||
},
|
||||
),
|
||||
};
|
||||
|
||||
ProtoSpecificMeaning {
|
||||
meaning: Some(kind),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Convert for EvalViolation {
|
||||
type Output = ProtoEvalViolation;
|
||||
|
||||
fn convert(self) -> Self::Output {
|
||||
let kind = match self {
|
||||
EvalViolation::InvalidTarget { target } => {
|
||||
ProtoEvalViolationKind::InvalidTarget(target.to_vec())
|
||||
}
|
||||
EvalViolation::GasLimitExceeded {
|
||||
max_gas_fee_per_gas,
|
||||
max_priority_fee_per_gas,
|
||||
} => ProtoEvalViolationKind::GasLimitExceeded(GasLimitExceededViolation {
|
||||
max_gas_fee_per_gas: max_gas_fee_per_gas.map(u256_to_proto_bytes),
|
||||
max_priority_fee_per_gas: max_priority_fee_per_gas.map(u256_to_proto_bytes),
|
||||
}),
|
||||
EvalViolation::RateLimitExceeded => ProtoEvalViolationKind::RateLimitExceeded(()),
|
||||
EvalViolation::VolumetricLimitExceeded => {
|
||||
ProtoEvalViolationKind::VolumetricLimitExceeded(())
|
||||
}
|
||||
EvalViolation::InvalidTime => ProtoEvalViolationKind::InvalidTime(()),
|
||||
EvalViolation::InvalidTransactionType => {
|
||||
ProtoEvalViolationKind::InvalidTransactionType(())
|
||||
}
|
||||
EvalViolation::MismatchingChainId { expected, actual } => {
|
||||
ProtoEvalViolationKind::ChainIdMismatch(proto_eval_violation::ChainIdMismatch {
|
||||
expected,
|
||||
actual,
|
||||
})
|
||||
}
|
||||
};
|
||||
|
||||
ProtoEvalViolation { kind: Some(kind) }
|
||||
}
|
||||
}
|
||||
|
||||
impl Convert for VetError {
|
||||
type Output = EvmSignTransactionResult;
|
||||
|
||||
fn convert(self) -> Self::Output {
|
||||
let kind = match self {
|
||||
VetError::ContractCreationNotSupported => {
|
||||
ProtoTransactionEvalErrorKind::ContractCreationNotSupported(())
|
||||
}
|
||||
VetError::UnsupportedTransactionType => {
|
||||
ProtoTransactionEvalErrorKind::UnsupportedTransactionType(())
|
||||
}
|
||||
VetError::Evaluated(meaning, policy_error) => match policy_error {
|
||||
PolicyError::NoMatchingGrant => {
|
||||
ProtoTransactionEvalErrorKind::NoMatchingGrant(NoMatchingGrantError {
|
||||
meaning: Some(meaning.convert()),
|
||||
})
|
||||
}
|
||||
PolicyError::Violations(violations) => {
|
||||
ProtoTransactionEvalErrorKind::PolicyViolations(PolicyViolationsError {
|
||||
meaning: Some(meaning.convert()),
|
||||
violations: violations.into_iter().map(Convert::convert).collect(),
|
||||
})
|
||||
}
|
||||
PolicyError::Database(_) | PolicyError::Integrity(_) => {
|
||||
return EvmSignTransactionResult::Error(ProtoEvmError::Internal.into());
|
||||
}
|
||||
},
|
||||
};
|
||||
|
||||
EvmSignTransactionResult::EvalError(ProtoTransactionEvalError { kind: Some(kind) })
|
||||
}
|
||||
}
|
||||
78
server/crates/arbiter-server/src/grpc/mod.rs
Normal file
78
server/crates/arbiter-server/src/grpc/mod.rs
Normal file
@@ -0,0 +1,78 @@
|
||||
use arbiter_proto::{
|
||||
proto::{
|
||||
client::{ClientRequest, ClientResponse},
|
||||
user_agent::{UserAgentRequest, UserAgentResponse},
|
||||
},
|
||||
transport::grpc::GrpcBi,
|
||||
};
|
||||
use tokio_stream::wrappers::ReceiverStream;
|
||||
use tonic::{Request, Response, Status, async_trait};
|
||||
use tracing::info;
|
||||
|
||||
use crate::{
|
||||
actors::{client::ClientConnection, user_agent::UserAgentConnection},
|
||||
grpc::user_agent::start,
|
||||
};
|
||||
|
||||
mod request_tracker;
|
||||
|
||||
pub mod client;
|
||||
pub mod user_agent;
|
||||
|
||||
mod common;
|
||||
|
||||
/// Infallible, consuming conversion from an internal domain type into
/// another representation (in this module, the protobuf wire types).
///
/// A crate-local alternative to [`From`]/[`Into`] so conversions can be
/// defined uniformly for this module's needs.
pub trait Convert {
    /// The type produced by the conversion.
    type Output;

    /// Consumes `self` and returns the converted value.
    fn convert(self) -> Self::Output;
}
|
||||
|
||||
/// Fallible counterpart to [`Convert`]: a consuming conversion that may
/// fail with a typed error.
pub trait TryConvert {
    /// The value produced on success.
    type Output;
    /// The error returned when the conversion fails.
    type Error;

    /// Consumes `self`, returning the converted value or a conversion error.
    fn try_convert(self) -> Result<Self::Output, Self::Error>;
}
|
||||
|
||||
#[async_trait]
|
||||
impl arbiter_proto::proto::arbiter_service_server::ArbiterService for super::Server {
|
||||
type UserAgentStream = ReceiverStream<Result<UserAgentResponse, Status>>;
|
||||
type ClientStream = ReceiverStream<Result<ClientResponse, Status>>;
|
||||
|
||||
#[tracing::instrument(level = "debug", skip(self))]
|
||||
async fn client(
|
||||
&self,
|
||||
request: Request<tonic::Streaming<ClientRequest>>,
|
||||
) -> Result<Response<Self::ClientStream>, Status> {
|
||||
let req_stream = request.into_inner();
|
||||
let (bi, rx) = GrpcBi::from_bi_stream(req_stream);
|
||||
let props = ClientConnection::new(self.context.db.clone(), self.context.actors.clone());
|
||||
tokio::spawn(client::start(props, bi));
|
||||
|
||||
info!(event = "connection established", "grpc.client");
|
||||
|
||||
Ok(Response::new(rx))
|
||||
}
|
||||
|
||||
#[tracing::instrument(level = "debug", skip(self))]
|
||||
async fn user_agent(
|
||||
&self,
|
||||
request: Request<tonic::Streaming<UserAgentRequest>>,
|
||||
) -> Result<Response<Self::UserAgentStream>, Status> {
|
||||
let req_stream = request.into_inner();
|
||||
|
||||
let (bi, rx) = GrpcBi::from_bi_stream(req_stream);
|
||||
|
||||
tokio::spawn(start(
|
||||
UserAgentConnection {
|
||||
db: self.context.db.clone(),
|
||||
actors: self.context.actors.clone(),
|
||||
},
|
||||
bi,
|
||||
));
|
||||
|
||||
info!(event = "connection established", "grpc.user_agent");
|
||||
|
||||
Ok(Response::new(rx))
|
||||
}
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user