Compare commits
69 Commits
push-yyxvk
...
PoC-terror
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
099f76166e | ||
|
|
66026e903a | ||
|
|
3360d3c8c7 | ||
|
|
02980468db | ||
| 84978afd58 | |||
|
|
4cb5b303dc | ||
| 8fde3cec41 | |||
| 17ac195c5d | |||
| c1c5d14133 | |||
| 47144bdf81 | |||
| 42760bbd79 | |||
| d29bca853b | |||
| f8d27a1454 | |||
| 6030f30901 | |||
| a3c401194f | |||
|
|
6386510f52 | ||
| ec36e5c2ea | |||
|
|
ba86d18250 | ||
|
|
606a1f3774 | ||
|
|
b3a67ffc00 | ||
|
|
168290040c | ||
|
|
2b27da224e | ||
|
|
9e92b168ba | ||
|
|
bd159c35e8 | ||
|
|
b3e378b5fc | ||
|
|
b7c4f2e735 | ||
|
|
4a5dd3eea7 | ||
|
|
5af6d8dd9c | ||
|
|
5dfe390ac3 | ||
|
|
43c7b211c3 | ||
|
|
c5f9cfcaa0 | ||
|
|
67fce6f06a | ||
|
|
191b126462 | ||
|
|
cb05407bb6 | ||
| 4beb34764d | |||
|
|
4b4a8f4489 | ||
|
|
54d0fe0505 | ||
|
|
06f4d628db | ||
|
|
657f47e32f | ||
|
|
86f8feb291 | ||
|
|
6deec731e2 | ||
|
|
f5a5c62181 | ||
|
|
b8afd94b21 | ||
|
|
7b57965952 | ||
|
|
9dca7aff27 | ||
|
|
4d1f047baf | ||
|
|
925c7a211f | ||
|
|
d81120f59c | ||
|
|
e118eceb85 | ||
|
|
4a84fe9339 | ||
|
|
c6e13dc476 | ||
|
|
8f5d4cc385 | ||
|
|
2ffd60973d | ||
|
|
08af101b2e | ||
|
|
bb58868333 | ||
|
|
b05cdeec66 | ||
|
|
9ec465706a | ||
|
|
46a3c1768c | ||
|
|
6c8a67c520 | ||
|
|
bbaed3fb97 | ||
|
|
4700bc407e | ||
|
|
281fbcb31d | ||
|
|
a55221573b | ||
|
|
45acb45a05 | ||
|
|
11f1caa6da | ||
|
|
f769c9119b | ||
|
|
1145642255 | ||
|
|
9f33277a4f | ||
|
|
0a8e1dce3f |
5
.gitignore
vendored
5
.gitignore
vendored
@@ -1 +1,4 @@
|
|||||||
target/
|
target/
|
||||||
|
scripts/__pycache__/
|
||||||
|
.DS_Store
|
||||||
|
.cargo/config.toml
|
||||||
1
.vscode/settings.json
vendored
1
.vscode/settings.json
vendored
@@ -1,3 +1,2 @@
|
|||||||
{
|
{
|
||||||
"git.enabled": false
|
|
||||||
}
|
}
|
||||||
@@ -8,7 +8,7 @@ when:
|
|||||||
include: ['.woodpecker/server-*.yaml', 'server/**']
|
include: ['.woodpecker/server-*.yaml', 'server/**']
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: test
|
- name: audit
|
||||||
image: jdxcode/mise:latest
|
image: jdxcode/mise:latest
|
||||||
directory: server
|
directory: server
|
||||||
environment:
|
environment:
|
||||||
|
|||||||
25
.woodpecker/server-lint.yaml
Normal file
25
.woodpecker/server-lint.yaml
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
when:
|
||||||
|
- event: pull_request
|
||||||
|
path:
|
||||||
|
include: ['.woodpecker/server-*.yaml', 'server/**']
|
||||||
|
- event: push
|
||||||
|
branch: main
|
||||||
|
path:
|
||||||
|
include: ['.woodpecker/server-*.yaml', 'server/**']
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: lint
|
||||||
|
image: jdxcode/mise:latest
|
||||||
|
directory: server
|
||||||
|
environment:
|
||||||
|
CARGO_TERM_COLOR: always
|
||||||
|
CARGO_TARGET_DIR: /usr/local/cargo/target
|
||||||
|
CARGO_HOME: /usr/local/cargo/registry
|
||||||
|
volumes:
|
||||||
|
- cargo-target:/usr/local/cargo/target
|
||||||
|
- cargo-registry:/usr/local/cargo/registry
|
||||||
|
commands:
|
||||||
|
- apt-get update && apt-get install -y pkg-config
|
||||||
|
- mise install rust
|
||||||
|
- mise install protoc
|
||||||
|
- mise exec rust -- cargo clippy --all-targets --all-features -- -D warnings
|
||||||
26
.woodpecker/server-vet.yaml
Normal file
26
.woodpecker/server-vet.yaml
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
when:
|
||||||
|
- event: pull_request
|
||||||
|
path:
|
||||||
|
include: ['.woodpecker/server-*.yaml', 'server/**']
|
||||||
|
- event: push
|
||||||
|
branch: main
|
||||||
|
path:
|
||||||
|
include: ['.woodpecker/server-*.yaml', 'server/**']
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: vet
|
||||||
|
image: jdxcode/mise:latest
|
||||||
|
directory: server
|
||||||
|
environment:
|
||||||
|
CARGO_TERM_COLOR: always
|
||||||
|
CARGO_TARGET_DIR: /usr/local/cargo/target
|
||||||
|
CARGO_HOME: /usr/local/cargo/registry
|
||||||
|
volumes:
|
||||||
|
- cargo-target:/usr/local/cargo/target
|
||||||
|
- cargo-registry:/usr/local/cargo/registry
|
||||||
|
commands:
|
||||||
|
- apt-get update && apt-get install -y pkg-config
|
||||||
|
# Install only the necessary Rust toolchain and cargo-vet to speed up the CI
|
||||||
|
- mise install rust
|
||||||
|
- mise install cargo:cargo-vet
|
||||||
|
- mise exec cargo:cargo-vet -- cargo vet
|
||||||
@@ -3,7 +3,6 @@
|
|||||||
Arbiter is a permissioned signing service for cryptocurrency wallets. It runs as a background service on the user's machine with an optional client application for vault management.
|
Arbiter is a permissioned signing service for cryptocurrency wallets. It runs as a background service on the user's machine with an optional client application for vault management.
|
||||||
|
|
||||||
**Core principle:** The vault NEVER exposes key material. It only produces signatures when a request satisfies the configured policies.
|
**Core principle:** The vault NEVER exposes key material. It only produces signatures when a request satisfies the configured policies.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 1. Peer Types
|
## 1. Peer Types
|
||||||
|
|||||||
@@ -4,6 +4,52 @@ This document covers concrete technology choices and dependencies. For the archi
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
## Client Connection Flow
|
||||||
|
|
||||||
|
### New Client Approval
|
||||||
|
|
||||||
|
When a client whose public key is not yet in the database connects, all connected user agents are asked to approve the connection. The first agent to respond determines the outcome; remaining requests are cancelled via a watch channel.
|
||||||
|
|
||||||
|
```mermaid
|
||||||
|
flowchart TD
|
||||||
|
A([Client connects]) --> B[Receive AuthChallengeRequest]
|
||||||
|
B --> C{pubkey in DB?}
|
||||||
|
|
||||||
|
C -- yes --> D[Read nonce\nIncrement nonce in DB]
|
||||||
|
D --> G
|
||||||
|
|
||||||
|
C -- no --> E[Ask all UserAgents:\nClientConnectionRequest]
|
||||||
|
E --> F{First response}
|
||||||
|
F -- denied --> Z([Reject connection])
|
||||||
|
F -- approved --> F2[Cancel remaining\nUserAgent requests]
|
||||||
|
F2 --> F3[INSERT client\nnonce = 1]
|
||||||
|
F3 --> G[Send AuthChallenge\nwith nonce]
|
||||||
|
|
||||||
|
G --> H[Receive AuthChallengeSolution]
|
||||||
|
H --> I{Signature valid?}
|
||||||
|
I -- no --> Z
|
||||||
|
I -- yes --> J([Session started])
|
||||||
|
```
|
||||||
|
|
||||||
|
### Known Issue: Concurrent Registration Race (TOCTOU)
|
||||||
|
|
||||||
|
Two connections presenting the same previously-unknown public key can race through the approval flow simultaneously:
|
||||||
|
|
||||||
|
1. Both check the DB → neither is registered.
|
||||||
|
2. Both request approval from user agents → both receive approval.
|
||||||
|
3. Both `INSERT` the client record → the second insert silently overwrites the first, resetting the nonce.
|
||||||
|
|
||||||
|
This means the first connection's nonce is invalidated by the second, causing its challenge verification to fail. A fix requires either serialising new-client registration (e.g. an in-memory lock keyed on pubkey) or replacing the separate check + insert with an `INSERT OR IGNORE` / upsert guarded by a unique constraint on `public_key`.
|
||||||
|
|
||||||
|
### Nonce Semantics
|
||||||
|
|
||||||
|
The `program_client.nonce` column stores the **next usable nonce** — i.e. it is always one ahead of the nonce last issued in a challenge.
|
||||||
|
|
||||||
|
- **New client:** inserted with `nonce = 1`; the first challenge is issued with `nonce = 0`.
|
||||||
|
- **Existing client:** the current DB value is read and used as the challenge nonce, then immediately incremented within the same exclusive transaction, preventing replay.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## Cryptography
|
## Cryptography
|
||||||
|
|
||||||
### Authentication
|
### Authentication
|
||||||
@@ -27,6 +73,82 @@ This document covers concrete technology choices and dependencies. For the archi
|
|||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
## EVM Policy Engine
|
||||||
|
|
||||||
|
### Overview
|
||||||
|
|
||||||
|
The EVM engine classifies incoming transactions, enforces grant constraints, and records executions. It is the sole path through which a wallet key is used for signing.
|
||||||
|
|
||||||
|
The central abstraction is the `Policy` trait. Each implementation handles one semantic transaction category and owns its own database tables for grant storage and transaction logging.
|
||||||
|
|
||||||
|
### Transaction Evaluation Flow
|
||||||
|
|
||||||
|
`Engine::evaluate_transaction` runs the following steps in order:
|
||||||
|
|
||||||
|
1. **Classify** — Each registered policy's `analyze(context)` inspects the transaction fields (`chain`, `to`, `value`, `calldata`). The first one returning `Some(meaning)` wins. If none match, the transaction is rejected as `UnsupportedTransactionType`.
|
||||||
|
2. **Find grant** — `Policy::try_find_grant` queries for a non-revoked grant covering this wallet, client, chain, and target address.
|
||||||
|
3. **Check shared constraints** — `check_shared_constraints` runs in the engine before any policy-specific logic. It enforces the validity window, gas fee caps, and transaction count rate limit (see below).
|
||||||
|
4. **Evaluate** — `Policy::evaluate` checks the decoded meaning against the grant's policy-specific constraints and returns any violations.
|
||||||
|
5. **Record** — If `RunKind::Execution` and there are no violations, the engine writes to `evm_transaction_log` and calls `Policy::record_transaction` for any policy-specific logging (e.g., token transfer volume).
|
||||||
|
|
||||||
|
### Policy Trait
|
||||||
|
|
||||||
|
| Method | Purpose |
|
||||||
|
|---|---|
|
||||||
|
| `analyze` | Pure — classifies a transaction into a typed `Meaning`, or `None` if this policy doesn't apply |
|
||||||
|
| `evaluate` | Checks the `Meaning` against a `Grant`; returns a list of `EvalViolation`s |
|
||||||
|
| `create_grant` | Inserts policy-specific rows; returns the specific grant ID |
|
||||||
|
| `try_find_grant` | Finds a matching non-revoked grant for the given `EvalContext` |
|
||||||
|
| `find_all_grants` | Returns all non-revoked grants (used for listing) |
|
||||||
|
| `record_transaction` | Persists policy-specific data after execution |
|
||||||
|
|
||||||
|
`analyze` and `evaluate` are intentionally separate: classification is pure and cheap, while evaluation may involve DB queries (e.g., fetching past transfer volume).
|
||||||
|
|
||||||
|
### Registered Policies
|
||||||
|
|
||||||
|
**EtherTransfer** — plain ETH transfers (empty calldata)
|
||||||
|
|
||||||
|
- Grant requires: allowlist of recipient addresses + one volumetric rate limit (max ETH over a time window)
|
||||||
|
- Violations: recipient not in allowlist, cumulative ETH volume exceeded
|
||||||
|
|
||||||
|
**TokenTransfer** — ERC-20 `transfer(address,uint256)` calls
|
||||||
|
|
||||||
|
- Recognised by ABI-decoding the `transfer(address,uint256)` selector against a static registry of known token contracts (`arbiter_tokens_registry`)
|
||||||
|
- Grant requires: token contract address, optional recipient restriction, zero or more volumetric rate limits
|
||||||
|
- Violations: recipient mismatch, any volumetric limit exceeded
|
||||||
|
|
||||||
|
### Grant Model
|
||||||
|
|
||||||
|
Every grant has two layers:
|
||||||
|
|
||||||
|
- **Shared (`evm_basic_grant`)** — wallet, chain, validity period, gas fee caps, transaction count rate limit. One row per grant regardless of type.
|
||||||
|
- **Specific** — policy-owned tables (`evm_ether_transfer_grant`, `evm_token_transfer_grant`, etc.) holding type-specific configuration.
|
||||||
|
|
||||||
|
`find_all_grants` uses a `#[diesel::auto_type]` base join between the specific and shared tables, then batch-loads related rows (targets, volume limits) in two additional queries to avoid N+1.
|
||||||
|
|
||||||
|
The engine exposes `list_all_grants` which collects across all policy types into `Vec<Grant<SpecificGrant>>` via a blanket `From<Grant<S>> for Grant<SpecificGrant>` conversion.
|
||||||
|
|
||||||
|
### Shared Constraints (enforced by the engine)
|
||||||
|
|
||||||
|
These are checked centrally in `check_shared_constraints` before policy evaluation:
|
||||||
|
|
||||||
|
| Constraint | Fields | Behaviour |
|
||||||
|
|---|---|---|
|
||||||
|
| Validity window | `valid_from`, `valid_until` | Emits `InvalidTime` if current time is outside the range |
|
||||||
|
| Gas fee cap | `max_gas_fee_per_gas`, `max_priority_fee_per_gas` | Emits `GasLimitExceeded` if either cap is breached |
|
||||||
|
| Tx count rate limit | `rate_limit` (`count` + `window`) | Counts rows in `evm_transaction_log` within the window; emits `RateLimitExceeded` if at or above the limit |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### Known Limitations
|
||||||
|
|
||||||
|
- **Only EIP-1559 transactions are supported.** Legacy and EIP-2930 types are rejected outright.
|
||||||
|
- **No opaque-calldata (unknown contract) grant type.** The architecture describes a category for unrecognised contracts, but no policy implements it yet. Any transaction that is not a plain ETH transfer or a known ERC-20 transfer is unconditionally rejected.
|
||||||
|
- **Token registry is static.** Tokens are recognised only if they appear in the hard-coded `arbiter_tokens_registry` crate. There is no mechanism to register additional contracts at runtime.
|
||||||
|
- **Nonce management is not implemented.** The architecture lists nonce deduplication as a core responsibility, but no nonce tracking or enforcement exists yet.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
## Memory Protection
|
## Memory Protection
|
||||||
|
|
||||||
The unsealed root key must be held in a hardened memory cell resistant to dumps, page swaps, and hibernation.
|
The unsealed root key must be held in a hardened memory cell resistant to dumps, page swaps, and hibernation.
|
||||||
|
|||||||
190
LICENSE
Normal file
190
LICENSE
Normal file
@@ -0,0 +1,190 @@
|
|||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
Copyright 2026 MarketTakers
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
13
README.md
Normal file
13
README.md
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
# Arbiter
|
||||||
|
> Policy-first multi-client wallet daemon, allowing permissioned transactions across blockchains
|
||||||
|
|
||||||
|
## Security warning
|
||||||
|
Arbiter can't meaningfully protect against host compromise. Potential attack flow:
|
||||||
|
- Attacker steals TLS keys from database
|
||||||
|
- Pretends to be server; just accepts user agent challenge solutions
|
||||||
|
- Pretends to be in sealed state and performs DH with client
|
||||||
|
- Steals user password and derives seal key
|
||||||
|
|
||||||
|
While this attack is highly targeted, it's still possible.
|
||||||
|
|
||||||
|
> This software is experimental. Do not use with funds you cannot afford to lose.
|
||||||
31
app/.dart_tool/extension_discovery/README.md
Normal file
31
app/.dart_tool/extension_discovery/README.md
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
Extension Discovery Cache
|
||||||
|
=========================
|
||||||
|
|
||||||
|
This folder is used by `package:extension_discovery` to cache lists of
|
||||||
|
packages that contains extensions for other packages.
|
||||||
|
|
||||||
|
DO NOT USE THIS FOLDER
|
||||||
|
----------------------
|
||||||
|
|
||||||
|
* Do not read (or rely) the contents of this folder.
|
||||||
|
* Do write to this folder.
|
||||||
|
|
||||||
|
If you're interested in the lists of extensions stored in this folder use the
|
||||||
|
API offered by package `extension_discovery` to get this information.
|
||||||
|
|
||||||
|
If this package doesn't work for your use-case, then don't try to read the
|
||||||
|
contents of this folder. It may change, and will not remain stable.
|
||||||
|
|
||||||
|
Use package `extension_discovery`
|
||||||
|
---------------------------------
|
||||||
|
|
||||||
|
If you want to access information from this folder.
|
||||||
|
|
||||||
|
Feel free to delete this folder
|
||||||
|
-------------------------------
|
||||||
|
|
||||||
|
Files in this folder act as a cache, and the cache is discarded if the files
|
||||||
|
are older than the modification time of `.dart_tool/package_config.json`.
|
||||||
|
|
||||||
|
Hence, it should never be necessary to clear this cache manually, if you find a
|
||||||
|
need to do please file a bug.
|
||||||
1
app/.dart_tool/extension_discovery/vs_code.json
Normal file
1
app/.dart_tool/extension_discovery/vs_code.json
Normal file
@@ -0,0 +1 @@
|
|||||||
|
{"version":2,"entries":[{"package":"app","rootUri":"../","packageUri":"lib/"}]}
|
||||||
178
app/.dart_tool/package_config.json
Normal file
178
app/.dart_tool/package_config.json
Normal file
@@ -0,0 +1,178 @@
|
|||||||
|
{
|
||||||
|
"configVersion": 2,
|
||||||
|
"packages": [
|
||||||
|
{
|
||||||
|
"name": "async",
|
||||||
|
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/async-2.13.0",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.4"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "boolean_selector",
|
||||||
|
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/boolean_selector-2.1.2",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.1"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "characters",
|
||||||
|
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/characters-1.4.0",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.4"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "clock",
|
||||||
|
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/clock-1.1.2",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.4"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "collection",
|
||||||
|
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/collection-1.19.1",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.4"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "cupertino_icons",
|
||||||
|
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/cupertino_icons-1.0.8",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.1"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "fake_async",
|
||||||
|
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/fake_async-1.3.3",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.3"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "flutter",
|
||||||
|
"rootUri": "file:///Users/kaska/.local/share/mise/installs/flutter/3.38.9-stable/packages/flutter",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.8"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "flutter_lints",
|
||||||
|
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/flutter_lints-6.0.0",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.8"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "flutter_test",
|
||||||
|
"rootUri": "file:///Users/kaska/.local/share/mise/installs/flutter/3.38.9-stable/packages/flutter_test",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.8"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "leak_tracker",
|
||||||
|
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/leak_tracker-11.0.2",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.2"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "leak_tracker_flutter_testing",
|
||||||
|
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/leak_tracker_flutter_testing-3.0.10",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.2"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "leak_tracker_testing",
|
||||||
|
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/leak_tracker_testing-3.0.2",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.2"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "lints",
|
||||||
|
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/lints-6.1.0",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.8"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "matcher",
|
||||||
|
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/matcher-0.12.17",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.4"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "material_color_utilities",
|
||||||
|
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/material_color_utilities-0.11.1",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "2.17"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "meta",
|
||||||
|
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/meta-1.17.0",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.5"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "path",
|
||||||
|
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/path-1.9.1",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.4"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "sky_engine",
|
||||||
|
"rootUri": "file:///Users/kaska/.local/share/mise/installs/flutter/3.38.9-stable/bin/cache/pkg/sky_engine",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.8"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "source_span",
|
||||||
|
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/source_span-1.10.2",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.1"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "stack_trace",
|
||||||
|
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/stack_trace-1.12.1",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.4"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "stream_channel",
|
||||||
|
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/stream_channel-2.1.4",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.3"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "string_scanner",
|
||||||
|
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/string_scanner-1.4.1",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.1"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "term_glyph",
|
||||||
|
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/term_glyph-1.2.2",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.1"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "test_api",
|
||||||
|
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/test_api-0.7.7",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.5"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "vector_math",
|
||||||
|
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/vector_math-2.2.0",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.1"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "vm_service",
|
||||||
|
"rootUri": "file:///Users/kaska/.pub-cache/hosted/pub.dev/vm_service-15.0.2",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.5"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "app",
|
||||||
|
"rootUri": "../",
|
||||||
|
"packageUri": "lib/",
|
||||||
|
"languageVersion": "3.10"
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"generator": "pub",
|
||||||
|
"generatorVersion": "3.10.8",
|
||||||
|
"flutterRoot": "file:///Users/kaska/.local/share/mise/installs/flutter/3.38.9-stable",
|
||||||
|
"flutterVersion": "3.38.9",
|
||||||
|
"pubCache": "file:///Users/kaska/.pub-cache"
|
||||||
|
}
|
||||||
230
app/.dart_tool/package_graph.json
Normal file
230
app/.dart_tool/package_graph.json
Normal file
@@ -0,0 +1,230 @@
|
|||||||
|
{
|
||||||
|
"roots": [
|
||||||
|
"app"
|
||||||
|
],
|
||||||
|
"packages": [
|
||||||
|
{
|
||||||
|
"name": "app",
|
||||||
|
"version": "1.0.0+1",
|
||||||
|
"dependencies": [
|
||||||
|
"cupertino_icons",
|
||||||
|
"flutter"
|
||||||
|
],
|
||||||
|
"devDependencies": [
|
||||||
|
"flutter_lints",
|
||||||
|
"flutter_test"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "flutter_lints",
|
||||||
|
"version": "6.0.0",
|
||||||
|
"dependencies": [
|
||||||
|
"lints"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "flutter_test",
|
||||||
|
"version": "0.0.0",
|
||||||
|
"dependencies": [
|
||||||
|
"clock",
|
||||||
|
"collection",
|
||||||
|
"fake_async",
|
||||||
|
"flutter",
|
||||||
|
"leak_tracker_flutter_testing",
|
||||||
|
"matcher",
|
||||||
|
"meta",
|
||||||
|
"path",
|
||||||
|
"stack_trace",
|
||||||
|
"stream_channel",
|
||||||
|
"test_api",
|
||||||
|
"vector_math"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "cupertino_icons",
|
||||||
|
"version": "1.0.8",
|
||||||
|
"dependencies": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "flutter",
|
||||||
|
"version": "0.0.0",
|
||||||
|
"dependencies": [
|
||||||
|
"characters",
|
||||||
|
"collection",
|
||||||
|
"material_color_utilities",
|
||||||
|
"meta",
|
||||||
|
"sky_engine",
|
||||||
|
"vector_math"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "lints",
|
||||||
|
"version": "6.1.0",
|
||||||
|
"dependencies": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "stream_channel",
|
||||||
|
"version": "2.1.4",
|
||||||
|
"dependencies": [
|
||||||
|
"async"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "meta",
|
||||||
|
"version": "1.17.0",
|
||||||
|
"dependencies": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "collection",
|
||||||
|
"version": "1.19.1",
|
||||||
|
"dependencies": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "leak_tracker_flutter_testing",
|
||||||
|
"version": "3.0.10",
|
||||||
|
"dependencies": [
|
||||||
|
"flutter",
|
||||||
|
"leak_tracker",
|
||||||
|
"leak_tracker_testing",
|
||||||
|
"matcher",
|
||||||
|
"meta"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "vector_math",
|
||||||
|
"version": "2.2.0",
|
||||||
|
"dependencies": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "stack_trace",
|
||||||
|
"version": "1.12.1",
|
||||||
|
"dependencies": [
|
||||||
|
"path"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "clock",
|
||||||
|
"version": "1.1.2",
|
||||||
|
"dependencies": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "fake_async",
|
||||||
|
"version": "1.3.3",
|
||||||
|
"dependencies": [
|
||||||
|
"clock",
|
||||||
|
"collection"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "path",
|
||||||
|
"version": "1.9.1",
|
||||||
|
"dependencies": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "matcher",
|
||||||
|
"version": "0.12.17",
|
||||||
|
"dependencies": [
|
||||||
|
"async",
|
||||||
|
"meta",
|
||||||
|
"stack_trace",
|
||||||
|
"term_glyph",
|
||||||
|
"test_api"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "test_api",
|
||||||
|
"version": "0.7.7",
|
||||||
|
"dependencies": [
|
||||||
|
"async",
|
||||||
|
"boolean_selector",
|
||||||
|
"collection",
|
||||||
|
"meta",
|
||||||
|
"source_span",
|
||||||
|
"stack_trace",
|
||||||
|
"stream_channel",
|
||||||
|
"string_scanner",
|
||||||
|
"term_glyph"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "sky_engine",
|
||||||
|
"version": "0.0.0",
|
||||||
|
"dependencies": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "material_color_utilities",
|
||||||
|
"version": "0.11.1",
|
||||||
|
"dependencies": [
|
||||||
|
"collection"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "characters",
|
||||||
|
"version": "1.4.0",
|
||||||
|
"dependencies": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "async",
|
||||||
|
"version": "2.13.0",
|
||||||
|
"dependencies": [
|
||||||
|
"collection",
|
||||||
|
"meta"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "leak_tracker_testing",
|
||||||
|
"version": "3.0.2",
|
||||||
|
"dependencies": [
|
||||||
|
"leak_tracker",
|
||||||
|
"matcher",
|
||||||
|
"meta"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "leak_tracker",
|
||||||
|
"version": "11.0.2",
|
||||||
|
"dependencies": [
|
||||||
|
"clock",
|
||||||
|
"collection",
|
||||||
|
"meta",
|
||||||
|
"path",
|
||||||
|
"vm_service"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "term_glyph",
|
||||||
|
"version": "1.2.2",
|
||||||
|
"dependencies": []
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "string_scanner",
|
||||||
|
"version": "1.4.1",
|
||||||
|
"dependencies": [
|
||||||
|
"source_span"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "source_span",
|
||||||
|
"version": "1.10.2",
|
||||||
|
"dependencies": [
|
||||||
|
"collection",
|
||||||
|
"path",
|
||||||
|
"term_glyph"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "boolean_selector",
|
||||||
|
"version": "2.1.2",
|
||||||
|
"dependencies": [
|
||||||
|
"source_span",
|
||||||
|
"string_scanner"
|
||||||
|
]
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"name": "vm_service",
|
||||||
|
"version": "15.0.2",
|
||||||
|
"dependencies": []
|
||||||
|
}
|
||||||
|
],
|
||||||
|
"configVersion": 1
|
||||||
|
}
|
||||||
1
app/.dart_tool/version
Normal file
1
app/.dart_tool/version
Normal file
@@ -0,0 +1 @@
|
|||||||
|
3.38.9
|
||||||
11
app/macos/Flutter/ephemeral/Flutter-Generated.xcconfig
Normal file
11
app/macos/Flutter/ephemeral/Flutter-Generated.xcconfig
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
// This is a generated file; do not edit or check into version control.
|
||||||
|
FLUTTER_ROOT=/Users/kaska/.local/share/mise/installs/flutter/3.38.9-stable
|
||||||
|
FLUTTER_APPLICATION_PATH=/Users/kaska/Documents/Projects/Major/arbiter/app
|
||||||
|
COCOAPODS_PARALLEL_CODE_SIGN=true
|
||||||
|
FLUTTER_BUILD_DIR=build
|
||||||
|
FLUTTER_BUILD_NAME=1.0.0
|
||||||
|
FLUTTER_BUILD_NUMBER=1
|
||||||
|
DART_OBFUSCATION=false
|
||||||
|
TRACK_WIDGET_CREATION=true
|
||||||
|
TREE_SHAKE_ICONS=false
|
||||||
|
PACKAGE_CONFIG=.dart_tool/package_config.json
|
||||||
12
app/macos/Flutter/ephemeral/flutter_export_environment.sh
Executable file
12
app/macos/Flutter/ephemeral/flutter_export_environment.sh
Executable file
@@ -0,0 +1,12 @@
|
|||||||
|
#!/bin/sh
|
||||||
|
# This is a generated file; do not edit or check into version control.
|
||||||
|
export "FLUTTER_ROOT=/Users/kaska/.local/share/mise/installs/flutter/3.38.9-stable"
|
||||||
|
export "FLUTTER_APPLICATION_PATH=/Users/kaska/Documents/Projects/Major/arbiter/app"
|
||||||
|
export "COCOAPODS_PARALLEL_CODE_SIGN=true"
|
||||||
|
export "FLUTTER_BUILD_DIR=build"
|
||||||
|
export "FLUTTER_BUILD_NAME=1.0.0"
|
||||||
|
export "FLUTTER_BUILD_NUMBER=1"
|
||||||
|
export "DART_OBFUSCATION=false"
|
||||||
|
export "TRACK_WIDGET_CREATION=true"
|
||||||
|
export "TREE_SHAKE_ICONS=false"
|
||||||
|
export "PACKAGE_CONFIG=.dart_tool/package_config.json"
|
||||||
@@ -1,89 +0,0 @@
|
|||||||
name: app
|
|
||||||
description: "A new Flutter project."
|
|
||||||
# The following line prevents the package from being accidentally published to
|
|
||||||
# pub.dev using `flutter pub publish`. This is preferred for private packages.
|
|
||||||
publish_to: 'none' # Remove this line if you wish to publish to pub.dev
|
|
||||||
|
|
||||||
# The following defines the version and build number for your application.
|
|
||||||
# A version number is three numbers separated by dots, like 1.2.43
|
|
||||||
# followed by an optional build number separated by a +.
|
|
||||||
# Both the version and the builder number may be overridden in flutter
|
|
||||||
# build by specifying --build-name and --build-number, respectively.
|
|
||||||
# In Android, build-name is used as versionName while build-number used as versionCode.
|
|
||||||
# Read more about Android versioning at https://developer.android.com/studio/publish/versioning
|
|
||||||
# In iOS, build-name is used as CFBundleShortVersionString while build-number is used as CFBundleVersion.
|
|
||||||
# Read more about iOS versioning at
|
|
||||||
# https://developer.apple.com/library/archive/documentation/General/Reference/InfoPlistKeyReference/Articles/CoreFoundationKeys.html
|
|
||||||
# In Windows, build-name is used as the major, minor, and patch parts
|
|
||||||
# of the product and file versions while build-number is used as the build suffix.
|
|
||||||
version: 1.0.0+1
|
|
||||||
|
|
||||||
environment:
|
|
||||||
sdk: ^3.10.8
|
|
||||||
|
|
||||||
# Dependencies specify other packages that your package needs in order to work.
|
|
||||||
# To automatically upgrade your package dependencies to the latest versions
|
|
||||||
# consider running `flutter pub upgrade --major-versions`. Alternatively,
|
|
||||||
# dependencies can be manually updated by changing the version numbers below to
|
|
||||||
# the latest version available on pub.dev. To see which dependencies have newer
|
|
||||||
# versions available, run `flutter pub outdated`.
|
|
||||||
dependencies:
|
|
||||||
flutter:
|
|
||||||
sdk: flutter
|
|
||||||
|
|
||||||
# The following adds the Cupertino Icons font to your application.
|
|
||||||
# Use with the CupertinoIcons class for iOS style icons.
|
|
||||||
cupertino_icons: ^1.0.8
|
|
||||||
|
|
||||||
dev_dependencies:
|
|
||||||
flutter_test:
|
|
||||||
sdk: flutter
|
|
||||||
|
|
||||||
# The "flutter_lints" package below contains a set of recommended lints to
|
|
||||||
# encourage good coding practices. The lint set provided by the package is
|
|
||||||
# activated in the `analysis_options.yaml` file located at the root of your
|
|
||||||
# package. See that file for information about deactivating specific lint
|
|
||||||
# rules and activating additional ones.
|
|
||||||
flutter_lints: ^6.0.0
|
|
||||||
|
|
||||||
# For information on the generic Dart part of this file, see the
|
|
||||||
# following page: https://dart.dev/tools/pub/pubspec
|
|
||||||
|
|
||||||
# The following section is specific to Flutter packages.
|
|
||||||
flutter:
|
|
||||||
|
|
||||||
# The following line ensures that the Material Icons font is
|
|
||||||
# included with your application, so that you can use the icons in
|
|
||||||
# the material Icons class.
|
|
||||||
uses-material-design: true
|
|
||||||
|
|
||||||
# To add assets to your application, add an assets section, like this:
|
|
||||||
# assets:
|
|
||||||
# - images/a_dot_burr.jpeg
|
|
||||||
# - images/a_dot_ham.jpeg
|
|
||||||
|
|
||||||
# An image asset can refer to one or more resolution-specific "variants", see
|
|
||||||
# https://flutter.dev/to/resolution-aware-images
|
|
||||||
|
|
||||||
# For details regarding adding assets from package dependencies, see
|
|
||||||
# https://flutter.dev/to/asset-from-package
|
|
||||||
|
|
||||||
# To add custom fonts to your application, add a fonts section here,
|
|
||||||
# in this "flutter" section. Each entry in this list should have a
|
|
||||||
# "family" key with the font family name, and a "fonts" key with a
|
|
||||||
# list giving the asset and other descriptors for the font. For
|
|
||||||
# example:
|
|
||||||
# fonts:
|
|
||||||
# - family: Schyler
|
|
||||||
# fonts:
|
|
||||||
# - asset: fonts/Schyler-Regular.ttf
|
|
||||||
# - asset: fonts/Schyler-Italic.ttf
|
|
||||||
# style: italic
|
|
||||||
# - family: Trajan Pro
|
|
||||||
# fonts:
|
|
||||||
# - asset: fonts/TrajanPro.ttf
|
|
||||||
# - asset: fonts/TrajanPro_Bold.ttf
|
|
||||||
# weight: 700
|
|
||||||
#
|
|
||||||
# For details regarding fonts from package dependencies,
|
|
||||||
# see https://flutter.dev/to/font-from-package
|
|
||||||
17
mise.lock
17
mise.lock
@@ -10,10 +10,18 @@ backend = "cargo:cargo-features"
|
|||||||
version = "0.11.1"
|
version = "0.11.1"
|
||||||
backend = "cargo:cargo-features-manager"
|
backend = "cargo:cargo-features-manager"
|
||||||
|
|
||||||
|
[[tools."cargo:cargo-insta"]]
|
||||||
|
version = "1.46.3"
|
||||||
|
backend = "cargo:cargo-insta"
|
||||||
|
|
||||||
[[tools."cargo:cargo-nextest"]]
|
[[tools."cargo:cargo-nextest"]]
|
||||||
version = "0.9.126"
|
version = "0.9.126"
|
||||||
backend = "cargo:cargo-nextest"
|
backend = "cargo:cargo-nextest"
|
||||||
|
|
||||||
|
[[tools."cargo:cargo-shear"]]
|
||||||
|
version = "1.9.1"
|
||||||
|
backend = "cargo:cargo-shear"
|
||||||
|
|
||||||
[[tools."cargo:cargo-vet"]]
|
[[tools."cargo:cargo-vet"]]
|
||||||
version = "0.10.2"
|
version = "0.10.2"
|
||||||
backend = "cargo:cargo-vet"
|
backend = "cargo:cargo-vet"
|
||||||
@@ -47,6 +55,15 @@ backend = "aqua:protocolbuffers/protobuf/protoc"
|
|||||||
"platforms.macos-x64" = { checksum = "sha256:312f04713946921cc0187ef34df80241ddca1bab6f564c636885fd2cc90d3f88", url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-osx-x86_64.zip"}
|
"platforms.macos-x64" = { checksum = "sha256:312f04713946921cc0187ef34df80241ddca1bab6f564c636885fd2cc90d3f88", url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-osx-x86_64.zip"}
|
||||||
"platforms.windows-x64" = { checksum = "sha256:1ebd7c87baffb9f1c47169b640872bf5fb1e4408079c691af527be9561d8f6f7", url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-win64.zip"}
|
"platforms.windows-x64" = { checksum = "sha256:1ebd7c87baffb9f1c47169b640872bf5fb1e4408079c691af527be9561d8f6f7", url = "https://github.com/protocolbuffers/protobuf/releases/download/v29.6/protoc-29.6-win64.zip"}
|
||||||
|
|
||||||
|
[[tools.python]]
|
||||||
|
version = "3.14.3"
|
||||||
|
backend = "core:python"
|
||||||
|
"platforms.linux-arm64" = { checksum = "sha256:be0f4dc2932f762292b27d46ea7d3e8e66ddf3969a5eb0254a229015ed402625", url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260303/cpython-3.14.3+20260303-aarch64-unknown-linux-gnu-install_only_stripped.tar.gz"}
|
||||||
|
"platforms.linux-x64" = { checksum = "sha256:0a73413f89efd417871876c9accaab28a9d1e3cd6358fbfff171a38ec99302f0", url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260303/cpython-3.14.3+20260303-x86_64-unknown-linux-gnu-install_only_stripped.tar.gz"}
|
||||||
|
"platforms.macos-arm64" = { checksum = "sha256:4703cdf18b26798fde7b49b6b66149674c25f97127be6a10dbcf29309bdcdcdb", url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260303/cpython-3.14.3+20260303-aarch64-apple-darwin-install_only_stripped.tar.gz"}
|
||||||
|
"platforms.macos-x64" = { checksum = "sha256:76f1cc26e3d262eae8ca546a93e8bded10cf0323613f7e246fea2e10a8115eb7", url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260303/cpython-3.14.3+20260303-x86_64-apple-darwin-install_only_stripped.tar.gz"}
|
||||||
|
"platforms.windows-x64" = { checksum = "sha256:950c5f21a015c1bdd1337f233456df2470fab71e4d794407d27a84cb8b9909a0", url = "https://github.com/astral-sh/python-build-standalone/releases/download/20260303/cpython-3.14.3+20260303-x86_64-pc-windows-msvc-install_only_stripped.tar.gz"}
|
||||||
|
|
||||||
[[tools.rust]]
|
[[tools.rust]]
|
||||||
version = "1.93.0"
|
version = "1.93.0"
|
||||||
backend = "core:rust"
|
backend = "core:rust"
|
||||||
|
|||||||
@@ -2,9 +2,11 @@
|
|||||||
"cargo:diesel_cli" = { version = "2.3.6", features = "sqlite,sqlite-bundled", default-features = false }
|
"cargo:diesel_cli" = { version = "2.3.6", features = "sqlite,sqlite-bundled", default-features = false }
|
||||||
"cargo:cargo-audit" = "0.22.1"
|
"cargo:cargo-audit" = "0.22.1"
|
||||||
"cargo:cargo-vet" = "0.10.2"
|
"cargo:cargo-vet" = "0.10.2"
|
||||||
|
|
||||||
flutter = "3.38.9-stable"
|
flutter = "3.38.9-stable"
|
||||||
protoc = "29.6"
|
protoc = "29.6"
|
||||||
rust = "1.93.0"
|
"rust" = {version = "1.93.0", components = "clippy"}
|
||||||
"cargo:cargo-features-manager" = "0.11.1"
|
"cargo:cargo-features-manager" = "0.11.1"
|
||||||
"cargo:cargo-nextest" = "0.9.126"
|
"cargo:cargo-nextest" = "0.9.126"
|
||||||
|
"cargo:cargo-shear" = "latest"
|
||||||
|
"cargo:cargo-insta" = "1.46.3"
|
||||||
|
python = "3.14.3"
|
||||||
|
|||||||
@@ -2,30 +2,8 @@ syntax = "proto3";
|
|||||||
|
|
||||||
package arbiter;
|
package arbiter;
|
||||||
|
|
||||||
import "auth.proto";
|
import "client.proto";
|
||||||
|
import "user_agent.proto";
|
||||||
message ClientRequest {
|
|
||||||
oneof payload {
|
|
||||||
arbiter.auth.ClientMessage auth_message = 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
message ClientResponse {
|
|
||||||
oneof payload {
|
|
||||||
arbiter.auth.ServerMessage auth_message = 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
message UserAgentRequest {
|
|
||||||
oneof payload {
|
|
||||||
arbiter.auth.ClientMessage auth_message = 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
message UserAgentResponse {
|
|
||||||
oneof payload {
|
|
||||||
arbiter.auth.ServerMessage auth_message = 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
message ServerInfo {
|
message ServerInfo {
|
||||||
string version = 1;
|
string version = 1;
|
||||||
@@ -33,6 +11,6 @@ message ServerInfo {
|
|||||||
}
|
}
|
||||||
|
|
||||||
service ArbiterService {
|
service ArbiterService {
|
||||||
rpc Client(stream ClientRequest) returns (stream ClientResponse);
|
rpc Client(stream arbiter.client.ClientRequest) returns (stream arbiter.client.ClientResponse);
|
||||||
rpc UserAgent(stream UserAgentRequest) returns (stream UserAgentResponse);
|
rpc UserAgent(stream arbiter.user_agent.UserAgentRequest) returns (stream arbiter.user_agent.UserAgentResponse);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,35 +0,0 @@
|
|||||||
syntax = "proto3";
|
|
||||||
|
|
||||||
package arbiter.auth;
|
|
||||||
|
|
||||||
import "google/protobuf/timestamp.proto";
|
|
||||||
|
|
||||||
message AuthChallengeRequest {
|
|
||||||
bytes pubkey = 1;
|
|
||||||
optional string bootstrap_token = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message AuthChallenge {
|
|
||||||
bytes pubkey = 1;
|
|
||||||
int32 nonce = 2;
|
|
||||||
}
|
|
||||||
|
|
||||||
message AuthChallengeSolution {
|
|
||||||
bytes signature = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message AuthOk {}
|
|
||||||
|
|
||||||
message ClientMessage {
|
|
||||||
oneof payload {
|
|
||||||
AuthChallengeRequest auth_challenge_request = 1;
|
|
||||||
AuthChallengeSolution auth_challenge_solution = 2;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
message ServerMessage {
|
|
||||||
oneof payload {
|
|
||||||
AuthChallenge auth_challenge = 1;
|
|
||||||
AuthOk auth_ok = 2;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
47
protobufs/client.proto
Normal file
47
protobufs/client.proto
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
syntax = "proto3";
|
||||||
|
|
||||||
|
package arbiter.client;
|
||||||
|
|
||||||
|
import "evm.proto";
|
||||||
|
|
||||||
|
message AuthChallengeRequest {
|
||||||
|
bytes pubkey = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthChallenge {
|
||||||
|
bytes pubkey = 1;
|
||||||
|
int32 nonce = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthChallengeSolution {
|
||||||
|
bytes signature = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthOk {}
|
||||||
|
|
||||||
|
message ClientRequest {
|
||||||
|
oneof payload {
|
||||||
|
AuthChallengeRequest auth_challenge_request = 1;
|
||||||
|
AuthChallengeSolution auth_challenge_solution = 2;
|
||||||
|
arbiter.evm.EvmSignTransactionRequest evm_sign_transaction = 3;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
message ClientConnectError {
|
||||||
|
enum Code {
|
||||||
|
UNKNOWN = 0;
|
||||||
|
APPROVAL_DENIED = 1;
|
||||||
|
NO_USER_AGENTS_ONLINE = 2;
|
||||||
|
}
|
||||||
|
Code code = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message ClientResponse {
|
||||||
|
oneof payload {
|
||||||
|
AuthChallenge auth_challenge = 1;
|
||||||
|
AuthOk auth_ok = 2;
|
||||||
|
ClientConnectError client_connect_error = 5;
|
||||||
|
arbiter.evm.EvmSignTransactionResponse evm_sign_transaction = 3;
|
||||||
|
arbiter.evm.EvmAnalyzeTransactionResponse evm_analyze_transaction = 4;
|
||||||
|
}
|
||||||
|
}
|
||||||
216
protobufs/evm.proto
Normal file
216
protobufs/evm.proto
Normal file
@@ -0,0 +1,216 @@
|
|||||||
|
syntax = "proto3";
|
||||||
|
|
||||||
|
package arbiter.evm;
|
||||||
|
|
||||||
|
import "google/protobuf/empty.proto";
|
||||||
|
import "google/protobuf/timestamp.proto";
|
||||||
|
|
||||||
|
enum EvmError {
|
||||||
|
EVM_ERROR_UNSPECIFIED = 0;
|
||||||
|
EVM_ERROR_VAULT_SEALED = 1;
|
||||||
|
EVM_ERROR_INTERNAL = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message WalletEntry {
|
||||||
|
bytes address = 1; // 20-byte Ethereum address
|
||||||
|
}
|
||||||
|
|
||||||
|
message WalletList {
|
||||||
|
repeated WalletEntry wallets = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message WalletCreateResponse {
|
||||||
|
oneof result {
|
||||||
|
WalletEntry wallet = 1;
|
||||||
|
EvmError error = 2;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
message WalletListResponse {
|
||||||
|
oneof result {
|
||||||
|
WalletList wallets = 1;
|
||||||
|
EvmError error = 2;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- Grant types ---
|
||||||
|
|
||||||
|
message TransactionRateLimit {
|
||||||
|
uint32 count = 1;
|
||||||
|
int64 window_secs = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message VolumeRateLimit {
|
||||||
|
bytes max_volume = 1; // U256 as big-endian bytes
|
||||||
|
int64 window_secs = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message SharedSettings {
|
||||||
|
int32 wallet_id = 1;
|
||||||
|
uint64 chain_id = 2;
|
||||||
|
optional google.protobuf.Timestamp valid_from = 3;
|
||||||
|
optional google.protobuf.Timestamp valid_until = 4;
|
||||||
|
optional bytes max_gas_fee_per_gas = 5; // U256 as big-endian bytes
|
||||||
|
optional bytes max_priority_fee_per_gas = 6; // U256 as big-endian bytes
|
||||||
|
optional TransactionRateLimit rate_limit = 7;
|
||||||
|
}
|
||||||
|
|
||||||
|
message EtherTransferSettings {
|
||||||
|
repeated bytes targets = 1; // list of 20-byte Ethereum addresses
|
||||||
|
VolumeRateLimit limit = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message TokenTransferSettings {
|
||||||
|
bytes token_contract = 1; // 20-byte Ethereum address
|
||||||
|
optional bytes target = 2; // 20-byte Ethereum address; absent means any recipient allowed
|
||||||
|
repeated VolumeRateLimit volume_limits = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
message SpecificGrant {
|
||||||
|
oneof grant {
|
||||||
|
EtherTransferSettings ether_transfer = 1;
|
||||||
|
TokenTransferSettings token_transfer = 2;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
message EtherTransferMeaning {
|
||||||
|
bytes to = 1; // 20-byte Ethereum address
|
||||||
|
bytes value = 2; // U256 as big-endian bytes
|
||||||
|
}
|
||||||
|
|
||||||
|
message TokenInfo {
|
||||||
|
string symbol = 1;
|
||||||
|
bytes address = 2; // 20-byte Ethereum address
|
||||||
|
uint64 chain_id = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mirror of token_transfers::Meaning
|
||||||
|
message TokenTransferMeaning {
|
||||||
|
TokenInfo token = 1;
|
||||||
|
bytes to = 2; // 20-byte Ethereum address
|
||||||
|
bytes value = 3; // U256 as big-endian bytes
|
||||||
|
}
|
||||||
|
|
||||||
|
// Mirror of policies::SpecificMeaning
|
||||||
|
message SpecificMeaning {
|
||||||
|
oneof meaning {
|
||||||
|
EtherTransferMeaning ether_transfer = 1;
|
||||||
|
TokenTransferMeaning token_transfer = 2;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- Eval error types ---
|
||||||
|
message GasLimitExceededViolation {
|
||||||
|
optional bytes max_gas_fee_per_gas = 1; // U256 as big-endian bytes
|
||||||
|
optional bytes max_priority_fee_per_gas = 2; // U256 as big-endian bytes
|
||||||
|
}
|
||||||
|
|
||||||
|
message EvalViolation {
|
||||||
|
oneof kind {
|
||||||
|
bytes invalid_target = 1; // 20-byte Ethereum address
|
||||||
|
GasLimitExceededViolation gas_limit_exceeded = 2;
|
||||||
|
google.protobuf.Empty rate_limit_exceeded = 3;
|
||||||
|
google.protobuf.Empty volumetric_limit_exceeded = 4;
|
||||||
|
google.protobuf.Empty invalid_time = 5;
|
||||||
|
google.protobuf.Empty invalid_transaction_type = 6;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Transaction was classified but no grant covers it
|
||||||
|
message NoMatchingGrantError {
|
||||||
|
SpecificMeaning meaning = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Transaction was classified and a grant was found, but constraints were violated
|
||||||
|
message PolicyViolationsError {
|
||||||
|
SpecificMeaning meaning = 1;
|
||||||
|
repeated EvalViolation violations = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
// top-level error returned when transaction evaluation fails
|
||||||
|
message TransactionEvalError {
|
||||||
|
oneof kind {
|
||||||
|
google.protobuf.Empty contract_creation_not_supported = 1;
|
||||||
|
google.protobuf.Empty unsupported_transaction_type = 2;
|
||||||
|
NoMatchingGrantError no_matching_grant = 3;
|
||||||
|
PolicyViolationsError policy_violations = 4;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- UserAgent grant management ---
|
||||||
|
message EvmGrantCreateRequest {
|
||||||
|
int32 client_id = 1;
|
||||||
|
SharedSettings shared = 2;
|
||||||
|
SpecificGrant specific = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
message EvmGrantCreateResponse {
|
||||||
|
oneof result {
|
||||||
|
int32 grant_id = 1;
|
||||||
|
EvmError error = 2;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
message EvmGrantDeleteRequest {
|
||||||
|
int32 grant_id = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message EvmGrantDeleteResponse {
|
||||||
|
oneof result {
|
||||||
|
google.protobuf.Empty ok = 1;
|
||||||
|
EvmError error = 2;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Basic grant info returned in grant listings
|
||||||
|
message GrantEntry {
|
||||||
|
int32 id = 1;
|
||||||
|
int32 client_id = 2;
|
||||||
|
SharedSettings shared = 3;
|
||||||
|
SpecificGrant specific = 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
message EvmGrantListRequest {
|
||||||
|
optional int32 wallet_id = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message EvmGrantListResponse {
|
||||||
|
oneof result {
|
||||||
|
EvmGrantList grants = 1;
|
||||||
|
EvmError error = 2;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
message EvmGrantList {
|
||||||
|
repeated GrantEntry grants = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- Client transaction operations ---
|
||||||
|
|
||||||
|
message EvmSignTransactionRequest {
|
||||||
|
bytes wallet_address = 1; // 20-byte Ethereum address
|
||||||
|
bytes rlp_transaction = 2; // RLP-encoded EIP-1559 transaction (unsigned)
|
||||||
|
}
|
||||||
|
|
||||||
|
// oneof because signing and evaluation happen atomically — a signing failure
|
||||||
|
// is always either an eval error or an internal error, never a partial success
|
||||||
|
message EvmSignTransactionResponse {
|
||||||
|
oneof result {
|
||||||
|
bytes signature = 1; // 65-byte signature: r[32] || s[32] || v[1]
|
||||||
|
TransactionEvalError eval_error = 2;
|
||||||
|
EvmError error = 3;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
message EvmAnalyzeTransactionRequest {
|
||||||
|
bytes wallet_address = 1; // 20-byte Ethereum address
|
||||||
|
bytes rlp_transaction = 2; // RLP-encoded EIP-1559 transaction
|
||||||
|
}
|
||||||
|
|
||||||
|
message EvmAnalyzeTransactionResponse {
|
||||||
|
oneof result {
|
||||||
|
SpecificMeaning meaning = 1;
|
||||||
|
TransactionEvalError eval_error = 2;
|
||||||
|
EvmError error = 3;
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,14 +0,0 @@
|
|||||||
syntax = "proto3";
|
|
||||||
|
|
||||||
package arbiter.unseal;
|
|
||||||
|
|
||||||
message UserAgentKeyRequest {}
|
|
||||||
|
|
||||||
message ServerKeyResponse {
|
|
||||||
bytes pubkey = 1;
|
|
||||||
}
|
|
||||||
message UserAgentSealedKey {
|
|
||||||
bytes sealed_key = 1;
|
|
||||||
bytes pubkey = 2;
|
|
||||||
bytes nonce = 3;
|
|
||||||
}
|
|
||||||
144
protobufs/user_agent.proto
Normal file
144
protobufs/user_agent.proto
Normal file
@@ -0,0 +1,144 @@
|
|||||||
|
syntax = "proto3";
|
||||||
|
|
||||||
|
package arbiter.user_agent;
|
||||||
|
|
||||||
|
import "google/protobuf/empty.proto";
|
||||||
|
import "evm.proto";
|
||||||
|
|
||||||
|
enum KeyType {
|
||||||
|
KEY_TYPE_UNSPECIFIED = 0;
|
||||||
|
KEY_TYPE_ED25519 = 1;
|
||||||
|
KEY_TYPE_ECDSA_SECP256K1 = 2;
|
||||||
|
KEY_TYPE_RSA = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- SDK client management ---
|
||||||
|
|
||||||
|
enum SdkClientError {
|
||||||
|
SDK_CLIENT_ERROR_UNSPECIFIED = 0;
|
||||||
|
SDK_CLIENT_ERROR_ALREADY_EXISTS = 1;
|
||||||
|
SDK_CLIENT_ERROR_NOT_FOUND = 2;
|
||||||
|
SDK_CLIENT_ERROR_HAS_RELATED_DATA = 3; // hard-delete blocked by FK (client has grants or transaction logs)
|
||||||
|
SDK_CLIENT_ERROR_INTERNAL = 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
message SdkClientApproveRequest {
|
||||||
|
bytes pubkey = 1; // 32-byte ed25519 public key
|
||||||
|
}
|
||||||
|
|
||||||
|
message SdkClientRevokeRequest {
|
||||||
|
int32 client_id = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message SdkClientEntry {
|
||||||
|
int32 id = 1;
|
||||||
|
bytes pubkey = 2;
|
||||||
|
int32 created_at = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
message SdkClientList {
|
||||||
|
repeated SdkClientEntry clients = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message SdkClientApproveResponse {
|
||||||
|
oneof result {
|
||||||
|
SdkClientEntry client = 1;
|
||||||
|
SdkClientError error = 2;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
message SdkClientRevokeResponse {
|
||||||
|
oneof result {
|
||||||
|
google.protobuf.Empty ok = 1;
|
||||||
|
SdkClientError error = 2;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
message SdkClientListResponse {
|
||||||
|
oneof result {
|
||||||
|
SdkClientList clients = 1;
|
||||||
|
SdkClientError error = 2;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthChallengeRequest {
|
||||||
|
bytes pubkey = 1;
|
||||||
|
optional string bootstrap_token = 2;
|
||||||
|
KeyType key_type = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthChallenge {
|
||||||
|
bytes pubkey = 1;
|
||||||
|
int32 nonce = 2;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthChallengeSolution {
|
||||||
|
bytes signature = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message AuthOk {}
|
||||||
|
|
||||||
|
message UnsealStart {
|
||||||
|
bytes client_pubkey = 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
message UnsealStartResponse {
|
||||||
|
bytes server_pubkey = 1;
|
||||||
|
}
|
||||||
|
message UnsealEncryptedKey {
|
||||||
|
bytes nonce = 1;
|
||||||
|
bytes ciphertext = 2;
|
||||||
|
bytes associated_data = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
enum UnsealResult {
|
||||||
|
UNSEAL_RESULT_UNSPECIFIED = 0;
|
||||||
|
UNSEAL_RESULT_SUCCESS = 1;
|
||||||
|
UNSEAL_RESULT_INVALID_KEY = 2;
|
||||||
|
UNSEAL_RESULT_UNBOOTSTRAPPED = 3;
|
||||||
|
}
|
||||||
|
|
||||||
|
enum VaultState {
|
||||||
|
VAULT_STATE_UNSPECIFIED = 0;
|
||||||
|
VAULT_STATE_UNBOOTSTRAPPED = 1;
|
||||||
|
VAULT_STATE_SEALED = 2;
|
||||||
|
VAULT_STATE_UNSEALED = 3;
|
||||||
|
VAULT_STATE_ERROR = 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
message UserAgentRequest {
|
||||||
|
oneof payload {
|
||||||
|
AuthChallengeRequest auth_challenge_request = 1;
|
||||||
|
AuthChallengeSolution auth_challenge_solution = 2;
|
||||||
|
UnsealStart unseal_start = 3;
|
||||||
|
UnsealEncryptedKey unseal_encrypted_key = 4;
|
||||||
|
google.protobuf.Empty query_vault_state = 5;
|
||||||
|
google.protobuf.Empty evm_wallet_create = 6;
|
||||||
|
google.protobuf.Empty evm_wallet_list = 7;
|
||||||
|
arbiter.evm.EvmGrantCreateRequest evm_grant_create = 8;
|
||||||
|
arbiter.evm.EvmGrantDeleteRequest evm_grant_delete = 9;
|
||||||
|
arbiter.evm.EvmGrantListRequest evm_grant_list = 10;
|
||||||
|
// field 11 reserved: was client_connection_response (online approval removed)
|
||||||
|
SdkClientApproveRequest sdk_client_approve = 12;
|
||||||
|
SdkClientRevokeRequest sdk_client_revoke = 13;
|
||||||
|
google.protobuf.Empty sdk_client_list = 14;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
message UserAgentResponse {
|
||||||
|
oneof payload {
|
||||||
|
AuthChallenge auth_challenge = 1;
|
||||||
|
AuthOk auth_ok = 2;
|
||||||
|
UnsealStartResponse unseal_start_response = 3;
|
||||||
|
UnsealResult unseal_result = 4;
|
||||||
|
VaultState vault_state = 5;
|
||||||
|
arbiter.evm.WalletCreateResponse evm_wallet_create = 6;
|
||||||
|
arbiter.evm.WalletListResponse evm_wallet_list = 7;
|
||||||
|
arbiter.evm.EvmGrantCreateResponse evm_grant_create = 8;
|
||||||
|
arbiter.evm.EvmGrantDeleteResponse evm_grant_delete = 9;
|
||||||
|
arbiter.evm.EvmGrantListResponse evm_grant_list = 10;
|
||||||
|
// fields 11, 12 reserved: were client_connection_request, client_connection_cancel (online approval removed)
|
||||||
|
SdkClientApproveResponse sdk_client_approve = 13;
|
||||||
|
SdkClientRevokeResponse sdk_client_revoke = 14;
|
||||||
|
SdkClientListResponse sdk_client_list = 15;
|
||||||
|
}
|
||||||
|
}
|
||||||
BIN
scripts/__pycache__/gen_erc20_registry.cpython-314.pyc
Normal file
BIN
scripts/__pycache__/gen_erc20_registry.cpython-314.pyc
Normal file
Binary file not shown.
150
scripts/gen_erc20_registry.py
Normal file
150
scripts/gen_erc20_registry.py
Normal file
@@ -0,0 +1,150 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
Fetch the Uniswap default token list and emit Rust `TokenInfo` statics.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
python3 gen_erc20_registry.py # fetch from IPFS
|
||||||
|
python3 gen_erc20_registry.py tokens.json # local file
|
||||||
|
python3 gen_erc20_registry.py tokens.json out.rs # custom output file
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import re
|
||||||
|
import sys
|
||||||
|
import unicodedata
|
||||||
|
import urllib.request
|
||||||
|
|
||||||
|
UNISWAP_URL = "https://ipfs.io/ipns/tokens.uniswap.org"
|
||||||
|
|
||||||
|
SOLANA_CHAIN_ID = 501000101
|
||||||
|
IDENTIFIER_RE = re.compile(r"[^A-Za-z0-9]+")
|
||||||
|
|
||||||
|
|
||||||
|
def load_tokens(source=None):
|
||||||
|
if source:
|
||||||
|
with open(source) as f:
|
||||||
|
return json.load(f)
|
||||||
|
req = urllib.request.Request(
|
||||||
|
UNISWAP_URL,
|
||||||
|
headers={"Accept": "application/json", "User-Agent": "gen_tokens/1.0"},
|
||||||
|
)
|
||||||
|
with urllib.request.urlopen(req, timeout=60) as resp:
|
||||||
|
return json.loads(resp.read())
|
||||||
|
|
||||||
|
|
||||||
|
def escape(s: str) -> str:
|
||||||
|
return s.replace("\\", "\\\\").replace('"', '\\"')
|
||||||
|
|
||||||
|
|
||||||
|
def to_screaming_case(name: str) -> str:
|
||||||
|
normalized = unicodedata.normalize("NFKD", name or "")
|
||||||
|
ascii_name = normalized.encode("ascii", "ignore").decode("ascii")
|
||||||
|
snake = IDENTIFIER_RE.sub("_", ascii_name).strip("_").upper()
|
||||||
|
if not snake:
|
||||||
|
snake = "TOKEN"
|
||||||
|
if snake[0].isdigit():
|
||||||
|
snake = f"TOKEN_{snake}"
|
||||||
|
return snake
|
||||||
|
|
||||||
|
|
||||||
|
def static_name_for_token(token: dict, used_names: set[str]) -> str:
|
||||||
|
base = to_screaming_case(token.get("name", ""))
|
||||||
|
if base not in used_names:
|
||||||
|
used_names.add(base)
|
||||||
|
return base
|
||||||
|
|
||||||
|
address = token["address"]
|
||||||
|
suffix = f"{token['chainId']}_{address[2:].upper()[-8:]}"
|
||||||
|
candidate = f"{base}_{suffix}"
|
||||||
|
|
||||||
|
i = 2
|
||||||
|
while candidate in used_names:
|
||||||
|
candidate = f"{base}_{suffix}_{i}"
|
||||||
|
i += 1
|
||||||
|
|
||||||
|
used_names.add(candidate)
|
||||||
|
return candidate
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
source = sys.argv[1] if len(sys.argv) > 1 else None
|
||||||
|
output = sys.argv[2] if len(sys.argv) > 2 else "generated_tokens.rs"
|
||||||
|
data = load_tokens(source)
|
||||||
|
tokens = data["tokens"]
|
||||||
|
|
||||||
|
# Deduplicate by (chainId, address)
|
||||||
|
seen = set()
|
||||||
|
unique = []
|
||||||
|
for t in tokens:
|
||||||
|
key = (t["chainId"], t["address"].lower())
|
||||||
|
if key not in seen:
|
||||||
|
seen.add(key)
|
||||||
|
unique.append(t)
|
||||||
|
|
||||||
|
unique.sort(key=lambda t: (t["chainId"], t.get("symbol", "").upper()))
|
||||||
|
evm_tokens = [t for t in unique if t["chainId"] != SOLANA_CHAIN_ID]
|
||||||
|
|
||||||
|
ver = data["version"]
|
||||||
|
lines = []
|
||||||
|
w = lines.append
|
||||||
|
|
||||||
|
w(
|
||||||
|
f"// Auto-generated from Uniswap token list v{ver['major']}.{ver['minor']}.{ver['patch']}"
|
||||||
|
)
|
||||||
|
w(f"// {len(evm_tokens)} tokens")
|
||||||
|
w("// DO NOT EDIT - regenerate with gen_erc20_registry.py")
|
||||||
|
w("")
|
||||||
|
|
||||||
|
used_static_names = set()
|
||||||
|
token_statics = []
|
||||||
|
for t in evm_tokens:
|
||||||
|
static_name = static_name_for_token(t, used_static_names)
|
||||||
|
token_statics.append((static_name, t))
|
||||||
|
|
||||||
|
for static_name, t in token_statics:
|
||||||
|
addr = t["address"]
|
||||||
|
name = escape(t.get("name", ""))
|
||||||
|
symbol = escape(t.get("symbol", ""))
|
||||||
|
decimals = t.get("decimals", 18)
|
||||||
|
logo = t.get("logoURI")
|
||||||
|
chain = t["chainId"]
|
||||||
|
|
||||||
|
logo_val = f'Some("{escape(logo)}")' if logo else "None"
|
||||||
|
|
||||||
|
w(f"pub static {static_name}: TokenInfo = TokenInfo {{")
|
||||||
|
w(f' name: "{name}",')
|
||||||
|
w(f' symbol: "{symbol}",')
|
||||||
|
w(f" decimals: {decimals},")
|
||||||
|
w(f' contract: address!("{addr}"),')
|
||||||
|
w(f" chain: {chain},")
|
||||||
|
w(f" logo_uri: {logo_val},")
|
||||||
|
w("};")
|
||||||
|
w("")
|
||||||
|
|
||||||
|
w("pub static TOKENS: &[&TokenInfo] = &[")
|
||||||
|
for static_name, _ in token_statics:
|
||||||
|
w(f" &{static_name},")
|
||||||
|
w("];")
|
||||||
|
w("")
|
||||||
|
w("pub fn get_token(")
|
||||||
|
w(" chain_id: alloy::primitives::ChainId,")
|
||||||
|
w(" address: alloy::primitives::Address,")
|
||||||
|
w(") -> Option<&'static TokenInfo> {")
|
||||||
|
w(" match (chain_id, address) {")
|
||||||
|
for static_name, t in token_statics:
|
||||||
|
w(
|
||||||
|
f' ({t["chainId"]}, addr) if addr == address!("{t["address"]}") => Some(&{static_name}),'
|
||||||
|
)
|
||||||
|
w(" _ => None,")
|
||||||
|
w(" }")
|
||||||
|
w("}")
|
||||||
|
w("")
|
||||||
|
|
||||||
|
with open(output, "w") as f:
|
||||||
|
f.write("\n".join(lines))
|
||||||
|
|
||||||
|
print(f"Wrote {len(token_statics)} tokens to {output}")
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == "__main__":
|
||||||
|
main()
|
||||||
13
server/.cargo/audit.toml
Normal file
13
server/.cargo/audit.toml
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
[advisories]
|
||||||
|
# RUSTSEC-2023-0071: Marvin Attack timing side-channel in rsa crate.
|
||||||
|
# No fixed version is available upstream.
|
||||||
|
# RSA support is required for Windows Hello / KeyCredentialManager
|
||||||
|
# (https://learn.microsoft.com/en-us/uwp/api/windows.security.credentials.keycredentialmanager.requestcreateasync),
|
||||||
|
# which only issues RSA-2048 keys.
|
||||||
|
# Mitigations in place:
|
||||||
|
# - Signing uses BlindedSigningKey (PSS+SHA-256), which applies blinding to
|
||||||
|
# protect the private key from timing recovery during signing.
|
||||||
|
# - RSA decryption is never performed; we only verify public-key signatures.
|
||||||
|
# - The attack requires local, high-resolution timing access against the
|
||||||
|
# signing process, which is not exposed in our threat model.
|
||||||
|
ignore = ["RUSTSEC-2023-0071"]
|
||||||
3423
server/Cargo.lock
generated
3423
server/Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -1,23 +1,25 @@
|
|||||||
[workspace]
|
[workspace]
|
||||||
members = [
|
members = [
|
||||||
"crates/arbiter-client",
|
"crates/*",
|
||||||
"crates/arbiter-proto",
|
|
||||||
"crates/arbiter-server",
|
|
||||||
"crates/arbiter-useragent",
|
|
||||||
]
|
]
|
||||||
resolver = "3"
|
resolver = "3"
|
||||||
|
|
||||||
|
[workspace.lints.clippy]
|
||||||
|
disallowed-methods = "deny"
|
||||||
|
|
||||||
|
|
||||||
[workspace.dependencies]
|
[workspace.dependencies]
|
||||||
prost = "0.14.3"
|
tonic = { version = "0.14.3", features = [
|
||||||
tonic = { version = "0.14.3", features = ["deflate", "gzip", "tls-connect-info", "zstd"] }
|
"deflate",
|
||||||
|
"gzip",
|
||||||
|
"tls-connect-info",
|
||||||
|
"zstd",
|
||||||
|
] }
|
||||||
tracing = "0.1.44"
|
tracing = "0.1.44"
|
||||||
tokio = { version = "1.49.0", features = ["full"] }
|
tokio = { version = "1.49.0", features = ["full"] }
|
||||||
ed25519 = "3.0.0-rc.4"
|
|
||||||
ed25519-dalek = { version = "3.0.0-pre.6", features = ["rand_core"] }
|
ed25519-dalek = { version = "3.0.0-pre.6", features = ["rand_core"] }
|
||||||
chrono = { version = "0.4.43", features = ["serde"] }
|
chrono = { version = "0.4.43", features = ["serde"] }
|
||||||
rand = "0.10.0"
|
rand = "0.10.0"
|
||||||
uuid = "1.20.0"
|
|
||||||
rustls = "0.23.36"
|
rustls = "0.23.36"
|
||||||
smlang = "0.8.0"
|
smlang = "0.8.0"
|
||||||
miette = { version = "7.6.0", features = ["fancy", "serde"] }
|
miette = { version = "7.6.0", features = ["fancy", "serde"] }
|
||||||
@@ -27,3 +29,17 @@ futures = "0.3.31"
|
|||||||
tokio-stream = { version = "0.1.18", features = ["full"] }
|
tokio-stream = { version = "0.1.18", features = ["full"] }
|
||||||
kameo = "0.19.2"
|
kameo = "0.19.2"
|
||||||
prost-types = { version = "0.14.3", features = ["chrono"] }
|
prost-types = { version = "0.14.3", features = ["chrono"] }
|
||||||
|
x25519-dalek = { version = "2.0.1", features = ["getrandom"] }
|
||||||
|
rstest = "0.26.1"
|
||||||
|
rustls-pki-types = "1.14.0"
|
||||||
|
alloy = "1.7.3"
|
||||||
|
rcgen = { version = "0.14.7", features = [
|
||||||
|
"aws_lc_rs",
|
||||||
|
"pem",
|
||||||
|
"x509-parser",
|
||||||
|
"zeroize",
|
||||||
|
], default-features = false }
|
||||||
|
k256 = { version = "0.13.4", features = ["ecdsa", "pkcs8"] }
|
||||||
|
rsa = { version = "0.9", features = ["sha2"] }
|
||||||
|
sha2 = "0.10"
|
||||||
|
spki = "0.7"
|
||||||
|
|||||||
9
server/clippy.toml
Normal file
9
server/clippy.toml
Normal file
@@ -0,0 +1,9 @@
|
|||||||
|
disallowed-methods = [
|
||||||
|
# RSA decryption is forbidden: the rsa crate has RUSTSEC-2023-0071 (Marvin Attack).
|
||||||
|
# We only use RSA for Windows Hello (KeyCredentialManager) public-key verification — decryption
|
||||||
|
# is never required and must not be introduced.
|
||||||
|
{ path = "rsa::RsaPrivateKey::decrypt", reason = "RSA decryption is forbidden (RUSTSEC-2023-0071 Marvin Attack). Only PSS signing/verification is permitted." },
|
||||||
|
{ path = "rsa::RsaPrivateKey::decrypt_blinded", reason = "RSA decryption is forbidden (RUSTSEC-2023-0071 Marvin Attack). Only PSS signing/verification is permitted." },
|
||||||
|
{ path = "rsa::traits::Decryptor::decrypt", reason = "RSA decryption is forbidden (RUSTSEC-2023-0071 Marvin Attack). This blocks decrypt() on rsa::{pkcs1v15,oaep}::DecryptingKey." },
|
||||||
|
{ path = "rsa::traits::RandomizedDecryptor::decrypt_with_rng", reason = "RSA decryption is forbidden (RUSTSEC-2023-0071 Marvin Attack). This blocks decrypt_with_rng() on rsa::{pkcs1v15,oaep}::DecryptingKey." },
|
||||||
|
]
|
||||||
BIN
server/crates/.DS_Store
vendored
Normal file
BIN
server/crates/.DS_Store
vendored
Normal file
Binary file not shown.
@@ -3,5 +3,20 @@ name = "arbiter-client"
|
|||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
edition = "2024"
|
edition = "2024"
|
||||||
repository = "https://git.markettakers.org/MarketTakers/arbiter"
|
repository = "https://git.markettakers.org/MarketTakers/arbiter"
|
||||||
|
license = "Apache-2.0"
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
|
arbiter-proto.path = "../arbiter-proto"
|
||||||
|
alloy.workspace = true
|
||||||
|
tonic.workspace = true
|
||||||
|
tonic.features = ["tls-aws-lc"]
|
||||||
|
tokio.workspace = true
|
||||||
|
tokio-stream.workspace = true
|
||||||
|
ed25519-dalek.workspace = true
|
||||||
|
thiserror.workspace = true
|
||||||
|
http = "1.4.0"
|
||||||
|
rustls-webpki = { version = "0.103.9", features = ["aws-lc-rs"] }
|
||||||
|
async-trait.workspace = true
|
||||||
|
|||||||
@@ -1,14 +1,272 @@
|
|||||||
pub fn add(left: u64, right: u64) -> u64 {
|
use alloy::{
|
||||||
left + right
|
consensus::SignableTransaction,
|
||||||
|
network::TxSigner,
|
||||||
|
primitives::{Address, B256, ChainId, Signature},
|
||||||
|
signers::{Error, Result, Signer},
|
||||||
|
};
|
||||||
|
use arbiter_proto::{
|
||||||
|
format_challenge,
|
||||||
|
proto::{
|
||||||
|
arbiter_service_client::ArbiterServiceClient,
|
||||||
|
client::{
|
||||||
|
AuthChallengeRequest, AuthChallengeSolution, ClientRequest, ClientResponse,
|
||||||
|
client_connect_error, client_request::Payload as ClientRequestPayload,
|
||||||
|
client_response::Payload as ClientResponsePayload,
|
||||||
|
},
|
||||||
|
evm::{
|
||||||
|
EvmSignTransactionRequest, evm_sign_transaction_response::Result as SignResponseResult,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
url::ArbiterUrl,
|
||||||
|
};
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use ed25519_dalek::Signer as _;
|
||||||
|
use tokio::sync::{Mutex, mpsc};
|
||||||
|
use tokio_stream::wrappers::ReceiverStream;
|
||||||
|
use tonic::transport::ClientTlsConfig;
|
||||||
|
|
||||||
|
#[derive(Debug, thiserror::Error)]
|
||||||
|
pub enum ConnectError {
|
||||||
|
#[error("Could not establish connection")]
|
||||||
|
Connection(#[from] tonic::transport::Error),
|
||||||
|
|
||||||
|
#[error("Invalid server URI")]
|
||||||
|
InvalidUri(#[from] http::uri::InvalidUri),
|
||||||
|
|
||||||
|
#[error("Invalid CA certificate")]
|
||||||
|
InvalidCaCert(#[from] webpki::Error),
|
||||||
|
|
||||||
|
#[error("gRPC error")]
|
||||||
|
Grpc(#[from] tonic::Status),
|
||||||
|
|
||||||
|
#[error("Auth challenge was not returned by server")]
|
||||||
|
MissingAuthChallenge,
|
||||||
|
|
||||||
|
#[error("Client approval denied by User Agent")]
|
||||||
|
ApprovalDenied,
|
||||||
|
|
||||||
|
#[error("No User Agents online to approve client")]
|
||||||
|
NoUserAgentsOnline,
|
||||||
|
|
||||||
|
#[error("Unexpected auth response payload")]
|
||||||
|
UnexpectedAuthResponse,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
#[derive(Debug, thiserror::Error)]
|
||||||
mod tests {
|
enum ClientSignError {
|
||||||
use super::*;
|
#[error("Transport channel closed")]
|
||||||
|
ChannelClosed,
|
||||||
|
|
||||||
#[test]
|
#[error("Connection closed by server")]
|
||||||
fn it_works() {
|
ConnectionClosed,
|
||||||
let result = add(2, 2);
|
|
||||||
assert_eq!(result, 4);
|
#[error("Invalid response payload")]
|
||||||
|
InvalidResponse,
|
||||||
|
|
||||||
|
#[error("Remote signing was rejected")]
|
||||||
|
Rejected,
|
||||||
|
}
|
||||||
|
|
||||||
|
struct ClientTransport {
|
||||||
|
sender: mpsc::Sender<ClientRequest>,
|
||||||
|
receiver: tonic::Streaming<ClientResponse>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ClientTransport {
|
||||||
|
async fn send(&mut self, request: ClientRequest) -> std::result::Result<(), ClientSignError> {
|
||||||
|
self.sender
|
||||||
|
.send(request)
|
||||||
|
.await
|
||||||
|
.map_err(|_| ClientSignError::ChannelClosed)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn recv(&mut self) -> std::result::Result<ClientResponse, ClientSignError> {
|
||||||
|
match self.receiver.message().await {
|
||||||
|
Ok(Some(resp)) => Ok(resp),
|
||||||
|
Ok(None) => Err(ClientSignError::ConnectionClosed),
|
||||||
|
Err(_) => Err(ClientSignError::ConnectionClosed),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct ArbiterSigner {
|
||||||
|
transport: Mutex<ClientTransport>,
|
||||||
|
address: Address,
|
||||||
|
chain_id: Option<ChainId>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ArbiterSigner {
|
||||||
|
pub async fn connect_grpc(
|
||||||
|
url: ArbiterUrl,
|
||||||
|
key: ed25519_dalek::SigningKey,
|
||||||
|
address: Address,
|
||||||
|
) -> std::result::Result<Self, ConnectError> {
|
||||||
|
let anchor = webpki::anchor_from_trusted_cert(&url.ca_cert)?.to_owned();
|
||||||
|
let tls = ClientTlsConfig::new().trust_anchor(anchor);
|
||||||
|
|
||||||
|
// NOTE: We intentionally keep the same URL construction strategy as the user-agent crate
|
||||||
|
// to avoid behavior drift between the two clients.
|
||||||
|
let channel = tonic::transport::Channel::from_shared(format!("{}:{}", url.host, url.port))?
|
||||||
|
.tls_config(tls)?
|
||||||
|
.connect()
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let mut client = ArbiterServiceClient::new(channel);
|
||||||
|
let (tx, rx) = mpsc::channel(16);
|
||||||
|
let response_stream = client.client(ReceiverStream::new(rx)).await?.into_inner();
|
||||||
|
|
||||||
|
let mut transport = ClientTransport {
|
||||||
|
sender: tx,
|
||||||
|
receiver: response_stream,
|
||||||
|
};
|
||||||
|
|
||||||
|
authenticate(&mut transport, key).await?;
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
transport: Mutex::new(transport),
|
||||||
|
address,
|
||||||
|
chain_id: None,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn sign_transaction_via_arbiter(
|
||||||
|
&self,
|
||||||
|
tx: &mut dyn SignableTransaction<Signature>,
|
||||||
|
) -> Result<Signature> {
|
||||||
|
if let Some(chain_id) = self.chain_id
|
||||||
|
&& !tx.set_chain_id_checked(chain_id)
|
||||||
|
{
|
||||||
|
return Err(Error::TransactionChainIdMismatch {
|
||||||
|
signer: chain_id,
|
||||||
|
tx: tx.chain_id().unwrap(),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut rlp_transaction = Vec::new();
|
||||||
|
tx.encode_for_signing(&mut rlp_transaction);
|
||||||
|
|
||||||
|
let request = ClientRequest {
|
||||||
|
payload: Some(ClientRequestPayload::EvmSignTransaction(
|
||||||
|
EvmSignTransactionRequest {
|
||||||
|
wallet_address: self.address.as_slice().to_vec(),
|
||||||
|
rlp_transaction,
|
||||||
|
},
|
||||||
|
)),
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut transport = self.transport.lock().await;
|
||||||
|
transport.send(request).await.map_err(Error::other)?;
|
||||||
|
let response = transport.recv().await.map_err(Error::other)?;
|
||||||
|
|
||||||
|
let payload = response
|
||||||
|
.payload
|
||||||
|
.ok_or_else(|| Error::other(ClientSignError::InvalidResponse))?;
|
||||||
|
|
||||||
|
let ClientResponsePayload::EvmSignTransaction(sign_response) = payload else {
|
||||||
|
return Err(Error::other(ClientSignError::InvalidResponse));
|
||||||
|
};
|
||||||
|
|
||||||
|
let Some(result) = sign_response.result else {
|
||||||
|
return Err(Error::other(ClientSignError::InvalidResponse));
|
||||||
|
};
|
||||||
|
|
||||||
|
match result {
|
||||||
|
SignResponseResult::Signature(bytes) => {
|
||||||
|
Signature::try_from(bytes.as_slice()).map_err(Error::other)
|
||||||
|
}
|
||||||
|
SignResponseResult::EvalError(_) | SignResponseResult::Error(_) => {
|
||||||
|
Err(Error::other(ClientSignError::Rejected))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn authenticate(
|
||||||
|
transport: &mut ClientTransport,
|
||||||
|
key: ed25519_dalek::SigningKey,
|
||||||
|
) -> std::result::Result<(), ConnectError> {
|
||||||
|
transport
|
||||||
|
.send(ClientRequest {
|
||||||
|
payload: Some(ClientRequestPayload::AuthChallengeRequest(
|
||||||
|
AuthChallengeRequest {
|
||||||
|
pubkey: key.verifying_key().to_bytes().to_vec(),
|
||||||
|
},
|
||||||
|
)),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.map_err(|_| ConnectError::UnexpectedAuthResponse)?;
|
||||||
|
|
||||||
|
let response = transport
|
||||||
|
.recv()
|
||||||
|
.await
|
||||||
|
.map_err(|_| ConnectError::MissingAuthChallenge)?;
|
||||||
|
|
||||||
|
let payload = response.payload.ok_or(ConnectError::MissingAuthChallenge)?;
|
||||||
|
match payload {
|
||||||
|
ClientResponsePayload::AuthChallenge(challenge) => {
|
||||||
|
let challenge_payload = format_challenge(challenge.nonce, &challenge.pubkey);
|
||||||
|
let signature = key.sign(&challenge_payload).to_bytes().to_vec();
|
||||||
|
|
||||||
|
transport
|
||||||
|
.send(ClientRequest {
|
||||||
|
payload: Some(ClientRequestPayload::AuthChallengeSolution(
|
||||||
|
AuthChallengeSolution { signature },
|
||||||
|
)),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.map_err(|_| ConnectError::UnexpectedAuthResponse)?;
|
||||||
|
|
||||||
|
// Current server flow does not emit `AuthOk` for SDK clients, so we proceed after
|
||||||
|
// sending the solution. If authentication fails, the first business request will return
|
||||||
|
// a `ClientConnectError` or the stream will close.
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
ClientResponsePayload::ClientConnectError(err) => {
|
||||||
|
match client_connect_error::Code::try_from(err.code)
|
||||||
|
.unwrap_or(client_connect_error::Code::Unknown)
|
||||||
|
{
|
||||||
|
client_connect_error::Code::ApprovalDenied => Err(ConnectError::ApprovalDenied),
|
||||||
|
client_connect_error::Code::NoUserAgentsOnline => {
|
||||||
|
Err(ConnectError::NoUserAgentsOnline)
|
||||||
|
}
|
||||||
|
client_connect_error::Code::Unknown => Err(ConnectError::UnexpectedAuthResponse),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ => Err(ConnectError::UnexpectedAuthResponse),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl Signer for ArbiterSigner {
|
||||||
|
async fn sign_hash(&self, _hash: &B256) -> Result<Signature> {
|
||||||
|
Err(Error::other(
|
||||||
|
"hash-only signing is not supported for ArbiterSigner; use transaction signing",
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn address(&self) -> Address {
|
||||||
|
self.address
|
||||||
|
}
|
||||||
|
|
||||||
|
fn chain_id(&self) -> Option<ChainId> {
|
||||||
|
self.chain_id
|
||||||
|
}
|
||||||
|
|
||||||
|
fn set_chain_id(&mut self, chain_id: Option<ChainId>) {
|
||||||
|
self.chain_id = chain_id;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl TxSigner<Signature> for ArbiterSigner {
|
||||||
|
fn address(&self) -> Address {
|
||||||
|
self.address
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn sign_transaction(
|
||||||
|
&self,
|
||||||
|
tx: &mut dyn SignableTransaction<Signature>,
|
||||||
|
) -> Result<Signature> {
|
||||||
|
self.sign_transaction_via_arbiter(tx).await
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,23 +3,35 @@ name = "arbiter-proto"
|
|||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
edition = "2024"
|
edition = "2024"
|
||||||
repository = "https://git.markettakers.org/MarketTakers/arbiter"
|
repository = "https://git.markettakers.org/MarketTakers/arbiter"
|
||||||
|
license = "Apache-2.0"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
tonic.workspace = true
|
tonic.workspace = true
|
||||||
prost.workspace = true
|
|
||||||
bytes = "1.11.1"
|
|
||||||
prost-derive = "0.14.3"
|
|
||||||
prost-types.workspace = true
|
|
||||||
tonic-prost = "0.14.3"
|
|
||||||
rkyv = "0.8.15"
|
|
||||||
tokio.workspace = true
|
tokio.workspace = true
|
||||||
futures.workspace = true
|
futures.workspace = true
|
||||||
kameo.workspace = true
|
|
||||||
hex = "0.4.3"
|
hex = "0.4.3"
|
||||||
|
tonic-prost = "0.14.3"
|
||||||
|
prost = "0.14.3"
|
||||||
|
kameo.workspace = true
|
||||||
|
url = "2.5.8"
|
||||||
|
miette.workspace = true
|
||||||
|
thiserror.workspace = true
|
||||||
|
rustls-pki-types.workspace = true
|
||||||
|
base64 = "0.22.1"
|
||||||
|
prost-types.workspace = true
|
||||||
|
tracing.workspace = true
|
||||||
|
async-trait.workspace = true
|
||||||
|
|
||||||
[build-dependencies]
|
[build-dependencies]
|
||||||
prost-build = "0.14.3"
|
|
||||||
serde_json = "1"
|
|
||||||
tonic-prost-build = "0.14.3"
|
tonic-prost-build = "0.14.3"
|
||||||
|
protoc-bin-vendored = "3"
|
||||||
|
|
||||||
|
[dev-dependencies]
|
||||||
|
rstest.workspace = true
|
||||||
|
rand.workspace = true
|
||||||
|
rcgen.workspace = true
|
||||||
|
|
||||||
|
[package.metadata.cargo-shear]
|
||||||
|
ignored = ["tonic-prost", "prost", "kameo"]
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -3,16 +3,25 @@ use tonic_prost_build::configure;
|
|||||||
static PROTOBUF_DIR: &str = "../../../protobufs";
|
static PROTOBUF_DIR: &str = "../../../protobufs";
|
||||||
|
|
||||||
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
if std::env::var("PROTOC").is_err() {
|
||||||
|
println!("cargo:warning=PROTOC environment variable not set, using vendored protoc");
|
||||||
|
let protoc = protoc_bin_vendored::protoc_bin_path().unwrap();
|
||||||
|
unsafe { std::env::set_var("PROTOC", protoc) };
|
||||||
|
}
|
||||||
|
|
||||||
|
println!("cargo::rerun-if-changed={PROTOBUF_DIR}");
|
||||||
|
|
||||||
configure()
|
configure()
|
||||||
.message_attribute(".", "#[derive(::kameo::Reply)]")
|
.message_attribute(".", "#[derive(::kameo::Reply)]")
|
||||||
.compile_protos(
|
.compile_protos(
|
||||||
&[
|
&[
|
||||||
format!("{}/arbiter.proto", PROTOBUF_DIR),
|
format!("{}/arbiter.proto", PROTOBUF_DIR),
|
||||||
format!("{}/auth.proto", PROTOBUF_DIR),
|
format!("{}/user_agent.proto", PROTOBUF_DIR),
|
||||||
|
format!("{}/client.proto", PROTOBUF_DIR),
|
||||||
|
format!("{}/evm.proto", PROTOBUF_DIR),
|
||||||
],
|
],
|
||||||
&[PROTOBUF_DIR.to_string()],
|
&[PROTOBUF_DIR.to_string()],
|
||||||
)
|
)
|
||||||
|
|
||||||
.unwrap();
|
.unwrap();
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,19 +1,28 @@
|
|||||||
use crate::proto::auth::AuthChallenge;
|
pub mod transport;
|
||||||
|
pub mod url;
|
||||||
|
|
||||||
|
use base64::{Engine, prelude::BASE64_STANDARD};
|
||||||
|
|
||||||
pub mod proto {
|
pub mod proto {
|
||||||
tonic::include_proto!("arbiter");
|
tonic::include_proto!("arbiter");
|
||||||
|
|
||||||
pub mod auth {
|
pub mod user_agent {
|
||||||
tonic::include_proto!("arbiter.auth");
|
tonic::include_proto!("arbiter.user_agent");
|
||||||
|
}
|
||||||
|
|
||||||
|
pub mod client {
|
||||||
|
tonic::include_proto!("arbiter.client");
|
||||||
|
}
|
||||||
|
|
||||||
|
pub mod evm {
|
||||||
|
tonic::include_proto!("arbiter.evm");
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub mod transport;
|
pub static BOOTSTRAP_PATH: &str = "bootstrap_token";
|
||||||
|
|
||||||
pub static BOOTSTRAP_TOKEN_PATH: &'static str = "bootstrap_token";
|
|
||||||
|
|
||||||
pub fn home_path() -> Result<std::path::PathBuf, std::io::Error> {
|
pub fn home_path() -> Result<std::path::PathBuf, std::io::Error> {
|
||||||
static ARBITER_HOME: &'static str = ".arbiter";
|
static ARBITER_HOME: &str = ".arbiter";
|
||||||
let home_dir = std::env::home_dir().ok_or(std::io::Error::new(
|
let home_dir = std::env::home_dir().ok_or(std::io::Error::new(
|
||||||
std::io::ErrorKind::PermissionDenied,
|
std::io::ErrorKind::PermissionDenied,
|
||||||
"can not get home directory",
|
"can not get home directory",
|
||||||
@@ -25,7 +34,7 @@ pub fn home_path() -> Result<std::path::PathBuf, std::io::Error> {
|
|||||||
Ok(arbiter_home)
|
Ok(arbiter_home)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn format_challenge(challenge: &AuthChallenge) -> Vec<u8> {
|
pub fn format_challenge(nonce: i32, pubkey: &[u8]) -> Vec<u8> {
|
||||||
let concat_form = format!("{}:{}", challenge.nonce, hex::encode(&challenge.pubkey));
|
let concat_form = format!("{}:{}", nonce, BASE64_STANDARD.encode(pubkey));
|
||||||
concat_form.into_bytes().to_vec()
|
concat_form.into_bytes()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,46 +1,309 @@
|
|||||||
use futures::{Stream, StreamExt};
|
//! Transport-facing abstractions for protocol/session code.
|
||||||
use tokio::sync::mpsc::{self, error::SendError};
|
//!
|
||||||
use tonic::{Status, Streaming};
|
//! This module separates three concerns:
|
||||||
|
//!
|
||||||
|
//! - protocol/session logic wants a small duplex interface ([`Bi`])
|
||||||
|
//! - transport adapters push concrete stream items to an underlying IO layer
|
||||||
|
//! - transport boundaries translate between protocol-facing and transport-facing
|
||||||
|
//! item types via direction-specific converters
|
||||||
|
//!
|
||||||
|
//! [`Bi`] is intentionally minimal and transport-agnostic:
|
||||||
|
//! - [`Bi::recv`] yields inbound protocol messages
|
||||||
|
//! - [`Bi::send`] accepts outbound protocol/domain items
|
||||||
|
//!
|
||||||
|
//! # Generic Ordering Rule
|
||||||
|
//!
|
||||||
|
//! This module uses a single convention consistently: when a type or trait is
|
||||||
|
//! parameterized by protocol message directions, the generic parameters are
|
||||||
|
//! declared as `Inbound` first, then `Outbound`.
|
||||||
|
//!
|
||||||
|
//! For [`Bi`], that means `Bi<Inbound, Outbound>`:
|
||||||
|
//! - `recv() -> Option<Inbound>`
|
||||||
|
//! - `send(Outbound)`
|
||||||
|
//!
|
||||||
|
//! For adapter types that are parameterized by direction-specific converters,
|
||||||
|
//! inbound-related converter parameters are declared before outbound-related
|
||||||
|
//! converter parameters.
|
||||||
|
//!
|
||||||
|
//! [`RecvConverter`] and [`SendConverter`] are infallible conversion traits used
|
||||||
|
//! by adapters to map between protocol-facing and transport-facing item types.
|
||||||
|
//! The traits themselves are not result-aware; adapters decide how transport
|
||||||
|
//! errors are handled before (or instead of) conversion.
|
||||||
|
//!
|
||||||
|
//! [`grpc::GrpcAdapter`] combines:
|
||||||
|
//! - a tonic inbound stream
|
||||||
|
//! - a Tokio sender for outbound transport items
|
||||||
|
//! - a [`RecvConverter`] for the receive path
|
||||||
|
//! - a [`SendConverter`] for the send path
|
||||||
|
//!
|
||||||
|
//! [`DummyTransport`] is a no-op implementation useful for tests and local actor
|
||||||
|
//! execution where no real network stream exists.
|
||||||
|
//!
|
||||||
|
//! # Component Interaction
|
||||||
|
//!
|
||||||
|
//! ```text
|
||||||
|
//! inbound (network -> protocol)
|
||||||
|
//! ============================
|
||||||
|
//!
|
||||||
|
//! tonic::Streaming<RecvTransport>
|
||||||
|
//! -> grpc::GrpcAdapter::recv()
|
||||||
|
//! |
|
||||||
|
//! +--> on `Ok(item)`: RecvConverter::convert(RecvTransport) -> Inbound
|
||||||
|
//! +--> on `Err(status)`: log error and close stream (`None`)
|
||||||
|
//! -> Bi::recv()
|
||||||
|
//! -> protocol/session actor
|
||||||
|
//!
|
||||||
|
//! outbound (protocol -> network)
|
||||||
|
//! ==============================
|
||||||
|
//!
|
||||||
|
//! protocol/session actor
|
||||||
|
//! -> Bi::send(Outbound)
|
||||||
|
//! -> grpc::GrpcAdapter::send()
|
||||||
|
//! |
|
||||||
|
//! +--> SendConverter::convert(Outbound) -> SendTransport
|
||||||
|
//! -> Tokio mpsc::Sender<SendTransport>
|
||||||
|
//! -> tonic response stream
|
||||||
|
//! ```
|
||||||
|
//!
|
||||||
|
//! # Design Notes
|
||||||
|
//!
|
||||||
|
//! - `send()` returns [`Error`] only for transport delivery failures (for
|
||||||
|
//! example, when the outbound channel is closed).
|
||||||
|
//! - [`grpc::GrpcAdapter`] logs tonic receive errors and treats them as stream
|
||||||
|
//! closure (`None`).
|
||||||
|
//! - When protocol-facing and transport-facing types are identical, use
|
||||||
|
//! [`IdentityRecvConverter`] / [`IdentitySendConverter`].
|
||||||
|
|
||||||
|
use std::marker::PhantomData;
|
||||||
|
|
||||||
// Abstraction for stream for sans-io capabilities
|
use async_trait::async_trait;
|
||||||
pub trait Bi<T, U>: Stream<Item = Result<T, Status>> + Send + Sync + 'static {
|
|
||||||
type Error;
|
/// Errors returned by transport adapters implementing [`Bi`].
|
||||||
fn send(
|
#[derive(thiserror::Error, Debug)]
|
||||||
&mut self,
|
pub enum Error {
|
||||||
item: Result<U, Status>,
|
#[error("Transport channel is closed")]
|
||||||
) -> impl std::future::Future<Output = Result<(), Self::Error>> + Send;
|
ChannelClosed,
|
||||||
|
#[error("Unexpected message received")]
|
||||||
|
UnexpectedMessage,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Bi-directional stream abstraction for handling gRPC streaming requests and responses
|
/// Receives one message from `transport` and extracts a value from it using
|
||||||
pub struct BiStream<T, U> {
|
/// `extractor`. Returns [`Error::ChannelClosed`] if the transport closes and
|
||||||
pub request_stream: Streaming<T>,
|
/// [`Error::UnexpectedMessage`] if `extractor` returns `None`.
|
||||||
pub response_sender: mpsc::Sender<Result<U, Status>>,
|
pub async fn expect_message<T, Inbound, Outbound, Target, F>(
|
||||||
}
|
transport: &mut T,
|
||||||
|
extractor: F,
|
||||||
impl<T, U> Stream for BiStream<T, U>
|
) -> Result<Target, Error>
|
||||||
where
|
where
|
||||||
T: Send + 'static,
|
T: Bi<Inbound, Outbound> + ?Sized,
|
||||||
U: Send + 'static,
|
F: FnOnce(Inbound) -> Option<Target>,
|
||||||
{
|
{
|
||||||
type Item = Result<T, Status>;
|
let msg = transport.recv().await.ok_or(Error::ChannelClosed)?;
|
||||||
|
extractor(msg).ok_or(Error::UnexpectedMessage)
|
||||||
|
}
|
||||||
|
|
||||||
fn poll_next(
|
/// Minimal bidirectional transport abstraction used by protocol code.
|
||||||
mut self: std::pin::Pin<&mut Self>,
|
///
|
||||||
cx: &mut std::task::Context<'_>,
|
/// `Bi<Inbound, Outbound>` models a duplex channel with:
|
||||||
) -> std::task::Poll<Option<Self::Item>> {
|
/// - inbound items of type `Inbound` read via [`Bi::recv`]
|
||||||
self.request_stream.poll_next_unpin(cx)
|
/// - outbound items of type `Outbound` written via [`Bi::send`]
|
||||||
|
#[async_trait]
|
||||||
|
pub trait Bi<Inbound, Outbound>: Send + Sync + 'static {
|
||||||
|
async fn send(&mut self, item: Outbound) -> Result<(), Error>;
|
||||||
|
|
||||||
|
async fn recv(&mut self) -> Option<Inbound>;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Converts transport-facing inbound items into protocol-facing inbound items.
|
||||||
|
pub trait RecvConverter: Send + Sync + 'static {
|
||||||
|
type Input;
|
||||||
|
type Output;
|
||||||
|
|
||||||
|
fn convert(&self, item: Self::Input) -> Self::Output;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Converts protocol/domain outbound items into transport-facing outbound items.
|
||||||
|
pub trait SendConverter: Send + Sync + 'static {
|
||||||
|
type Input;
|
||||||
|
type Output;
|
||||||
|
|
||||||
|
fn convert(&self, item: Self::Input) -> Self::Output;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A [`RecvConverter`] that forwards values unchanged.
|
||||||
|
pub struct IdentityRecvConverter<T> {
|
||||||
|
_marker: PhantomData<T>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> IdentityRecvConverter<T> {
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
_marker: PhantomData,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<T, U> Bi<T, U> for BiStream<T, U>
|
impl<T> Default for IdentityRecvConverter<T> {
|
||||||
where
|
fn default() -> Self {
|
||||||
T: Send + 'static,
|
Self::new()
|
||||||
U: Send + 'static,
|
}
|
||||||
{
|
}
|
||||||
type Error = SendError<Result<U, Status>>;
|
|
||||||
|
impl<T> RecvConverter for IdentityRecvConverter<T>
|
||||||
async fn send(&mut self, item: Result<U, Status>) -> Result<(), Self::Error> {
|
where
|
||||||
self.response_sender.send(item).await
|
T: Send + Sync + 'static,
|
||||||
|
{
|
||||||
|
type Input = T;
|
||||||
|
type Output = T;
|
||||||
|
|
||||||
|
fn convert(&self, item: Self::Input) -> Self::Output {
|
||||||
|
item
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// A [`SendConverter`] that forwards values unchanged.
|
||||||
|
pub struct IdentitySendConverter<T> {
|
||||||
|
_marker: PhantomData<T>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> IdentitySendConverter<T> {
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
_marker: PhantomData,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> Default for IdentitySendConverter<T> {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self::new()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> SendConverter for IdentitySendConverter<T>
|
||||||
|
where
|
||||||
|
T: Send + Sync + 'static,
|
||||||
|
{
|
||||||
|
type Input = T;
|
||||||
|
type Output = T;
|
||||||
|
|
||||||
|
fn convert(&self, item: Self::Input) -> Self::Output {
|
||||||
|
item
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// gRPC-specific transport adapters and helpers.
|
||||||
|
pub mod grpc {
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use futures::StreamExt;
|
||||||
|
use tokio::sync::mpsc;
|
||||||
|
use tonic::Streaming;
|
||||||
|
|
||||||
|
use super::{Bi, Error, RecvConverter, SendConverter};
|
||||||
|
|
||||||
|
/// [`Bi`] adapter backed by a tonic gRPC bidirectional stream.
|
||||||
|
///
|
||||||
|
/// Tonic receive errors are logged and treated as stream closure (`None`).
|
||||||
|
/// The receive converter is only invoked for successful inbound transport
|
||||||
|
/// items.
|
||||||
|
pub struct GrpcAdapter<InboundConverter, OutboundConverter>
|
||||||
|
where
|
||||||
|
InboundConverter: RecvConverter,
|
||||||
|
OutboundConverter: SendConverter,
|
||||||
|
{
|
||||||
|
sender: mpsc::Sender<OutboundConverter::Output>,
|
||||||
|
receiver: Streaming<InboundConverter::Input>,
|
||||||
|
inbound_converter: InboundConverter,
|
||||||
|
outbound_converter: OutboundConverter,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<InboundTransport, Inbound, InboundConverter, OutboundConverter>
|
||||||
|
GrpcAdapter<InboundConverter, OutboundConverter>
|
||||||
|
where
|
||||||
|
InboundConverter: RecvConverter<Input = InboundTransport, Output = Inbound>,
|
||||||
|
OutboundConverter: SendConverter,
|
||||||
|
{
|
||||||
|
pub fn new(
|
||||||
|
sender: mpsc::Sender<OutboundConverter::Output>,
|
||||||
|
receiver: Streaming<InboundTransport>,
|
||||||
|
inbound_converter: InboundConverter,
|
||||||
|
outbound_converter: OutboundConverter,
|
||||||
|
) -> Self {
|
||||||
|
Self {
|
||||||
|
sender,
|
||||||
|
receiver,
|
||||||
|
inbound_converter,
|
||||||
|
outbound_converter,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl<InboundConverter, OutboundConverter> Bi<InboundConverter::Output, OutboundConverter::Input>
|
||||||
|
for GrpcAdapter<InboundConverter, OutboundConverter>
|
||||||
|
where
|
||||||
|
InboundConverter: RecvConverter,
|
||||||
|
OutboundConverter: SendConverter,
|
||||||
|
OutboundConverter::Input: Send + 'static,
|
||||||
|
OutboundConverter::Output: Send + 'static,
|
||||||
|
{
|
||||||
|
#[tracing::instrument(level = "trace", skip(self, item))]
|
||||||
|
async fn send(&mut self, item: OutboundConverter::Input) -> Result<(), Error> {
|
||||||
|
let outbound = self.outbound_converter.convert(item);
|
||||||
|
self.sender
|
||||||
|
.send(outbound)
|
||||||
|
.await
|
||||||
|
.map_err(|_| Error::ChannelClosed)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tracing::instrument(level = "trace", skip(self))]
|
||||||
|
async fn recv(&mut self) -> Option<InboundConverter::Output> {
|
||||||
|
match self.receiver.next().await {
|
||||||
|
Some(Ok(item)) => Some(self.inbound_converter.convert(item)),
|
||||||
|
Some(Err(error)) => {
|
||||||
|
tracing::error!(error = ?error, "grpc transport recv failed; closing stream");
|
||||||
|
None
|
||||||
|
}
|
||||||
|
None => None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// No-op [`Bi`] transport for tests and manual actor usage.
|
||||||
|
///
|
||||||
|
/// `send` drops all items and succeeds. [`Bi::recv`] never resolves and therefore
|
||||||
|
/// does not busy-wait or spuriously close the stream.
|
||||||
|
pub struct DummyTransport<Inbound, Outbound> {
|
||||||
|
_marker: PhantomData<(Inbound, Outbound)>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Inbound, Outbound> DummyTransport<Inbound, Outbound> {
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
_marker: PhantomData,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<Inbound, Outbound> Default for DummyTransport<Inbound, Outbound> {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self::new()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl<Inbound, Outbound> Bi<Inbound, Outbound> for DummyTransport<Inbound, Outbound>
|
||||||
|
where
|
||||||
|
Inbound: Send + Sync + 'static,
|
||||||
|
Outbound: Send + Sync + 'static,
|
||||||
|
{
|
||||||
|
async fn send(&mut self, _item: Outbound) -> Result<(), Error> {
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn recv(&mut self) -> Option<Inbound> {
|
||||||
|
std::future::pending::<()>().await;
|
||||||
|
None
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
128
server/crates/arbiter-proto/src/url.rs
Normal file
128
server/crates/arbiter-proto/src/url.rs
Normal file
@@ -0,0 +1,128 @@
|
|||||||
|
use std::fmt::Display;
|
||||||
|
|
||||||
|
use base64::{Engine as _, prelude::BASE64_URL_SAFE};
|
||||||
|
use rustls_pki_types::CertificateDer;
|
||||||
|
|
||||||
|
const ARBITER_URL_SCHEME: &str = "arbiter";
|
||||||
|
const CERT_QUERY_KEY: &str = "cert";
|
||||||
|
const BOOTSTRAP_TOKEN_QUERY_KEY: &str = "bootstrap_token";
|
||||||
|
|
||||||
|
pub struct ArbiterUrl {
|
||||||
|
pub host: String,
|
||||||
|
pub port: u16,
|
||||||
|
pub ca_cert: CertificateDer<'static>,
|
||||||
|
pub bootstrap_token: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Display for ArbiterUrl {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
let mut base = format!(
|
||||||
|
"{ARBITER_URL_SCHEME}://{}:{}?{CERT_QUERY_KEY}={}",
|
||||||
|
self.host,
|
||||||
|
self.port,
|
||||||
|
BASE64_URL_SAFE.encode(&self.ca_cert)
|
||||||
|
);
|
||||||
|
if let Some(token) = &self.bootstrap_token {
|
||||||
|
base.push_str(&format!("&{BOOTSTRAP_TOKEN_QUERY_KEY}={}", token));
|
||||||
|
}
|
||||||
|
f.write_str(&base)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, thiserror::Error, miette::Diagnostic)]
|
||||||
|
pub enum Error {
|
||||||
|
#[error("Invalid URL scheme, expected '{ARBITER_URL_SCHEME}://'")]
|
||||||
|
#[diagnostic(
|
||||||
|
code(arbiter::url::invalid_scheme),
|
||||||
|
help("The URL must start with '{ARBITER_URL_SCHEME}://'")
|
||||||
|
)]
|
||||||
|
InvalidScheme,
|
||||||
|
#[error("Missing host in URL")]
|
||||||
|
#[diagnostic(
|
||||||
|
code(arbiter::url::missing_host),
|
||||||
|
help("The URL must include a host, e.g., '{ARBITER_URL_SCHEME}://127.0.0.1:<port>'")
|
||||||
|
)]
|
||||||
|
MissingHost,
|
||||||
|
#[error("Missing port in URL")]
|
||||||
|
#[diagnostic(
|
||||||
|
code(arbiter::url::missing_port),
|
||||||
|
help("The URL must include a port, e.g., '{ARBITER_URL_SCHEME}://127.0.0.1:1234'")
|
||||||
|
)]
|
||||||
|
MissingPort,
|
||||||
|
#[error("Missing 'cert' query parameter in URL")]
|
||||||
|
#[diagnostic(
|
||||||
|
code(arbiter::url::missing_cert),
|
||||||
|
help("The URL must include a 'cert' query parameter")
|
||||||
|
)]
|
||||||
|
MissingCert,
|
||||||
|
#[error("Invalid base64 in 'cert' query parameter: {0}")]
|
||||||
|
#[diagnostic(code(arbiter::url::invalid_cert_base64))]
|
||||||
|
InvalidCertBase64(#[from] base64::DecodeError),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'a> TryFrom<&'a str> for ArbiterUrl {
|
||||||
|
type Error = Error;
|
||||||
|
|
||||||
|
fn try_from(value: &'a str) -> Result<Self, Self::Error> {
|
||||||
|
let url = url::Url::parse(value).map_err(|_| Error::InvalidScheme)?;
|
||||||
|
|
||||||
|
if url.scheme() != ARBITER_URL_SCHEME {
|
||||||
|
return Err(Error::InvalidScheme);
|
||||||
|
}
|
||||||
|
|
||||||
|
let host = url.host_str().ok_or(Error::MissingHost)?.to_string();
|
||||||
|
let port = url.port().ok_or(Error::MissingPort)?;
|
||||||
|
let cert_str = url
|
||||||
|
.query_pairs()
|
||||||
|
.find(|(k, _)| k == CERT_QUERY_KEY)
|
||||||
|
.ok_or(Error::MissingCert)?
|
||||||
|
.1;
|
||||||
|
|
||||||
|
let cert = BASE64_URL_SAFE.decode(cert_str.as_ref())?;
|
||||||
|
let cert = CertificateDer::from_slice(&cert).into_owned();
|
||||||
|
|
||||||
|
let bootstrap_token = url
|
||||||
|
.query_pairs()
|
||||||
|
.find(|(k, _)| k == BOOTSTRAP_TOKEN_QUERY_KEY)
|
||||||
|
.map(|(_, v)| v.to_string());
|
||||||
|
|
||||||
|
Ok(ArbiterUrl {
|
||||||
|
host,
|
||||||
|
port,
|
||||||
|
ca_cert: cert,
|
||||||
|
bootstrap_token,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use rcgen::generate_simple_self_signed;
|
||||||
|
use rstest::rstest;
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[rstest]
|
||||||
|
|
||||||
|
fn test_parsing_correctness(
|
||||||
|
#[values("127.0.0.1", "localhost", "192.168.1.1", "some.domain.com")] host: &str,
|
||||||
|
|
||||||
|
#[values(None, Some("token123".to_string()))] bootstrap_token: Option<String>,
|
||||||
|
) {
|
||||||
|
let cert = generate_simple_self_signed(&["Arbiter CA".into()]).unwrap();
|
||||||
|
let cert = cert.cert.der();
|
||||||
|
|
||||||
|
let url = ArbiterUrl {
|
||||||
|
host: host.to_string(),
|
||||||
|
port: 1234,
|
||||||
|
ca_cert: cert.clone().into_owned(),
|
||||||
|
bootstrap_token,
|
||||||
|
};
|
||||||
|
let url_str = url.to_string();
|
||||||
|
let parsed_url = ArbiterUrl::try_from(url_str.as_str()).unwrap();
|
||||||
|
assert_eq!(url.host, parsed_url.host);
|
||||||
|
assert_eq!(url.port, parsed_url.port);
|
||||||
|
assert_eq!(url.ca_cert.to_vec(), parsed_url.ca_cert.to_vec());
|
||||||
|
assert_eq!(url.bootstrap_token, parsed_url.bootstrap_token);
|
||||||
|
}
|
||||||
|
}
|
||||||
BIN
server/crates/arbiter-server/.DS_Store
vendored
Normal file
BIN
server/crates/arbiter-server/.DS_Store
vendored
Normal file
Binary file not shown.
@@ -3,26 +3,25 @@ name = "arbiter-server"
|
|||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
edition = "2024"
|
edition = "2024"
|
||||||
repository = "https://git.markettakers.org/MarketTakers/arbiter"
|
repository = "https://git.markettakers.org/MarketTakers/arbiter"
|
||||||
|
license = "Apache-2.0"
|
||||||
|
|
||||||
|
[lints]
|
||||||
|
workspace = true
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
diesel = { version = "2.3.6", features = [
|
diesel = { version = "2.3.6", features = ["chrono", "returning_clauses_for_sqlite_3_35", "serde_json", "time", "uuid"] }
|
||||||
"sqlite",
|
|
||||||
"uuid",
|
|
||||||
"time",
|
|
||||||
"chrono",
|
|
||||||
"serde_json",
|
|
||||||
] }
|
|
||||||
diesel-async = { version = "0.7.4", features = [
|
diesel-async = { version = "0.7.4", features = [
|
||||||
"bb8",
|
"bb8",
|
||||||
"migrations",
|
"migrations",
|
||||||
"sqlite",
|
"sqlite",
|
||||||
"tokio",
|
"tokio",
|
||||||
] }
|
] }
|
||||||
ed25519.workspace = true
|
|
||||||
ed25519-dalek.workspace = true
|
ed25519-dalek.workspace = true
|
||||||
arbiter-proto.path = "../arbiter-proto"
|
arbiter-proto.path = "../arbiter-proto"
|
||||||
tracing.workspace = true
|
tracing.workspace = true
|
||||||
|
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||||
tonic.workspace = true
|
tonic.workspace = true
|
||||||
|
tonic.features = ["tls-aws-lc"]
|
||||||
tokio.workspace = true
|
tokio.workspace = true
|
||||||
rustls.workspace = true
|
rustls.workspace = true
|
||||||
smlang.workspace = true
|
smlang.workspace = true
|
||||||
@@ -30,32 +29,29 @@ miette.workspace = true
|
|||||||
thiserror.workspace = true
|
thiserror.workspace = true
|
||||||
diesel_migrations = { version = "2.3.1", features = ["sqlite"] }
|
diesel_migrations = { version = "2.3.1", features = ["sqlite"] }
|
||||||
async-trait.workspace = true
|
async-trait.workspace = true
|
||||||
statig = { version = "0.4.1", features = ["async"] }
|
|
||||||
secrecy = "0.10.3"
|
secrecy = "0.10.3"
|
||||||
futures.workspace = true
|
futures.workspace = true
|
||||||
tokio-stream.workspace = true
|
tokio-stream.workspace = true
|
||||||
dashmap = "6.1.0"
|
dashmap = "6.1.0"
|
||||||
rand.workspace = true
|
rand.workspace = true
|
||||||
rcgen = { version = "0.14.7", features = [
|
rcgen.workspace = true
|
||||||
"aws_lc_rs",
|
|
||||||
"pem",
|
|
||||||
"x509-parser",
|
|
||||||
"zeroize",
|
|
||||||
], default-features = false }
|
|
||||||
rkyv = { version = "0.8.15", features = [
|
|
||||||
"aligned",
|
|
||||||
"little_endian",
|
|
||||||
"pointer_width_64",
|
|
||||||
] }
|
|
||||||
restructed = "0.2.2"
|
|
||||||
chrono.workspace = true
|
chrono.workspace = true
|
||||||
bytes = "1.11.1"
|
|
||||||
memsafe = "0.4.0"
|
memsafe = "0.4.0"
|
||||||
chacha20poly1305 = { version = "0.10.1", features = ["std"] }
|
|
||||||
zeroize = { version = "1.8.2", features = ["std", "simd"] }
|
zeroize = { version = "1.8.2", features = ["std", "simd"] }
|
||||||
kameo.workspace = true
|
kameo.workspace = true
|
||||||
prost-types.workspace = true
|
x25519-dalek.workspace = true
|
||||||
|
chacha20poly1305 = { version = "0.10.1", features = ["std"] }
|
||||||
|
argon2 = { version = "0.5.3", features = ["zeroize"] }
|
||||||
|
restructed = "0.2.2"
|
||||||
|
strum = { version = "0.27.2", features = ["derive"] }
|
||||||
|
pem = "3.0.6"
|
||||||
|
k256.workspace = true
|
||||||
|
rsa.workspace = true
|
||||||
|
sha2.workspace = true
|
||||||
|
spki.workspace = true
|
||||||
|
alloy.workspace = true
|
||||||
|
arbiter-tokens-registry.path = "../arbiter-tokens-registry"
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
|
insta = "1.46.3"
|
||||||
test-log = { version = "0.2", default-features = false, features = ["trace"] }
|
test-log = { version = "0.2", default-features = false, features = ["trace"] }
|
||||||
tempfile = "3.25.0"
|
|
||||||
@@ -1,31 +1,159 @@
|
|||||||
create table if not exists aead_encrypted (
|
create table if not exists root_key_history (
|
||||||
id INTEGER not null PRIMARY KEY,
|
id INTEGER not null PRIMARY KEY,
|
||||||
current_nonce integer not null default(1), -- if re-encrypted, this should be incremented
|
-- root key stored as aead encrypted artifact, with only difference that it's decrypted by unseal key (derived from user password)
|
||||||
|
root_key_encryption_nonce blob not null default(1), -- if re-encrypted, this should be incremented. Used for encrypting root key
|
||||||
|
data_encryption_nonce blob not null default(1), -- nonce used for encrypting with key itself
|
||||||
ciphertext blob not null,
|
ciphertext blob not null,
|
||||||
tag blob not null,
|
tag blob not null,
|
||||||
schema_version integer not null default(1) -- server would need to reencrypt, because this means that we have changed algorithm
|
schema_version integer not null default(1), -- server would need to reencrypt, because this means that we have changed algorithm
|
||||||
|
salt blob not null -- for key deriviation
|
||||||
|
) STRICT;
|
||||||
|
|
||||||
|
create table if not exists aead_encrypted (
|
||||||
|
id INTEGER not null PRIMARY KEY,
|
||||||
|
current_nonce blob not null default(1), -- if re-encrypted, this should be incremented
|
||||||
|
ciphertext blob not null,
|
||||||
|
tag blob not null,
|
||||||
|
schema_version integer not null default(1), -- server would need to reencrypt, because this means that we have changed algorithm
|
||||||
|
associated_root_key_id integer not null references root_key_history (id) on delete RESTRICT,
|
||||||
|
created_at integer not null default(unixepoch ('now'))
|
||||||
|
) STRICT;
|
||||||
|
|
||||||
|
create unique index if not exists uniq_nonce_per_root_key on aead_encrypted (
|
||||||
|
current_nonce,
|
||||||
|
associated_root_key_id
|
||||||
|
);
|
||||||
|
|
||||||
|
create table if not exists tls_history (
|
||||||
|
id INTEGER not null PRIMARY KEY,
|
||||||
|
cert text not null,
|
||||||
|
cert_key text not null, -- PEM Encoded private key
|
||||||
|
ca_cert text not null,
|
||||||
|
ca_key text not null, -- PEM Encoded private key
|
||||||
|
created_at integer not null default(unixepoch ('now'))
|
||||||
) STRICT;
|
) STRICT;
|
||||||
|
|
||||||
-- This is a singleton
|
-- This is a singleton
|
||||||
create table if not exists arbiter_settings (
|
create table if not exists arbiter_settings (
|
||||||
id INTEGER not null PRIMARY KEY CHECK (id = 1), -- singleton row, id must be 1
|
id INTEGER not null PRIMARY KEY CHECK (id = 1), -- singleton row, id must be 1
|
||||||
root_key_id integer references aead_encrypted (id) on delete RESTRICT, -- if null, means wasn't bootstrapped yet
|
root_key_id integer references root_key_history (id) on delete RESTRICT, -- if null, means wasn't bootstrapped yet
|
||||||
cert_key blob not null,
|
tls_id integer references tls_history (id) on delete RESTRICT
|
||||||
cert blob not null
|
|
||||||
) STRICT;
|
) STRICT;
|
||||||
|
|
||||||
|
insert into arbiter_settings (id) values (1) on conflict do nothing; -- ensure singleton row exists
|
||||||
|
|
||||||
create table if not exists useragent_client (
|
create table if not exists useragent_client (
|
||||||
id integer not null primary key,
|
id integer not null primary key,
|
||||||
nonce integer not null default (1), -- used for auth challenge
|
nonce integer not null default(1), -- used for auth challenge
|
||||||
public_key blob not null,
|
public_key blob not null,
|
||||||
|
key_type integer not null default(1), -- 1=Ed25519, 2=ECDSA(secp256k1)
|
||||||
created_at integer not null default(unixepoch ('now')),
|
created_at integer not null default(unixepoch ('now')),
|
||||||
updated_at integer not null default(unixepoch ('now'))
|
updated_at integer not null default(unixepoch ('now'))
|
||||||
) STRICT;
|
) STRICT;
|
||||||
|
|
||||||
create table if not exists program_client (
|
create table if not exists program_client (
|
||||||
id integer not null primary key,
|
id integer not null primary key,
|
||||||
nonce integer not null default (1), -- used for auth challenge
|
nonce integer not null default(1), -- used for auth challenge
|
||||||
public_key blob not null,
|
public_key blob not null,
|
||||||
created_at integer not null default(unixepoch ('now')),
|
created_at integer not null default(unixepoch ('now')),
|
||||||
updated_at integer not null default(unixepoch ('now'))
|
updated_at integer not null default(unixepoch ('now'))
|
||||||
) STRICT;
|
) STRICT;
|
||||||
|
|
||||||
|
create table if not exists evm_wallet (
|
||||||
|
id integer not null primary key,
|
||||||
|
address blob not null, -- 20-byte Ethereum address
|
||||||
|
aead_encrypted_id integer not null references aead_encrypted (id) on delete RESTRICT,
|
||||||
|
created_at integer not null default(unixepoch ('now'))
|
||||||
|
) STRICT;
|
||||||
|
|
||||||
|
create unique index if not exists uniq_evm_wallet_address on evm_wallet (address);
|
||||||
|
create unique index if not exists uniq_evm_wallet_aead on evm_wallet (aead_encrypted_id);
|
||||||
|
|
||||||
|
create table if not exists evm_ether_transfer_limit (
|
||||||
|
id integer not null primary key,
|
||||||
|
window_secs integer not null, -- window duration in seconds
|
||||||
|
max_volume blob not null -- big-endian 32-byte U256
|
||||||
|
) STRICT;
|
||||||
|
|
||||||
|
-- Shared grant properties: client scope, timeframe, fee caps, and rate limit
|
||||||
|
create table if not exists evm_basic_grant (
|
||||||
|
id integer not null primary key,
|
||||||
|
wallet_id integer not null references evm_wallet(id) on delete restrict,
|
||||||
|
client_id integer not null references program_client(id) on delete restrict,
|
||||||
|
chain_id integer not null, -- EIP-155 chain ID
|
||||||
|
valid_from integer, -- unix timestamp (seconds), null = no lower bound
|
||||||
|
valid_until integer, -- unix timestamp (seconds), null = no upper bound
|
||||||
|
max_gas_fee_per_gas blob, -- big-endian 32-byte U256, null = unlimited
|
||||||
|
max_priority_fee_per_gas blob, -- big-endian 32-byte U256, null = unlimited
|
||||||
|
rate_limit_count integer, -- max transactions in window, null = unlimited
|
||||||
|
rate_limit_window_secs integer, -- window duration in seconds, null = unlimited
|
||||||
|
revoked_at integer, -- unix timestamp when revoked, null = still active
|
||||||
|
created_at integer not null default(unixepoch('now'))
|
||||||
|
) STRICT;
|
||||||
|
|
||||||
|
-- Shared transaction log for all EVM grants, used for rate limit tracking and auditing
|
||||||
|
create table if not exists evm_transaction_log (
|
||||||
|
id integer not null primary key,
|
||||||
|
grant_id integer not null references evm_basic_grant(id) on delete restrict,
|
||||||
|
client_id integer not null references program_client(id) on delete restrict,
|
||||||
|
wallet_id integer not null references evm_wallet(id) on delete restrict,
|
||||||
|
chain_id integer not null,
|
||||||
|
eth_value blob not null, -- always present on any EVM tx
|
||||||
|
signed_at integer not null default(unixepoch('now'))
|
||||||
|
) STRICT;
|
||||||
|
|
||||||
|
create index if not exists idx_evm_basic_grant_wallet_chain on evm_basic_grant(client_id, wallet_id, chain_id);
|
||||||
|
|
||||||
|
-- ===============================
|
||||||
|
-- ERC20 token transfer grant
|
||||||
|
-- ===============================
|
||||||
|
create table if not exists evm_token_transfer_grant (
|
||||||
|
id integer not null primary key,
|
||||||
|
basic_grant_id integer not null unique references evm_basic_grant(id) on delete cascade,
|
||||||
|
token_contract blob not null, -- 20-byte ERC20 contract address
|
||||||
|
receiver blob -- 20-byte recipient address or null if every recipient allowed
|
||||||
|
) STRICT;
|
||||||
|
|
||||||
|
-- Per-window volume limits for token transfer grants
|
||||||
|
create table if not exists evm_token_transfer_volume_limit (
|
||||||
|
id integer not null primary key,
|
||||||
|
grant_id integer not null references evm_token_transfer_grant(id) on delete cascade,
|
||||||
|
window_secs integer not null, -- window duration in seconds
|
||||||
|
max_volume blob not null -- big-endian 32-byte U256
|
||||||
|
) STRICT;
|
||||||
|
|
||||||
|
-- Log table for token transfer grant usage
|
||||||
|
create table if not exists evm_token_transfer_log (
|
||||||
|
id integer not null primary key,
|
||||||
|
grant_id integer not null references evm_token_transfer_grant(id) on delete restrict,
|
||||||
|
log_id integer not null references evm_transaction_log(id) on delete restrict,
|
||||||
|
chain_id integer not null, -- EIP-155 chain ID
|
||||||
|
token_contract blob not null, -- 20-byte ERC20 contract address
|
||||||
|
recipient_address blob not null, -- 20-byte recipient address
|
||||||
|
value blob not null, -- big-endian 32-byte U256
|
||||||
|
created_at integer not null default(unixepoch('now'))
|
||||||
|
) STRICT;
|
||||||
|
|
||||||
|
create index if not exists idx_token_transfer_log_grant on evm_token_transfer_log(grant_id);
|
||||||
|
create index if not exists idx_token_transfer_log_log_id on evm_token_transfer_log(log_id);
|
||||||
|
create index if not exists idx_token_transfer_log_chain on evm_token_transfer_log(chain_id);
|
||||||
|
|
||||||
|
|
||||||
|
-- ===============================
|
||||||
|
-- Ether transfer grant (uses base log)
|
||||||
|
-- ===============================
|
||||||
|
create table if not exists evm_ether_transfer_grant (
|
||||||
|
id integer not null primary key,
|
||||||
|
basic_grant_id integer not null unique references evm_basic_grant(id) on delete cascade,
|
||||||
|
limit_id integer not null references evm_ether_transfer_limit(id) on delete restrict
|
||||||
|
) STRICT;
|
||||||
|
|
||||||
|
-- Specific recipient addresses for an ether transfer grant
|
||||||
|
create table if not exists evm_ether_transfer_grant_target (
|
||||||
|
id integer not null primary key,
|
||||||
|
grant_id integer not null references evm_ether_transfer_grant(id) on delete cascade,
|
||||||
|
address blob not null -- 20-byte recipient address
|
||||||
|
) STRICT;
|
||||||
|
|
||||||
|
create unique index if not exists uniq_ether_transfer_target on evm_ether_transfer_grant_target(grant_id, address);
|
||||||
|
|
||||||
|
|||||||
@@ -0,0 +1 @@
|
|||||||
|
DROP INDEX IF EXISTS program_client_public_key_unique;
|
||||||
@@ -0,0 +1,2 @@
|
|||||||
|
CREATE UNIQUE INDEX program_client_public_key_unique
|
||||||
|
ON program_client (public_key);
|
||||||
@@ -1,2 +0,0 @@
|
|||||||
pub mod user_agent;
|
|
||||||
pub mod client;
|
|
||||||
@@ -1,40 +1,37 @@
|
|||||||
use arbiter_proto::{BOOTSTRAP_TOKEN_PATH, home_path};
|
use arbiter_proto::{BOOTSTRAP_PATH, home_path};
|
||||||
use diesel::{ExpressionMethods, QueryDsl};
|
use diesel::QueryDsl;
|
||||||
use diesel_async::RunQueryDsl;
|
use diesel_async::RunQueryDsl;
|
||||||
use kameo::{Actor, messages};
|
use kameo::{Actor, messages};
|
||||||
use memsafe::MemSafe;
|
|
||||||
use miette::Diagnostic;
|
use miette::Diagnostic;
|
||||||
use rand::{RngExt, distr::StandardUniform, make_rng, rngs::StdRng};
|
use rand::{
|
||||||
use secrecy::SecretString;
|
RngExt,
|
||||||
use thiserror::Error;
|
distr::{Alphanumeric},
|
||||||
use tracing::info;
|
make_rng,
|
||||||
use zeroize::{Zeroize, Zeroizing};
|
rngs::StdRng,
|
||||||
|
|
||||||
use crate::{
|
|
||||||
context::{self, ServerContext},
|
|
||||||
db::{self, DatabasePool, schema},
|
|
||||||
};
|
};
|
||||||
|
use thiserror::Error;
|
||||||
|
|
||||||
|
use crate::db::{self, DatabasePool, schema};
|
||||||
const TOKEN_LENGTH: usize = 64;
|
const TOKEN_LENGTH: usize = 64;
|
||||||
|
|
||||||
pub async fn generate_token() -> Result<String, std::io::Error> {
|
pub async fn generate_token() -> Result<String, std::io::Error> {
|
||||||
let rng: StdRng = make_rng();
|
let rng: StdRng = make_rng();
|
||||||
|
|
||||||
let token: String = rng
|
let token: String = rng.sample_iter(Alphanumeric).take(TOKEN_LENGTH).fold(
|
||||||
.sample_iter::<char, _>(StandardUniform)
|
Default::default(),
|
||||||
.take(TOKEN_LENGTH)
|
|mut accum, char| {
|
||||||
.fold(Default::default(), |mut accum, char| {
|
|
||||||
accum += char.to_string().as_str();
|
accum += char.to_string().as_str();
|
||||||
accum
|
accum
|
||||||
});
|
},
|
||||||
|
);
|
||||||
|
|
||||||
tokio::fs::write(home_path()?.join(BOOTSTRAP_TOKEN_PATH), token.as_str()).await?;
|
tokio::fs::write(home_path()?.join(BOOTSTRAP_PATH), token.as_str()).await?;
|
||||||
|
|
||||||
Ok(token)
|
Ok(token)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Error, Debug, Diagnostic)]
|
#[derive(Error, Debug, Diagnostic)]
|
||||||
pub enum BootstrapError {
|
pub enum Error {
|
||||||
#[error("Database error: {0}")]
|
#[error("Database error: {0}")]
|
||||||
#[diagnostic(code(arbiter_server::bootstrap::database))]
|
#[diagnostic(code(arbiter_server::bootstrap::database))]
|
||||||
Database(#[from] db::PoolError),
|
Database(#[from] db::PoolError),
|
||||||
@@ -49,12 +46,12 @@ pub enum BootstrapError {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Actor)]
|
#[derive(Actor)]
|
||||||
pub struct BootstrapActor {
|
pub struct Bootstrapper {
|
||||||
token: Option<String>,
|
token: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl BootstrapActor {
|
impl Bootstrapper {
|
||||||
pub async fn new(db: &DatabasePool) -> Result<Self, BootstrapError> {
|
pub async fn new(db: &DatabasePool) -> Result<Self, Error> {
|
||||||
let mut conn = db.get().await?;
|
let mut conn = db.get().await?;
|
||||||
|
|
||||||
let row_count: i64 = schema::useragent_client::table
|
let row_count: i64 = schema::useragent_client::table
|
||||||
@@ -64,10 +61,9 @@ impl BootstrapActor {
|
|||||||
|
|
||||||
drop(conn);
|
drop(conn);
|
||||||
|
|
||||||
|
|
||||||
let token = if row_count == 0 {
|
let token = if row_count == 0 {
|
||||||
let token = generate_token().await?;
|
let token = generate_token().await?;
|
||||||
info!(%token, "Generated bootstrap token");
|
|
||||||
tokio::fs::write(home_path()?.join(BOOTSTRAP_TOKEN_PATH), token.as_str()).await?;
|
|
||||||
Some(token)
|
Some(token)
|
||||||
} else {
|
} else {
|
||||||
None
|
None
|
||||||
@@ -75,15 +71,10 @@ impl BootstrapActor {
|
|||||||
|
|
||||||
Ok(Self { token })
|
Ok(Self { token })
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
pub fn get_token(&self) -> Option<String> {
|
|
||||||
self.token.clone()
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
#[messages]
|
#[messages]
|
||||||
impl BootstrapActor {
|
impl Bootstrapper {
|
||||||
#[message]
|
#[message]
|
||||||
pub fn is_correct_token(&self, token: String) -> bool {
|
pub fn is_correct_token(&self, token: String) -> bool {
|
||||||
match &self.token {
|
match &self.token {
|
||||||
@@ -102,3 +93,11 @@ impl BootstrapActor {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[messages]
|
||||||
|
impl Bootstrapper {
|
||||||
|
#[message]
|
||||||
|
pub fn get_token(&self) -> Option<String> {
|
||||||
|
self.token.clone()
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,12 +0,0 @@
|
|||||||
use arbiter_proto::{
|
|
||||||
proto::{ClientRequest, ClientResponse},
|
|
||||||
transport::Bi,
|
|
||||||
};
|
|
||||||
|
|
||||||
use crate::ServerContext;
|
|
||||||
|
|
||||||
pub(crate) async fn handle_client(
|
|
||||||
_context: ServerContext,
|
|
||||||
_bistream: impl Bi<ClientRequest, ClientResponse>,
|
|
||||||
) {
|
|
||||||
}
|
|
||||||
183
server/crates/arbiter-server/src/actors/client/auth.rs
Normal file
183
server/crates/arbiter-server/src/actors/client/auth.rs
Normal file
@@ -0,0 +1,183 @@
|
|||||||
|
use arbiter_proto::{
|
||||||
|
format_challenge,
|
||||||
|
proto::client::{
|
||||||
|
AuthChallenge, AuthChallengeSolution, ClientConnectError, ClientRequest, ClientResponse,
|
||||||
|
client_connect_error::Code as ConnectErrorCode,
|
||||||
|
client_request::Payload as ClientRequestPayload,
|
||||||
|
client_response::Payload as ClientResponsePayload,
|
||||||
|
},
|
||||||
|
transport::expect_message,
|
||||||
|
};
|
||||||
|
use diesel::{ExpressionMethods as _, OptionalExtension as _, QueryDsl as _, update};
|
||||||
|
use diesel_async::RunQueryDsl as _;
|
||||||
|
use ed25519_dalek::VerifyingKey;
|
||||||
|
use tracing::error;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
actors::client::ClientConnection,
|
||||||
|
db::{self, schema::program_client},
|
||||||
|
};
|
||||||
|
|
||||||
|
use super::session::ClientSession;
|
||||||
|
|
||||||
|
#[derive(thiserror::Error, Debug, Clone, PartialEq, Eq)]
|
||||||
|
pub enum Error {
|
||||||
|
#[error("Unexpected message payload")]
|
||||||
|
UnexpectedMessagePayload,
|
||||||
|
#[error("Invalid client public key length")]
|
||||||
|
InvalidClientPubkeyLength,
|
||||||
|
#[error("Invalid client public key encoding")]
|
||||||
|
InvalidAuthPubkeyEncoding,
|
||||||
|
#[error("Database pool unavailable")]
|
||||||
|
DatabasePoolUnavailable,
|
||||||
|
#[error("Database operation failed")]
|
||||||
|
DatabaseOperationFailed,
|
||||||
|
#[error("Invalid challenge solution")]
|
||||||
|
InvalidChallengeSolution,
|
||||||
|
#[error("Client not registered")]
|
||||||
|
NotRegistered,
|
||||||
|
#[error("Internal error")]
|
||||||
|
InternalError,
|
||||||
|
#[error("Transport error")]
|
||||||
|
Transport,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Atomically reads and increments the nonce for a known client.
|
||||||
|
/// Returns `None` if the pubkey is not registered.
|
||||||
|
async fn get_nonce(
|
||||||
|
db: &db::DatabasePool,
|
||||||
|
pubkey: &VerifyingKey,
|
||||||
|
) -> Result<Option<(i32, i32)>, Error> {
|
||||||
|
let pubkey_bytes = pubkey.as_bytes().to_vec();
|
||||||
|
|
||||||
|
let mut conn = db.get().await.map_err(|e| {
|
||||||
|
error!(error = ?e, "Database pool error");
|
||||||
|
Error::DatabasePoolUnavailable
|
||||||
|
})?;
|
||||||
|
|
||||||
|
conn.exclusive_transaction(|conn| {
|
||||||
|
let pubkey_bytes = pubkey_bytes.clone();
|
||||||
|
Box::pin(async move {
|
||||||
|
let Some((client_id, current_nonce)) = program_client::table
|
||||||
|
.filter(program_client::public_key.eq(&pubkey_bytes))
|
||||||
|
.select((program_client::id, program_client::nonce))
|
||||||
|
.first::<(i32, i32)>(conn)
|
||||||
|
.await
|
||||||
|
.optional()?
|
||||||
|
else {
|
||||||
|
return Result::<_, diesel::result::Error>::Ok(None);
|
||||||
|
};
|
||||||
|
|
||||||
|
update(program_client::table)
|
||||||
|
.filter(program_client::public_key.eq(&pubkey_bytes))
|
||||||
|
.set(program_client::nonce.eq(current_nonce + 1))
|
||||||
|
.execute(conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok(Some((client_id, current_nonce)))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.map_err(|e| {
|
||||||
|
error!(error = ?e, "Database error");
|
||||||
|
Error::DatabaseOperationFailed
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn challenge_client(
|
||||||
|
props: &mut ClientConnection,
|
||||||
|
pubkey: VerifyingKey,
|
||||||
|
nonce: i32,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let challenge = AuthChallenge {
|
||||||
|
pubkey: pubkey.as_bytes().to_vec(),
|
||||||
|
nonce,
|
||||||
|
};
|
||||||
|
|
||||||
|
props
|
||||||
|
.transport
|
||||||
|
.send(Ok(ClientResponse {
|
||||||
|
payload: Some(ClientResponsePayload::AuthChallenge(challenge.clone())),
|
||||||
|
}))
|
||||||
|
.await
|
||||||
|
.map_err(|e| {
|
||||||
|
error!(error = ?e, "Failed to send auth challenge");
|
||||||
|
Error::Transport
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let AuthChallengeSolution { signature } =
|
||||||
|
expect_message(&mut *props.transport, |req: ClientRequest| {
|
||||||
|
match req.payload? {
|
||||||
|
ClientRequestPayload::AuthChallengeSolution(s) => Some(s),
|
||||||
|
_ => None,
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.map_err(|e| {
|
||||||
|
error!(error = ?e, "Failed to receive challenge solution");
|
||||||
|
Error::Transport
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let formatted = format_challenge(nonce, &challenge.pubkey);
|
||||||
|
let sig = signature.as_slice().try_into().map_err(|_| {
|
||||||
|
error!("Invalid signature length");
|
||||||
|
Error::InvalidChallengeSolution
|
||||||
|
})?;
|
||||||
|
|
||||||
|
pubkey.verify_strict(&formatted, &sig).map_err(|_| {
|
||||||
|
error!("Challenge solution verification failed");
|
||||||
|
Error::InvalidChallengeSolution
|
||||||
|
})?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn connect_error_code(err: &Error) -> ConnectErrorCode {
|
||||||
|
match err {
|
||||||
|
Error::NotRegistered => ConnectErrorCode::ApprovalDenied,
|
||||||
|
_ => ConnectErrorCode::Unknown,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn authenticate(props: &mut ClientConnection) -> Result<(VerifyingKey, i32), Error> {
|
||||||
|
let Some(ClientRequest {
|
||||||
|
payload: Some(ClientRequestPayload::AuthChallengeRequest(challenge)),
|
||||||
|
}) = props.transport.recv().await
|
||||||
|
else {
|
||||||
|
return Err(Error::Transport);
|
||||||
|
};
|
||||||
|
|
||||||
|
let pubkey_bytes = challenge
|
||||||
|
.pubkey
|
||||||
|
.as_array()
|
||||||
|
.ok_or(Error::InvalidClientPubkeyLength)?;
|
||||||
|
let pubkey =
|
||||||
|
VerifyingKey::from_bytes(pubkey_bytes).map_err(|_| Error::InvalidAuthPubkeyEncoding)?;
|
||||||
|
|
||||||
|
let (client_id, nonce) = match get_nonce(&props.db, &pubkey).await? {
|
||||||
|
Some((client_id, nonce)) => (client_id, nonce),
|
||||||
|
None => return Err(Error::NotRegistered),
|
||||||
|
};
|
||||||
|
|
||||||
|
challenge_client(props, pubkey, nonce).await?;
|
||||||
|
|
||||||
|
Ok((pubkey, client_id))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn authenticate_and_create(mut props: ClientConnection) -> Result<ClientSession, Error> {
|
||||||
|
match authenticate(&mut props).await {
|
||||||
|
Ok((_pubkey, client_id)) => Ok(ClientSession::new(props, client_id)),
|
||||||
|
Err(err) => {
|
||||||
|
let code = connect_error_code(&err);
|
||||||
|
let _ = props
|
||||||
|
.transport
|
||||||
|
.send(Ok(ClientResponse {
|
||||||
|
payload: Some(ClientResponsePayload::ClientConnectError(
|
||||||
|
ClientConnectError { code: code.into() },
|
||||||
|
)),
|
||||||
|
}))
|
||||||
|
.await;
|
||||||
|
Err(err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
58
server/crates/arbiter-server/src/actors/client/mod.rs
Normal file
58
server/crates/arbiter-server/src/actors/client/mod.rs
Normal file
@@ -0,0 +1,58 @@
|
|||||||
|
use arbiter_proto::{
|
||||||
|
proto::client::{ClientRequest, ClientResponse},
|
||||||
|
transport::Bi,
|
||||||
|
};
|
||||||
|
use kameo::actor::Spawn;
|
||||||
|
use tracing::{error, info};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
actors::{GlobalActors, client::session::ClientSession},
|
||||||
|
db,
|
||||||
|
};
|
||||||
|
|
||||||
|
#[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)]
|
||||||
|
pub enum ClientError {
|
||||||
|
#[error("Expected message with payload")]
|
||||||
|
MissingRequestPayload,
|
||||||
|
#[error("Unexpected request payload")]
|
||||||
|
UnexpectedRequestPayload,
|
||||||
|
#[error("State machine error")]
|
||||||
|
StateTransitionFailed,
|
||||||
|
#[error("Connection registration failed")]
|
||||||
|
ConnectionRegistrationFailed,
|
||||||
|
#[error(transparent)]
|
||||||
|
Auth(#[from] auth::Error),
|
||||||
|
}
|
||||||
|
|
||||||
|
pub type Transport = Box<dyn Bi<ClientRequest, Result<ClientResponse, ClientError>> + Send>;
|
||||||
|
|
||||||
|
pub struct ClientConnection {
|
||||||
|
pub(crate) db: db::DatabasePool,
|
||||||
|
pub(crate) transport: Transport,
|
||||||
|
pub(crate) actors: GlobalActors,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ClientConnection {
|
||||||
|
pub fn new(db: db::DatabasePool, transport: Transport, actors: GlobalActors) -> Self {
|
||||||
|
Self {
|
||||||
|
db,
|
||||||
|
transport,
|
||||||
|
actors,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub mod auth;
|
||||||
|
pub mod session;
|
||||||
|
|
||||||
|
pub async fn connect_client(props: ClientConnection) {
|
||||||
|
match auth::authenticate_and_create(props).await {
|
||||||
|
Ok(session) => {
|
||||||
|
ClientSession::spawn(session);
|
||||||
|
info!("Client authenticated, session started");
|
||||||
|
}
|
||||||
|
Err(err) => {
|
||||||
|
error!(?err, "Authentication failed, closing connection");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
151
server/crates/arbiter-server/src/actors/client/session.rs
Normal file
151
server/crates/arbiter-server/src/actors/client/session.rs
Normal file
@@ -0,0 +1,151 @@
|
|||||||
|
use alloy::{consensus::TxEip1559, primitives::Address, rlp::Decodable};
|
||||||
|
use arbiter_proto::proto::{
|
||||||
|
client::{
|
||||||
|
ClientRequest, ClientResponse, client_request::Payload as ClientRequestPayload,
|
||||||
|
client_response::Payload as ClientResponsePayload,
|
||||||
|
},
|
||||||
|
evm::{
|
||||||
|
EvmError, EvmSignTransactionResponse, evm_sign_transaction_response::Result as SignResult,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
use kameo::Actor;
|
||||||
|
use tokio::select;
|
||||||
|
use tracing::{error, info};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
actors::{
|
||||||
|
GlobalActors,
|
||||||
|
client::{ClientConnection, ClientError},
|
||||||
|
evm::ClientSignTransaction,
|
||||||
|
router::RegisterClient,
|
||||||
|
},
|
||||||
|
db,
|
||||||
|
};
|
||||||
|
|
||||||
|
pub struct ClientSession {
|
||||||
|
props: ClientConnection,
|
||||||
|
client_id: i32,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ClientSession {
|
||||||
|
pub(crate) fn new(props: ClientConnection, client_id: i32) -> Self {
|
||||||
|
Self { props, client_id }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn process_transport_inbound(&mut self, req: ClientRequest) -> Output {
|
||||||
|
let msg = req.payload.ok_or_else(|| {
|
||||||
|
error!(actor = "client", "Received message with no payload");
|
||||||
|
ClientError::MissingRequestPayload
|
||||||
|
})?;
|
||||||
|
|
||||||
|
match msg {
|
||||||
|
ClientRequestPayload::EvmSignTransaction(sign_req) => {
|
||||||
|
let wallet_address: [u8; 20] = sign_req
|
||||||
|
.wallet_address
|
||||||
|
.try_into()
|
||||||
|
.map_err(|_| ClientError::UnexpectedRequestPayload)?;
|
||||||
|
|
||||||
|
let mut rlp_bytes: &[u8] = &sign_req.rlp_transaction;
|
||||||
|
let tx = TxEip1559::decode(&mut rlp_bytes)
|
||||||
|
.map_err(|_| ClientError::UnexpectedRequestPayload)?;
|
||||||
|
|
||||||
|
let result = self
|
||||||
|
.props
|
||||||
|
.actors
|
||||||
|
.evm
|
||||||
|
.ask(ClientSignTransaction {
|
||||||
|
client_id: self.client_id,
|
||||||
|
wallet_address: Address::from_slice(&wallet_address),
|
||||||
|
transaction: tx,
|
||||||
|
})
|
||||||
|
.await;
|
||||||
|
|
||||||
|
let response_result = match result {
|
||||||
|
Ok(signature) => SignResult::Signature(signature.as_bytes().to_vec()),
|
||||||
|
Err(err) => {
|
||||||
|
error!(?err, "client sign transaction failed");
|
||||||
|
SignResult::Error(EvmError::Internal.into())
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(ClientResponse {
|
||||||
|
payload: Some(ClientResponsePayload::EvmSignTransaction(
|
||||||
|
EvmSignTransactionResponse {
|
||||||
|
result: Some(response_result),
|
||||||
|
},
|
||||||
|
)),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
_ => Err(ClientError::UnexpectedRequestPayload),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type Output = Result<ClientResponse, ClientError>;
|
||||||
|
|
||||||
|
impl Actor for ClientSession {
|
||||||
|
type Args = Self;
|
||||||
|
|
||||||
|
type Error = ClientError;
|
||||||
|
|
||||||
|
async fn on_start(
|
||||||
|
args: Self::Args,
|
||||||
|
this: kameo::prelude::ActorRef<Self>,
|
||||||
|
) -> Result<Self, Self::Error> {
|
||||||
|
args.props
|
||||||
|
.actors
|
||||||
|
.router
|
||||||
|
.ask(RegisterClient { actor: this })
|
||||||
|
.await
|
||||||
|
.map_err(|_| ClientError::ConnectionRegistrationFailed)?;
|
||||||
|
Ok(args)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn next(
|
||||||
|
&mut self,
|
||||||
|
_actor_ref: kameo::prelude::WeakActorRef<Self>,
|
||||||
|
mailbox_rx: &mut kameo::prelude::MailboxReceiver<Self>,
|
||||||
|
) -> Option<kameo::mailbox::Signal<Self>> {
|
||||||
|
loop {
|
||||||
|
select! {
|
||||||
|
signal = mailbox_rx.recv() => {
|
||||||
|
return signal;
|
||||||
|
}
|
||||||
|
msg = self.props.transport.recv() => {
|
||||||
|
match msg {
|
||||||
|
Some(request) => {
|
||||||
|
match self.process_transport_inbound(request).await {
|
||||||
|
Ok(resp) => {
|
||||||
|
if self.props.transport.send(Ok(resp)).await.is_err() {
|
||||||
|
error!(actor = "client", reason = "channel closed", "send.failed");
|
||||||
|
return Some(kameo::mailbox::Signal::Stop);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(err) => {
|
||||||
|
let _ = self.props.transport.send(Err(err)).await;
|
||||||
|
return Some(kameo::mailbox::Signal::Stop);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
None => {
|
||||||
|
info!(actor = "client", "transport.closed");
|
||||||
|
return Some(kameo::mailbox::Signal::Stop);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ClientSession {
|
||||||
|
pub fn new_test(db: db::DatabasePool, actors: GlobalActors) -> Self {
|
||||||
|
use arbiter_proto::transport::DummyTransport;
|
||||||
|
let transport: super::Transport = Box::new(DummyTransport::new());
|
||||||
|
let props = ClientConnection::new(db, transport, actors);
|
||||||
|
Self {
|
||||||
|
props,
|
||||||
|
client_id: 0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
246
server/crates/arbiter-server/src/actors/evm/mod.rs
Normal file
246
server/crates/arbiter-server/src/actors/evm/mod.rs
Normal file
@@ -0,0 +1,246 @@
|
|||||||
|
use alloy::{consensus::TxEip1559, primitives::Address, signers::Signature};
|
||||||
|
use diesel::{ExpressionMethods, OptionalExtension as _, QueryDsl, SelectableHelper as _, dsl::insert_into};
|
||||||
|
use diesel_async::RunQueryDsl;
|
||||||
|
use kameo::{Actor, actor::ActorRef, messages};
|
||||||
|
use memsafe::MemSafe;
|
||||||
|
use rand::{SeedableRng, rng, rngs::StdRng};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
actors::keyholder::{CreateNew, Decrypt, KeyHolder},
|
||||||
|
db::{self, DatabasePool, models::{self, EvmBasicGrant, SqliteTimestamp}, schema},
|
||||||
|
evm::{
|
||||||
|
self, RunKind,
|
||||||
|
policies::{
|
||||||
|
FullGrant, SharedGrantSettings, SpecificGrant, SpecificMeaning,
|
||||||
|
ether_transfer::EtherTransfer,
|
||||||
|
token_transfers::TokenTransfer,
|
||||||
|
},
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
pub use crate::evm::safe_signer;
|
||||||
|
|
||||||
|
#[derive(Debug, thiserror::Error, miette::Diagnostic)]
|
||||||
|
pub enum SignTransactionError {
|
||||||
|
#[error("Wallet not found")]
|
||||||
|
#[diagnostic(code(arbiter::evm::sign::wallet_not_found))]
|
||||||
|
WalletNotFound,
|
||||||
|
|
||||||
|
#[error("Database error: {0}")]
|
||||||
|
#[diagnostic(code(arbiter::evm::sign::database))]
|
||||||
|
Database(#[from] diesel::result::Error),
|
||||||
|
|
||||||
|
#[error("Database pool error: {0}")]
|
||||||
|
#[diagnostic(code(arbiter::evm::sign::pool))]
|
||||||
|
Pool(#[from] db::PoolError),
|
||||||
|
|
||||||
|
#[error("Keyholder error: {0}")]
|
||||||
|
#[diagnostic(code(arbiter::evm::sign::keyholder))]
|
||||||
|
Keyholder(#[from] crate::actors::keyholder::Error),
|
||||||
|
|
||||||
|
#[error("Keyholder mailbox error")]
|
||||||
|
#[diagnostic(code(arbiter::evm::sign::keyholder_send))]
|
||||||
|
KeyholderSend,
|
||||||
|
|
||||||
|
#[error("Signing error: {0}")]
|
||||||
|
#[diagnostic(code(arbiter::evm::sign::signing))]
|
||||||
|
Signing(#[from] alloy::signers::Error),
|
||||||
|
|
||||||
|
#[error("Policy error: {0}")]
|
||||||
|
#[diagnostic(code(arbiter::evm::sign::vet))]
|
||||||
|
Vet(#[from] evm::VetError),
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, thiserror::Error, miette::Diagnostic)]
|
||||||
|
pub enum Error {
|
||||||
|
#[error("Keyholder error: {0}")]
|
||||||
|
#[diagnostic(code(arbiter::evm::keyholder))]
|
||||||
|
Keyholder(#[from] crate::actors::keyholder::Error),
|
||||||
|
|
||||||
|
#[error("Keyholder mailbox error")]
|
||||||
|
#[diagnostic(code(arbiter::evm::keyholder_send))]
|
||||||
|
KeyholderSend,
|
||||||
|
|
||||||
|
#[error("Database error: {0}")]
|
||||||
|
#[diagnostic(code(arbiter::evm::database))]
|
||||||
|
Database(#[from] diesel::result::Error),
|
||||||
|
|
||||||
|
#[error("Database pool error: {0}")]
|
||||||
|
#[diagnostic(code(arbiter::evm::database_pool))]
|
||||||
|
DatabasePool(#[from] db::PoolError),
|
||||||
|
|
||||||
|
#[error("Grant creation error: {0}")]
|
||||||
|
#[diagnostic(code(arbiter::evm::creation))]
|
||||||
|
Creation(#[from] evm::CreationError),
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Actor)]
|
||||||
|
pub struct EvmActor {
|
||||||
|
pub keyholder: ActorRef<KeyHolder>,
|
||||||
|
pub db: DatabasePool,
|
||||||
|
pub rng: StdRng,
|
||||||
|
pub engine: evm::Engine,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl EvmActor {
|
||||||
|
pub fn new(keyholder: ActorRef<KeyHolder>, db: DatabasePool) -> Self {
|
||||||
|
// is it safe to seed rng from system once?
|
||||||
|
// todo: audit
|
||||||
|
let rng = StdRng::from_rng(&mut rng());
|
||||||
|
let engine = evm::Engine::new(db.clone());
|
||||||
|
Self { keyholder, db, rng, engine }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[messages]
|
||||||
|
impl EvmActor {
|
||||||
|
#[message]
|
||||||
|
pub async fn generate(&mut self) -> Result<Address, Error> {
|
||||||
|
let (mut key_cell, address) = safe_signer::generate(&mut self.rng);
|
||||||
|
|
||||||
|
// Move raw key bytes into a Vec<u8> MemSafe for KeyHolder
|
||||||
|
let plaintext = {
|
||||||
|
let reader = key_cell.read().expect("MemSafe read");
|
||||||
|
MemSafe::new(reader.to_vec()).expect("MemSafe allocation")
|
||||||
|
};
|
||||||
|
|
||||||
|
let aead_id: i32 = self
|
||||||
|
.keyholder
|
||||||
|
.ask(CreateNew { plaintext })
|
||||||
|
.await
|
||||||
|
.map_err(|_| Error::KeyholderSend)?;
|
||||||
|
|
||||||
|
let mut conn = self.db.get().await?;
|
||||||
|
insert_into(schema::evm_wallet::table)
|
||||||
|
.values(&models::NewEvmWallet {
|
||||||
|
address: address.as_slice().to_vec(),
|
||||||
|
aead_encrypted_id: aead_id,
|
||||||
|
})
|
||||||
|
.execute(&mut conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok(address)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[message]
|
||||||
|
pub async fn list_wallets(&self) -> Result<Vec<Address>, Error> {
|
||||||
|
let mut conn = self.db.get().await?;
|
||||||
|
let rows: Vec<models::EvmWallet> = schema::evm_wallet::table
|
||||||
|
.select(models::EvmWallet::as_select())
|
||||||
|
.load(&mut conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok(rows
|
||||||
|
.into_iter()
|
||||||
|
.map(|w| Address::from_slice(&w.address))
|
||||||
|
.collect())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[messages]
|
||||||
|
impl EvmActor {
|
||||||
|
#[message]
|
||||||
|
pub async fn useragent_create_grant(
|
||||||
|
&mut self,
|
||||||
|
client_id: i32,
|
||||||
|
basic: SharedGrantSettings,
|
||||||
|
grant: SpecificGrant,
|
||||||
|
) -> Result<i32, evm::CreationError> {
|
||||||
|
match grant {
|
||||||
|
SpecificGrant::EtherTransfer(settings) => {
|
||||||
|
self.engine
|
||||||
|
.create_grant::<EtherTransfer>(client_id, FullGrant { basic, specific: settings })
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
SpecificGrant::TokenTransfer(settings) => {
|
||||||
|
self.engine
|
||||||
|
.create_grant::<TokenTransfer>(client_id, FullGrant { basic, specific: settings })
|
||||||
|
.await
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[message]
|
||||||
|
pub async fn useragent_delete_grant(&mut self, grant_id: i32) -> Result<(), Error> {
|
||||||
|
let mut conn = self.db.get().await?;
|
||||||
|
diesel::update(schema::evm_basic_grant::table)
|
||||||
|
.filter(schema::evm_basic_grant::id.eq(grant_id))
|
||||||
|
.set(schema::evm_basic_grant::revoked_at.eq(SqliteTimestamp::now()))
|
||||||
|
.execute(&mut conn)
|
||||||
|
.await?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[message]
|
||||||
|
pub async fn useragent_list_grants(
|
||||||
|
&mut self,
|
||||||
|
wallet_id: Option<i32>,
|
||||||
|
) -> Result<Vec<EvmBasicGrant>, Error> {
|
||||||
|
let mut conn = self.db.get().await?;
|
||||||
|
let mut query = schema::evm_basic_grant::table
|
||||||
|
.select(EvmBasicGrant::as_select())
|
||||||
|
.filter(schema::evm_basic_grant::revoked_at.is_null())
|
||||||
|
.into_boxed();
|
||||||
|
if let Some(wid) = wallet_id {
|
||||||
|
query = query.filter(schema::evm_basic_grant::wallet_id.eq(wid));
|
||||||
|
}
|
||||||
|
Ok(query.load(&mut conn).await?)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[message]
|
||||||
|
pub async fn shared_analyze_transaction(
|
||||||
|
&mut self,
|
||||||
|
client_id: i32,
|
||||||
|
wallet_address: Address,
|
||||||
|
transaction: TxEip1559,
|
||||||
|
) -> Result<SpecificMeaning, SignTransactionError> {
|
||||||
|
let mut conn = self.db.get().await?;
|
||||||
|
let wallet = schema::evm_wallet::table
|
||||||
|
.select(models::EvmWallet::as_select())
|
||||||
|
.filter(schema::evm_wallet::address.eq(wallet_address.as_slice()))
|
||||||
|
.first(&mut conn)
|
||||||
|
.await
|
||||||
|
.optional()?
|
||||||
|
.ok_or(SignTransactionError::WalletNotFound)?;
|
||||||
|
drop(conn);
|
||||||
|
|
||||||
|
let meaning = self.engine
|
||||||
|
.evaluate_transaction(wallet.id, client_id, transaction.clone(), RunKind::Execution)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok(meaning)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[message]
|
||||||
|
pub async fn client_sign_transaction(
|
||||||
|
&mut self,
|
||||||
|
client_id: i32,
|
||||||
|
wallet_address: Address,
|
||||||
|
mut transaction: TxEip1559,
|
||||||
|
) -> Result<Signature, SignTransactionError> {
|
||||||
|
let mut conn = self.db.get().await?;
|
||||||
|
let wallet = schema::evm_wallet::table
|
||||||
|
.select(models::EvmWallet::as_select())
|
||||||
|
.filter(schema::evm_wallet::address.eq(wallet_address.as_slice()))
|
||||||
|
.first(&mut conn)
|
||||||
|
.await
|
||||||
|
.optional()?
|
||||||
|
.ok_or(SignTransactionError::WalletNotFound)?;
|
||||||
|
drop(conn);
|
||||||
|
|
||||||
|
let raw_key: MemSafe<Vec<u8>> = self
|
||||||
|
.keyholder
|
||||||
|
.ask(Decrypt { aead_id: wallet.aead_encrypted_id })
|
||||||
|
.await
|
||||||
|
.map_err(|_| SignTransactionError::KeyholderSend)?;
|
||||||
|
|
||||||
|
let signer = safe_signer::SafeSigner::from_memsafe(raw_key)?;
|
||||||
|
|
||||||
|
self.engine
|
||||||
|
.evaluate_transaction(wallet.id, client_id, transaction.clone(), RunKind::Execution)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
use alloy::network::TxSignerSync as _;
|
||||||
|
Ok(signer.sign_transaction_sync(&mut transaction)?)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1 @@
|
|||||||
|
pub mod v1;
|
||||||
@@ -0,0 +1,237 @@
|
|||||||
|
use std::ops::Deref as _;
|
||||||
|
|
||||||
|
use argon2::{Algorithm, Argon2, password_hash::Salt as ArgonSalt};
|
||||||
|
use chacha20poly1305::{
|
||||||
|
AeadInPlace, Key, KeyInit as _, XChaCha20Poly1305, XNonce,
|
||||||
|
aead::{AeadMut, Error, Payload},
|
||||||
|
};
|
||||||
|
use memsafe::MemSafe;
|
||||||
|
use rand::{
|
||||||
|
Rng as _, SeedableRng,
|
||||||
|
rngs::{StdRng, SysRng},
|
||||||
|
};
|
||||||
|
|
||||||
|
pub const ROOT_KEY_TAG: &[u8] = "arbiter/seal/v1".as_bytes();
|
||||||
|
pub const TAG: &[u8] = "arbiter/private-key/v1".as_bytes();
|
||||||
|
|
||||||
|
pub const NONCE_LENGTH: usize = 24;
|
||||||
|
|
||||||
|
#[derive(Default)]
|
||||||
|
pub struct Nonce([u8; NONCE_LENGTH]);
|
||||||
|
impl Nonce {
|
||||||
|
pub fn increment(&mut self) {
|
||||||
|
for i in (0..self.0.len()).rev() {
|
||||||
|
if self.0[i] == 0xFF {
|
||||||
|
self.0[i] = 0;
|
||||||
|
} else {
|
||||||
|
self.0[i] += 1;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn to_vec(&self) -> Vec<u8> {
|
||||||
|
self.0.to_vec()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
impl<'a> TryFrom<&'a [u8]> for Nonce {
|
||||||
|
type Error = ();
|
||||||
|
|
||||||
|
fn try_from(value: &'a [u8]) -> Result<Self, Self::Error> {
|
||||||
|
if value.len() != NONCE_LENGTH {
|
||||||
|
return Err(());
|
||||||
|
}
|
||||||
|
let mut nonce = [0u8; NONCE_LENGTH];
|
||||||
|
nonce.copy_from_slice(value);
|
||||||
|
Ok(Self(nonce))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct KeyCell(pub MemSafe<Key>);
|
||||||
|
impl From<MemSafe<Key>> for KeyCell {
|
||||||
|
fn from(value: MemSafe<Key>) -> Self {
|
||||||
|
Self(value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
impl TryFrom<MemSafe<Vec<u8>>> for KeyCell {
|
||||||
|
type Error = ();
|
||||||
|
|
||||||
|
fn try_from(mut value: MemSafe<Vec<u8>>) -> Result<Self, Self::Error> {
|
||||||
|
let value = value.read().unwrap();
|
||||||
|
if value.len() != size_of::<Key>() {
|
||||||
|
return Err(());
|
||||||
|
}
|
||||||
|
let mut cell = MemSafe::new(Key::default()).unwrap();
|
||||||
|
{
|
||||||
|
let mut cell_write = cell.write().unwrap();
|
||||||
|
let cell_slice: &mut [u8] = cell_write.as_mut();
|
||||||
|
cell_slice.copy_from_slice(&value);
|
||||||
|
}
|
||||||
|
Ok(Self(cell))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl KeyCell {
|
||||||
|
pub fn new_secure_random() -> Self {
|
||||||
|
let mut key = MemSafe::new(Key::default()).unwrap();
|
||||||
|
{
|
||||||
|
let mut key_buffer = key.write().unwrap();
|
||||||
|
let key_buffer: &mut [u8] = key_buffer.as_mut();
|
||||||
|
|
||||||
|
let mut rng = StdRng::try_from_rng(&mut SysRng).unwrap();
|
||||||
|
rng.fill_bytes(key_buffer);
|
||||||
|
}
|
||||||
|
|
||||||
|
key.into()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn encrypt_in_place(
|
||||||
|
&mut self,
|
||||||
|
nonce: &Nonce,
|
||||||
|
associated_data: &[u8],
|
||||||
|
mut buffer: impl AsMut<Vec<u8>>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let key_reader = self.0.read().unwrap();
|
||||||
|
let key_ref = key_reader.deref();
|
||||||
|
let cipher = XChaCha20Poly1305::new(key_ref);
|
||||||
|
let nonce = XNonce::from_slice(nonce.0.as_ref());
|
||||||
|
let buffer = buffer.as_mut();
|
||||||
|
cipher.encrypt_in_place(nonce, associated_data, buffer)
|
||||||
|
}
|
||||||
|
pub fn decrypt_in_place(
|
||||||
|
&mut self,
|
||||||
|
nonce: &Nonce,
|
||||||
|
associated_data: &[u8],
|
||||||
|
buffer: &mut MemSafe<Vec<u8>>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
let key_reader = self.0.read().unwrap();
|
||||||
|
let key_ref = key_reader.deref();
|
||||||
|
let cipher = XChaCha20Poly1305::new(key_ref);
|
||||||
|
let nonce = XNonce::from_slice(nonce.0.as_ref());
|
||||||
|
let mut buffer = buffer.write().unwrap();
|
||||||
|
let buffer: &mut Vec<u8> = buffer.as_mut();
|
||||||
|
cipher.decrypt_in_place(nonce, associated_data, buffer)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn encrypt(
|
||||||
|
&mut self,
|
||||||
|
nonce: &Nonce,
|
||||||
|
associated_data: &[u8],
|
||||||
|
plaintext: impl AsRef<[u8]>,
|
||||||
|
) -> Result<Vec<u8>, Error> {
|
||||||
|
let key_reader = self.0.read().unwrap();
|
||||||
|
let key_ref = key_reader.deref();
|
||||||
|
let mut cipher = XChaCha20Poly1305::new(key_ref);
|
||||||
|
let nonce = XNonce::from_slice(nonce.0.as_ref());
|
||||||
|
|
||||||
|
let ciphertext = cipher.encrypt(
|
||||||
|
nonce,
|
||||||
|
Payload {
|
||||||
|
msg: plaintext.as_ref(),
|
||||||
|
aad: associated_data,
|
||||||
|
},
|
||||||
|
)?;
|
||||||
|
Ok(ciphertext)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub type Salt = [u8; ArgonSalt::RECOMMENDED_LENGTH];
|
||||||
|
|
||||||
|
pub fn generate_salt() -> Salt {
|
||||||
|
let mut salt = Salt::default();
|
||||||
|
let mut rng = StdRng::try_from_rng(&mut SysRng).unwrap();
|
||||||
|
rng.fill_bytes(&mut salt);
|
||||||
|
salt
|
||||||
|
}
|
||||||
|
|
||||||
|
/// User password might be of different length, have not enough entropy, etc...
|
||||||
|
/// Derive a fixed-length key from the password using Argon2id, which is designed for password hashing and key derivation.
|
||||||
|
pub fn derive_seal_key(mut password: MemSafe<Vec<u8>>, salt: &Salt) -> KeyCell {
|
||||||
|
let params = argon2::Params::new(262_144, 3, 4, None).unwrap();
|
||||||
|
let hasher = Argon2::new(Algorithm::Argon2id, argon2::Version::V0x13, params);
|
||||||
|
let mut key = MemSafe::new(Key::default()).unwrap();
|
||||||
|
{
|
||||||
|
let password_source = password.read().unwrap();
|
||||||
|
let mut key_buffer = key.write().unwrap();
|
||||||
|
let key_buffer: &mut [u8] = key_buffer.as_mut();
|
||||||
|
|
||||||
|
hasher
|
||||||
|
.hash_password_into(password_source.deref(), salt, key_buffer)
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
key.into()
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
use memsafe::MemSafe;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
pub fn derive_seal_key_deterministic() {
|
||||||
|
static PASSWORD: &[u8] = b"password";
|
||||||
|
let password = MemSafe::new(PASSWORD.to_vec()).unwrap();
|
||||||
|
let password2 = MemSafe::new(PASSWORD.to_vec()).unwrap();
|
||||||
|
let salt = generate_salt();
|
||||||
|
|
||||||
|
let mut key1 = derive_seal_key(password, &salt);
|
||||||
|
let mut key2 = derive_seal_key(password2, &salt);
|
||||||
|
|
||||||
|
let key1_reader = key1.0.read().unwrap();
|
||||||
|
let key2_reader = key2.0.read().unwrap();
|
||||||
|
|
||||||
|
assert_eq!(key1_reader.deref(), key2_reader.deref());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
pub fn successful_derive() {
|
||||||
|
static PASSWORD: &[u8] = b"password";
|
||||||
|
let password = MemSafe::new(PASSWORD.to_vec()).unwrap();
|
||||||
|
let salt = generate_salt();
|
||||||
|
|
||||||
|
let mut key = derive_seal_key(password, &salt);
|
||||||
|
let key_reader = key.0.read().unwrap();
|
||||||
|
let key_ref = key_reader.deref();
|
||||||
|
|
||||||
|
assert_ne!(key_ref.as_slice(), &[0u8; 32][..]);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
pub fn encrypt_decrypt() {
|
||||||
|
static PASSWORD: &[u8] = b"password";
|
||||||
|
let password = MemSafe::new(PASSWORD.to_vec()).unwrap();
|
||||||
|
let salt = generate_salt();
|
||||||
|
|
||||||
|
let mut key = derive_seal_key(password, &salt);
|
||||||
|
let nonce = Nonce(*b"unique nonce 123 1231233"); // 24 bytes for XChaCha20Poly1305
|
||||||
|
let associated_data = b"associated data";
|
||||||
|
let mut buffer = b"secret data".to_vec();
|
||||||
|
|
||||||
|
key.encrypt_in_place(&nonce, associated_data, &mut buffer)
|
||||||
|
.unwrap();
|
||||||
|
assert_ne!(buffer, b"secret data");
|
||||||
|
|
||||||
|
let mut buffer = MemSafe::new(buffer).unwrap();
|
||||||
|
|
||||||
|
key.decrypt_in_place(&nonce, associated_data, &mut buffer)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let buffer = buffer.read().unwrap();
|
||||||
|
assert_eq!(*buffer, b"secret data");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
// We should fuzz this
|
||||||
|
pub fn test_nonce_increment() {
|
||||||
|
let mut nonce = Nonce([0u8; NONCE_LENGTH]);
|
||||||
|
nonce.increment();
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
nonce.0,
|
||||||
|
[
|
||||||
|
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1
|
||||||
|
]
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
408
server/crates/arbiter-server/src/actors/keyholder/mod.rs
Normal file
408
server/crates/arbiter-server/src/actors/keyholder/mod.rs
Normal file
@@ -0,0 +1,408 @@
|
|||||||
|
use chrono::Utc;
|
||||||
|
use diesel::{
|
||||||
|
ExpressionMethods as _, OptionalExtension, QueryDsl, SelectableHelper,
|
||||||
|
dsl::{insert_into, update},
|
||||||
|
};
|
||||||
|
use diesel_async::{AsyncConnection, RunQueryDsl};
|
||||||
|
use kameo::{Actor, Reply, messages};
|
||||||
|
use memsafe::MemSafe;
|
||||||
|
use strum::{EnumDiscriminants, IntoDiscriminant};
|
||||||
|
use tracing::{error, info};
|
||||||
|
|
||||||
|
use crate::db::{
|
||||||
|
self,
|
||||||
|
models::{self, RootKeyHistory},
|
||||||
|
schema::{self},
|
||||||
|
};
|
||||||
|
use encryption::v1::{self, KeyCell, Nonce};
|
||||||
|
|
||||||
|
pub mod encryption;
|
||||||
|
|
||||||
|
#[derive(Default, EnumDiscriminants)]
|
||||||
|
#[strum_discriminants(derive(Reply), vis(pub))]
|
||||||
|
enum State {
|
||||||
|
#[default]
|
||||||
|
Unbootstrapped,
|
||||||
|
Sealed {
|
||||||
|
root_key_history_id: i32,
|
||||||
|
},
|
||||||
|
Unsealed {
|
||||||
|
root_key_history_id: i32,
|
||||||
|
root_key: KeyCell,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, thiserror::Error, miette::Diagnostic)]
|
||||||
|
pub enum Error {
|
||||||
|
#[error("Keyholder is already bootstrapped")]
|
||||||
|
#[diagnostic(code(arbiter::keyholder::already_bootstrapped))]
|
||||||
|
AlreadyBootstrapped,
|
||||||
|
#[error("Keyholder is not bootstrapped")]
|
||||||
|
#[diagnostic(code(arbiter::keyholder::not_bootstrapped))]
|
||||||
|
NotBootstrapped,
|
||||||
|
#[error("Invalid key provided")]
|
||||||
|
#[diagnostic(code(arbiter::keyholder::invalid_key))]
|
||||||
|
InvalidKey,
|
||||||
|
|
||||||
|
#[error("Requested aead entry not found")]
|
||||||
|
#[diagnostic(code(arbiter::keyholder::aead_not_found))]
|
||||||
|
NotFound,
|
||||||
|
|
||||||
|
#[error("Encryption error: {0}")]
|
||||||
|
#[diagnostic(code(arbiter::keyholder::encryption_error))]
|
||||||
|
Encryption(#[from] chacha20poly1305::aead::Error),
|
||||||
|
|
||||||
|
#[error("Database error: {0}")]
|
||||||
|
#[diagnostic(code(arbiter::keyholder::database_error))]
|
||||||
|
DatabaseConnection(#[from] db::PoolError),
|
||||||
|
|
||||||
|
#[error("Database transaction error: {0}")]
|
||||||
|
#[diagnostic(code(arbiter::keyholder::database_transaction_error))]
|
||||||
|
DatabaseTransaction(#[from] diesel::result::Error),
|
||||||
|
|
||||||
|
#[error("Broken database")]
|
||||||
|
#[diagnostic(code(arbiter::keyholder::broken_database))]
|
||||||
|
BrokenDatabase,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Manages vault root key and tracks current state of the vault (bootstrapped/unbootstrapped, sealed/unsealed).
|
||||||
|
/// Provides API for encrypting and decrypting data using the vault root key.
|
||||||
|
/// Abstraction over database to make sure nonces are never reused and encryption keys are never exposed in plaintext outside of this actor.
|
||||||
|
#[derive(Actor)]
|
||||||
|
pub struct KeyHolder {
|
||||||
|
db: db::DatabasePool,
|
||||||
|
state: State,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[messages]
|
||||||
|
impl KeyHolder {
|
||||||
|
pub async fn new(db: db::DatabasePool) -> Result<Self, Error> {
|
||||||
|
let state = {
|
||||||
|
let mut conn = db.get().await?;
|
||||||
|
|
||||||
|
let (root_key_history,) = schema::arbiter_settings::table
|
||||||
|
.left_join(schema::root_key_history::table)
|
||||||
|
.select((Option::<RootKeyHistory>::as_select(),))
|
||||||
|
.get_result::<(Option<RootKeyHistory>,)>(&mut conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
match root_key_history {
|
||||||
|
Some(root_key_history) => State::Sealed {
|
||||||
|
root_key_history_id: root_key_history.id,
|
||||||
|
},
|
||||||
|
None => State::Unbootstrapped,
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(Self { db, state })
|
||||||
|
}
|
||||||
|
|
||||||
|
// Exclusive transaction to avoid race condtions if multiple keyholders write
|
||||||
|
// additional layer of protection against nonce-reuse
|
||||||
|
async fn get_new_nonce(pool: &db::DatabasePool, root_key_id: i32) -> Result<Nonce, Error> {
|
||||||
|
let mut conn = pool.get().await?;
|
||||||
|
|
||||||
|
let nonce = conn
|
||||||
|
.exclusive_transaction(|conn| {
|
||||||
|
Box::pin(async move {
|
||||||
|
let current_nonce: Vec<u8> = schema::root_key_history::table
|
||||||
|
.filter(schema::root_key_history::id.eq(root_key_id))
|
||||||
|
.select(schema::root_key_history::data_encryption_nonce)
|
||||||
|
.first(conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let mut nonce =
|
||||||
|
v1::Nonce::try_from(current_nonce.as_slice()).map_err(|_| {
|
||||||
|
error!(
|
||||||
|
"Broken database: invalid nonce for root key history id={}",
|
||||||
|
root_key_id
|
||||||
|
);
|
||||||
|
Error::BrokenDatabase
|
||||||
|
})?;
|
||||||
|
nonce.increment();
|
||||||
|
|
||||||
|
update(schema::root_key_history::table)
|
||||||
|
.filter(schema::root_key_history::id.eq(root_key_id))
|
||||||
|
.set(schema::root_key_history::data_encryption_nonce.eq(nonce.to_vec()))
|
||||||
|
.execute(conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Result::<_, Error>::Ok(nonce)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok(nonce)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[message]
|
||||||
|
pub async fn bootstrap(&mut self, seal_key_raw: MemSafe<Vec<u8>>) -> Result<(), Error> {
|
||||||
|
if !matches!(self.state, State::Unbootstrapped) {
|
||||||
|
return Err(Error::AlreadyBootstrapped);
|
||||||
|
}
|
||||||
|
let salt = v1::generate_salt();
|
||||||
|
let mut seal_key = v1::derive_seal_key(seal_key_raw, &salt);
|
||||||
|
let mut root_key = KeyCell::new_secure_random();
|
||||||
|
|
||||||
|
// Zero nonces are fine because they are one-time
|
||||||
|
let root_key_nonce = v1::Nonce::default();
|
||||||
|
let data_encryption_nonce = v1::Nonce::default();
|
||||||
|
|
||||||
|
let root_key_ciphertext: Vec<u8> = {
|
||||||
|
let root_key_reader = root_key.0.read().unwrap();
|
||||||
|
let root_key_reader = root_key_reader.as_slice();
|
||||||
|
seal_key
|
||||||
|
.encrypt(&root_key_nonce, v1::ROOT_KEY_TAG, root_key_reader)
|
||||||
|
.map_err(|err| {
|
||||||
|
error!(?err, "Fatal bootstrap error");
|
||||||
|
Error::Encryption(err)
|
||||||
|
})?
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut conn = self.db.get().await?;
|
||||||
|
|
||||||
|
let data_encryption_nonce_bytes = data_encryption_nonce.to_vec();
|
||||||
|
let root_key_history_id = conn
|
||||||
|
.transaction(|conn| {
|
||||||
|
Box::pin(async move {
|
||||||
|
let root_key_history_id: i32 = insert_into(schema::root_key_history::table)
|
||||||
|
.values(&models::NewRootKeyHistory {
|
||||||
|
ciphertext: root_key_ciphertext,
|
||||||
|
tag: v1::ROOT_KEY_TAG.to_vec(),
|
||||||
|
root_key_encryption_nonce: root_key_nonce.to_vec(),
|
||||||
|
data_encryption_nonce: data_encryption_nonce_bytes,
|
||||||
|
schema_version: 1,
|
||||||
|
salt: salt.to_vec(),
|
||||||
|
})
|
||||||
|
.returning(schema::root_key_history::id)
|
||||||
|
.get_result(conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
update(schema::arbiter_settings::table)
|
||||||
|
.set(schema::arbiter_settings::root_key_id.eq(root_key_history_id))
|
||||||
|
.execute(conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Result::<_, diesel::result::Error>::Ok(root_key_history_id)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
self.state = State::Unsealed {
|
||||||
|
root_key,
|
||||||
|
root_key_history_id,
|
||||||
|
};
|
||||||
|
|
||||||
|
info!("Keyholder bootstrapped successfully");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
#[message]
|
||||||
|
pub async fn try_unseal(&mut self, seal_key_raw: MemSafe<Vec<u8>>) -> Result<(), Error> {
|
||||||
|
let State::Sealed {
|
||||||
|
root_key_history_id,
|
||||||
|
} = &self.state
|
||||||
|
else {
|
||||||
|
return Err(Error::NotBootstrapped);
|
||||||
|
};
|
||||||
|
|
||||||
|
// We don't want to hold connection while doing expensive KDF work
|
||||||
|
let current_key = {
|
||||||
|
let mut conn = self.db.get().await?;
|
||||||
|
schema::root_key_history::table
|
||||||
|
.filter(schema::root_key_history::id.eq(*root_key_history_id))
|
||||||
|
.select(schema::root_key_history::data_encryption_nonce)
|
||||||
|
.select(RootKeyHistory::as_select())
|
||||||
|
.first(&mut conn)
|
||||||
|
.await?
|
||||||
|
};
|
||||||
|
|
||||||
|
let salt = ¤t_key.salt;
|
||||||
|
let salt = v1::Salt::try_from(salt.as_slice()).map_err(|_| {
|
||||||
|
error!("Broken database: invalid salt for root key");
|
||||||
|
Error::BrokenDatabase
|
||||||
|
})?;
|
||||||
|
let mut seal_key = v1::derive_seal_key(seal_key_raw, &salt);
|
||||||
|
|
||||||
|
let mut root_key = MemSafe::new(current_key.ciphertext.clone()).unwrap();
|
||||||
|
|
||||||
|
let nonce = v1::Nonce::try_from(current_key.root_key_encryption_nonce.as_slice()).map_err(
|
||||||
|
|_| {
|
||||||
|
error!("Broken database: invalid nonce for root key");
|
||||||
|
Error::BrokenDatabase
|
||||||
|
},
|
||||||
|
)?;
|
||||||
|
|
||||||
|
seal_key
|
||||||
|
.decrypt_in_place(&nonce, v1::ROOT_KEY_TAG, &mut root_key)
|
||||||
|
.map_err(|err| {
|
||||||
|
error!(?err, "Failed to unseal root key: invalid seal key");
|
||||||
|
Error::InvalidKey
|
||||||
|
})?;
|
||||||
|
|
||||||
|
self.state = State::Unsealed {
|
||||||
|
root_key_history_id: current_key.id,
|
||||||
|
root_key: v1::KeyCell::try_from(root_key).map_err(|err| {
|
||||||
|
error!(?err, "Broken database: invalid encryption key size");
|
||||||
|
Error::BrokenDatabase
|
||||||
|
})?,
|
||||||
|
};
|
||||||
|
|
||||||
|
info!("Keyholder unsealed successfully");
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decrypts the `aead_encrypted` entry with the given ID and returns the plaintext
|
||||||
|
#[message]
|
||||||
|
pub async fn decrypt(&mut self, aead_id: i32) -> Result<MemSafe<Vec<u8>>, Error> {
|
||||||
|
let State::Unsealed { root_key, .. } = &mut self.state else {
|
||||||
|
return Err(Error::NotBootstrapped);
|
||||||
|
};
|
||||||
|
|
||||||
|
let row: models::AeadEncrypted = {
|
||||||
|
let mut conn = self.db.get().await?;
|
||||||
|
schema::aead_encrypted::table
|
||||||
|
.select(models::AeadEncrypted::as_select())
|
||||||
|
.filter(schema::aead_encrypted::id.eq(aead_id))
|
||||||
|
.first(&mut conn)
|
||||||
|
.await
|
||||||
|
.optional()?
|
||||||
|
.ok_or(Error::NotFound)?
|
||||||
|
};
|
||||||
|
|
||||||
|
let nonce = v1::Nonce::try_from(row.current_nonce.as_slice()).map_err(|_| {
|
||||||
|
error!(
|
||||||
|
"Broken database: invalid nonce for aead_encrypted id={}",
|
||||||
|
aead_id
|
||||||
|
);
|
||||||
|
Error::BrokenDatabase
|
||||||
|
})?;
|
||||||
|
let mut output = MemSafe::new(row.ciphertext).unwrap();
|
||||||
|
root_key.decrypt_in_place(&nonce, v1::TAG, &mut output)?;
|
||||||
|
Ok(output)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Creates new `aead_encrypted` entry in the database and returns it's ID
|
||||||
|
#[message]
|
||||||
|
pub async fn create_new(&mut self, mut plaintext: MemSafe<Vec<u8>>) -> Result<i32, Error> {
|
||||||
|
let State::Unsealed {
|
||||||
|
root_key,
|
||||||
|
root_key_history_id,
|
||||||
|
} = &mut self.state
|
||||||
|
else {
|
||||||
|
return Err(Error::NotBootstrapped);
|
||||||
|
};
|
||||||
|
|
||||||
|
// Order matters here - `get_new_nonce` acquires connection, so we need to call it before next acquire
|
||||||
|
// Borrow checker note: &mut borrow a few lines above is disjoint from this field
|
||||||
|
let nonce = Self::get_new_nonce(&self.db, *root_key_history_id).await?;
|
||||||
|
|
||||||
|
let mut ciphertext_buffer = plaintext.write().unwrap();
|
||||||
|
let ciphertext_buffer: &mut Vec<u8> = ciphertext_buffer.as_mut();
|
||||||
|
root_key.encrypt_in_place(&nonce, v1::TAG, &mut *ciphertext_buffer)?;
|
||||||
|
|
||||||
|
let ciphertext = std::mem::take(ciphertext_buffer);
|
||||||
|
|
||||||
|
let mut conn = self.db.get().await?;
|
||||||
|
let aead_id: i32 = insert_into(schema::aead_encrypted::table)
|
||||||
|
.values(&models::NewAeadEncrypted {
|
||||||
|
ciphertext,
|
||||||
|
tag: v1::TAG.to_vec(),
|
||||||
|
current_nonce: nonce.to_vec(),
|
||||||
|
schema_version: 1,
|
||||||
|
associated_root_key_id: *root_key_history_id,
|
||||||
|
created_at: Utc::now().into()
|
||||||
|
})
|
||||||
|
.returning(schema::aead_encrypted::id)
|
||||||
|
.get_result(&mut conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok(aead_id)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[message]
|
||||||
|
pub fn get_state(&self) -> StateDiscriminants {
|
||||||
|
self.state.discriminant()
|
||||||
|
}
|
||||||
|
|
||||||
|
#[message]
|
||||||
|
pub fn seal(&mut self) -> Result<(), Error> {
|
||||||
|
let State::Unsealed {
|
||||||
|
root_key_history_id,
|
||||||
|
..
|
||||||
|
} = &self.state
|
||||||
|
else {
|
||||||
|
return Err(Error::NotBootstrapped);
|
||||||
|
};
|
||||||
|
self.state = State::Sealed {
|
||||||
|
root_key_history_id: *root_key_history_id,
|
||||||
|
};
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use diesel::SelectableHelper;
|
||||||
|
|
||||||
|
use diesel_async::RunQueryDsl;
|
||||||
|
use memsafe::MemSafe;
|
||||||
|
|
||||||
|
use crate::db::{self};
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
async fn bootstrapped_actor(db: &db::DatabasePool) -> KeyHolder {
|
||||||
|
let mut actor = KeyHolder::new(db.clone()).await.unwrap();
|
||||||
|
let seal_key = MemSafe::new(b"test-seal-key".to_vec()).unwrap();
|
||||||
|
actor.bootstrap(seal_key).await.unwrap();
|
||||||
|
actor
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
async fn nonce_monotonic_even_when_nonce_allocation_interleaves() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut actor = bootstrapped_actor(&db).await;
|
||||||
|
let root_key_history_id = match actor.state {
|
||||||
|
State::Unsealed {
|
||||||
|
root_key_history_id,
|
||||||
|
..
|
||||||
|
} => root_key_history_id,
|
||||||
|
_ => panic!("expected unsealed state"),
|
||||||
|
};
|
||||||
|
|
||||||
|
let n1 = KeyHolder::get_new_nonce(&db, root_key_history_id)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
let n2 = KeyHolder::get_new_nonce(&db, root_key_history_id)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert!(n2.to_vec() > n1.to_vec(), "nonce must increase");
|
||||||
|
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
let root_row: models::RootKeyHistory = schema::root_key_history::table
|
||||||
|
.select(models::RootKeyHistory::as_select())
|
||||||
|
.first(&mut conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(root_row.data_encryption_nonce, n2.to_vec());
|
||||||
|
|
||||||
|
let id = actor
|
||||||
|
.create_new(MemSafe::new(b"post-interleave".to_vec()).unwrap())
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
let row: models::AeadEncrypted = schema::aead_encrypted::table
|
||||||
|
.filter(schema::aead_encrypted::id.eq(id))
|
||||||
|
.select(models::AeadEncrypted::as_select())
|
||||||
|
.first(&mut conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert!(
|
||||||
|
row.current_nonce > n2.to_vec(),
|
||||||
|
"next write must advance nonce"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
47
server/crates/arbiter-server/src/actors/mod.rs
Normal file
47
server/crates/arbiter-server/src/actors/mod.rs
Normal file
@@ -0,0 +1,47 @@
|
|||||||
|
use kameo::actor::{ActorRef, Spawn};
|
||||||
|
use miette::Diagnostic;
|
||||||
|
use thiserror::Error;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
actors::{bootstrap::Bootstrapper, evm::EvmActor, keyholder::KeyHolder, router::MessageRouter},
|
||||||
|
db,
|
||||||
|
};
|
||||||
|
|
||||||
|
pub mod bootstrap;
|
||||||
|
pub mod client;
|
||||||
|
mod evm;
|
||||||
|
pub mod keyholder;
|
||||||
|
pub mod router;
|
||||||
|
pub mod user_agent;
|
||||||
|
|
||||||
|
#[derive(Error, Debug, Diagnostic)]
|
||||||
|
pub enum SpawnError {
|
||||||
|
#[error("Failed to spawn Bootstrapper actor")]
|
||||||
|
#[diagnostic(code(SpawnError::Bootstrapper))]
|
||||||
|
Bootstrapper(#[from] bootstrap::Error),
|
||||||
|
|
||||||
|
#[error("Failed to spawn KeyHolder actor")]
|
||||||
|
#[diagnostic(code(SpawnError::KeyHolder))]
|
||||||
|
KeyHolder(#[from] keyholder::Error),
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Long-lived actors that are shared across all connections and handle global state and operations
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct GlobalActors {
|
||||||
|
pub key_holder: ActorRef<KeyHolder>,
|
||||||
|
pub bootstrapper: ActorRef<Bootstrapper>,
|
||||||
|
pub router: ActorRef<MessageRouter>,
|
||||||
|
pub evm: ActorRef<EvmActor>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl GlobalActors {
|
||||||
|
pub async fn spawn(db: db::DatabasePool) -> Result<Self, SpawnError> {
|
||||||
|
let key_holder = KeyHolder::spawn(KeyHolder::new(db.clone()).await?);
|
||||||
|
Ok(Self {
|
||||||
|
bootstrapper: Bootstrapper::spawn(Bootstrapper::new(&db).await?),
|
||||||
|
evm: EvmActor::spawn(EvmActor::new(key_holder.clone(), db)),
|
||||||
|
key_holder,
|
||||||
|
router: MessageRouter::spawn(MessageRouter::default()),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
76
server/crates/arbiter-server/src/actors/router/mod.rs
Normal file
76
server/crates/arbiter-server/src/actors/router/mod.rs
Normal file
@@ -0,0 +1,76 @@
|
|||||||
|
use std::{collections::HashMap, ops::ControlFlow};
|
||||||
|
|
||||||
|
use kameo::{
|
||||||
|
Actor,
|
||||||
|
actor::{ActorId, ActorRef},
|
||||||
|
messages,
|
||||||
|
prelude::{ActorStopReason, Context, WeakActorRef},
|
||||||
|
};
|
||||||
|
use tracing::info;
|
||||||
|
|
||||||
|
use crate::actors::{client::session::ClientSession, user_agent::session::UserAgentSession};
|
||||||
|
|
||||||
|
#[derive(Default)]
|
||||||
|
pub struct MessageRouter {
|
||||||
|
pub user_agents: HashMap<ActorId, ActorRef<UserAgentSession>>,
|
||||||
|
pub clients: HashMap<ActorId, ActorRef<ClientSession>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Actor for MessageRouter {
|
||||||
|
type Args = Self;
|
||||||
|
|
||||||
|
type Error = ();
|
||||||
|
|
||||||
|
async fn on_start(args: Self::Args, _: ActorRef<Self>) -> Result<Self, Self::Error> {
|
||||||
|
Ok(args)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn on_link_died(
|
||||||
|
&mut self,
|
||||||
|
_: WeakActorRef<Self>,
|
||||||
|
id: ActorId,
|
||||||
|
_: ActorStopReason,
|
||||||
|
) -> Result<ControlFlow<ActorStopReason>, Self::Error> {
|
||||||
|
if self.user_agents.remove(&id).is_some() {
|
||||||
|
info!(
|
||||||
|
?id,
|
||||||
|
actor = "MessageRouter",
|
||||||
|
event = "useragent.disconnected"
|
||||||
|
);
|
||||||
|
} else if self.clients.remove(&id).is_some() {
|
||||||
|
info!(?id, actor = "MessageRouter", event = "client.disconnected");
|
||||||
|
} else {
|
||||||
|
info!(
|
||||||
|
?id,
|
||||||
|
actor = "MessageRouter",
|
||||||
|
event = "unknown.actor.disconnected"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
Ok(ControlFlow::Continue(()))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[messages]
|
||||||
|
impl MessageRouter {
|
||||||
|
#[message(ctx)]
|
||||||
|
pub async fn register_user_agent(
|
||||||
|
&mut self,
|
||||||
|
actor: ActorRef<UserAgentSession>,
|
||||||
|
ctx: &mut Context<Self, ()>,
|
||||||
|
) {
|
||||||
|
info!(id = %actor.id(), actor = "MessageRouter", event = "useragent.connected");
|
||||||
|
ctx.actor_ref().link(&actor).await;
|
||||||
|
self.user_agents.insert(actor.id(), actor);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[message(ctx)]
|
||||||
|
pub async fn register_client(
|
||||||
|
&mut self,
|
||||||
|
actor: ActorRef<ClientSession>,
|
||||||
|
ctx: &mut Context<Self, ()>,
|
||||||
|
) {
|
||||||
|
info!(id = %actor.id(), actor = "MessageRouter", event = "client.connected");
|
||||||
|
ctx.actor_ref().link(&actor).await;
|
||||||
|
self.clients.insert(actor.id(), actor);
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,369 +0,0 @@
|
|||||||
use arbiter_proto::proto::{
|
|
||||||
UserAgentRequest, UserAgentResponse,
|
|
||||||
auth::{
|
|
||||||
self, AuthChallenge, AuthChallengeRequest, AuthOk, ClientMessage,
|
|
||||||
ServerMessage as AuthServerMessage, client_message::Payload as ClientAuthPayload,
|
|
||||||
server_message::Payload as ServerAuthPayload,
|
|
||||||
},
|
|
||||||
user_agent_request::Payload as UserAgentRequestPayload,
|
|
||||||
user_agent_response::Payload as UserAgentResponsePayload,
|
|
||||||
};
|
|
||||||
use diesel::{ExpressionMethods as _, OptionalExtension as _, QueryDsl, dsl::update};
|
|
||||||
use diesel_async::{AsyncConnection, RunQueryDsl};
|
|
||||||
use ed25519_dalek::VerifyingKey;
|
|
||||||
use futures::StreamExt;
|
|
||||||
use kameo::{
|
|
||||||
Actor,
|
|
||||||
actor::{ActorRef, Spawn},
|
|
||||||
error::SendError,
|
|
||||||
messages,
|
|
||||||
prelude::Context,
|
|
||||||
};
|
|
||||||
use tokio::sync::mpsc;
|
|
||||||
use tokio::sync::mpsc::Sender;
|
|
||||||
use tonic::Status;
|
|
||||||
use tracing::{error, info};
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
ServerContext,
|
|
||||||
context::bootstrap::{BootstrapActor, ConsumeToken},
|
|
||||||
db::{self, schema},
|
|
||||||
errors::GrpcStatusExt,
|
|
||||||
};
|
|
||||||
|
|
||||||
/// Context for state machine with validated key and sent challenge
|
|
||||||
/// Challenge is then transformed to bytes using shared function and verified
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
pub struct ChallengeContext {
|
|
||||||
challenge: AuthChallenge,
|
|
||||||
key: VerifyingKey,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Request context with deserialized public key for state machine.
|
|
||||||
// This intermediate struct is needed because the state machine branches depending on presence of bootstrap token,
|
|
||||||
// but we want to have the deserialized key in both branches.
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
pub struct AuthRequestContext {
|
|
||||||
pubkey: VerifyingKey,
|
|
||||||
bootstrap_token: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
smlang::statemachine!(
|
|
||||||
name: UserAgent,
|
|
||||||
derive_states: [Debug],
|
|
||||||
custom_error: false,
|
|
||||||
transitions: {
|
|
||||||
*Init + AuthRequest(AuthRequestContext) / auth_request_context = ReceivedAuthRequest(AuthRequestContext),
|
|
||||||
ReceivedAuthRequest(AuthRequestContext) + ReceivedBootstrapToken = Authenticated,
|
|
||||||
|
|
||||||
ReceivedAuthRequest(AuthRequestContext) + SentChallenge(ChallengeContext) / move_challenge = WaitingForChallengeSolution(ChallengeContext),
|
|
||||||
|
|
||||||
WaitingForChallengeSolution(ChallengeContext) + ReceivedGoodSolution = Authenticated,
|
|
||||||
WaitingForChallengeSolution(ChallengeContext) + ReceivedBadSolution = AuthError, // block further transitions, but connection should close anyway
|
|
||||||
}
|
|
||||||
);
|
|
||||||
|
|
||||||
pub struct DummyContext;
|
|
||||||
impl UserAgentStateMachineContext for DummyContext {
|
|
||||||
#[allow(missing_docs)]
|
|
||||||
#[allow(clippy::unused_unit)]
|
|
||||||
fn move_challenge(
|
|
||||||
&mut self,
|
|
||||||
state_data: &AuthRequestContext,
|
|
||||||
event_data: ChallengeContext,
|
|
||||||
) -> Result<ChallengeContext, ()> {
|
|
||||||
Ok(event_data)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[allow(missing_docs)]
|
|
||||||
#[allow(clippy::unused_unit)]
|
|
||||||
fn auth_request_context(
|
|
||||||
&mut self,
|
|
||||||
event_data: AuthRequestContext,
|
|
||||||
) -> Result<AuthRequestContext, ()> {
|
|
||||||
Ok(event_data)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Actor)]
|
|
||||||
pub struct UserAgentActor {
|
|
||||||
db: db::DatabasePool,
|
|
||||||
bootstapper: ActorRef<BootstrapActor>,
|
|
||||||
state: UserAgentStateMachine<DummyContext>,
|
|
||||||
tx: Sender<Result<UserAgentResponse, Status>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl UserAgentActor {
|
|
||||||
pub(crate) fn new(
|
|
||||||
context: ServerContext,
|
|
||||||
tx: Sender<Result<UserAgentResponse, Status>>,
|
|
||||||
) -> Self {
|
|
||||||
Self {
|
|
||||||
db: context.db.clone(),
|
|
||||||
bootstapper: context.bootstrapper.clone(),
|
|
||||||
state: UserAgentStateMachine::new(DummyContext),
|
|
||||||
tx,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn new_manual(
|
|
||||||
db: db::DatabasePool,
|
|
||||||
bootstapper: ActorRef<BootstrapActor>,
|
|
||||||
tx: Sender<Result<UserAgentResponse, Status>>,
|
|
||||||
) -> Self {
|
|
||||||
Self {
|
|
||||||
db,
|
|
||||||
bootstapper,
|
|
||||||
state: UserAgentStateMachine::new(DummyContext),
|
|
||||||
tx,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn transition(&mut self, event: UserAgentEvents) -> Result<(), Status> {
|
|
||||||
self.state.process_event(event).map_err(|e| {
|
|
||||||
error!(?e, "State transition failed");
|
|
||||||
Status::internal("State machine error")
|
|
||||||
})?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn auth_with_bootstrap_token(
|
|
||||||
&mut self,
|
|
||||||
pubkey: ed25519_dalek::VerifyingKey,
|
|
||||||
token: String,
|
|
||||||
) -> Result<UserAgentResponse, Status> {
|
|
||||||
let token_ok: bool = self
|
|
||||||
.bootstapper
|
|
||||||
.ask(ConsumeToken { token })
|
|
||||||
.await
|
|
||||||
.map_err(|e| {
|
|
||||||
error!(?pubkey, "Failed to consume bootstrap token: {e}");
|
|
||||||
Status::internal("Bootstrap token consumption failed")
|
|
||||||
})?;
|
|
||||||
|
|
||||||
if !token_ok {
|
|
||||||
error!(?pubkey, "Invalid bootstrap token provided");
|
|
||||||
return Err(Status::invalid_argument("Invalid bootstrap token"));
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
let mut conn = self.db.get().await.to_status()?;
|
|
||||||
|
|
||||||
diesel::insert_into(schema::useragent_client::table)
|
|
||||||
.values((
|
|
||||||
schema::useragent_client::public_key.eq(pubkey.as_bytes().to_vec()),
|
|
||||||
schema::useragent_client::nonce.eq(1),
|
|
||||||
))
|
|
||||||
.execute(&mut conn)
|
|
||||||
.await
|
|
||||||
.to_status()?;
|
|
||||||
}
|
|
||||||
|
|
||||||
self.transition(UserAgentEvents::ReceivedBootstrapToken)?;
|
|
||||||
|
|
||||||
Ok(auth_response(ServerAuthPayload::AuthOk(AuthOk {})))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn auth_with_challenge(&mut self, pubkey: VerifyingKey, pubkey_bytes: Vec<u8>) -> Output {
|
|
||||||
let nonce: Option<i32> = {
|
|
||||||
let mut db_conn = self.db.get().await.to_status()?;
|
|
||||||
db_conn
|
|
||||||
.transaction(|conn| {
|
|
||||||
Box::pin(async move {
|
|
||||||
let current_nonce = schema::useragent_client::table
|
|
||||||
.filter(
|
|
||||||
schema::useragent_client::public_key.eq(pubkey.as_bytes().to_vec()),
|
|
||||||
)
|
|
||||||
.select(schema::useragent_client::nonce)
|
|
||||||
.first::<i32>(conn)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
update(schema::useragent_client::table)
|
|
||||||
.filter(
|
|
||||||
schema::useragent_client::public_key.eq(pubkey.as_bytes().to_vec()),
|
|
||||||
)
|
|
||||||
.set(schema::useragent_client::nonce.eq(current_nonce + 1))
|
|
||||||
.execute(conn)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
Result::<_, diesel::result::Error>::Ok(current_nonce)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
.optional()
|
|
||||||
.to_status()?
|
|
||||||
};
|
|
||||||
|
|
||||||
let Some(nonce) = nonce else {
|
|
||||||
error!(?pubkey, "Public key not found in database");
|
|
||||||
return Err(Status::unauthenticated("Public key not registered"));
|
|
||||||
};
|
|
||||||
|
|
||||||
let challenge = auth::AuthChallenge {
|
|
||||||
pubkey: pubkey_bytes,
|
|
||||||
nonce: nonce,
|
|
||||||
};
|
|
||||||
|
|
||||||
self.transition(UserAgentEvents::SentChallenge(ChallengeContext {
|
|
||||||
challenge: challenge.clone(),
|
|
||||||
key: pubkey,
|
|
||||||
}))?;
|
|
||||||
|
|
||||||
info!(
|
|
||||||
?pubkey,
|
|
||||||
?challenge,
|
|
||||||
"Sent authentication challenge to client"
|
|
||||||
);
|
|
||||||
|
|
||||||
Ok(auth_response(ServerAuthPayload::AuthChallenge(challenge)))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn verify_challenge_solution(
|
|
||||||
&self,
|
|
||||||
solution: &auth::AuthChallengeSolution,
|
|
||||||
) -> Result<(bool, &ChallengeContext), Status> {
|
|
||||||
let UserAgentStates::WaitingForChallengeSolution(challenge_context) = self.state.state()
|
|
||||||
else {
|
|
||||||
error!("Received challenge solution in invalid state");
|
|
||||||
return Err(Status::invalid_argument(
|
|
||||||
"Invalid state for challenge solution",
|
|
||||||
));
|
|
||||||
};
|
|
||||||
let formatted_challenge = arbiter_proto::format_challenge(&challenge_context.challenge);
|
|
||||||
|
|
||||||
let signature = solution.signature.as_slice().try_into().map_err(|_| {
|
|
||||||
error!(?solution, "Invalid signature length");
|
|
||||||
Status::invalid_argument("Invalid signature length")
|
|
||||||
})?;
|
|
||||||
|
|
||||||
let valid = challenge_context
|
|
||||||
.key
|
|
||||||
.verify_strict(&formatted_challenge, &signature)
|
|
||||||
.is_ok();
|
|
||||||
|
|
||||||
Ok((valid, challenge_context))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type Output = Result<UserAgentResponse, Status>;
|
|
||||||
|
|
||||||
fn auth_response(payload: ServerAuthPayload) -> UserAgentResponse {
|
|
||||||
UserAgentResponse {
|
|
||||||
payload: Some(UserAgentResponsePayload::AuthMessage(AuthServerMessage {
|
|
||||||
payload: Some(payload),
|
|
||||||
})),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[messages]
|
|
||||||
impl UserAgentActor {
|
|
||||||
#[message(ctx)]
|
|
||||||
pub async fn handle_auth_challenge_request(
|
|
||||||
&mut self,
|
|
||||||
req: AuthChallengeRequest,
|
|
||||||
ctx: &mut Context<Self, Output>,
|
|
||||||
) -> Output {
|
|
||||||
let pubkey = req.pubkey.as_array().ok_or(Status::invalid_argument(
|
|
||||||
"Expected pubkey to have specific length",
|
|
||||||
))?;
|
|
||||||
let pubkey = VerifyingKey::from_bytes(pubkey).map_err(|err| {
|
|
||||||
error!(?pubkey, "Failed to convert to VerifyingKey");
|
|
||||||
Status::invalid_argument("Failed to convert pubkey to VerifyingKey")
|
|
||||||
})?;
|
|
||||||
|
|
||||||
self.transition(UserAgentEvents::AuthRequest(AuthRequestContext {
|
|
||||||
pubkey,
|
|
||||||
bootstrap_token: req.bootstrap_token.clone(),
|
|
||||||
}))?;
|
|
||||||
|
|
||||||
match req.bootstrap_token {
|
|
||||||
Some(token) => self.auth_with_bootstrap_token(pubkey, token).await,
|
|
||||||
None => self.auth_with_challenge(pubkey, req.pubkey).await,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[message(ctx)]
|
|
||||||
pub async fn handle_auth_challenge_solution(
|
|
||||||
&mut self,
|
|
||||||
solution: auth::AuthChallengeSolution,
|
|
||||||
ctx: &mut Context<Self, Output>,
|
|
||||||
) -> Output {
|
|
||||||
let (valid, challenge_context) = self.verify_challenge_solution(&solution)?;
|
|
||||||
|
|
||||||
if valid {
|
|
||||||
info!(
|
|
||||||
?challenge_context,
|
|
||||||
"Client provided valid solution to authentication challenge"
|
|
||||||
);
|
|
||||||
self.transition(UserAgentEvents::ReceivedGoodSolution)?;
|
|
||||||
Ok(auth_response(ServerAuthPayload::AuthOk(AuthOk {})))
|
|
||||||
} else {
|
|
||||||
error!("Client provided invalid solution to authentication challenge");
|
|
||||||
self.transition(UserAgentEvents::ReceivedBadSolution)?;
|
|
||||||
Err(Status::unauthenticated("Invalid challenge solution"))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use arbiter_proto::proto::{
|
|
||||||
UserAgentResponse, auth::{AuthChallengeRequest, AuthOk},
|
|
||||||
user_agent_response::Payload as UserAgentResponsePayload,
|
|
||||||
};
|
|
||||||
use kameo::actor::Spawn;
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
actors::user_agent::HandleAuthChallengeRequest, context::bootstrap::BootstrapActor, db,
|
|
||||||
};
|
|
||||||
|
|
||||||
use super::UserAgentActor;
|
|
||||||
|
|
||||||
#[tokio::test]
|
|
||||||
#[test_log::test]
|
|
||||||
pub async fn test_bootstrap_token_auth() {
|
|
||||||
let db = db::create_test_pool().await;
|
|
||||||
// explicitly not installing any user_agent pubkeys
|
|
||||||
let bootstrapper = BootstrapActor::new(&db).await.unwrap(); // this will create bootstrap token
|
|
||||||
let token = bootstrapper.get_token().unwrap();
|
|
||||||
|
|
||||||
let bootstrapper_ref = BootstrapActor::spawn(bootstrapper);
|
|
||||||
let user_agent = UserAgentActor::new_manual(
|
|
||||||
db.clone(),
|
|
||||||
bootstrapper_ref,
|
|
||||||
tokio::sync::mpsc::channel(1).0, // dummy channel, we won't actually send responses in this test
|
|
||||||
);
|
|
||||||
let user_agent_ref = UserAgentActor::spawn(user_agent);
|
|
||||||
|
|
||||||
// simulate client sending auth request with bootstrap token
|
|
||||||
let new_key = ed25519_dalek::SigningKey::generate(&mut rand::rng());
|
|
||||||
let pubkey_bytes = new_key.verifying_key().to_bytes().to_vec();
|
|
||||||
|
|
||||||
let result = user_agent_ref
|
|
||||||
.ask(HandleAuthChallengeRequest {
|
|
||||||
req: AuthChallengeRequest {
|
|
||||||
pubkey: pubkey_bytes,
|
|
||||||
bootstrap_token: Some(token),
|
|
||||||
},
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
.expect("Shouldn't fail to send message");
|
|
||||||
|
|
||||||
// auth succeeded
|
|
||||||
assert_eq!(
|
|
||||||
result,
|
|
||||||
UserAgentResponse {
|
|
||||||
payload: Some(UserAgentResponsePayload::AuthMessage(
|
|
||||||
arbiter_proto::proto::auth::ServerMessage {
|
|
||||||
payload: Some(arbiter_proto::proto::auth::server_message::Payload::AuthOk(
|
|
||||||
AuthOk {},
|
|
||||||
)),
|
|
||||||
},
|
|
||||||
)),
|
|
||||||
}
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
mod transport;
|
|
||||||
pub(crate) use transport::handle_user_agent;
|
|
||||||
141
server/crates/arbiter-server/src/actors/user_agent/auth.rs
Normal file
141
server/crates/arbiter-server/src/actors/user_agent/auth.rs
Normal file
@@ -0,0 +1,141 @@
|
|||||||
|
use arbiter_proto::proto::user_agent::{
|
||||||
|
AuthChallengeRequest, AuthChallengeSolution, KeyType as ProtoKeyType, UserAgentRequest,
|
||||||
|
user_agent_request::Payload as UserAgentRequestPayload,
|
||||||
|
};
|
||||||
|
use tracing::error;
|
||||||
|
|
||||||
|
use crate::actors::user_agent::{
|
||||||
|
UserAgentConnection,
|
||||||
|
auth::state::{AuthContext, AuthPublicKey, AuthStateMachine},
|
||||||
|
session::UserAgentSession,
|
||||||
|
};
|
||||||
|
|
||||||
|
#[derive(thiserror::Error, Debug, PartialEq)]
|
||||||
|
pub enum Error {
|
||||||
|
#[error("Unexpected message payload")]
|
||||||
|
UnexpectedMessagePayload,
|
||||||
|
#[error("Invalid client public key length")]
|
||||||
|
InvalidClientPubkeyLength,
|
||||||
|
#[error("Invalid client public key encoding")]
|
||||||
|
InvalidAuthPubkeyEncoding,
|
||||||
|
#[error("Database pool unavailable")]
|
||||||
|
DatabasePoolUnavailable,
|
||||||
|
#[error("Database operation failed")]
|
||||||
|
DatabaseOperationFailed,
|
||||||
|
#[error("Public key not registered")]
|
||||||
|
PublicKeyNotRegistered,
|
||||||
|
#[error("Transport error")]
|
||||||
|
Transport,
|
||||||
|
#[error("Invalid bootstrap token")]
|
||||||
|
InvalidBootstrapToken,
|
||||||
|
#[error("Bootstrapper actor unreachable")]
|
||||||
|
BootstrapperActorUnreachable,
|
||||||
|
#[error("Invalid challenge solution")]
|
||||||
|
InvalidChallengeSolution,
|
||||||
|
}
|
||||||
|
|
||||||
|
mod state;
|
||||||
|
use state::*;
|
||||||
|
|
||||||
|
fn parse_pubkey(key_type: ProtoKeyType, pubkey: Vec<u8>) -> Result<AuthPublicKey, Error> {
|
||||||
|
match key_type {
|
||||||
|
// UNSPECIFIED treated as Ed25519 for backward compatibility
|
||||||
|
ProtoKeyType::Unspecified | ProtoKeyType::Ed25519 => {
|
||||||
|
let pubkey_bytes = pubkey.as_array().ok_or(Error::InvalidClientPubkeyLength)?;
|
||||||
|
let key = ed25519_dalek::VerifyingKey::from_bytes(pubkey_bytes)
|
||||||
|
.map_err(|_| Error::InvalidAuthPubkeyEncoding)?;
|
||||||
|
Ok(AuthPublicKey::Ed25519(key))
|
||||||
|
}
|
||||||
|
ProtoKeyType::EcdsaSecp256k1 => {
|
||||||
|
// Public key is sent as 33-byte SEC1 compressed point
|
||||||
|
let key = k256::ecdsa::VerifyingKey::from_sec1_bytes(&pubkey)
|
||||||
|
.map_err(|_| Error::InvalidAuthPubkeyEncoding)?;
|
||||||
|
Ok(AuthPublicKey::EcdsaSecp256k1(key))
|
||||||
|
}
|
||||||
|
ProtoKeyType::Rsa => {
|
||||||
|
use rsa::pkcs8::DecodePublicKey as _;
|
||||||
|
let key = rsa::RsaPublicKey::from_public_key_der(&pubkey)
|
||||||
|
.map_err(|_| Error::InvalidAuthPubkeyEncoding)?;
|
||||||
|
Ok(AuthPublicKey::Rsa(key))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_auth_event(payload: UserAgentRequestPayload) -> Result<AuthEvents, Error> {
|
||||||
|
match payload {
|
||||||
|
UserAgentRequestPayload::AuthChallengeRequest(AuthChallengeRequest {
|
||||||
|
pubkey,
|
||||||
|
bootstrap_token: None,
|
||||||
|
key_type,
|
||||||
|
}) => {
|
||||||
|
let kt = ProtoKeyType::try_from(key_type).unwrap_or(ProtoKeyType::Unspecified);
|
||||||
|
Ok(AuthEvents::AuthRequest(ChallengeRequest {
|
||||||
|
pubkey: parse_pubkey(kt, pubkey)?,
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
UserAgentRequestPayload::AuthChallengeRequest(AuthChallengeRequest {
|
||||||
|
pubkey,
|
||||||
|
bootstrap_token: Some(token),
|
||||||
|
key_type,
|
||||||
|
}) => {
|
||||||
|
let kt = ProtoKeyType::try_from(key_type).unwrap_or(ProtoKeyType::Unspecified);
|
||||||
|
Ok(AuthEvents::BootstrapAuthRequest(BootstrapAuthRequest {
|
||||||
|
pubkey: parse_pubkey(kt, pubkey)?,
|
||||||
|
token,
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
UserAgentRequestPayload::AuthChallengeSolution(AuthChallengeSolution { signature }) => {
|
||||||
|
Ok(AuthEvents::ReceivedSolution(ChallengeSolution {
|
||||||
|
solution: signature,
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
_ => Err(Error::UnexpectedMessagePayload),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn authenticate(props: &mut UserAgentConnection) -> Result<AuthPublicKey, Error> {
|
||||||
|
let mut state = AuthStateMachine::new(AuthContext::new(props));
|
||||||
|
|
||||||
|
loop {
|
||||||
|
// `state` holds a mutable reference to `props` so we can't access it directly here
|
||||||
|
let transport = state.context_mut().conn.transport.as_mut();
|
||||||
|
let Some(UserAgentRequest {
|
||||||
|
payload: Some(payload),
|
||||||
|
}) = transport.recv().await
|
||||||
|
else {
|
||||||
|
return Err(Error::Transport);
|
||||||
|
};
|
||||||
|
|
||||||
|
let event = parse_auth_event(payload)?;
|
||||||
|
|
||||||
|
match state.process_event(event).await {
|
||||||
|
Ok(AuthStates::AuthOk(key)) => return Ok(key.clone()),
|
||||||
|
Err(AuthError::ActionFailed(err)) => {
|
||||||
|
error!(?err, "State machine action failed");
|
||||||
|
return Err(err);
|
||||||
|
}
|
||||||
|
Err(AuthError::GuardFailed(err)) => {
|
||||||
|
error!(?err, "State machine guard failed");
|
||||||
|
return Err(err);
|
||||||
|
}
|
||||||
|
Err(AuthError::InvalidEvent) => {
|
||||||
|
error!("Invalid event for current state");
|
||||||
|
return Err(Error::InvalidChallengeSolution);
|
||||||
|
}
|
||||||
|
Err(AuthError::TransitionsFailed) => {
|
||||||
|
error!("Invalid state transition");
|
||||||
|
return Err(Error::InvalidChallengeSolution);
|
||||||
|
}
|
||||||
|
|
||||||
|
_ => (),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn authenticate_and_create(
|
||||||
|
mut props: UserAgentConnection,
|
||||||
|
) -> Result<UserAgentSession, Error> {
|
||||||
|
let _key = authenticate(&mut props).await?;
|
||||||
|
let session = UserAgentSession::new(props);
|
||||||
|
Ok(session)
|
||||||
|
}
|
||||||
297
server/crates/arbiter-server/src/actors/user_agent/auth/state.rs
Normal file
297
server/crates/arbiter-server/src/actors/user_agent/auth/state.rs
Normal file
@@ -0,0 +1,297 @@
|
|||||||
|
use arbiter_proto::proto::user_agent::{
|
||||||
|
AuthChallenge, UserAgentResponse, user_agent_response::Payload as UserAgentResponsePayload,
|
||||||
|
};
|
||||||
|
use diesel::{ExpressionMethods as _, OptionalExtension as _, QueryDsl, update};
|
||||||
|
use diesel_async::RunQueryDsl;
|
||||||
|
use tracing::error;
|
||||||
|
|
||||||
|
use super::Error;
|
||||||
|
use crate::{
|
||||||
|
actors::{bootstrap::ConsumeToken, user_agent::UserAgentConnection},
|
||||||
|
db::{models::KeyType, schema},
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Abstraction over Ed25519 / ECDSA-secp256k1 / RSA public keys used during the auth handshake.
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub enum AuthPublicKey {
|
||||||
|
Ed25519(ed25519_dalek::VerifyingKey),
|
||||||
|
/// Compressed SEC1 public key; signature bytes are raw 64-byte (r||s).
|
||||||
|
EcdsaSecp256k1(k256::ecdsa::VerifyingKey),
|
||||||
|
/// RSA-2048+ public key (Windows Hello / KeyCredentialManager); signature bytes are PSS+SHA-256.
|
||||||
|
Rsa(rsa::RsaPublicKey),
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AuthPublicKey {
|
||||||
|
/// Canonical bytes stored in DB and echoed back in the challenge.
|
||||||
|
/// Ed25519: raw 32 bytes. ECDSA: SEC1 compressed 33 bytes. RSA: DER-encoded SPKI.
|
||||||
|
pub fn to_stored_bytes(&self) -> Vec<u8> {
|
||||||
|
match self {
|
||||||
|
AuthPublicKey::Ed25519(k) => k.to_bytes().to_vec(),
|
||||||
|
// SEC1 compressed (33 bytes) is the natural compact format for secp256k1
|
||||||
|
AuthPublicKey::EcdsaSecp256k1(k) => k.to_encoded_point(true).as_bytes().to_vec(),
|
||||||
|
AuthPublicKey::Rsa(k) => {
|
||||||
|
use rsa::pkcs8::EncodePublicKey as _;
|
||||||
|
k.to_public_key_der()
|
||||||
|
.expect("rsa SPKI encoding is infallible")
|
||||||
|
.to_vec()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn key_type(&self) -> KeyType {
|
||||||
|
match self {
|
||||||
|
AuthPublicKey::Ed25519(_) => KeyType::Ed25519,
|
||||||
|
AuthPublicKey::EcdsaSecp256k1(_) => KeyType::EcdsaSecp256k1,
|
||||||
|
AuthPublicKey::Rsa(_) => KeyType::Rsa,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct ChallengeRequest {
|
||||||
|
pub pubkey: AuthPublicKey,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct BootstrapAuthRequest {
|
||||||
|
pub pubkey: AuthPublicKey,
|
||||||
|
pub token: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct ChallengeContext {
|
||||||
|
pub challenge: AuthChallenge,
|
||||||
|
pub key: AuthPublicKey,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct ChallengeSolution {
|
||||||
|
pub solution: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
smlang::statemachine!(
|
||||||
|
name: Auth,
|
||||||
|
custom_error: true,
|
||||||
|
transitions: {
|
||||||
|
*Init + AuthRequest(ChallengeRequest) / async prepare_challenge = SentChallenge(ChallengeContext),
|
||||||
|
Init + BootstrapAuthRequest(BootstrapAuthRequest) [async verify_bootstrap_token] / provide_key_bootstrap = AuthOk(AuthPublicKey),
|
||||||
|
SentChallenge(ChallengeContext) + ReceivedSolution(ChallengeSolution) [async verify_solution] / provide_key = AuthOk(AuthPublicKey),
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
|
async fn create_nonce(db: &crate::db::DatabasePool, pubkey_bytes: &[u8]) -> Result<i32, Error> {
|
||||||
|
let mut db_conn = db.get().await.map_err(|e| {
|
||||||
|
error!(error = ?e, "Database pool error");
|
||||||
|
Error::DatabasePoolUnavailable
|
||||||
|
})?;
|
||||||
|
db_conn
|
||||||
|
.exclusive_transaction(|conn| {
|
||||||
|
Box::pin(async move {
|
||||||
|
let current_nonce = schema::useragent_client::table
|
||||||
|
.filter(schema::useragent_client::public_key.eq(pubkey_bytes.to_vec()))
|
||||||
|
.select(schema::useragent_client::nonce)
|
||||||
|
.first::<i32>(conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
update(schema::useragent_client::table)
|
||||||
|
.filter(schema::useragent_client::public_key.eq(pubkey_bytes.to_vec()))
|
||||||
|
.set(schema::useragent_client::nonce.eq(current_nonce + 1))
|
||||||
|
.execute(conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Result::<_, diesel::result::Error>::Ok(current_nonce)
|
||||||
|
})
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.optional()
|
||||||
|
.map_err(|e| {
|
||||||
|
error!(error = ?e, "Database error");
|
||||||
|
Error::DatabaseOperationFailed
|
||||||
|
})?
|
||||||
|
.ok_or_else(|| {
|
||||||
|
error!(?pubkey_bytes, "Public key not found in database");
|
||||||
|
Error::PublicKeyNotRegistered
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn register_key(db: &crate::db::DatabasePool, pubkey: &AuthPublicKey) -> Result<(), Error> {
|
||||||
|
let pubkey_bytes = pubkey.to_stored_bytes();
|
||||||
|
let key_type = pubkey.key_type();
|
||||||
|
let mut conn = db.get().await.map_err(|e| {
|
||||||
|
error!(error = ?e, "Database pool error");
|
||||||
|
Error::DatabasePoolUnavailable
|
||||||
|
})?;
|
||||||
|
|
||||||
|
diesel::insert_into(schema::useragent_client::table)
|
||||||
|
.values((
|
||||||
|
schema::useragent_client::public_key.eq(pubkey_bytes),
|
||||||
|
schema::useragent_client::nonce.eq(1),
|
||||||
|
schema::useragent_client::key_type.eq(key_type),
|
||||||
|
))
|
||||||
|
.execute(&mut conn)
|
||||||
|
.await
|
||||||
|
.map_err(|e| {
|
||||||
|
error!(error = ?e, "Database error");
|
||||||
|
Error::DatabaseOperationFailed
|
||||||
|
})?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct AuthContext<'a> {
|
||||||
|
pub(super) conn: &'a mut UserAgentConnection,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<'a> AuthContext<'a> {
|
||||||
|
pub fn new(conn: &'a mut UserAgentConnection) -> Self {
|
||||||
|
Self { conn }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AuthStateMachineContext for AuthContext<'_> {
|
||||||
|
type Error = Error;
|
||||||
|
|
||||||
|
async fn verify_solution(
|
||||||
|
&self,
|
||||||
|
ChallengeContext { challenge, key }: &ChallengeContext,
|
||||||
|
ChallengeSolution { solution }: &ChallengeSolution,
|
||||||
|
) -> Result<bool, Self::Error> {
|
||||||
|
let formatted = arbiter_proto::format_challenge(challenge.nonce, &challenge.pubkey);
|
||||||
|
|
||||||
|
let valid = match key {
|
||||||
|
AuthPublicKey::Ed25519(vk) => {
|
||||||
|
let sig = solution.as_slice().try_into().map_err(|_| {
|
||||||
|
error!(?solution, "Invalid Ed25519 signature length");
|
||||||
|
Error::InvalidChallengeSolution
|
||||||
|
})?;
|
||||||
|
vk.verify_strict(&formatted, &sig).is_ok()
|
||||||
|
}
|
||||||
|
AuthPublicKey::EcdsaSecp256k1(vk) => {
|
||||||
|
use k256::ecdsa::signature::Verifier as _;
|
||||||
|
let sig = k256::ecdsa::Signature::try_from(solution.as_slice()).map_err(|_| {
|
||||||
|
error!(?solution, "Invalid ECDSA signature bytes");
|
||||||
|
Error::InvalidChallengeSolution
|
||||||
|
})?;
|
||||||
|
vk.verify(&formatted, &sig).is_ok()
|
||||||
|
}
|
||||||
|
AuthPublicKey::Rsa(pk) => {
|
||||||
|
use rsa::signature::Verifier as _;
|
||||||
|
let verifying_key = rsa::pss::VerifyingKey::<sha2::Sha256>::new(pk.clone());
|
||||||
|
let sig = rsa::pss::Signature::try_from(solution.as_slice()).map_err(|_| {
|
||||||
|
error!(?solution, "Invalid RSA signature bytes");
|
||||||
|
Error::InvalidChallengeSolution
|
||||||
|
})?;
|
||||||
|
verifying_key.verify(&formatted, &sig).is_ok()
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(valid)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn prepare_challenge(
|
||||||
|
&mut self,
|
||||||
|
ChallengeRequest { pubkey }: ChallengeRequest,
|
||||||
|
) -> Result<ChallengeContext, Self::Error> {
|
||||||
|
let stored_bytes = pubkey.to_stored_bytes();
|
||||||
|
let nonce = create_nonce(&self.conn.db, &stored_bytes).await?;
|
||||||
|
|
||||||
|
let challenge = AuthChallenge {
|
||||||
|
pubkey: stored_bytes,
|
||||||
|
nonce,
|
||||||
|
};
|
||||||
|
|
||||||
|
self.conn
|
||||||
|
.transport
|
||||||
|
.send(Ok(UserAgentResponse {
|
||||||
|
payload: Some(UserAgentResponsePayload::AuthChallenge(challenge.clone())),
|
||||||
|
}))
|
||||||
|
.await
|
||||||
|
.map_err(|e| {
|
||||||
|
error!(?e, "Failed to send auth challenge");
|
||||||
|
Error::Transport
|
||||||
|
})?;
|
||||||
|
|
||||||
|
Ok(ChallengeContext {
|
||||||
|
challenge,
|
||||||
|
key: pubkey,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
#[allow(missing_docs)]
|
||||||
|
#[allow(clippy::result_unit_err)]
|
||||||
|
async fn verify_bootstrap_token(
|
||||||
|
&self,
|
||||||
|
BootstrapAuthRequest { pubkey, token }: &BootstrapAuthRequest,
|
||||||
|
) -> Result<bool, Self::Error> {
|
||||||
|
let token_ok: bool = self
|
||||||
|
.conn
|
||||||
|
.actors
|
||||||
|
.bootstrapper
|
||||||
|
.ask(ConsumeToken {
|
||||||
|
token: token.clone(),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.map_err(|e| {
|
||||||
|
error!(?e, "Failed to consume bootstrap token");
|
||||||
|
Error::BootstrapperActorUnreachable
|
||||||
|
})?;
|
||||||
|
|
||||||
|
if !token_ok {
|
||||||
|
error!("Invalid bootstrap token provided");
|
||||||
|
return Err(Error::InvalidBootstrapToken);
|
||||||
|
}
|
||||||
|
|
||||||
|
register_key(&self.conn.db, pubkey).await?;
|
||||||
|
|
||||||
|
Ok(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn provide_key_bootstrap(
|
||||||
|
&mut self,
|
||||||
|
event_data: BootstrapAuthRequest,
|
||||||
|
) -> Result<AuthPublicKey, Self::Error> {
|
||||||
|
Ok(event_data.pubkey)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn provide_key(
|
||||||
|
&mut self,
|
||||||
|
state_data: &ChallengeContext,
|
||||||
|
_: ChallengeSolution,
|
||||||
|
) -> Result<AuthPublicKey, Self::Error> {
|
||||||
|
// ChallengeContext.key cannot be taken by value because smlang passes it by ref;
|
||||||
|
// we reconstruct stored bytes and return them wrapped in Ed25519 placeholder.
|
||||||
|
// Session uses only the raw bytes, so we carry them via a Vec<u8>.
|
||||||
|
// IMPORTANT: do NOT simplify this by storing the key type separately — the
|
||||||
|
// `AuthPublicKey` enum IS the source of truth for key bytes and type.
|
||||||
|
//
|
||||||
|
// smlang state-machine trait requires returning an owned value from `provide_key`,
|
||||||
|
// but `state_data` is only available by shared reference here. We extract the
|
||||||
|
// stored bytes and re-wrap as the correct variant so the caller can call
|
||||||
|
// `to_stored_bytes()` / `key_type()` without losing information.
|
||||||
|
let bytes = state_data.challenge.pubkey.clone();
|
||||||
|
let key_type = state_data.key.key_type();
|
||||||
|
let rebuilt = match key_type {
|
||||||
|
crate::db::models::KeyType::Ed25519 => {
|
||||||
|
let arr: &[u8; 32] = bytes
|
||||||
|
.as_slice()
|
||||||
|
.try_into()
|
||||||
|
.expect("ed25519 pubkey must be 32 bytes in challenge");
|
||||||
|
AuthPublicKey::Ed25519(
|
||||||
|
ed25519_dalek::VerifyingKey::from_bytes(arr)
|
||||||
|
.expect("key was already validated in parse_auth_event"),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
crate::db::models::KeyType::EcdsaSecp256k1 => {
|
||||||
|
// bytes are SEC1 compressed (33 bytes produced by to_encoded_point(true))
|
||||||
|
AuthPublicKey::EcdsaSecp256k1(
|
||||||
|
k256::ecdsa::VerifyingKey::from_sec1_bytes(&bytes)
|
||||||
|
.expect("ecdsa key was already validated in parse_auth_event"),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
crate::db::models::KeyType::Rsa => {
|
||||||
|
use rsa::pkcs8::DecodePublicKey as _;
|
||||||
|
AuthPublicKey::Rsa(
|
||||||
|
rsa::RsaPublicKey::from_public_key_der(&bytes)
|
||||||
|
.expect("rsa key was already validated in parse_auth_event"),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
};
|
||||||
|
Ok(rebuilt)
|
||||||
|
}
|
||||||
|
}
|
||||||
65
server/crates/arbiter-server/src/actors/user_agent/mod.rs
Normal file
65
server/crates/arbiter-server/src/actors/user_agent/mod.rs
Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
use arbiter_proto::{
|
||||||
|
proto::user_agent::{UserAgentRequest, UserAgentResponse},
|
||||||
|
transport::Bi,
|
||||||
|
};
|
||||||
|
use kameo::actor::Spawn as _;
|
||||||
|
use tracing::{error, info};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
actors::{GlobalActors, user_agent::session::UserAgentSession},
|
||||||
|
db::{self},
|
||||||
|
};
|
||||||
|
|
||||||
|
#[derive(Debug, thiserror::Error, PartialEq)]
|
||||||
|
pub enum TransportResponseError {
|
||||||
|
#[error("Expected message with payload")]
|
||||||
|
MissingRequestPayload,
|
||||||
|
#[error("Unexpected request payload")]
|
||||||
|
UnexpectedRequestPayload,
|
||||||
|
#[error("Invalid state for unseal encrypted key")]
|
||||||
|
InvalidStateForUnsealEncryptedKey,
|
||||||
|
#[error("client_pubkey must be 32 bytes")]
|
||||||
|
InvalidClientPubkeyLength,
|
||||||
|
#[error("State machine error")]
|
||||||
|
StateTransitionFailed,
|
||||||
|
#[error("Vault is not available")]
|
||||||
|
KeyHolderActorUnreachable,
|
||||||
|
#[error(transparent)]
|
||||||
|
Auth(#[from] auth::Error),
|
||||||
|
#[error("Failed registering connection")]
|
||||||
|
ConnectionRegistrationFailed,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub type Transport =
|
||||||
|
Box<dyn Bi<UserAgentRequest, Result<UserAgentResponse, TransportResponseError>> + Send>;
|
||||||
|
|
||||||
|
pub struct UserAgentConnection {
|
||||||
|
db: db::DatabasePool,
|
||||||
|
actors: GlobalActors,
|
||||||
|
transport: Transport,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl UserAgentConnection {
|
||||||
|
pub fn new(db: db::DatabasePool, actors: GlobalActors, transport: Transport) -> Self {
|
||||||
|
Self {
|
||||||
|
db,
|
||||||
|
actors,
|
||||||
|
transport,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub mod auth;
|
||||||
|
pub mod session;
|
||||||
|
|
||||||
|
pub async fn connect_user_agent(props: UserAgentConnection) {
|
||||||
|
match auth::authenticate_and_create(props).await {
|
||||||
|
Ok(session) => {
|
||||||
|
UserAgentSession::spawn(session);
|
||||||
|
info!("User authenticated, session started");
|
||||||
|
}
|
||||||
|
Err(err) => {
|
||||||
|
error!(?err, "Authentication failed, closing connection");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
587
server/crates/arbiter-server/src/actors/user_agent/session.rs
Normal file
587
server/crates/arbiter-server/src/actors/user_agent/session.rs
Normal file
@@ -0,0 +1,587 @@
|
|||||||
|
use std::{ops::DerefMut, sync::Mutex};
|
||||||
|
|
||||||
|
use arbiter_proto::proto::{
|
||||||
|
evm as evm_proto,
|
||||||
|
user_agent::{
|
||||||
|
SdkClientApproveRequest, SdkClientApproveResponse, SdkClientEntry,
|
||||||
|
SdkClientError as ProtoSdkClientError, SdkClientList, SdkClientListResponse,
|
||||||
|
SdkClientRevokeRequest, SdkClientRevokeResponse, UnsealEncryptedKey, UnsealResult,
|
||||||
|
UnsealStart, UnsealStartResponse, UserAgentRequest, UserAgentResponse,
|
||||||
|
sdk_client_approve_response, sdk_client_list_response, sdk_client_revoke_response,
|
||||||
|
user_agent_request::Payload as UserAgentRequestPayload,
|
||||||
|
user_agent_response::Payload as UserAgentResponsePayload,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
use chacha20poly1305::{AeadInPlace, XChaCha20Poly1305, XNonce, aead::KeyInit};
|
||||||
|
use diesel::{ExpressionMethods as _, QueryDsl as _, dsl::insert_into};
|
||||||
|
use diesel_async::RunQueryDsl as _;
|
||||||
|
use kameo::{Actor, error::SendError, prelude::Context};
|
||||||
|
use memsafe::MemSafe;
|
||||||
|
use tokio::select;
|
||||||
|
use tracing::{error, info};
|
||||||
|
use x25519_dalek::{EphemeralSecret, PublicKey};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
actors::{
|
||||||
|
evm::{Generate, ListWallets},
|
||||||
|
keyholder::{self, TryUnseal},
|
||||||
|
router::RegisterUserAgent,
|
||||||
|
user_agent::{TransportResponseError, UserAgentConnection},
|
||||||
|
},
|
||||||
|
db::schema::program_client,
|
||||||
|
};
|
||||||
|
|
||||||
|
mod state;
|
||||||
|
use state::{DummyContext, UnsealContext, UserAgentEvents, UserAgentStateMachine, UserAgentStates};
|
||||||
|
|
||||||
|
// Error for consumption by other actors
|
||||||
|
#[derive(Debug, thiserror::Error, PartialEq)]
|
||||||
|
pub enum Error {
|
||||||
|
#[error("User agent session ended due to connection loss")]
|
||||||
|
ConnectionLost,
|
||||||
|
|
||||||
|
#[error("User agent session ended due to unexpected message")]
|
||||||
|
UnexpectedMessage,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct UserAgentSession {
|
||||||
|
props: UserAgentConnection,
|
||||||
|
state: UserAgentStateMachine<DummyContext>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl UserAgentSession {
|
||||||
|
pub(crate) fn new(props: UserAgentConnection) -> Self {
|
||||||
|
Self {
|
||||||
|
props,
|
||||||
|
state: UserAgentStateMachine::new(DummyContext),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn transition(&mut self, event: UserAgentEvents) -> Result<(), TransportResponseError> {
|
||||||
|
self.state.process_event(event).map_err(|e| {
|
||||||
|
error!(?e, "State transition failed");
|
||||||
|
TransportResponseError::StateTransitionFailed
|
||||||
|
})?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn send_msg<Reply: kameo::Reply>(
|
||||||
|
&mut self,
|
||||||
|
msg: UserAgentResponsePayload,
|
||||||
|
_ctx: &mut Context<Self, Reply>,
|
||||||
|
) -> Result<(), Error> {
|
||||||
|
self.props
|
||||||
|
.transport
|
||||||
|
.send(Ok(response(msg)))
|
||||||
|
.await
|
||||||
|
.map_err(|_| {
|
||||||
|
error!(
|
||||||
|
actor = "useragent",
|
||||||
|
reason = "channel closed",
|
||||||
|
"send.failed"
|
||||||
|
);
|
||||||
|
Error::ConnectionLost
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn expect_msg<Extractor, Msg, Reply>(
|
||||||
|
&mut self,
|
||||||
|
extractor: Extractor,
|
||||||
|
ctx: &mut Context<Self, Reply>,
|
||||||
|
) -> Result<Msg, Error>
|
||||||
|
where
|
||||||
|
Extractor: FnOnce(UserAgentRequestPayload) -> Option<Msg>,
|
||||||
|
Reply: kameo::Reply,
|
||||||
|
{
|
||||||
|
let msg = self.props.transport.recv().await.ok_or_else(|| {
|
||||||
|
error!(
|
||||||
|
actor = "useragent",
|
||||||
|
reason = "channel closed",
|
||||||
|
"recv.failed"
|
||||||
|
);
|
||||||
|
ctx.stop();
|
||||||
|
Error::ConnectionLost
|
||||||
|
})?;
|
||||||
|
|
||||||
|
msg.payload.and_then(extractor).ok_or_else(|| {
|
||||||
|
error!(
|
||||||
|
actor = "useragent",
|
||||||
|
reason = "unexpected message",
|
||||||
|
"recv.failed"
|
||||||
|
);
|
||||||
|
ctx.stop();
|
||||||
|
Error::UnexpectedMessage
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl UserAgentSession {
|
||||||
|
pub async fn process_transport_inbound(&mut self, req: UserAgentRequest) -> Output {
|
||||||
|
let msg = req.payload.ok_or_else(|| {
|
||||||
|
error!(actor = "useragent", "Received message with no payload");
|
||||||
|
TransportResponseError::MissingRequestPayload
|
||||||
|
})?;
|
||||||
|
|
||||||
|
match msg {
|
||||||
|
UserAgentRequestPayload::UnsealStart(unseal_start) => {
|
||||||
|
self.handle_unseal_request(unseal_start).await
|
||||||
|
}
|
||||||
|
UserAgentRequestPayload::UnsealEncryptedKey(unseal_encrypted_key) => {
|
||||||
|
self.handle_unseal_encrypted_key(unseal_encrypted_key).await
|
||||||
|
}
|
||||||
|
UserAgentRequestPayload::EvmWalletCreate(_) => self.handle_evm_wallet_create().await,
|
||||||
|
UserAgentRequestPayload::EvmWalletList(_) => self.handle_evm_wallet_list().await,
|
||||||
|
UserAgentRequestPayload::SdkClientApprove(req) => {
|
||||||
|
self.handle_sdk_client_approve(req).await
|
||||||
|
}
|
||||||
|
UserAgentRequestPayload::SdkClientRevoke(req) => {
|
||||||
|
self.handle_sdk_client_revoke(req).await
|
||||||
|
}
|
||||||
|
UserAgentRequestPayload::SdkClientList(_) => self.handle_sdk_client_list().await,
|
||||||
|
_ => Err(TransportResponseError::UnexpectedRequestPayload),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
type Output = Result<UserAgentResponse, TransportResponseError>;
|
||||||
|
|
||||||
|
fn response(payload: UserAgentResponsePayload) -> UserAgentResponse {
|
||||||
|
UserAgentResponse {
|
||||||
|
payload: Some(payload),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl UserAgentSession {
|
||||||
|
async fn handle_unseal_request(&mut self, req: UnsealStart) -> Output {
|
||||||
|
let secret = EphemeralSecret::random();
|
||||||
|
let public_key = PublicKey::from(&secret);
|
||||||
|
|
||||||
|
let client_pubkey_bytes: [u8; 32] = req
|
||||||
|
.client_pubkey
|
||||||
|
.try_into()
|
||||||
|
.map_err(|_| TransportResponseError::InvalidClientPubkeyLength)?;
|
||||||
|
|
||||||
|
let client_public_key = PublicKey::from(client_pubkey_bytes);
|
||||||
|
|
||||||
|
self.transition(UserAgentEvents::UnsealRequest(UnsealContext {
|
||||||
|
secret: Mutex::new(Some(secret)),
|
||||||
|
client_public_key,
|
||||||
|
}))?;
|
||||||
|
|
||||||
|
Ok(response(UserAgentResponsePayload::UnsealStartResponse(
|
||||||
|
UnsealStartResponse {
|
||||||
|
server_pubkey: public_key.as_bytes().to_vec(),
|
||||||
|
},
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_unseal_encrypted_key(&mut self, req: UnsealEncryptedKey) -> Output {
|
||||||
|
let UserAgentStates::WaitingForUnsealKey(unseal_context) = self.state.state() else {
|
||||||
|
error!("Received unseal encrypted key in invalid state");
|
||||||
|
return Err(TransportResponseError::InvalidStateForUnsealEncryptedKey);
|
||||||
|
};
|
||||||
|
let ephemeral_secret = {
|
||||||
|
let mut secret_lock = unseal_context.secret.lock().unwrap();
|
||||||
|
let secret = secret_lock.take();
|
||||||
|
match secret {
|
||||||
|
Some(secret) => secret,
|
||||||
|
None => {
|
||||||
|
drop(secret_lock);
|
||||||
|
error!("Ephemeral secret already taken");
|
||||||
|
self.transition(UserAgentEvents::ReceivedInvalidKey)?;
|
||||||
|
return Ok(response(UserAgentResponsePayload::UnsealResult(
|
||||||
|
UnsealResult::InvalidKey.into(),
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let nonce = XNonce::from_slice(&req.nonce);
|
||||||
|
|
||||||
|
let shared_secret = ephemeral_secret.diffie_hellman(&unseal_context.client_public_key);
|
||||||
|
let cipher = XChaCha20Poly1305::new(shared_secret.as_bytes().into());
|
||||||
|
|
||||||
|
let mut seal_key_buffer = MemSafe::new(req.ciphertext.clone()).unwrap();
|
||||||
|
|
||||||
|
let decryption_result = {
|
||||||
|
let mut write_handle = seal_key_buffer.write().unwrap();
|
||||||
|
let write_handle = write_handle.deref_mut();
|
||||||
|
cipher.decrypt_in_place(nonce, &req.associated_data, write_handle)
|
||||||
|
};
|
||||||
|
|
||||||
|
match decryption_result {
|
||||||
|
Ok(_) => {
|
||||||
|
match self
|
||||||
|
.props
|
||||||
|
.actors
|
||||||
|
.key_holder
|
||||||
|
.ask(TryUnseal {
|
||||||
|
seal_key_raw: seal_key_buffer,
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(_) => {
|
||||||
|
info!("Successfully unsealed key with client-provided key");
|
||||||
|
self.transition(UserAgentEvents::ReceivedValidKey)?;
|
||||||
|
Ok(response(UserAgentResponsePayload::UnsealResult(
|
||||||
|
UnsealResult::Success.into(),
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
Err(SendError::HandlerError(keyholder::Error::InvalidKey)) => {
|
||||||
|
self.transition(UserAgentEvents::ReceivedInvalidKey)?;
|
||||||
|
Ok(response(UserAgentResponsePayload::UnsealResult(
|
||||||
|
UnsealResult::InvalidKey.into(),
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
Err(SendError::HandlerError(err)) => {
|
||||||
|
error!(?err, "Keyholder failed to unseal key");
|
||||||
|
self.transition(UserAgentEvents::ReceivedInvalidKey)?;
|
||||||
|
Ok(response(UserAgentResponsePayload::UnsealResult(
|
||||||
|
UnsealResult::InvalidKey.into(),
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
Err(err) => {
|
||||||
|
error!(?err, "Failed to send unseal request to keyholder");
|
||||||
|
self.transition(UserAgentEvents::ReceivedInvalidKey)?;
|
||||||
|
Err(TransportResponseError::KeyHolderActorUnreachable)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(err) => {
|
||||||
|
error!(?err, "Failed to decrypt unseal key");
|
||||||
|
self.transition(UserAgentEvents::ReceivedInvalidKey)?;
|
||||||
|
Ok(response(UserAgentResponsePayload::UnsealResult(
|
||||||
|
UnsealResult::InvalidKey.into(),
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl UserAgentSession {
|
||||||
|
async fn handle_evm_wallet_create(&mut self) -> Output {
|
||||||
|
use evm_proto::wallet_create_response::Result as CreateResult;
|
||||||
|
|
||||||
|
let result = match self.props.actors.evm.ask(Generate {}).await {
|
||||||
|
Ok(address) => CreateResult::Wallet(evm_proto::WalletEntry {
|
||||||
|
address: address.as_slice().to_vec(),
|
||||||
|
}),
|
||||||
|
Err(err) => CreateResult::Error(map_evm_error("wallet create", err).into()),
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(response(UserAgentResponsePayload::EvmWalletCreate(
|
||||||
|
evm_proto::WalletCreateResponse {
|
||||||
|
result: Some(result),
|
||||||
|
},
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_evm_wallet_list(&mut self) -> Output {
|
||||||
|
use evm_proto::wallet_list_response::Result as ListResult;
|
||||||
|
|
||||||
|
let result = match self.props.actors.evm.ask(ListWallets {}).await {
|
||||||
|
Ok(wallets) => ListResult::Wallets(evm_proto::WalletList {
|
||||||
|
wallets: wallets
|
||||||
|
.into_iter()
|
||||||
|
.map(|addr| evm_proto::WalletEntry {
|
||||||
|
address: addr.as_slice().to_vec(),
|
||||||
|
})
|
||||||
|
.collect(),
|
||||||
|
}),
|
||||||
|
Err(err) => ListResult::Error(map_evm_error("wallet list", err).into()),
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(response(UserAgentResponsePayload::EvmWalletList(
|
||||||
|
evm_proto::WalletListResponse {
|
||||||
|
result: Some(result),
|
||||||
|
},
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl UserAgentSession {
|
||||||
|
async fn handle_sdk_client_approve(&mut self, req: SdkClientApproveRequest) -> Output {
|
||||||
|
use sdk_client_approve_response::Result as ApproveResult;
|
||||||
|
|
||||||
|
if req.pubkey.len() != 32 {
|
||||||
|
return Ok(response(UserAgentResponsePayload::SdkClientApprove(
|
||||||
|
SdkClientApproveResponse {
|
||||||
|
result: Some(ApproveResult::Error(ProtoSdkClientError::Internal.into())),
|
||||||
|
},
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
|
||||||
|
let now = std::time::SystemTime::now()
|
||||||
|
.duration_since(std::time::UNIX_EPOCH)
|
||||||
|
.unwrap_or_default()
|
||||||
|
.as_secs() as i32;
|
||||||
|
|
||||||
|
let mut conn = match self.props.db.get().await {
|
||||||
|
Ok(c) => c,
|
||||||
|
Err(e) => {
|
||||||
|
error!(?e, "Failed to get DB connection for sdk_client_approve");
|
||||||
|
return Ok(response(UserAgentResponsePayload::SdkClientApprove(
|
||||||
|
SdkClientApproveResponse {
|
||||||
|
result: Some(ApproveResult::Error(ProtoSdkClientError::Internal.into())),
|
||||||
|
},
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let pubkey_bytes = req.pubkey.clone();
|
||||||
|
let insert_result = insert_into(program_client::table)
|
||||||
|
.values((
|
||||||
|
program_client::public_key.eq(&pubkey_bytes),
|
||||||
|
program_client::nonce.eq(1), // pre-incremented; challenge will use nonce=0
|
||||||
|
program_client::created_at.eq(now),
|
||||||
|
program_client::updated_at.eq(now),
|
||||||
|
))
|
||||||
|
.execute(&mut conn)
|
||||||
|
.await;
|
||||||
|
|
||||||
|
match insert_result {
|
||||||
|
Ok(_) => {
|
||||||
|
match program_client::table
|
||||||
|
.filter(program_client::public_key.eq(&pubkey_bytes))
|
||||||
|
.order(program_client::id.desc())
|
||||||
|
.select((
|
||||||
|
program_client::id,
|
||||||
|
program_client::public_key,
|
||||||
|
program_client::created_at,
|
||||||
|
))
|
||||||
|
.first::<(i32, Vec<u8>, i32)>(&mut conn)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok((id, pubkey, created_at)) => Ok(response(
|
||||||
|
UserAgentResponsePayload::SdkClientApprove(SdkClientApproveResponse {
|
||||||
|
result: Some(ApproveResult::Client(SdkClientEntry {
|
||||||
|
id,
|
||||||
|
pubkey,
|
||||||
|
created_at,
|
||||||
|
})),
|
||||||
|
}),
|
||||||
|
)),
|
||||||
|
Err(e) => {
|
||||||
|
error!(?e, "Failed to fetch inserted SDK client");
|
||||||
|
Ok(response(UserAgentResponsePayload::SdkClientApprove(
|
||||||
|
SdkClientApproveResponse {
|
||||||
|
result: Some(ApproveResult::Error(
|
||||||
|
ProtoSdkClientError::Internal.into(),
|
||||||
|
)),
|
||||||
|
},
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(diesel::result::Error::DatabaseError(
|
||||||
|
diesel::result::DatabaseErrorKind::UniqueViolation,
|
||||||
|
_,
|
||||||
|
)) => Ok(response(UserAgentResponsePayload::SdkClientApprove(
|
||||||
|
SdkClientApproveResponse {
|
||||||
|
result: Some(ApproveResult::Error(
|
||||||
|
ProtoSdkClientError::AlreadyExists.into(),
|
||||||
|
)),
|
||||||
|
},
|
||||||
|
))),
|
||||||
|
Err(e) => {
|
||||||
|
error!(?e, "Failed to insert SDK client");
|
||||||
|
Ok(response(UserAgentResponsePayload::SdkClientApprove(
|
||||||
|
SdkClientApproveResponse {
|
||||||
|
result: Some(ApproveResult::Error(ProtoSdkClientError::Internal.into())),
|
||||||
|
},
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_sdk_client_list(&mut self) -> Output {
|
||||||
|
let mut conn = match self.props.db.get().await {
|
||||||
|
Ok(c) => c,
|
||||||
|
Err(e) => {
|
||||||
|
error!(?e, "Failed to get DB connection for sdk_client_list");
|
||||||
|
return Ok(response(UserAgentResponsePayload::SdkClientList(
|
||||||
|
SdkClientListResponse {
|
||||||
|
result: Some(sdk_client_list_response::Result::Error(
|
||||||
|
ProtoSdkClientError::Internal.into(),
|
||||||
|
)),
|
||||||
|
},
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
match program_client::table
|
||||||
|
.select((
|
||||||
|
program_client::id,
|
||||||
|
program_client::public_key,
|
||||||
|
program_client::created_at,
|
||||||
|
))
|
||||||
|
.load::<(i32, Vec<u8>, i32)>(&mut conn)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(rows) => Ok(response(UserAgentResponsePayload::SdkClientList(
|
||||||
|
SdkClientListResponse {
|
||||||
|
result: Some(sdk_client_list_response::Result::Clients(SdkClientList {
|
||||||
|
clients: rows
|
||||||
|
.into_iter()
|
||||||
|
.map(|(id, pubkey, created_at)| SdkClientEntry {
|
||||||
|
id,
|
||||||
|
pubkey,
|
||||||
|
created_at,
|
||||||
|
})
|
||||||
|
.collect(),
|
||||||
|
})),
|
||||||
|
},
|
||||||
|
))),
|
||||||
|
Err(e) => {
|
||||||
|
error!(?e, "Failed to list SDK clients");
|
||||||
|
Ok(response(UserAgentResponsePayload::SdkClientList(
|
||||||
|
SdkClientListResponse {
|
||||||
|
result: Some(sdk_client_list_response::Result::Error(
|
||||||
|
ProtoSdkClientError::Internal.into(),
|
||||||
|
)),
|
||||||
|
},
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn handle_sdk_client_revoke(&mut self, req: SdkClientRevokeRequest) -> Output {
|
||||||
|
use sdk_client_revoke_response::Result as RevokeResult;
|
||||||
|
|
||||||
|
let mut conn = match self.props.db.get().await {
|
||||||
|
Ok(c) => c,
|
||||||
|
Err(e) => {
|
||||||
|
error!(?e, "Failed to get DB connection for sdk_client_revoke");
|
||||||
|
return Ok(response(UserAgentResponsePayload::SdkClientRevoke(
|
||||||
|
SdkClientRevokeResponse {
|
||||||
|
result: Some(RevokeResult::Error(ProtoSdkClientError::Internal.into())),
|
||||||
|
},
|
||||||
|
)));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
match diesel::delete(program_client::table)
|
||||||
|
.filter(program_client::id.eq(req.client_id))
|
||||||
|
.execute(&mut conn)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(0) => Ok(response(UserAgentResponsePayload::SdkClientRevoke(
|
||||||
|
SdkClientRevokeResponse {
|
||||||
|
result: Some(RevokeResult::Error(ProtoSdkClientError::NotFound.into())),
|
||||||
|
},
|
||||||
|
))),
|
||||||
|
Ok(_) => Ok(response(UserAgentResponsePayload::SdkClientRevoke(
|
||||||
|
SdkClientRevokeResponse {
|
||||||
|
result: Some(RevokeResult::Ok(())),
|
||||||
|
},
|
||||||
|
))),
|
||||||
|
Err(diesel::result::Error::DatabaseError(
|
||||||
|
diesel::result::DatabaseErrorKind::ForeignKeyViolation,
|
||||||
|
_,
|
||||||
|
)) => Ok(response(UserAgentResponsePayload::SdkClientRevoke(
|
||||||
|
SdkClientRevokeResponse {
|
||||||
|
result: Some(RevokeResult::Error(
|
||||||
|
ProtoSdkClientError::HasRelatedData.into(),
|
||||||
|
)),
|
||||||
|
},
|
||||||
|
))),
|
||||||
|
Err(e) => {
|
||||||
|
error!(?e, "Failed to delete SDK client");
|
||||||
|
Ok(response(UserAgentResponsePayload::SdkClientRevoke(
|
||||||
|
SdkClientRevokeResponse {
|
||||||
|
result: Some(RevokeResult::Error(ProtoSdkClientError::Internal.into())),
|
||||||
|
},
|
||||||
|
)))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn map_evm_error<M>(op: &str, err: SendError<M, crate::actors::evm::Error>) -> evm_proto::EvmError {
|
||||||
|
use crate::actors::{evm::Error as EvmError, keyholder::Error as KhError};
|
||||||
|
match err {
|
||||||
|
SendError::HandlerError(EvmError::Keyholder(KhError::NotBootstrapped)) => {
|
||||||
|
evm_proto::EvmError::VaultSealed
|
||||||
|
}
|
||||||
|
SendError::HandlerError(err) => {
|
||||||
|
error!(?err, "EVM {op} failed");
|
||||||
|
evm_proto::EvmError::Internal
|
||||||
|
}
|
||||||
|
_ => {
|
||||||
|
error!("EVM actor unreachable during {op}");
|
||||||
|
evm_proto::EvmError::Internal
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Actor for UserAgentSession {
|
||||||
|
type Args = Self;
|
||||||
|
|
||||||
|
type Error = TransportResponseError;
|
||||||
|
|
||||||
|
async fn on_start(
|
||||||
|
args: Self::Args,
|
||||||
|
this: kameo::prelude::ActorRef<Self>,
|
||||||
|
) -> Result<Self, Self::Error> {
|
||||||
|
args.props
|
||||||
|
.actors
|
||||||
|
.router
|
||||||
|
.ask(RegisterUserAgent {
|
||||||
|
actor: this.clone(),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.map_err(|err| {
|
||||||
|
error!(?err, "Failed to register user agent connection with router");
|
||||||
|
TransportResponseError::ConnectionRegistrationFailed
|
||||||
|
})?;
|
||||||
|
Ok(args)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn next(
|
||||||
|
&mut self,
|
||||||
|
_actor_ref: kameo::prelude::WeakActorRef<Self>,
|
||||||
|
mailbox_rx: &mut kameo::prelude::MailboxReceiver<Self>,
|
||||||
|
) -> Option<kameo::mailbox::Signal<Self>> {
|
||||||
|
loop {
|
||||||
|
select! {
|
||||||
|
signal = mailbox_rx.recv() => {
|
||||||
|
return signal;
|
||||||
|
}
|
||||||
|
msg = self.props.transport.recv() => {
|
||||||
|
match msg {
|
||||||
|
Some(request) => {
|
||||||
|
match self.process_transport_inbound(request).await {
|
||||||
|
Ok(response) => {
|
||||||
|
if self.props.transport.send(Ok(response)).await.is_err() {
|
||||||
|
error!(actor = "useragent", reason = "channel closed", "send.failed");
|
||||||
|
return Some(kameo::mailbox::Signal::Stop);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(err) => {
|
||||||
|
let _ = self.props.transport.send(Err(err)).await;
|
||||||
|
return Some(kameo::mailbox::Signal::Stop);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
None => {
|
||||||
|
info!(actor = "useragent", "transport.closed");
|
||||||
|
return Some(kameo::mailbox::Signal::Stop);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl UserAgentSession {
|
||||||
|
pub fn new_test(db: crate::db::DatabasePool, actors: crate::actors::GlobalActors) -> Self {
|
||||||
|
use arbiter_proto::transport::DummyTransport;
|
||||||
|
let transport: super::Transport = Box::new(DummyTransport::new());
|
||||||
|
let props = UserAgentConnection::new(db, actors, transport);
|
||||||
|
Self {
|
||||||
|
props,
|
||||||
|
state: UserAgentStateMachine::new(DummyContext),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,27 @@
|
|||||||
|
use std::sync::Mutex;
|
||||||
|
|
||||||
|
use x25519_dalek::{EphemeralSecret, PublicKey};
|
||||||
|
|
||||||
|
pub struct UnsealContext {
|
||||||
|
pub client_public_key: PublicKey,
|
||||||
|
pub secret: Mutex<Option<EphemeralSecret>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
smlang::statemachine!(
|
||||||
|
name: UserAgent,
|
||||||
|
custom_error: false,
|
||||||
|
transitions: {
|
||||||
|
*Idle + UnsealRequest(UnsealContext) / generate_temp_keypair = WaitingForUnsealKey(UnsealContext),
|
||||||
|
WaitingForUnsealKey(UnsealContext) + ReceivedValidKey = Unsealed,
|
||||||
|
WaitingForUnsealKey(UnsealContext) + ReceivedInvalidKey = Idle,
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
|
pub struct DummyContext;
|
||||||
|
impl UserAgentStateMachineContext for DummyContext {
|
||||||
|
#[allow(missing_docs)]
|
||||||
|
#[allow(clippy::unused_unit)]
|
||||||
|
fn generate_temp_keypair(&mut self, event_data: UnsealContext) -> Result<UnsealContext, ()> {
|
||||||
|
Ok(event_data)
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,95 +0,0 @@
|
|||||||
use super::UserAgentActor;
|
|
||||||
use arbiter_proto::proto::{
|
|
||||||
UserAgentRequest, UserAgentResponse,
|
|
||||||
auth::{
|
|
||||||
self, AuthChallenge, AuthChallengeRequest, AuthOk, ClientMessage,
|
|
||||||
ServerMessage as AuthServerMessage, client_message::Payload as ClientAuthPayload,
|
|
||||||
server_message::Payload as ServerAuthPayload,
|
|
||||||
},
|
|
||||||
user_agent_request::Payload as UserAgentRequestPayload,
|
|
||||||
user_agent_response::Payload as UserAgentResponsePayload,
|
|
||||||
};
|
|
||||||
use futures::StreamExt;
|
|
||||||
use kameo::{
|
|
||||||
actor::{ActorRef, Spawn as _},
|
|
||||||
error::SendError,
|
|
||||||
};
|
|
||||||
use tokio::sync::mpsc;
|
|
||||||
use tonic::Status;
|
|
||||||
use tracing::error;
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
actors::user_agent::{HandleAuthChallengeRequest, HandleAuthChallengeSolution},
|
|
||||||
context::ServerContext,
|
|
||||||
};
|
|
||||||
|
|
||||||
pub(crate) async fn handle_user_agent(
|
|
||||||
context: ServerContext,
|
|
||||||
mut req_stream: tonic::Streaming<UserAgentRequest>,
|
|
||||||
tx: mpsc::Sender<Result<UserAgentResponse, Status>>,
|
|
||||||
) {
|
|
||||||
let actor = UserAgentActor::spawn(UserAgentActor::new(context, tx.clone()));
|
|
||||||
|
|
||||||
while let Some(Ok(req)) = req_stream.next().await
|
|
||||||
&& actor.is_alive()
|
|
||||||
{
|
|
||||||
match process_message(&actor, req).await {
|
|
||||||
Ok(resp) => {
|
|
||||||
if tx.send(Ok(resp)).await.is_err() {
|
|
||||||
error!(actor = "useragent", "Failed to send response to client");
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(status) => {
|
|
||||||
let _ = tx.send(Err(status)).await;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
actor.kill();
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn process_message(
|
|
||||||
actor: &ActorRef<UserAgentActor>,
|
|
||||||
req: UserAgentRequest,
|
|
||||||
) -> Result<UserAgentResponse, Status> {
|
|
||||||
let msg = req.payload.ok_or_else(|| {
|
|
||||||
error!(actor = "useragent", "Received message with no payload");
|
|
||||||
Status::invalid_argument("Expected message with payload")
|
|
||||||
})?;
|
|
||||||
|
|
||||||
let UserAgentRequestPayload::AuthMessage(ClientMessage {
|
|
||||||
payload: Some(client_message),
|
|
||||||
}) = msg
|
|
||||||
else {
|
|
||||||
error!(
|
|
||||||
actor = "useragent",
|
|
||||||
"Received unexpected message type during authentication"
|
|
||||||
);
|
|
||||||
return Err(Status::invalid_argument(
|
|
||||||
"Expected AuthMessage with ClientMessage payload",
|
|
||||||
));
|
|
||||||
};
|
|
||||||
|
|
||||||
match client_message {
|
|
||||||
ClientAuthPayload::AuthChallengeRequest(req) => actor
|
|
||||||
.ask(HandleAuthChallengeRequest { req })
|
|
||||||
.await
|
|
||||||
.map_err(into_status),
|
|
||||||
ClientAuthPayload::AuthChallengeSolution(solution) => actor
|
|
||||||
.ask(HandleAuthChallengeSolution { solution })
|
|
||||||
.await
|
|
||||||
.map_err(into_status),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn into_status<M>(e: SendError<M, Status>) -> Status {
|
|
||||||
match e {
|
|
||||||
SendError::HandlerError(status) => status,
|
|
||||||
_ => {
|
|
||||||
error!(actor = "useragent", "Failed to send message to actor");
|
|
||||||
Status::internal("session failure")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,162 +0,0 @@
|
|||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use diesel::OptionalExtension as _;
|
|
||||||
use diesel_async::RunQueryDsl as _;
|
|
||||||
use ed25519_dalek::VerifyingKey;
|
|
||||||
use kameo::actor::{ActorRef, Spawn};
|
|
||||||
use miette::Diagnostic;
|
|
||||||
use rand::rngs::StdRng;
|
|
||||||
use smlang::statemachine;
|
|
||||||
use thiserror::Error;
|
|
||||||
use tokio::sync::RwLock;
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
context::{
|
|
||||||
bootstrap::{BootstrapActor, generate_token},
|
|
||||||
lease::LeaseHandler,
|
|
||||||
tls::{TlsDataRaw, TlsManager},
|
|
||||||
},
|
|
||||||
db::{
|
|
||||||
self,
|
|
||||||
models::ArbiterSetting,
|
|
||||||
schema::{self, arbiter_settings},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
pub(crate) mod bootstrap;
|
|
||||||
pub(crate) mod lease;
|
|
||||||
pub(crate) mod tls;
|
|
||||||
|
|
||||||
#[derive(Error, Debug, Diagnostic)]
|
|
||||||
pub enum InitError {
|
|
||||||
#[error("Database setup failed: {0}")]
|
|
||||||
#[diagnostic(code(arbiter_server::init::database_setup))]
|
|
||||||
DatabaseSetup(#[from] db::DatabaseSetupError),
|
|
||||||
|
|
||||||
#[error("Connection acquire failed: {0}")]
|
|
||||||
#[diagnostic(code(arbiter_server::init::database_pool))]
|
|
||||||
DatabasePool(#[from] db::PoolError),
|
|
||||||
|
|
||||||
#[error("Database query error: {0}")]
|
|
||||||
#[diagnostic(code(arbiter_server::init::database_query))]
|
|
||||||
DatabaseQuery(#[from] diesel::result::Error),
|
|
||||||
|
|
||||||
#[error("TLS initialization failed: {0}")]
|
|
||||||
#[diagnostic(code(arbiter_server::init::tls_init))]
|
|
||||||
Tls(#[from] tls::TlsInitError),
|
|
||||||
|
|
||||||
#[error("Bootstrap token generation failed: {0}")]
|
|
||||||
#[diagnostic(code(arbiter_server::init::bootstrap_token))]
|
|
||||||
BootstrapToken(#[from] bootstrap::BootstrapError),
|
|
||||||
|
|
||||||
#[error("I/O Error: {0}")]
|
|
||||||
#[diagnostic(code(arbiter_server::init::io))]
|
|
||||||
Io(#[from] std::io::Error),
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: Placeholder for secure root key cell implementation
|
|
||||||
pub struct KeyStorage;
|
|
||||||
|
|
||||||
statemachine! {
|
|
||||||
name: Server,
|
|
||||||
transitions: {
|
|
||||||
*NotBootstrapped + Bootstrapped = Sealed,
|
|
||||||
Sealed + Unsealed(KeyStorage) / move_key = Ready(KeyStorage),
|
|
||||||
Ready(KeyStorage) + Sealed / dispose_key = Sealed,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pub struct _Context;
|
|
||||||
impl ServerStateMachineContext for _Context {
|
|
||||||
fn move_key(&mut self, _event_data: KeyStorage) -> Result<KeyStorage, ()> {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
|
|
||||||
#[allow(missing_docs)]
|
|
||||||
#[allow(clippy::unused_unit)]
|
|
||||||
fn dispose_key(&mut self, _state_data: &KeyStorage) -> Result<(), ()> {
|
|
||||||
todo!()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) struct _ServerContextInner {
|
|
||||||
pub db: db::DatabasePool,
|
|
||||||
pub state: RwLock<ServerStateMachine<_Context>>,
|
|
||||||
pub rng: StdRng,
|
|
||||||
pub tls: TlsManager,
|
|
||||||
pub bootstrapper: ActorRef<BootstrapActor>,
|
|
||||||
}
|
|
||||||
#[derive(Clone)]
|
|
||||||
pub(crate) struct ServerContext(Arc<_ServerContextInner>);
|
|
||||||
|
|
||||||
impl std::ops::Deref for ServerContext {
|
|
||||||
type Target = _ServerContextInner;
|
|
||||||
|
|
||||||
fn deref(&self) -> &Self::Target {
|
|
||||||
&self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ServerContext {
|
|
||||||
async fn load_tls(
|
|
||||||
db: &mut db::DatabaseConnection,
|
|
||||||
settings: Option<&ArbiterSetting>,
|
|
||||||
) -> Result<TlsManager, InitError> {
|
|
||||||
match &settings {
|
|
||||||
Some(settings) => {
|
|
||||||
let tls_data_raw = TlsDataRaw {
|
|
||||||
cert: settings.cert.clone(),
|
|
||||||
key: settings.cert_key.clone(),
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(TlsManager::new(Some(tls_data_raw)).await?)
|
|
||||||
}
|
|
||||||
None => {
|
|
||||||
let tls = TlsManager::new(None).await?;
|
|
||||||
let tls_data_raw = tls.bytes();
|
|
||||||
|
|
||||||
diesel::insert_into(arbiter_settings::table)
|
|
||||||
.values(&ArbiterSetting {
|
|
||||||
id: 1,
|
|
||||||
root_key_id: None,
|
|
||||||
cert_key: tls_data_raw.key,
|
|
||||||
cert: tls_data_raw.cert,
|
|
||||||
})
|
|
||||||
.execute(db)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
Ok(tls)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn new(db: db::DatabasePool) -> Result<Self, InitError> {
|
|
||||||
let mut conn = db.get().await?;
|
|
||||||
let rng = rand::make_rng();
|
|
||||||
|
|
||||||
let settings = arbiter_settings::table
|
|
||||||
.first::<ArbiterSetting>(&mut conn)
|
|
||||||
.await
|
|
||||||
.optional()?;
|
|
||||||
|
|
||||||
let tls = Self::load_tls(&mut conn, settings.as_ref()).await?;
|
|
||||||
|
|
||||||
drop(conn);
|
|
||||||
|
|
||||||
let mut state = ServerStateMachine::new(_Context);
|
|
||||||
|
|
||||||
if let Some(settings) = &settings
|
|
||||||
&& settings.root_key_id.is_some()
|
|
||||||
{
|
|
||||||
// TODO: pass the encrypted root key to the state machine and let it handle decryption and transition to Sealed
|
|
||||||
let _ = state.process_event(ServerEvents::Bootstrapped);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(Self(Arc::new(_ServerContextInner {
|
|
||||||
bootstrapper: BootstrapActor::spawn(BootstrapActor::new(&db).await?),
|
|
||||||
db,
|
|
||||||
rng,
|
|
||||||
tls,
|
|
||||||
state: RwLock::new(state),
|
|
||||||
})))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,41 +0,0 @@
|
|||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use dashmap::DashSet;
|
|
||||||
|
|
||||||
#[derive(Clone, Default)]
|
|
||||||
struct LeaseStorage<T: Eq + std::hash::Hash>(Arc<DashSet<T>>);
|
|
||||||
|
|
||||||
// A lease that automatically releases the item when dropped
|
|
||||||
pub struct Lease<T: Clone + std::hash::Hash + Eq> {
|
|
||||||
item: T,
|
|
||||||
storage: LeaseStorage<T>,
|
|
||||||
}
|
|
||||||
impl<T: Clone + std::hash::Hash + Eq> Drop for Lease<T> {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
self.storage.0.remove(&self.item);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Clone, Default)]
|
|
||||||
pub struct LeaseHandler<T: Clone + std::hash::Hash + Eq> {
|
|
||||||
storage: LeaseStorage<T>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T: Clone + std::hash::Hash + Eq> LeaseHandler<T> {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Self {
|
|
||||||
storage: LeaseStorage(Arc::new(DashSet::new())),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn acquire(&self, item: T) -> Result<Lease<T>, ()> {
|
|
||||||
if self.storage.0.insert(item.clone()) {
|
|
||||||
Ok(Lease {
|
|
||||||
item,
|
|
||||||
storage: self.storage.clone(),
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
Err(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
65
server/crates/arbiter-server/src/context/mod.rs
Normal file
65
server/crates/arbiter-server/src/context/mod.rs
Normal file
@@ -0,0 +1,65 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use miette::Diagnostic;
|
||||||
|
use thiserror::Error;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
actors::GlobalActors,
|
||||||
|
context::tls::TlsManager,
|
||||||
|
db::{self},
|
||||||
|
};
|
||||||
|
|
||||||
|
pub mod tls;
|
||||||
|
|
||||||
|
#[derive(Error, Debug, Diagnostic)]
|
||||||
|
pub enum InitError {
|
||||||
|
#[error("Database setup failed: {0}")]
|
||||||
|
#[diagnostic(code(arbiter_server::init::database_setup))]
|
||||||
|
DatabaseSetup(#[from] db::DatabaseSetupError),
|
||||||
|
|
||||||
|
#[error("Connection acquire failed: {0}")]
|
||||||
|
#[diagnostic(code(arbiter_server::init::database_pool))]
|
||||||
|
DatabasePool(#[from] db::PoolError),
|
||||||
|
|
||||||
|
#[error("Database query error: {0}")]
|
||||||
|
#[diagnostic(code(arbiter_server::init::database_query))]
|
||||||
|
DatabaseQuery(#[from] diesel::result::Error),
|
||||||
|
|
||||||
|
#[error("TLS initialization failed: {0}")]
|
||||||
|
#[diagnostic(code(arbiter_server::init::tls_init))]
|
||||||
|
Tls(#[from] tls::InitError),
|
||||||
|
|
||||||
|
#[error("Actor spawn failed: {0}")]
|
||||||
|
#[diagnostic(code(arbiter_server::init::actor_spawn))]
|
||||||
|
ActorSpawn(#[from] crate::actors::SpawnError),
|
||||||
|
|
||||||
|
#[error("I/O Error: {0}")]
|
||||||
|
#[diagnostic(code(arbiter_server::init::io))]
|
||||||
|
Io(#[from] std::io::Error),
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct _ServerContextInner {
|
||||||
|
pub db: db::DatabasePool,
|
||||||
|
pub tls: TlsManager,
|
||||||
|
pub actors: GlobalActors,
|
||||||
|
}
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct ServerContext(Arc<_ServerContextInner>);
|
||||||
|
|
||||||
|
impl std::ops::Deref for ServerContext {
|
||||||
|
type Target = _ServerContextInner;
|
||||||
|
|
||||||
|
fn deref(&self) -> &Self::Target {
|
||||||
|
&self.0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ServerContext {
|
||||||
|
pub async fn new(db: db::DatabasePool) -> Result<Self, InitError> {
|
||||||
|
Ok(Self(Arc::new(_ServerContextInner {
|
||||||
|
actors: GlobalActors::spawn(db.clone()).await?,
|
||||||
|
tls: TlsManager::new(db.clone()).await?,
|
||||||
|
db,
|
||||||
|
})))
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,13 +1,36 @@
|
|||||||
use std::string::FromUtf8Error;
|
use std::string::FromUtf8Error;
|
||||||
|
|
||||||
|
use diesel::{ExpressionMethods as _, QueryDsl, SelectableHelper as _};
|
||||||
|
use diesel_async::{AsyncConnection, RunQueryDsl};
|
||||||
use miette::Diagnostic;
|
use miette::Diagnostic;
|
||||||
use rcgen::{Certificate, KeyPair};
|
use pem::Pem;
|
||||||
use rustls::pki_types::CertificateDer;
|
use rcgen::{
|
||||||
|
BasicConstraints, Certificate, CertificateParams, CertifiedIssuer, DistinguishedName, DnType,
|
||||||
|
IsCa, Issuer, KeyPair, KeyUsagePurpose,
|
||||||
|
};
|
||||||
|
use rustls::pki_types::{pem::PemObject};
|
||||||
use thiserror::Error;
|
use thiserror::Error;
|
||||||
|
use tonic::transport::CertificateDer;
|
||||||
|
|
||||||
|
use crate::db::{
|
||||||
|
self,
|
||||||
|
models::{NewTlsHistory, TlsHistory},
|
||||||
|
schema::{
|
||||||
|
arbiter_settings,
|
||||||
|
tls_history::{self},
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
const ENCODE_CONFIG: pem::EncodeConfig = {
|
||||||
|
let line_ending = match cfg!(target_family = "windows") {
|
||||||
|
true => pem::LineEnding::CRLF,
|
||||||
|
false => pem::LineEnding::LF,
|
||||||
|
};
|
||||||
|
pem::EncodeConfig::new().set_line_ending(line_ending)
|
||||||
|
};
|
||||||
|
|
||||||
#[derive(Error, Debug, Diagnostic)]
|
#[derive(Error, Debug, Diagnostic)]
|
||||||
pub enum TlsInitError {
|
pub enum InitError {
|
||||||
#[error("Key generation error during TLS initialization: {0}")]
|
#[error("Key generation error during TLS initialization: {0}")]
|
||||||
#[diagnostic(code(arbiter_server::tls_init::key_generation))]
|
#[diagnostic(code(arbiter_server::tls_init::key_generation))]
|
||||||
KeyGeneration(#[from] rcgen::Error),
|
KeyGeneration(#[from] rcgen::Error),
|
||||||
@@ -19,71 +42,211 @@ pub enum TlsInitError {
|
|||||||
#[error("Key deserialization error: {0}")]
|
#[error("Key deserialization error: {0}")]
|
||||||
#[diagnostic(code(arbiter_server::tls_init::key_deserialization))]
|
#[diagnostic(code(arbiter_server::tls_init::key_deserialization))]
|
||||||
KeyDeserializationError(rcgen::Error),
|
KeyDeserializationError(rcgen::Error),
|
||||||
|
|
||||||
|
#[error("Database error during TLS initialization: {0}")]
|
||||||
|
#[diagnostic(code(arbiter_server::tls_init::database_error))]
|
||||||
|
DatabaseError(#[from] diesel::result::Error),
|
||||||
|
|
||||||
|
#[error("Pem deserialization error during TLS initialization: {0}")]
|
||||||
|
#[diagnostic(code(arbiter_server::tls_init::pem_deserialization))]
|
||||||
|
PemDeserializationError(#[from] rustls::pki_types::pem::Error),
|
||||||
|
|
||||||
|
#[error("Database pool acquire error during TLS initialization: {0}")]
|
||||||
|
#[diagnostic(code(arbiter_server::tls_init::database_pool_acquire))]
|
||||||
|
DatabasePoolAcquire(#[from] db::PoolError),
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct TlsData {
|
pub type PemCert = String;
|
||||||
pub cert: CertificateDer<'static>,
|
|
||||||
pub keypair: KeyPair,
|
pub fn encode_cert_to_pem(cert: &CertificateDer) -> PemCert {
|
||||||
|
pem::encode_config(
|
||||||
|
&Pem::new("CERTIFICATE", cert.to_vec()),
|
||||||
|
ENCODE_CONFIG,
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct TlsDataRaw {
|
#[allow(unused)]
|
||||||
pub cert: Vec<u8>,
|
struct SerializedTls {
|
||||||
pub key: Vec<u8>,
|
cert_pem: PemCert,
|
||||||
|
cert_key_pem: String,
|
||||||
}
|
}
|
||||||
impl TlsDataRaw {
|
|
||||||
pub fn serialize(cert: &TlsData) -> Self {
|
struct TlsCa {
|
||||||
Self {
|
issuer: Issuer<'static, KeyPair>,
|
||||||
cert: cert.cert.as_ref().to_vec(),
|
cert: CertificateDer<'static>,
|
||||||
key: cert.keypair.serialize_pem().as_bytes().to_vec(),
|
}
|
||||||
}
|
|
||||||
|
impl TlsCa {
|
||||||
|
fn generate() -> Result<Self, InitError> {
|
||||||
|
let keypair = KeyPair::generate()?;
|
||||||
|
let mut params = CertificateParams::new(["Arbiter Instance CA".into()])?;
|
||||||
|
params.is_ca = IsCa::Ca(BasicConstraints::Unconstrained);
|
||||||
|
params.key_usages = vec![
|
||||||
|
KeyUsagePurpose::KeyCertSign,
|
||||||
|
KeyUsagePurpose::CrlSign,
|
||||||
|
KeyUsagePurpose::DigitalSignature,
|
||||||
|
];
|
||||||
|
|
||||||
|
let mut dn = DistinguishedName::new();
|
||||||
|
dn.push(DnType::CommonName, "Arbiter Instance CA");
|
||||||
|
params.distinguished_name = dn;
|
||||||
|
let certified_issuer = CertifiedIssuer::self_signed(params, keypair)?;
|
||||||
|
|
||||||
|
let cert_key_pem = certified_issuer.key().serialize_pem();
|
||||||
|
|
||||||
|
let issuer = Issuer::from_ca_cert_pem(
|
||||||
|
&certified_issuer.pem(),
|
||||||
|
KeyPair::from_pem(cert_key_pem.as_ref()).unwrap(),
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
issuer,
|
||||||
|
cert: certified_issuer.der().clone(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
fn generate_leaf(&self) -> Result<TlsCert, InitError> {
|
||||||
|
let cert_key = KeyPair::generate()?;
|
||||||
|
let mut params = CertificateParams::new(["Arbiter Instance Leaf".into()])?;
|
||||||
|
params.is_ca = IsCa::NoCa;
|
||||||
|
params.key_usages = vec![
|
||||||
|
KeyUsagePurpose::DigitalSignature,
|
||||||
|
KeyUsagePurpose::KeyEncipherment,
|
||||||
|
];
|
||||||
|
|
||||||
|
let mut dn = DistinguishedName::new();
|
||||||
|
dn.push(DnType::CommonName, "Arbiter Instance Leaf");
|
||||||
|
params.distinguished_name = dn;
|
||||||
|
|
||||||
|
let new_cert = params.signed_by(&cert_key, &self.issuer)?;
|
||||||
|
|
||||||
|
Ok(TlsCert {
|
||||||
|
cert: new_cert,
|
||||||
|
cert_key,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn deserialize(&self) -> Result<TlsData, TlsInitError> {
|
#[allow(unused)]
|
||||||
let cert = CertificateDer::from_slice(&self.cert).into_owned();
|
fn serialize(&self) -> Result<SerializedTls, InitError> {
|
||||||
|
let cert_key_pem = self.issuer.key().serialize_pem();
|
||||||
|
Ok(SerializedTls {
|
||||||
|
cert_pem: encode_cert_to_pem(&self.cert),
|
||||||
|
cert_key_pem,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
let key =
|
#[allow(unused)]
|
||||||
String::from_utf8(self.key.clone()).map_err(TlsInitError::KeyInvalidFormat)?;
|
fn try_deserialize(cert_pem: &str, cert_key_pem: &str) -> Result<Self, InitError> {
|
||||||
|
let keypair =
|
||||||
let keypair = KeyPair::from_pem(&key).map_err(TlsInitError::KeyDeserializationError)?;
|
KeyPair::from_pem(cert_key_pem).map_err(InitError::KeyDeserializationError)?;
|
||||||
|
let issuer = Issuer::from_ca_cert_pem(cert_pem, keypair)?;
|
||||||
Ok(TlsData { cert, keypair })
|
Ok(Self {
|
||||||
|
issuer,
|
||||||
|
cert: CertificateDer::from_pem_slice(cert_pem.as_bytes())?,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
fn generate_cert(key: &KeyPair) -> Result<Certificate, rcgen::Error> {
|
struct TlsCert {
|
||||||
let params = rcgen::CertificateParams::new(vec![
|
cert: Certificate,
|
||||||
"arbiter.local".to_string(),
|
cert_key: KeyPair,
|
||||||
"localhost".to_string(),
|
|
||||||
])?;
|
|
||||||
|
|
||||||
params.self_signed(key)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: Implement cert rotation
|
// TODO: Implement cert rotation
|
||||||
pub(crate) struct TlsManager {
|
pub struct TlsManager {
|
||||||
data: TlsData,
|
cert: CertificateDer<'static>,
|
||||||
|
keypair: KeyPair,
|
||||||
|
ca_cert: CertificateDer<'static>,
|
||||||
|
_db: db::DatabasePool,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl TlsManager {
|
impl TlsManager {
|
||||||
pub async fn new(data: Option<TlsDataRaw>) -> Result<Self, TlsInitError> {
|
pub async fn generate_new(db: &db::DatabasePool) -> Result<Self, InitError> {
|
||||||
match data {
|
let ca = TlsCa::generate()?;
|
||||||
Some(raw) => {
|
let new_cert = ca.generate_leaf()?;
|
||||||
let tls_data = raw.deserialize()?;
|
|
||||||
Ok(Self { data: tls_data })
|
{
|
||||||
}
|
let mut conn = db.get().await?;
|
||||||
None => {
|
conn.transaction(|conn| {
|
||||||
let keypair = KeyPair::generate()?;
|
Box::pin(async {
|
||||||
let cert = generate_cert(&keypair)?;
|
let new_tls_history = NewTlsHistory {
|
||||||
let tls_data = TlsData {
|
cert: new_cert.cert.pem(),
|
||||||
cert: cert.der().clone(),
|
cert_key: new_cert.cert_key.serialize_pem(),
|
||||||
keypair,
|
ca_cert: encode_cert_to_pem(&ca.cert),
|
||||||
|
ca_key: ca.issuer.key().serialize_pem(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let inserted_tls_history: i32 = diesel::insert_into(tls_history::table)
|
||||||
|
.values(&new_tls_history)
|
||||||
|
.returning(tls_history::id)
|
||||||
|
.get_result(conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
diesel::update(arbiter_settings::table)
|
||||||
|
.set(arbiter_settings::tls_id.eq(inserted_tls_history))
|
||||||
|
.execute(conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Result::<_, diesel::result::Error>::Ok(())
|
||||||
|
})
|
||||||
|
})
|
||||||
|
.await?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
cert: new_cert.cert.der().clone(),
|
||||||
|
keypair: new_cert.cert_key,
|
||||||
|
ca_cert: ca.cert,
|
||||||
|
_db: db.clone(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn new(db: db::DatabasePool) -> Result<Self, InitError> {
|
||||||
|
let cert_data: Option<TlsHistory> = {
|
||||||
|
let mut conn = db.get().await?;
|
||||||
|
arbiter_settings::table
|
||||||
|
.left_join(tls_history::table)
|
||||||
|
.select(Option::<TlsHistory>::as_select())
|
||||||
|
.first(&mut conn)
|
||||||
|
.await?
|
||||||
|
};
|
||||||
|
|
||||||
|
match cert_data {
|
||||||
|
Some(data) => {
|
||||||
|
let try_load = || -> Result<_, Box<dyn std::error::Error>> {
|
||||||
|
let keypair = KeyPair::from_pem(&data.cert_key)?;
|
||||||
|
let cert = CertificateDer::from_pem_slice(data.cert.as_bytes())?;
|
||||||
|
let ca_cert = CertificateDer::from_pem_slice(data.ca_cert.as_bytes())?;
|
||||||
|
Ok(Self {
|
||||||
|
cert,
|
||||||
|
keypair,
|
||||||
|
ca_cert,
|
||||||
|
_db: db.clone(),
|
||||||
|
})
|
||||||
};
|
};
|
||||||
Ok(Self { data: tls_data })
|
match try_load() {
|
||||||
|
Ok(manager) => Ok(manager),
|
||||||
|
Err(e) => {
|
||||||
|
eprintln!("Failed to load existing TLS certs: {e}. Generating new ones.");
|
||||||
|
Self::generate_new(&db).await
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
None => Self::generate_new(&db).await,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub fn bytes(&self) -> TlsDataRaw {
|
pub fn cert(&self) -> &CertificateDer<'static> {
|
||||||
TlsDataRaw::serialize(&self.data)
|
&self.cert
|
||||||
|
}
|
||||||
|
pub fn ca_cert(&self) -> &CertificateDer<'static> {
|
||||||
|
&self.ca_cert
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn cert_pem(&self) -> PemCert {
|
||||||
|
encode_cert_to_pem(&self.cert)
|
||||||
|
}
|
||||||
|
pub fn key_pem(&self) -> String {
|
||||||
|
self.keypair.serialize_pem()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,12 +1,7 @@
|
|||||||
use std::sync::Arc;
|
use diesel::{Connection as _, SqliteConnection, connection::SimpleConnection as _};
|
||||||
|
|
||||||
use diesel::{
|
|
||||||
Connection as _, SqliteConnection,
|
|
||||||
connection::{SimpleConnection as _, TransactionManager},
|
|
||||||
};
|
|
||||||
use diesel_async::{
|
use diesel_async::{
|
||||||
AsyncConnection, SimpleAsyncConnection,
|
AsyncConnection, SimpleAsyncConnection,
|
||||||
pooled_connection::{AsyncDieselConnectionManager, ManagerConfig, RecyclingMethod},
|
pooled_connection::{AsyncDieselConnectionManager, ManagerConfig},
|
||||||
sync_connection_wrapper::SyncConnectionWrapper,
|
sync_connection_wrapper::SyncConnectionWrapper,
|
||||||
};
|
};
|
||||||
use diesel_migrations::{EmbeddedMigrations, MigrationHarness, embed_migrations};
|
use diesel_migrations::{EmbeddedMigrations, MigrationHarness, embed_migrations};
|
||||||
@@ -22,30 +17,30 @@ pub type DatabasePool = diesel_async::pooled_connection::bb8::Pool<DatabaseConne
|
|||||||
pub type PoolInitError = diesel_async::pooled_connection::PoolError;
|
pub type PoolInitError = diesel_async::pooled_connection::PoolError;
|
||||||
pub type PoolError = diesel_async::pooled_connection::bb8::RunError;
|
pub type PoolError = diesel_async::pooled_connection::bb8::RunError;
|
||||||
|
|
||||||
static DB_FILE: &'static str = "arbiter.sqlite";
|
static DB_FILE: &str = "arbiter.sqlite";
|
||||||
|
|
||||||
const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations");
|
const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations");
|
||||||
|
|
||||||
#[derive(Error, Diagnostic, Debug)]
|
#[derive(Error, Diagnostic, Debug)]
|
||||||
pub enum DatabaseSetupError {
|
pub enum DatabaseSetupError {
|
||||||
#[error("Failed to determine home directory")]
|
#[error("Failed to determine home directory")]
|
||||||
#[diagnostic(code(arbiter::db::home_dir_error))]
|
#[diagnostic(code(arbiter::db::home_dir))]
|
||||||
HomeDir(std::io::Error),
|
HomeDir(std::io::Error),
|
||||||
|
|
||||||
#[error(transparent)]
|
#[error(transparent)]
|
||||||
#[diagnostic(code(arbiter::db::connection_error))]
|
#[diagnostic(code(arbiter::db::connection))]
|
||||||
Connection(diesel::ConnectionError),
|
Connection(diesel::ConnectionError),
|
||||||
|
|
||||||
#[error(transparent)]
|
#[error(transparent)]
|
||||||
#[diagnostic(code(arbiter::db::concurrency_error))]
|
#[diagnostic(code(arbiter::db::concurrency))]
|
||||||
ConcurrencySetup(diesel::result::Error),
|
ConcurrencySetup(diesel::result::Error),
|
||||||
|
|
||||||
#[error(transparent)]
|
#[error(transparent)]
|
||||||
#[diagnostic(code(arbiter::db::migration_error))]
|
#[diagnostic(code(arbiter::db::migration))]
|
||||||
Migration(Box<dyn std::error::Error + Send + Sync>),
|
Migration(Box<dyn std::error::Error + Send + Sync>),
|
||||||
|
|
||||||
#[error(transparent)]
|
#[error(transparent)]
|
||||||
#[diagnostic(code(arbiter::db::pool_error))]
|
#[diagnostic(code(arbiter::db::pool))]
|
||||||
Pool(#[from] PoolInitError),
|
Pool(#[from] PoolInitError),
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -96,12 +91,12 @@ fn initialize_database(url: &str) -> Result<(), DatabaseSetupError> {
|
|||||||
|
|
||||||
#[tracing::instrument(level = "info")]
|
#[tracing::instrument(level = "info")]
|
||||||
pub async fn create_pool(url: Option<&str>) -> Result<DatabasePool, DatabaseSetupError> {
|
pub async fn create_pool(url: Option<&str>) -> Result<DatabasePool, DatabaseSetupError> {
|
||||||
let database_url = url.map(String::from).unwrap_or(format!(
|
let database_url = url.map(String::from).unwrap_or(
|
||||||
"{}?mode=rwc",
|
database_path()?
|
||||||
(database_path()?
|
|
||||||
.to_str()
|
.to_str()
|
||||||
.expect("database path is not valid UTF-8"))
|
.expect("database path is not valid UTF-8")
|
||||||
));
|
.to_string(),
|
||||||
|
);
|
||||||
|
|
||||||
initialize_database(&database_url)?;
|
initialize_database(&database_url)?;
|
||||||
|
|
||||||
@@ -134,17 +129,16 @@ pub async fn create_pool(url: Option<&str>) -> Result<DatabasePool, DatabaseSetu
|
|||||||
Ok(pool)
|
Ok(pool)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
pub async fn create_test_pool() -> DatabasePool {
|
pub async fn create_test_pool() -> DatabasePool {
|
||||||
use rand::distr::{Alphanumeric, SampleString as _};
|
use rand::distr::{Alphanumeric, SampleString as _};
|
||||||
|
|
||||||
let tempfile_name = Alphanumeric.sample_string(&mut rand::rng(), 16);
|
let tempfile_name = Alphanumeric.sample_string(&mut rand::rng(), 16);
|
||||||
|
|
||||||
let file = std::env::temp_dir().join(tempfile_name);
|
let file = std::env::temp_dir().join(tempfile_name);
|
||||||
let url = format!(
|
let url = file
|
||||||
"{}?mode=rwc",
|
.to_str()
|
||||||
file.to_str().expect("temp file path is not valid UTF-8")
|
.expect("temp file path is not valid UTF-8")
|
||||||
);
|
.to_string();
|
||||||
|
|
||||||
create_pool(Some(&url))
|
create_pool(Some(&url))
|
||||||
.await
|
.await
|
||||||
@@ -1,49 +1,339 @@
|
|||||||
#![allow(unused)]
|
#![allow(unused)]
|
||||||
#![allow(clippy::all)]
|
#![allow(clippy::all)]
|
||||||
|
|
||||||
use crate::db::schema::{self, aead_encrypted, arbiter_settings};
|
use crate::db::schema::{
|
||||||
|
self, aead_encrypted, arbiter_settings, evm_basic_grant, evm_ether_transfer_grant,
|
||||||
|
evm_ether_transfer_grant_target, evm_ether_transfer_limit, evm_token_transfer_grant,
|
||||||
|
evm_token_transfer_log, evm_token_transfer_volume_limit, evm_transaction_log, evm_wallet,
|
||||||
|
root_key_history, tls_history,
|
||||||
|
};
|
||||||
|
use chrono::{DateTime, Utc};
|
||||||
use diesel::{prelude::*, sqlite::Sqlite};
|
use diesel::{prelude::*, sqlite::Sqlite};
|
||||||
|
use restructed::Models;
|
||||||
|
|
||||||
pub mod types {
|
pub mod types {
|
||||||
use chrono::{DateTime, Utc};
|
use chrono::{DateTime, Utc};
|
||||||
pub struct SqliteTimestamp(DateTime<Utc>);
|
use diesel::{
|
||||||
}
|
deserialize::{FromSql, FromSqlRow},
|
||||||
|
expression::AsExpression,
|
||||||
|
serialize::{IsNull, ToSql},
|
||||||
|
sql_types::Integer,
|
||||||
|
sqlite::{Sqlite, SqliteType},
|
||||||
|
};
|
||||||
|
|
||||||
#[derive(Queryable, Debug, Insertable)]
|
#[derive(Debug, FromSqlRow, AsExpression)]
|
||||||
|
#[diesel(sql_type = Integer)]
|
||||||
|
#[repr(transparent)] // hint compiler to optimize the wrapper struct away
|
||||||
|
pub struct SqliteTimestamp(pub DateTime<Utc>);
|
||||||
|
impl SqliteTimestamp {
|
||||||
|
pub fn now() -> Self {
|
||||||
|
SqliteTimestamp(Utc::now())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<chrono::DateTime<Utc>> for SqliteTimestamp {
|
||||||
|
fn from(dt: chrono::DateTime<Utc>) -> Self {
|
||||||
|
SqliteTimestamp(dt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
impl From<SqliteTimestamp> for chrono::DateTime<Utc> {
|
||||||
|
fn from(ts: SqliteTimestamp) -> Self {
|
||||||
|
ts.0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ToSql<Integer, Sqlite> for SqliteTimestamp {
|
||||||
|
fn to_sql<'b>(
|
||||||
|
&'b self,
|
||||||
|
out: &mut diesel::serialize::Output<'b, '_, Sqlite>,
|
||||||
|
) -> diesel::serialize::Result {
|
||||||
|
let unix_timestamp = self.0.timestamp() as i32;
|
||||||
|
out.set_value(unix_timestamp);
|
||||||
|
Ok(IsNull::No)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FromSql<Integer, Sqlite> for SqliteTimestamp {
|
||||||
|
fn from_sql(
|
||||||
|
mut bytes: <Sqlite as diesel::backend::Backend>::RawValue<'_>,
|
||||||
|
) -> diesel::deserialize::Result<Self> {
|
||||||
|
let Some(SqliteType::Long) = bytes.value_type() else {
|
||||||
|
return Err(format!(
|
||||||
|
"Expected Integer type for SqliteTimestamp, got {:?}",
|
||||||
|
bytes.value_type()
|
||||||
|
)
|
||||||
|
.into());
|
||||||
|
};
|
||||||
|
|
||||||
|
let unix_timestamp = bytes.read_long();
|
||||||
|
let datetime =
|
||||||
|
DateTime::from_timestamp(unix_timestamp, 0).ok_or("Timestamp is out of bounds")?;
|
||||||
|
|
||||||
|
Ok(SqliteTimestamp(datetime))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Key algorithm stored in the `useragent_client.key_type` column.
|
||||||
|
/// Values must stay stable — they are persisted in the database.
|
||||||
|
#[derive(Debug, Clone, Copy, PartialEq, Eq, FromSqlRow, AsExpression, strum::FromRepr)]
|
||||||
|
#[diesel(sql_type = Integer)]
|
||||||
|
#[repr(i32)]
|
||||||
|
pub enum KeyType {
|
||||||
|
Ed25519 = 1,
|
||||||
|
EcdsaSecp256k1 = 2,
|
||||||
|
Rsa = 3,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ToSql<Integer, Sqlite> for KeyType {
|
||||||
|
fn to_sql<'b>(
|
||||||
|
&'b self,
|
||||||
|
out: &mut diesel::serialize::Output<'b, '_, Sqlite>,
|
||||||
|
) -> diesel::serialize::Result {
|
||||||
|
out.set_value(*self as i32);
|
||||||
|
Ok(IsNull::No)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl FromSql<Integer, Sqlite> for KeyType {
|
||||||
|
fn from_sql(
|
||||||
|
mut bytes: <Sqlite as diesel::backend::Backend>::RawValue<'_>,
|
||||||
|
) -> diesel::deserialize::Result<Self> {
|
||||||
|
let Some(SqliteType::Long) = bytes.value_type() else {
|
||||||
|
return Err("Expected Integer for KeyType".into());
|
||||||
|
};
|
||||||
|
let discriminant = bytes.read_long();
|
||||||
|
KeyType::from_repr(discriminant as i32)
|
||||||
|
.ok_or_else(|| format!("Unknown KeyType discriminant: {discriminant}").into())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub use types::*;
|
||||||
|
|
||||||
|
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||||
|
#[view(
|
||||||
|
NewAeadEncrypted,
|
||||||
|
derive(Insertable),
|
||||||
|
omit(id),
|
||||||
|
attributes_with = "deriveless"
|
||||||
|
)]
|
||||||
#[diesel(table_name = aead_encrypted, check_for_backend(Sqlite))]
|
#[diesel(table_name = aead_encrypted, check_for_backend(Sqlite))]
|
||||||
pub struct AeadEncrypted {
|
pub struct AeadEncrypted {
|
||||||
pub id: i32,
|
pub id: i32,
|
||||||
pub ciphertext: Vec<u8>,
|
pub ciphertext: Vec<u8>,
|
||||||
pub tag: Vec<u8>,
|
pub tag: Vec<u8>,
|
||||||
pub current_nonce: i32,
|
pub current_nonce: Vec<u8>,
|
||||||
pub schema_version: i32,
|
pub schema_version: i32,
|
||||||
|
pub associated_root_key_id: i32, // references root_key_history.id
|
||||||
|
pub created_at: SqliteTimestamp,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Queryable, Debug, Insertable)]
|
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||||
#[diesel(table_name = arbiter_settings, check_for_backend(Sqlite))]
|
#[diesel(table_name = root_key_history, check_for_backend(Sqlite))]
|
||||||
pub struct ArbiterSetting {
|
#[view(
|
||||||
|
NewRootKeyHistory,
|
||||||
|
derive(Insertable),
|
||||||
|
omit(id),
|
||||||
|
attributes_with = "deriveless"
|
||||||
|
)]
|
||||||
|
pub struct RootKeyHistory {
|
||||||
pub id: i32,
|
pub id: i32,
|
||||||
pub root_key_id: Option<i32>, // references aead_encrypted.id
|
pub ciphertext: Vec<u8>,
|
||||||
pub cert_key: Vec<u8>,
|
pub tag: Vec<u8>,
|
||||||
pub cert: Vec<u8>,
|
pub root_key_encryption_nonce: Vec<u8>,
|
||||||
|
pub data_encryption_nonce: Vec<u8>,
|
||||||
|
pub schema_version: i32,
|
||||||
|
pub salt: Vec<u8>,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Queryable, Debug)]
|
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||||
|
#[diesel(table_name = tls_history, check_for_backend(Sqlite))]
|
||||||
|
#[view(
|
||||||
|
NewTlsHistory,
|
||||||
|
derive(Insertable),
|
||||||
|
omit(id, created_at),
|
||||||
|
attributes_with = "deriveless"
|
||||||
|
)]
|
||||||
|
pub struct TlsHistory {
|
||||||
|
pub id: i32,
|
||||||
|
pub cert: String,
|
||||||
|
pub cert_key: String, // PEM Encoded private key
|
||||||
|
pub ca_cert: String, // PEM Encoded certificate for cert signing
|
||||||
|
pub ca_key: String, // PEM Encoded public key for cert signing
|
||||||
|
pub created_at: SqliteTimestamp,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Queryable, Debug, Insertable, Selectable)]
|
||||||
|
#[diesel(table_name = arbiter_settings, check_for_backend(Sqlite))]
|
||||||
|
pub struct ArbiterSettings {
|
||||||
|
pub id: i32,
|
||||||
|
pub root_key_id: Option<i32>, // references root_key_history.id
|
||||||
|
pub tls_id: Option<i32>, // references tls_history.id
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||||
|
#[diesel(table_name = evm_wallet, check_for_backend(Sqlite))]
|
||||||
|
#[view(
|
||||||
|
NewEvmWallet,
|
||||||
|
derive(Insertable),
|
||||||
|
omit(id, created_at),
|
||||||
|
attributes_with = "deriveless"
|
||||||
|
)]
|
||||||
|
pub struct EvmWallet {
|
||||||
|
pub id: i32,
|
||||||
|
pub address: Vec<u8>,
|
||||||
|
pub aead_encrypted_id: i32,
|
||||||
|
pub created_at: SqliteTimestamp,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Queryable, Debug, Insertable, Selectable)]
|
||||||
#[diesel(table_name = schema::program_client, check_for_backend(Sqlite))]
|
#[diesel(table_name = schema::program_client, check_for_backend(Sqlite))]
|
||||||
pub struct ProgramClient {
|
pub struct ProgramClient {
|
||||||
pub id: i32,
|
pub id: i32,
|
||||||
pub public_key: Vec<u8>,
|
|
||||||
pub nonce: i32,
|
pub nonce: i32,
|
||||||
pub created_at: i32,
|
pub public_key: Vec<u8>,
|
||||||
pub updated_at: i32,
|
pub created_at: SqliteTimestamp,
|
||||||
|
pub updated_at: SqliteTimestamp,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Queryable, Debug)]
|
#[derive(Queryable, Debug)]
|
||||||
#[diesel(table_name = schema::useragent_client, check_for_backend(Sqlite))]
|
#[diesel(table_name = schema::useragent_client, check_for_backend(Sqlite))]
|
||||||
pub struct UseragentClient {
|
pub struct UseragentClient {
|
||||||
pub id: i32,
|
pub id: i32,
|
||||||
pub public_key: Vec<u8>,
|
|
||||||
pub nonce: i32,
|
pub nonce: i32,
|
||||||
pub created_at: i32,
|
pub public_key: Vec<u8>,
|
||||||
pub updated_at: i32,
|
pub created_at: SqliteTimestamp,
|
||||||
|
pub updated_at: SqliteTimestamp,
|
||||||
|
pub key_type: KeyType,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||||
|
#[diesel(table_name = evm_ether_transfer_limit, check_for_backend(Sqlite))]
|
||||||
|
#[view(
|
||||||
|
NewEvmEtherTransferLimit,
|
||||||
|
derive(Insertable),
|
||||||
|
omit(id, created_at),
|
||||||
|
attributes_with = "deriveless"
|
||||||
|
)]
|
||||||
|
pub struct EvmEtherTransferLimit {
|
||||||
|
pub id: i32,
|
||||||
|
pub window_secs: i32,
|
||||||
|
pub max_volume: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||||
|
#[diesel(table_name = evm_basic_grant, check_for_backend(Sqlite))]
|
||||||
|
#[view(
|
||||||
|
NewEvmBasicGrant,
|
||||||
|
derive(Insertable),
|
||||||
|
omit(id, created_at),
|
||||||
|
attributes_with = "deriveless"
|
||||||
|
)]
|
||||||
|
pub struct EvmBasicGrant {
|
||||||
|
pub id: i32,
|
||||||
|
pub wallet_id: i32, // references evm_wallet.id
|
||||||
|
pub client_id: i32, // references program_client.id
|
||||||
|
pub chain_id: i32,
|
||||||
|
pub valid_from: Option<SqliteTimestamp>,
|
||||||
|
pub valid_until: Option<SqliteTimestamp>,
|
||||||
|
pub max_gas_fee_per_gas: Option<Vec<u8>>,
|
||||||
|
pub max_priority_fee_per_gas: Option<Vec<u8>>,
|
||||||
|
pub rate_limit_count: Option<i32>,
|
||||||
|
pub rate_limit_window_secs: Option<i32>,
|
||||||
|
pub revoked_at: Option<SqliteTimestamp>,
|
||||||
|
pub created_at: SqliteTimestamp,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||||
|
#[diesel(table_name = evm_transaction_log, check_for_backend(Sqlite))]
|
||||||
|
#[view(
|
||||||
|
NewEvmTransactionLog,
|
||||||
|
derive(Insertable),
|
||||||
|
omit(id),
|
||||||
|
attributes_with = "deriveless"
|
||||||
|
)]
|
||||||
|
pub struct EvmTransactionLog {
|
||||||
|
pub id: i32,
|
||||||
|
pub grant_id: i32,
|
||||||
|
pub client_id: i32,
|
||||||
|
pub wallet_id: i32,
|
||||||
|
pub chain_id: i32,
|
||||||
|
pub eth_value: Vec<u8>,
|
||||||
|
pub signed_at: SqliteTimestamp,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||||
|
#[diesel(table_name = evm_ether_transfer_grant, check_for_backend(Sqlite))]
|
||||||
|
#[view(
|
||||||
|
NewEvmEtherTransferGrant,
|
||||||
|
derive(Insertable),
|
||||||
|
omit(id),
|
||||||
|
attributes_with = "deriveless"
|
||||||
|
)]
|
||||||
|
pub struct EvmEtherTransferGrant {
|
||||||
|
pub id: i32,
|
||||||
|
pub basic_grant_id: i32,
|
||||||
|
pub limit_id: i32, // references evm_ether_transfer_limit.id
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||||
|
#[diesel(table_name = evm_ether_transfer_grant_target, check_for_backend(Sqlite))]
|
||||||
|
#[view(
|
||||||
|
NewEvmEtherTransferGrantTarget,
|
||||||
|
derive(Insertable),
|
||||||
|
omit(id),
|
||||||
|
attributes_with = "deriveless"
|
||||||
|
)]
|
||||||
|
pub struct EvmEtherTransferGrantTarget {
|
||||||
|
pub id: i32,
|
||||||
|
pub grant_id: i32,
|
||||||
|
pub address: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||||
|
#[diesel(table_name = evm_token_transfer_grant, check_for_backend(Sqlite))]
|
||||||
|
#[view(
|
||||||
|
NewEvmTokenTransferGrant,
|
||||||
|
derive(Insertable),
|
||||||
|
omit(id),
|
||||||
|
attributes_with = "deriveless"
|
||||||
|
)]
|
||||||
|
pub struct EvmTokenTransferGrant {
|
||||||
|
pub id: i32,
|
||||||
|
pub basic_grant_id: i32,
|
||||||
|
pub token_contract: Vec<u8>,
|
||||||
|
pub receiver: Option<Vec<u8>>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||||
|
#[diesel(table_name = evm_token_transfer_volume_limit, check_for_backend(Sqlite))]
|
||||||
|
#[view(
|
||||||
|
NewEvmTokenTransferVolumeLimit,
|
||||||
|
derive(Insertable),
|
||||||
|
omit(id),
|
||||||
|
attributes_with = "deriveless"
|
||||||
|
)]
|
||||||
|
pub struct EvmTokenTransferVolumeLimit {
|
||||||
|
pub id: i32,
|
||||||
|
pub grant_id: i32,
|
||||||
|
pub window_secs: i32,
|
||||||
|
pub max_volume: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Models, Queryable, Debug, Insertable, Selectable)]
|
||||||
|
#[diesel(table_name = evm_token_transfer_log, check_for_backend(Sqlite))]
|
||||||
|
#[view(
|
||||||
|
NewEvmTokenTransferLog,
|
||||||
|
derive(Insertable),
|
||||||
|
omit(id, created_at),
|
||||||
|
attributes_with = "deriveless"
|
||||||
|
)]
|
||||||
|
pub struct EvmTokenTransferLog {
|
||||||
|
pub id: i32,
|
||||||
|
pub grant_id: i32,
|
||||||
|
pub log_id: i32,
|
||||||
|
pub chain_id: i32,
|
||||||
|
pub token_contract: Vec<u8>,
|
||||||
|
pub recipient_address: Vec<u8>,
|
||||||
|
pub value: Vec<u8>,
|
||||||
|
pub created_at: SqliteTimestamp,
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,10 +3,12 @@
|
|||||||
diesel::table! {
|
diesel::table! {
|
||||||
aead_encrypted (id) {
|
aead_encrypted (id) {
|
||||||
id -> Integer,
|
id -> Integer,
|
||||||
current_nonce -> Integer,
|
current_nonce -> Binary,
|
||||||
ciphertext -> Binary,
|
ciphertext -> Binary,
|
||||||
tag -> Binary,
|
tag -> Binary,
|
||||||
schema_version -> Integer,
|
schema_version -> Integer,
|
||||||
|
associated_root_key_id -> Integer,
|
||||||
|
created_at -> Integer,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -14,8 +16,100 @@ diesel::table! {
|
|||||||
arbiter_settings (id) {
|
arbiter_settings (id) {
|
||||||
id -> Integer,
|
id -> Integer,
|
||||||
root_key_id -> Nullable<Integer>,
|
root_key_id -> Nullable<Integer>,
|
||||||
cert_key -> Binary,
|
tls_id -> Nullable<Integer>,
|
||||||
cert -> Binary,
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
diesel::table! {
|
||||||
|
evm_basic_grant (id) {
|
||||||
|
id -> Integer,
|
||||||
|
wallet_id -> Integer,
|
||||||
|
client_id -> Integer,
|
||||||
|
chain_id -> Integer,
|
||||||
|
valid_from -> Nullable<Integer>,
|
||||||
|
valid_until -> Nullable<Integer>,
|
||||||
|
max_gas_fee_per_gas -> Nullable<Binary>,
|
||||||
|
max_priority_fee_per_gas -> Nullable<Binary>,
|
||||||
|
rate_limit_count -> Nullable<Integer>,
|
||||||
|
rate_limit_window_secs -> Nullable<Integer>,
|
||||||
|
revoked_at -> Nullable<Integer>,
|
||||||
|
created_at -> Integer,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
diesel::table! {
|
||||||
|
evm_ether_transfer_grant (id) {
|
||||||
|
id -> Integer,
|
||||||
|
basic_grant_id -> Integer,
|
||||||
|
limit_id -> Integer,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
diesel::table! {
|
||||||
|
evm_ether_transfer_grant_target (id) {
|
||||||
|
id -> Integer,
|
||||||
|
grant_id -> Integer,
|
||||||
|
address -> Binary,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
diesel::table! {
|
||||||
|
evm_ether_transfer_limit (id) {
|
||||||
|
id -> Integer,
|
||||||
|
window_secs -> Integer,
|
||||||
|
max_volume -> Binary,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
diesel::table! {
|
||||||
|
evm_token_transfer_grant (id) {
|
||||||
|
id -> Integer,
|
||||||
|
basic_grant_id -> Integer,
|
||||||
|
token_contract -> Binary,
|
||||||
|
receiver -> Nullable<Binary>,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
diesel::table! {
|
||||||
|
evm_token_transfer_log (id) {
|
||||||
|
id -> Integer,
|
||||||
|
grant_id -> Integer,
|
||||||
|
log_id -> Integer,
|
||||||
|
chain_id -> Integer,
|
||||||
|
token_contract -> Binary,
|
||||||
|
recipient_address -> Binary,
|
||||||
|
value -> Binary,
|
||||||
|
created_at -> Integer,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
diesel::table! {
|
||||||
|
evm_token_transfer_volume_limit (id) {
|
||||||
|
id -> Integer,
|
||||||
|
grant_id -> Integer,
|
||||||
|
window_secs -> Integer,
|
||||||
|
max_volume -> Binary,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
diesel::table! {
|
||||||
|
evm_transaction_log (id) {
|
||||||
|
id -> Integer,
|
||||||
|
grant_id -> Integer,
|
||||||
|
client_id -> Integer,
|
||||||
|
wallet_id -> Integer,
|
||||||
|
chain_id -> Integer,
|
||||||
|
eth_value -> Binary,
|
||||||
|
signed_at -> Integer,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
diesel::table! {
|
||||||
|
evm_wallet (id) {
|
||||||
|
id -> Integer,
|
||||||
|
address -> Binary,
|
||||||
|
aead_encrypted_id -> Integer,
|
||||||
|
created_at -> Integer,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -29,6 +123,29 @@ diesel::table! {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
diesel::table! {
|
||||||
|
root_key_history (id) {
|
||||||
|
id -> Integer,
|
||||||
|
root_key_encryption_nonce -> Binary,
|
||||||
|
data_encryption_nonce -> Binary,
|
||||||
|
ciphertext -> Binary,
|
||||||
|
tag -> Binary,
|
||||||
|
schema_version -> Integer,
|
||||||
|
salt -> Binary,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
diesel::table! {
|
||||||
|
tls_history (id) {
|
||||||
|
id -> Integer,
|
||||||
|
cert -> Text,
|
||||||
|
cert_key -> Text,
|
||||||
|
ca_cert -> Text,
|
||||||
|
ca_key -> Text,
|
||||||
|
created_at -> Integer,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
diesel::table! {
|
diesel::table! {
|
||||||
useragent_client (id) {
|
useragent_client (id) {
|
||||||
id -> Integer,
|
id -> Integer,
|
||||||
@@ -36,14 +153,38 @@ diesel::table! {
|
|||||||
public_key -> Binary,
|
public_key -> Binary,
|
||||||
created_at -> Integer,
|
created_at -> Integer,
|
||||||
updated_at -> Integer,
|
updated_at -> Integer,
|
||||||
|
key_type -> Integer,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
diesel::joinable!(arbiter_settings -> aead_encrypted (root_key_id));
|
diesel::joinable!(aead_encrypted -> root_key_history (associated_root_key_id));
|
||||||
|
diesel::joinable!(arbiter_settings -> root_key_history (root_key_id));
|
||||||
|
diesel::joinable!(arbiter_settings -> tls_history (tls_id));
|
||||||
|
diesel::joinable!(evm_basic_grant -> evm_wallet (wallet_id));
|
||||||
|
diesel::joinable!(evm_basic_grant -> program_client (client_id));
|
||||||
|
diesel::joinable!(evm_ether_transfer_grant -> evm_basic_grant (basic_grant_id));
|
||||||
|
diesel::joinable!(evm_ether_transfer_grant -> evm_ether_transfer_limit (limit_id));
|
||||||
|
diesel::joinable!(evm_ether_transfer_grant_target -> evm_ether_transfer_grant (grant_id));
|
||||||
|
diesel::joinable!(evm_token_transfer_grant -> evm_basic_grant (basic_grant_id));
|
||||||
|
diesel::joinable!(evm_token_transfer_log -> evm_token_transfer_grant (grant_id));
|
||||||
|
diesel::joinable!(evm_token_transfer_log -> evm_transaction_log (log_id));
|
||||||
|
diesel::joinable!(evm_token_transfer_volume_limit -> evm_token_transfer_grant (grant_id));
|
||||||
|
diesel::joinable!(evm_wallet -> aead_encrypted (aead_encrypted_id));
|
||||||
|
|
||||||
diesel::allow_tables_to_appear_in_same_query!(
|
diesel::allow_tables_to_appear_in_same_query!(
|
||||||
aead_encrypted,
|
aead_encrypted,
|
||||||
arbiter_settings,
|
arbiter_settings,
|
||||||
|
evm_basic_grant,
|
||||||
|
evm_ether_transfer_grant,
|
||||||
|
evm_ether_transfer_grant_target,
|
||||||
|
evm_ether_transfer_limit,
|
||||||
|
evm_token_transfer_grant,
|
||||||
|
evm_token_transfer_log,
|
||||||
|
evm_token_transfer_volume_limit,
|
||||||
|
evm_transaction_log,
|
||||||
|
evm_wallet,
|
||||||
program_client,
|
program_client,
|
||||||
|
root_key_history,
|
||||||
|
tls_history,
|
||||||
useragent_client,
|
useragent_client,
|
||||||
);
|
);
|
||||||
|
|||||||
@@ -1,24 +0,0 @@
|
|||||||
use tonic::Status;
|
|
||||||
use tracing::error;
|
|
||||||
|
|
||||||
pub trait GrpcStatusExt<T> {
|
|
||||||
fn to_status(self) -> Result<T, Status>;
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> GrpcStatusExt<T> for Result<T, diesel::result::Error> {
|
|
||||||
fn to_status(self) -> Result<T, Status> {
|
|
||||||
self.map_err(|e| {
|
|
||||||
error!(error = ?e, "Database error");
|
|
||||||
Status::internal("Database error")
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T> GrpcStatusExt<T> for Result<T, crate::db::PoolError> {
|
|
||||||
fn to_status(self) -> Result<T, Status> {
|
|
||||||
self.map_err(|e| {
|
|
||||||
error!(error = ?e, "Database pool error");
|
|
||||||
Status::internal("Database pool error")
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
84
server/crates/arbiter-server/src/evm/abi.rs
Normal file
84
server/crates/arbiter-server/src/evm/abi.rs
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
use alloy::sol;
|
||||||
|
|
||||||
|
sol! {
|
||||||
|
interface IERC20 {
|
||||||
|
event Transfer(address indexed from, address indexed to, uint256 value);
|
||||||
|
event Approval(address indexed owner, address indexed spender, uint256 value);
|
||||||
|
|
||||||
|
function totalSupply() external view returns (uint256);
|
||||||
|
function balanceOf(address account) external view returns (uint256);
|
||||||
|
function transfer(address to, uint256 value) external returns (bool);
|
||||||
|
function allowance(address owner, address spender) external view returns (uint256);
|
||||||
|
function approve(address spender, uint256 value) external returns (bool);
|
||||||
|
function transferFrom(address from, address to, uint256 value) external returns (bool);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sol! {
|
||||||
|
/// ERC-721: Non-Fungible Token Standard.
|
||||||
|
#[derive(Debug)]
|
||||||
|
interface IERC721 {
|
||||||
|
event Transfer(address indexed from, address indexed to, uint256 indexed tokenId);
|
||||||
|
event Approval(address indexed owner, address indexed approved, uint256 indexed tokenId);
|
||||||
|
event ApprovalForAll(address indexed owner, address indexed operator, bool approved);
|
||||||
|
|
||||||
|
function balanceOf(address owner) external view returns (uint256 balance);
|
||||||
|
function ownerOf(uint256 tokenId) external view returns (address owner);
|
||||||
|
function safeTransferFrom(address from, address to, uint256 tokenId) external;
|
||||||
|
function safeTransferFrom(address from, address to, uint256 tokenId, bytes calldata data) external;
|
||||||
|
function transferFrom(address from, address to, uint256 tokenId) external;
|
||||||
|
function approve(address to, uint256 tokenId) external;
|
||||||
|
function setApprovalForAll(address operator, bool approved) external;
|
||||||
|
function getApproved(uint256 tokenId) external view returns (address operator);
|
||||||
|
function isApprovedForAll(address owner, address operator) external view returns (bool);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sol! {
|
||||||
|
/// Wrapped Ether — the only functions beyond ERC-20 that matter.
|
||||||
|
#[derive(Debug)]
|
||||||
|
interface IWETH {
|
||||||
|
function deposit() external payable;
|
||||||
|
function withdraw(uint256 wad) external;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
sol! {
|
||||||
|
/// Permit2 — Uniswap's canonical token approval manager.
|
||||||
|
/// Replaces per-contract ERC-20 approve() with a single approval hub.
|
||||||
|
#[derive(Debug)]
|
||||||
|
interface IPermit2 {
|
||||||
|
struct TokenPermissions {
|
||||||
|
address token;
|
||||||
|
uint256 amount;
|
||||||
|
}
|
||||||
|
|
||||||
|
struct PermitSingle {
|
||||||
|
TokenPermissions details;
|
||||||
|
address spender;
|
||||||
|
uint256 sigDeadline;
|
||||||
|
}
|
||||||
|
|
||||||
|
struct PermitBatch {
|
||||||
|
TokenPermissions[] details;
|
||||||
|
address spender;
|
||||||
|
uint256 sigDeadline;
|
||||||
|
}
|
||||||
|
|
||||||
|
struct AllowanceTransferDetails {
|
||||||
|
address from;
|
||||||
|
address to;
|
||||||
|
uint160 amount;
|
||||||
|
address token;
|
||||||
|
}
|
||||||
|
|
||||||
|
function approve(address token, address spender, uint160 amount, uint48 expiration) external;
|
||||||
|
function permit(address owner, PermitSingle calldata permitSingle, bytes calldata signature) external;
|
||||||
|
function permit(address owner, PermitBatch calldata permitBatch, bytes calldata signature) external;
|
||||||
|
function transferFrom(address from, address to, uint160 amount, address token) external;
|
||||||
|
function transferFrom(AllowanceTransferDetails[] calldata transferDetails) external;
|
||||||
|
|
||||||
|
function allowance(address user, address token, address spender)
|
||||||
|
external view returns (uint160 amount, uint48 expiration, uint48 nonce);
|
||||||
|
}
|
||||||
|
}
|
||||||
340
server/crates/arbiter-server/src/evm/mod.rs
Normal file
340
server/crates/arbiter-server/src/evm/mod.rs
Normal file
@@ -0,0 +1,340 @@
|
|||||||
|
pub mod abi;
|
||||||
|
pub mod safe_signer;
|
||||||
|
|
||||||
|
use alloy::{
|
||||||
|
consensus::TxEip1559,
|
||||||
|
primitives::{TxKind, U256},
|
||||||
|
};
|
||||||
|
use chrono::Utc;
|
||||||
|
use diesel::{ExpressionMethods as _, QueryDsl, QueryResult, insert_into, sqlite::Sqlite};
|
||||||
|
use diesel_async::{AsyncConnection, RunQueryDsl};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
db::{
|
||||||
|
self,
|
||||||
|
models::{EvmBasicGrant, NewEvmBasicGrant, NewEvmTransactionLog, SqliteTimestamp},
|
||||||
|
schema::{self, evm_transaction_log},
|
||||||
|
},
|
||||||
|
evm::policies::{
|
||||||
|
DatabaseID, EvalContext, EvalViolation, FullGrant, Grant, Policy, SharedGrantSettings,
|
||||||
|
SpecificGrant, SpecificMeaning, ether_transfer::EtherTransfer,
|
||||||
|
token_transfers::TokenTransfer,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
pub mod policies;
|
||||||
|
mod utils;
|
||||||
|
|
||||||
|
/// Errors that can only occur once the transaction meaning is known (during policy evaluation)
|
||||||
|
#[derive(Debug, thiserror::Error, miette::Diagnostic)]
|
||||||
|
pub enum PolicyError {
|
||||||
|
#[error("Database connection pool error")]
|
||||||
|
#[diagnostic(code(arbiter_server::evm::policy_error::pool))]
|
||||||
|
Pool(#[from] db::PoolError),
|
||||||
|
#[error("Database returned error")]
|
||||||
|
#[diagnostic(code(arbiter_server::evm::policy_error::database))]
|
||||||
|
Database(#[from] diesel::result::Error),
|
||||||
|
#[error("Transaction violates policy: {0:?}")]
|
||||||
|
#[diagnostic(code(arbiter_server::evm::policy_error::violation))]
|
||||||
|
Violations(Vec<EvalViolation>),
|
||||||
|
#[error("No matching grant found")]
|
||||||
|
#[diagnostic(code(arbiter_server::evm::policy_error::no_matching_grant))]
|
||||||
|
NoMatchingGrant,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, thiserror::Error, miette::Diagnostic)]
|
||||||
|
pub enum VetError {
|
||||||
|
#[error("Contract creation transactions are not supported")]
|
||||||
|
#[diagnostic(code(arbiter_server::evm::vet_error::contract_creation_unsupported))]
|
||||||
|
ContractCreationNotSupported,
|
||||||
|
#[error("Engine can't classify this transaction")]
|
||||||
|
#[diagnostic(code(arbiter_server::evm::vet_error::unsupported))]
|
||||||
|
UnsupportedTransactionType,
|
||||||
|
#[error("Policy evaluation failed: {1}")]
|
||||||
|
#[diagnostic(code(arbiter_server::evm::vet_error::evaluated))]
|
||||||
|
Evaluated(SpecificMeaning, #[source] PolicyError),
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, thiserror::Error, miette::Diagnostic)]
|
||||||
|
pub enum SignError {
|
||||||
|
#[error("Database connection pool error")]
|
||||||
|
#[diagnostic(code(arbiter_server::evm::database_error))]
|
||||||
|
Pool(#[from] db::PoolError),
|
||||||
|
#[error("Database returned error")]
|
||||||
|
#[diagnostic(code(arbiter_server::evm::database_error))]
|
||||||
|
Database(#[from] diesel::result::Error),
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, thiserror::Error, miette::Diagnostic)]
|
||||||
|
pub enum AnalyzeError {
|
||||||
|
#[error("Engine doesn't support granting permissions for contract creation")]
|
||||||
|
#[diagnostic(code(arbiter_server::evm::analyze_error::contract_creation_not_supported))]
|
||||||
|
ContractCreationNotSupported,
|
||||||
|
|
||||||
|
#[error("Unsupported transaction type")]
|
||||||
|
#[diagnostic(code(arbiter_server::evm::analyze_error::unsupported_transaction_type))]
|
||||||
|
UnsupportedTransactionType,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, thiserror::Error, miette::Diagnostic)]
|
||||||
|
pub enum CreationError {
|
||||||
|
#[error("Database connection pool error")]
|
||||||
|
#[diagnostic(code(arbiter_server::evm::creation_error::database_error))]
|
||||||
|
Pool(#[from] db::PoolError),
|
||||||
|
|
||||||
|
#[error("Database returned error")]
|
||||||
|
#[diagnostic(code(arbiter_server::evm::creation_error::database_error))]
|
||||||
|
Database(#[from] diesel::result::Error),
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, thiserror::Error, miette::Diagnostic)]
|
||||||
|
pub enum ListGrantsError {
|
||||||
|
#[error("Database connection pool error")]
|
||||||
|
#[diagnostic(code(arbiter_server::evm::list_grants_error::pool))]
|
||||||
|
Pool(#[from] db::PoolError),
|
||||||
|
|
||||||
|
#[error("Database returned error")]
|
||||||
|
#[diagnostic(code(arbiter_server::evm::list_grants_error::database))]
|
||||||
|
Database(#[from] diesel::result::Error),
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Controls whether a transaction should be executed or only validated
|
||||||
|
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||||
|
pub enum RunKind {
|
||||||
|
/// Validate and record the transaction
|
||||||
|
Execution,
|
||||||
|
/// Validate only, do not record
|
||||||
|
CheckOnly,
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn check_shared_constraints(
|
||||||
|
context: &EvalContext,
|
||||||
|
shared: &SharedGrantSettings,
|
||||||
|
shared_grant_id: DatabaseID,
|
||||||
|
conn: &mut impl AsyncConnection<Backend = Sqlite>,
|
||||||
|
) -> QueryResult<Vec<EvalViolation>> {
|
||||||
|
let mut violations = Vec::new();
|
||||||
|
let now = Utc::now();
|
||||||
|
|
||||||
|
// Validity window
|
||||||
|
if shared.valid_from.is_some_and(|t| now < t)
|
||||||
|
|| shared.valid_until.is_some_and(|t| now > t)
|
||||||
|
{
|
||||||
|
violations.push(EvalViolation::InvalidTime);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Gas fee caps
|
||||||
|
let fee_exceeded = shared
|
||||||
|
.max_gas_fee_per_gas
|
||||||
|
.is_some_and(|cap| U256::from(context.max_fee_per_gas) > cap);
|
||||||
|
let priority_exceeded = shared.max_priority_fee_per_gas.is_some_and(|cap| {
|
||||||
|
U256::from(context.max_priority_fee_per_gas) > cap
|
||||||
|
});
|
||||||
|
if fee_exceeded || priority_exceeded {
|
||||||
|
violations.push(EvalViolation::GasLimitExceeded {
|
||||||
|
max_gas_fee_per_gas: shared.max_gas_fee_per_gas,
|
||||||
|
max_priority_fee_per_gas: shared.max_priority_fee_per_gas,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Transaction count rate limit
|
||||||
|
if let Some(rate_limit) = &shared.rate_limit {
|
||||||
|
let window_start = SqliteTimestamp(now - rate_limit.window);
|
||||||
|
let count: i64 = evm_transaction_log::table
|
||||||
|
.filter(evm_transaction_log::grant_id.eq(shared_grant_id))
|
||||||
|
.filter(evm_transaction_log::signed_at.ge(window_start))
|
||||||
|
.count()
|
||||||
|
.get_result(conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
if count >= rate_limit.count as i64 {
|
||||||
|
violations.push(EvalViolation::RateLimitExceeded);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(violations)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Supporting only EIP-1559 transactions for now, but we can easily extend this to support legacy transactions if needed
|
||||||
|
pub struct Engine {
|
||||||
|
db: db::DatabasePool,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Engine {
|
||||||
|
async fn vet_transaction<P: Policy>(
|
||||||
|
&self,
|
||||||
|
context: EvalContext,
|
||||||
|
meaning: &P::Meaning,
|
||||||
|
run_kind: RunKind,
|
||||||
|
) -> Result<(), PolicyError> {
|
||||||
|
let mut conn = self.db.get().await?;
|
||||||
|
|
||||||
|
let grant = P::try_find_grant(&context, &mut conn)
|
||||||
|
.await?
|
||||||
|
.ok_or(PolicyError::NoMatchingGrant)?;
|
||||||
|
|
||||||
|
let mut violations =
|
||||||
|
check_shared_constraints(&context, &grant.shared, grant.shared_grant_id, &mut conn)
|
||||||
|
.await?;
|
||||||
|
violations.extend(P::evaluate(&context, meaning, &grant, &mut conn).await?);
|
||||||
|
|
||||||
|
if !violations.is_empty() {
|
||||||
|
return Err(PolicyError::Violations(violations));
|
||||||
|
} else if run_kind == RunKind::Execution {
|
||||||
|
conn.transaction(|conn| {
|
||||||
|
Box::pin(async move {
|
||||||
|
let log_id: i32 = insert_into(evm_transaction_log::table)
|
||||||
|
.values(&NewEvmTransactionLog {
|
||||||
|
grant_id: grant.shared_grant_id,
|
||||||
|
client_id: context.client_id,
|
||||||
|
wallet_id: context.wallet_id,
|
||||||
|
chain_id: context.chain as i32,
|
||||||
|
eth_value: utils::u256_to_bytes(context.value).to_vec(),
|
||||||
|
signed_at: Utc::now().into(),
|
||||||
|
})
|
||||||
|
.returning(evm_transaction_log::id)
|
||||||
|
.get_result(conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
P::record_transaction(&context, meaning, log_id, &grant, conn).await?;
|
||||||
|
|
||||||
|
QueryResult::Ok(())
|
||||||
|
})
|
||||||
|
})
|
||||||
|
.await?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Engine {
|
||||||
|
pub fn new(db: db::DatabasePool) -> Self {
|
||||||
|
Self { db }
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn create_grant<P: Policy>(
|
||||||
|
&self,
|
||||||
|
client_id: i32,
|
||||||
|
full_grant: FullGrant<P::Settings>,
|
||||||
|
) -> Result<i32, CreationError> {
|
||||||
|
let mut conn = self.db.get().await?;
|
||||||
|
|
||||||
|
let id = conn
|
||||||
|
.transaction(|conn| {
|
||||||
|
Box::pin(async move {
|
||||||
|
use schema::evm_basic_grant;
|
||||||
|
|
||||||
|
let basic_grant: EvmBasicGrant = insert_into(evm_basic_grant::table)
|
||||||
|
.values(&NewEvmBasicGrant {
|
||||||
|
wallet_id: full_grant.basic.wallet_id,
|
||||||
|
chain_id: full_grant.basic.chain as i32,
|
||||||
|
client_id,
|
||||||
|
valid_from: full_grant.basic.valid_from.map(SqliteTimestamp),
|
||||||
|
valid_until: full_grant.basic.valid_until.map(SqliteTimestamp),
|
||||||
|
max_gas_fee_per_gas: full_grant
|
||||||
|
.basic
|
||||||
|
.max_gas_fee_per_gas
|
||||||
|
.map(|fee| utils::u256_to_bytes(fee).to_vec()),
|
||||||
|
max_priority_fee_per_gas: full_grant
|
||||||
|
.basic
|
||||||
|
.max_priority_fee_per_gas
|
||||||
|
.map(|fee| utils::u256_to_bytes(fee).to_vec()),
|
||||||
|
rate_limit_count: full_grant
|
||||||
|
.basic
|
||||||
|
.rate_limit
|
||||||
|
.as_ref()
|
||||||
|
.map(|rl| rl.count as i32),
|
||||||
|
rate_limit_window_secs: full_grant
|
||||||
|
.basic
|
||||||
|
.rate_limit
|
||||||
|
.as_ref()
|
||||||
|
.map(|rl| rl.window.num_seconds() as i32),
|
||||||
|
revoked_at: None,
|
||||||
|
})
|
||||||
|
.returning(evm_basic_grant::all_columns)
|
||||||
|
.get_result(conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
P::create_grant(&basic_grant, &full_grant.specific, conn).await
|
||||||
|
})
|
||||||
|
})
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok(id)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn list_all_grants(&self) -> Result<Vec<Grant<SpecificGrant>>, ListGrantsError> {
|
||||||
|
let mut conn = self.db.get().await?;
|
||||||
|
|
||||||
|
let mut grants: Vec<Grant<SpecificGrant>> = Vec::new();
|
||||||
|
|
||||||
|
grants.extend(
|
||||||
|
EtherTransfer::find_all_grants(&mut conn)
|
||||||
|
.await?
|
||||||
|
.into_iter()
|
||||||
|
.map(|g| Grant {
|
||||||
|
id: g.id,
|
||||||
|
shared_grant_id: g.shared_grant_id,
|
||||||
|
shared: g.shared,
|
||||||
|
settings: SpecificGrant::EtherTransfer(g.settings),
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
grants.extend(
|
||||||
|
TokenTransfer::find_all_grants(&mut conn)
|
||||||
|
.await?
|
||||||
|
.into_iter()
|
||||||
|
.map(|g| Grant {
|
||||||
|
id: g.id,
|
||||||
|
shared_grant_id: g.shared_grant_id,
|
||||||
|
shared: g.shared,
|
||||||
|
settings: SpecificGrant::TokenTransfer(g.settings),
|
||||||
|
}),
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(grants)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn evaluate_transaction(
|
||||||
|
&self,
|
||||||
|
wallet_id: i32,
|
||||||
|
client_id: i32,
|
||||||
|
transaction: TxEip1559,
|
||||||
|
run_kind: RunKind,
|
||||||
|
) -> Result<SpecificMeaning, VetError> {
|
||||||
|
let TxKind::Call(to) = transaction.to else {
|
||||||
|
return Err(VetError::ContractCreationNotSupported);
|
||||||
|
};
|
||||||
|
let context = policies::EvalContext {
|
||||||
|
wallet_id,
|
||||||
|
client_id,
|
||||||
|
chain: transaction.chain_id,
|
||||||
|
to,
|
||||||
|
value: transaction.value,
|
||||||
|
calldata: transaction.input.clone(),
|
||||||
|
max_fee_per_gas: transaction.max_fee_per_gas,
|
||||||
|
max_priority_fee_per_gas: transaction.max_priority_fee_per_gas,
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Some(meaning) = EtherTransfer::analyze(&context) {
|
||||||
|
return match self
|
||||||
|
.vet_transaction::<EtherTransfer>(context, &meaning, run_kind)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(()) => Ok(meaning.into()),
|
||||||
|
Err(e) => Err(VetError::Evaluated(meaning.into(), e)),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
if let Some(meaning) = TokenTransfer::analyze(&context) {
|
||||||
|
return match self
|
||||||
|
.vet_transaction::<TokenTransfer>(context, &meaning, run_kind)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(()) => Ok(meaning.into()),
|
||||||
|
Err(e) => Err(VetError::Evaluated(meaning.into(), e)),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
Err(VetError::UnsupportedTransactionType)
|
||||||
|
}
|
||||||
|
}
|
||||||
209
server/crates/arbiter-server/src/evm/policies.rs
Normal file
209
server/crates/arbiter-server/src/evm/policies.rs
Normal file
@@ -0,0 +1,209 @@
|
|||||||
|
use std::fmt::Display;
|
||||||
|
|
||||||
|
use alloy::primitives::{Address, Bytes, ChainId, U256};
|
||||||
|
use chrono::{DateTime, Duration, Utc};
|
||||||
|
use diesel::{
|
||||||
|
ExpressionMethods as _, QueryDsl, SelectableHelper, result::QueryResult, sqlite::Sqlite,
|
||||||
|
};
|
||||||
|
use diesel_async::{AsyncConnection, RunQueryDsl};
|
||||||
|
use miette::Diagnostic;
|
||||||
|
use thiserror::Error;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
db::models::{self, EvmBasicGrant},
|
||||||
|
evm::utils,
|
||||||
|
};
|
||||||
|
|
||||||
|
pub mod ether_transfer;
|
||||||
|
pub mod token_transfers;
|
||||||
|
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct EvalContext {
|
||||||
|
// Which wallet is this transaction for
|
||||||
|
pub client_id: i32,
|
||||||
|
pub wallet_id: i32,
|
||||||
|
|
||||||
|
// The transaction data
|
||||||
|
pub chain: ChainId,
|
||||||
|
pub to: Address,
|
||||||
|
pub value: U256,
|
||||||
|
pub calldata: Bytes,
|
||||||
|
|
||||||
|
// Gas pricing (EIP-1559)
|
||||||
|
pub max_fee_per_gas: u128,
|
||||||
|
pub max_priority_fee_per_gas: u128,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Debug, Error, Diagnostic)]
|
||||||
|
pub enum EvalViolation {
|
||||||
|
#[error("This grant doesn't allow transactions to the target address {target}")]
|
||||||
|
#[diagnostic(code(arbiter_server::evm::eval_violation::invalid_target))]
|
||||||
|
InvalidTarget { target: Address },
|
||||||
|
|
||||||
|
#[error("Gas limit exceeded for this grant")]
|
||||||
|
#[diagnostic(code(arbiter_server::evm::eval_violation::gas_limit_exceeded))]
|
||||||
|
GasLimitExceeded {
|
||||||
|
max_gas_fee_per_gas: Option<U256>,
|
||||||
|
max_priority_fee_per_gas: Option<U256>,
|
||||||
|
},
|
||||||
|
|
||||||
|
#[error("Rate limit exceeded for this grant")]
|
||||||
|
#[diagnostic(code(arbiter_server::evm::eval_violation::rate_limit_exceeded))]
|
||||||
|
RateLimitExceeded,
|
||||||
|
|
||||||
|
#[error("Transaction exceeds volumetric limits of the grant")]
|
||||||
|
#[diagnostic(code(arbiter_server::evm::eval_violation::volumetric_limit_exceeded))]
|
||||||
|
VolumetricLimitExceeded,
|
||||||
|
|
||||||
|
#[error("Transaction is outside of the grant's validity period")]
|
||||||
|
#[diagnostic(code(arbiter_server::evm::eval_violation::invalid_time))]
|
||||||
|
InvalidTime,
|
||||||
|
|
||||||
|
#[error("Transaction type is not allowed by this grant")]
|
||||||
|
#[diagnostic(code(arbiter_server::evm::eval_violation::invalid_transaction_type))]
|
||||||
|
InvalidTransactionType,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub type DatabaseID = i32;
|
||||||
|
|
||||||
|
pub struct Grant<PolicySettings> {
|
||||||
|
pub id: DatabaseID,
|
||||||
|
pub shared_grant_id: DatabaseID, // ID of the basic grant for shared-logic checks like rate limits and validity periods
|
||||||
|
pub shared: SharedGrantSettings,
|
||||||
|
pub settings: PolicySettings,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
pub trait Policy: Sized {
|
||||||
|
type Settings: Send + Sync + 'static + Into<SpecificGrant>;
|
||||||
|
type Meaning: Display + std::fmt::Debug + Send + Sync + 'static + Into<SpecificMeaning>;
|
||||||
|
|
||||||
|
fn analyze(context: &EvalContext) -> Option<Self::Meaning>;
|
||||||
|
|
||||||
|
// Evaluate whether a transaction with the given meaning complies with the provided grant, and return any violations if not
|
||||||
|
// Empty vector means transaction is compliant with the grant
|
||||||
|
fn evaluate(
|
||||||
|
context: &EvalContext,
|
||||||
|
meaning: &Self::Meaning,
|
||||||
|
grant: &Grant<Self::Settings>,
|
||||||
|
db: &mut impl AsyncConnection<Backend = Sqlite>,
|
||||||
|
) -> impl Future<Output = QueryResult<Vec<EvalViolation>>> + Send;
|
||||||
|
|
||||||
|
// Create a new grant in the database based on the provided grant details, and return its ID
|
||||||
|
fn create_grant(
|
||||||
|
basic: &models::EvmBasicGrant,
|
||||||
|
grant: &Self::Settings,
|
||||||
|
conn: &mut impl AsyncConnection<Backend = Sqlite>,
|
||||||
|
) -> impl std::future::Future<Output = QueryResult<DatabaseID>> + Send;
|
||||||
|
|
||||||
|
// Try to find an existing grant that matches the transaction context, and return its details if found
|
||||||
|
// Additionally, return ID of basic grant for shared-logic checks like rate limits and validity periods
|
||||||
|
fn try_find_grant(
|
||||||
|
context: &EvalContext,
|
||||||
|
conn: &mut impl AsyncConnection<Backend = Sqlite>,
|
||||||
|
) -> impl Future<Output = QueryResult<Option<Grant<Self::Settings>>>> + Send;
|
||||||
|
|
||||||
|
// Return all non-revoked grants, eagerly loading policy-specific settings
|
||||||
|
fn find_all_grants(
|
||||||
|
conn: &mut impl AsyncConnection<Backend = Sqlite>,
|
||||||
|
) -> impl Future<Output = QueryResult<Vec<Grant<Self::Settings>>>> + Send;
|
||||||
|
|
||||||
|
// Records, updates or deletes rate limits
|
||||||
|
// In other words, records grant-specific things after transaction is executed
|
||||||
|
fn record_transaction(
|
||||||
|
context: &EvalContext,
|
||||||
|
meaning: &Self::Meaning,
|
||||||
|
log_id: i32,
|
||||||
|
grant: &Grant<Self::Settings>,
|
||||||
|
conn: &mut impl AsyncConnection<Backend = Sqlite>,
|
||||||
|
) -> impl Future<Output = QueryResult<()>> + Send;
|
||||||
|
}
|
||||||
|
|
||||||
|
pub enum ReceiverTarget {
|
||||||
|
Specific(Vec<Address>), // only allow transfers to these addresses
|
||||||
|
Any, // allow transfers to any address
|
||||||
|
}
|
||||||
|
|
||||||
|
// Classification of what transaction does
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub enum SpecificMeaning {
|
||||||
|
EtherTransfer(ether_transfer::Meaning),
|
||||||
|
TokenTransfer(token_transfers::Meaning),
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
|
||||||
|
pub struct TransactionRateLimit {
|
||||||
|
pub count: u32,
|
||||||
|
pub window: Duration,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
|
||||||
|
pub struct VolumeRateLimit {
|
||||||
|
pub max_volume: U256,
|
||||||
|
pub window: Duration,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
|
||||||
|
pub struct SharedGrantSettings {
|
||||||
|
pub wallet_id: i32,
|
||||||
|
pub chain: ChainId,
|
||||||
|
|
||||||
|
pub valid_from: Option<DateTime<Utc>>,
|
||||||
|
pub valid_until: Option<DateTime<Utc>>,
|
||||||
|
|
||||||
|
pub max_gas_fee_per_gas: Option<U256>,
|
||||||
|
pub max_priority_fee_per_gas: Option<U256>,
|
||||||
|
|
||||||
|
pub rate_limit: Option<TransactionRateLimit>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl SharedGrantSettings {
|
||||||
|
fn try_from_model(model: EvmBasicGrant) -> QueryResult<Self> {
|
||||||
|
Ok(Self {
|
||||||
|
wallet_id: model.wallet_id,
|
||||||
|
chain: model.chain_id as u64, // safe because chain_id is stored as i32 but is guaranteed to be a valid ChainId by the API when creating grants
|
||||||
|
valid_from: model.valid_from.map(Into::into),
|
||||||
|
valid_until: model.valid_until.map(Into::into),
|
||||||
|
max_gas_fee_per_gas: model
|
||||||
|
.max_gas_fee_per_gas
|
||||||
|
.map(|b| utils::try_bytes_to_u256(&b))
|
||||||
|
.transpose()?,
|
||||||
|
max_priority_fee_per_gas: model
|
||||||
|
.max_priority_fee_per_gas
|
||||||
|
.map(|b| utils::try_bytes_to_u256(&b))
|
||||||
|
.transpose()?,
|
||||||
|
rate_limit: match (model.rate_limit_count, model.rate_limit_window_secs) {
|
||||||
|
(Some(count), Some(window_secs)) => Some(TransactionRateLimit {
|
||||||
|
count: count as u32,
|
||||||
|
window: Duration::seconds(window_secs as i64),
|
||||||
|
}),
|
||||||
|
_ => None,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn query_by_id(
|
||||||
|
conn: &mut impl AsyncConnection<Backend = Sqlite>,
|
||||||
|
id: i32,
|
||||||
|
) -> diesel::result::QueryResult<Self> {
|
||||||
|
use crate::db::schema::evm_basic_grant;
|
||||||
|
|
||||||
|
let basic_grant: EvmBasicGrant = evm_basic_grant::table
|
||||||
|
.select(EvmBasicGrant::as_select())
|
||||||
|
.filter(evm_basic_grant::id.eq(id))
|
||||||
|
.first::<EvmBasicGrant>(conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Self::try_from_model(basic_grant)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub enum SpecificGrant {
|
||||||
|
EtherTransfer(ether_transfer::Settings),
|
||||||
|
TokenTransfer(token_transfers::Settings),
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct FullGrant<PolicyGrant> {
|
||||||
|
pub basic: SharedGrantSettings,
|
||||||
|
pub specific: PolicyGrant,
|
||||||
|
}
|
||||||
@@ -0,0 +1,347 @@
|
|||||||
|
use std::collections::HashMap;
|
||||||
|
use std::fmt::Display;
|
||||||
|
|
||||||
|
use alloy::primitives::{Address, U256};
|
||||||
|
use chrono::{DateTime, Duration, Utc};
|
||||||
|
use diesel::dsl::{auto_type, insert_into};
|
||||||
|
use diesel::sqlite::Sqlite;
|
||||||
|
use diesel::{ExpressionMethods, JoinOnDsl, prelude::*};
|
||||||
|
use diesel_async::{AsyncConnection, RunQueryDsl};
|
||||||
|
|
||||||
|
use crate::db::models::{
|
||||||
|
EvmBasicGrant, EvmEtherTransferGrant, EvmEtherTransferGrantTarget, EvmEtherTransferLimit,
|
||||||
|
NewEvmEtherTransferLimit, SqliteTimestamp,
|
||||||
|
};
|
||||||
|
use crate::db::schema::{evm_basic_grant, evm_ether_transfer_limit, evm_transaction_log};
|
||||||
|
use crate::evm::policies::{
|
||||||
|
Grant, SharedGrantSettings, SpecificGrant, SpecificMeaning, VolumeRateLimit,
|
||||||
|
};
|
||||||
|
use crate::{
|
||||||
|
db::{
|
||||||
|
models::{self, NewEvmEtherTransferGrant, NewEvmEtherTransferGrantTarget},
|
||||||
|
schema::{evm_ether_transfer_grant, evm_ether_transfer_grant_target},
|
||||||
|
},
|
||||||
|
evm::{policies::Policy, utils},
|
||||||
|
};
|
||||||
|
|
||||||
|
#[auto_type]
|
||||||
|
fn grant_join() -> _ {
|
||||||
|
evm_ether_transfer_grant::table.inner_join(
|
||||||
|
evm_basic_grant::table.on(evm_ether_transfer_grant::basic_grant_id.eq(evm_basic_grant::id)),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
use super::{DatabaseID, EvalContext, EvalViolation};
|
||||||
|
|
||||||
|
// Plain ether transfer
|
||||||
|
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
|
||||||
|
pub struct Meaning {
|
||||||
|
to: Address,
|
||||||
|
value: U256,
|
||||||
|
}
|
||||||
|
impl Display for Meaning {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
write!(f, "Ether transfer of {} to {}", self.value, self.to)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
impl From<Meaning> for SpecificMeaning {
|
||||||
|
fn from(val: Meaning) -> SpecificMeaning {
|
||||||
|
SpecificMeaning::EtherTransfer(val)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// A grant for ether transfers, which can be scoped to specific target addresses and volume limits
|
||||||
|
pub struct Settings {
|
||||||
|
target: Vec<Address>,
|
||||||
|
limit: VolumeRateLimit,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<Settings> for SpecificGrant {
|
||||||
|
fn from(val: Settings) -> SpecificGrant {
|
||||||
|
SpecificGrant::EtherTransfer(val)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn query_relevant_past_transaction(
|
||||||
|
grant_id: i32,
|
||||||
|
longest_window: Duration,
|
||||||
|
db: &mut impl AsyncConnection<Backend = Sqlite>,
|
||||||
|
) -> QueryResult<Vec<(U256, DateTime<Utc>)>> {
|
||||||
|
let past_transactions: Vec<(Vec<u8>, SqliteTimestamp)> = evm_transaction_log::table
|
||||||
|
.filter(evm_transaction_log::grant_id.eq(grant_id))
|
||||||
|
.filter(
|
||||||
|
evm_transaction_log::signed_at.ge(SqliteTimestamp(chrono::Utc::now() - longest_window)),
|
||||||
|
)
|
||||||
|
.select((
|
||||||
|
evm_transaction_log::eth_value,
|
||||||
|
evm_transaction_log::signed_at,
|
||||||
|
))
|
||||||
|
.load(db)
|
||||||
|
.await?;
|
||||||
|
let past_transaction: Vec<(U256, DateTime<Utc>)> = past_transactions
|
||||||
|
.into_iter()
|
||||||
|
.filter_map(|(value_bytes, timestamp)| {
|
||||||
|
let value = utils::bytes_to_u256(&value_bytes)?;
|
||||||
|
Some((value, timestamp.0))
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
Ok(past_transaction)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn check_rate_limits(
|
||||||
|
grant: &Grant<Settings>,
|
||||||
|
db: &mut impl AsyncConnection<Backend = Sqlite>,
|
||||||
|
) -> QueryResult<Vec<EvalViolation>> {
|
||||||
|
let mut violations = Vec::new();
|
||||||
|
let window = grant.settings.limit.window;
|
||||||
|
|
||||||
|
let past_transaction = query_relevant_past_transaction(grant.id, window, db).await?;
|
||||||
|
|
||||||
|
let window_start = chrono::Utc::now() - grant.settings.limit.window;
|
||||||
|
let cumulative_volume: U256 = past_transaction
|
||||||
|
.iter()
|
||||||
|
.filter(|(_, timestamp)| timestamp >= &window_start)
|
||||||
|
.fold(U256::default(), |acc, (value, _)| acc + *value);
|
||||||
|
|
||||||
|
if cumulative_volume > grant.settings.limit.max_volume {
|
||||||
|
violations.push(EvalViolation::VolumetricLimitExceeded);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(violations)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct EtherTransfer;
|
||||||
|
impl Policy for EtherTransfer {
|
||||||
|
type Settings = Settings;
|
||||||
|
|
||||||
|
type Meaning = Meaning;
|
||||||
|
|
||||||
|
fn analyze(context: &EvalContext) -> Option<Self::Meaning> {
|
||||||
|
if !context.calldata.is_empty() {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
Some(Meaning {
|
||||||
|
to: context.to,
|
||||||
|
value: context.value,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn evaluate(
|
||||||
|
_: &EvalContext,
|
||||||
|
meaning: &Self::Meaning,
|
||||||
|
grant: &Grant<Self::Settings>,
|
||||||
|
db: &mut impl AsyncConnection<Backend = Sqlite>,
|
||||||
|
) -> QueryResult<Vec<EvalViolation>> {
|
||||||
|
let mut violations = Vec::new();
|
||||||
|
|
||||||
|
// Check if the target address is within the grant's allowed targets
|
||||||
|
if !grant.settings.target.contains(&meaning.to) {
|
||||||
|
violations.push(EvalViolation::InvalidTarget { target: meaning.to });
|
||||||
|
}
|
||||||
|
|
||||||
|
let rate_violations = check_rate_limits(grant, db).await?;
|
||||||
|
violations.extend(rate_violations);
|
||||||
|
|
||||||
|
Ok(violations)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn create_grant(
|
||||||
|
basic: &models::EvmBasicGrant,
|
||||||
|
grant: &Self::Settings,
|
||||||
|
conn: &mut impl AsyncConnection<Backend = Sqlite>,
|
||||||
|
) -> diesel::result::QueryResult<DatabaseID> {
|
||||||
|
let limit_id: i32 = insert_into(evm_ether_transfer_limit::table)
|
||||||
|
.values(NewEvmEtherTransferLimit {
|
||||||
|
window_secs: grant.limit.window.num_seconds() as i32,
|
||||||
|
max_volume: utils::u256_to_bytes(grant.limit.max_volume).to_vec(),
|
||||||
|
})
|
||||||
|
.returning(evm_ether_transfer_limit::id)
|
||||||
|
.get_result(conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let grant_id: i32 = insert_into(evm_ether_transfer_grant::table)
|
||||||
|
.values(&NewEvmEtherTransferGrant {
|
||||||
|
basic_grant_id: basic.id,
|
||||||
|
limit_id,
|
||||||
|
})
|
||||||
|
.returning(evm_ether_transfer_grant::id)
|
||||||
|
.get_result(conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
for target in &grant.target {
|
||||||
|
insert_into(evm_ether_transfer_grant_target::table)
|
||||||
|
.values(NewEvmEtherTransferGrantTarget {
|
||||||
|
grant_id,
|
||||||
|
address: target.to_vec(),
|
||||||
|
})
|
||||||
|
.execute(conn)
|
||||||
|
.await?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(grant_id)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn try_find_grant(
|
||||||
|
context: &EvalContext,
|
||||||
|
conn: &mut impl AsyncConnection<Backend = Sqlite>,
|
||||||
|
) -> diesel::result::QueryResult<Option<Grant<Self::Settings>>> {
|
||||||
|
let target_bytes = context.to.to_vec();
|
||||||
|
|
||||||
|
// Find a grant where:
|
||||||
|
// 1. The basic grant's wallet_id and client_id match the context
|
||||||
|
// 2. Any of the grant's targets match the context's `to` address
|
||||||
|
let grant: Option<(EvmBasicGrant, EvmEtherTransferGrant)> = evm_ether_transfer_grant::table
|
||||||
|
.inner_join(evm_basic_grant::table)
|
||||||
|
.inner_join(evm_ether_transfer_grant_target::table)
|
||||||
|
.filter(
|
||||||
|
evm_basic_grant::wallet_id
|
||||||
|
.eq(context.wallet_id)
|
||||||
|
.and(evm_basic_grant::client_id.eq(context.client_id))
|
||||||
|
.and(evm_basic_grant::revoked_at.is_null())
|
||||||
|
.and(evm_ether_transfer_grant_target::address.eq(&target_bytes)),
|
||||||
|
)
|
||||||
|
.select((
|
||||||
|
EvmBasicGrant::as_select(),
|
||||||
|
EvmEtherTransferGrant::as_select(),
|
||||||
|
))
|
||||||
|
.first(conn)
|
||||||
|
.await
|
||||||
|
.optional()?;
|
||||||
|
|
||||||
|
let Some((basic_grant, grant)) = grant else {
|
||||||
|
return Ok(None);
|
||||||
|
};
|
||||||
|
|
||||||
|
let target_bytes: Vec<EvmEtherTransferGrantTarget> = evm_ether_transfer_grant_target::table
|
||||||
|
.select(EvmEtherTransferGrantTarget::as_select())
|
||||||
|
.filter(evm_ether_transfer_grant_target::grant_id.eq(grant.id))
|
||||||
|
.load(conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let limit: EvmEtherTransferLimit = evm_ether_transfer_limit::table
|
||||||
|
.filter(evm_ether_transfer_limit::id.eq(grant.limit_id))
|
||||||
|
.select(EvmEtherTransferLimit::as_select())
|
||||||
|
.first::<EvmEtherTransferLimit>(conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
// Convert bytes back to Address
|
||||||
|
let targets: Vec<Address> = target_bytes
|
||||||
|
.into_iter()
|
||||||
|
.filter_map(|target| {
|
||||||
|
// TODO: Handle invalid addresses more gracefully
|
||||||
|
let arr: [u8; 20] = target.address.try_into().ok()?;
|
||||||
|
Some(Address::from(arr))
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
let settings = Settings {
|
||||||
|
target: targets,
|
||||||
|
limit: VolumeRateLimit {
|
||||||
|
max_volume: utils::try_bytes_to_u256(&limit.max_volume)
|
||||||
|
.map_err(|err| diesel::result::Error::DeserializationError(Box::new(err)))?,
|
||||||
|
window: chrono::Duration::seconds(limit.window_secs as i64),
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(Some(Grant {
|
||||||
|
id: grant.id,
|
||||||
|
shared_grant_id: grant.basic_grant_id,
|
||||||
|
shared: SharedGrantSettings::try_from_model(basic_grant)?,
|
||||||
|
settings,
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn record_transaction(
|
||||||
|
_context: &EvalContext,
|
||||||
|
_: &Self::Meaning,
|
||||||
|
_log_id: i32,
|
||||||
|
_grant: &Grant<Self::Settings>,
|
||||||
|
_conn: &mut impl AsyncConnection<Backend = Sqlite>,
|
||||||
|
) -> diesel::result::QueryResult<()> {
|
||||||
|
// Basic log is sufficient
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn find_all_grants(
|
||||||
|
conn: &mut impl AsyncConnection<Backend = Sqlite>,
|
||||||
|
) -> QueryResult<Vec<Grant<Self::Settings>>> {
|
||||||
|
let grants: Vec<(EvmBasicGrant, EvmEtherTransferGrant)> = grant_join()
|
||||||
|
.filter(evm_basic_grant::revoked_at.is_null())
|
||||||
|
.select((
|
||||||
|
EvmBasicGrant::as_select(),
|
||||||
|
EvmEtherTransferGrant::as_select(),
|
||||||
|
))
|
||||||
|
.load(conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
if grants.is_empty() {
|
||||||
|
return Ok(Vec::new());
|
||||||
|
}
|
||||||
|
|
||||||
|
let grant_ids: Vec<i32> = grants.iter().map(|(_, g)| g.id).collect();
|
||||||
|
let limit_ids: Vec<i32> = grants.iter().map(|(_, g)| g.limit_id).collect();
|
||||||
|
|
||||||
|
let all_targets: Vec<EvmEtherTransferGrantTarget> = evm_ether_transfer_grant_target::table
|
||||||
|
.filter(evm_ether_transfer_grant_target::grant_id.eq_any(&grant_ids))
|
||||||
|
.select(EvmEtherTransferGrantTarget::as_select())
|
||||||
|
.load(conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let all_limits: Vec<EvmEtherTransferLimit> = evm_ether_transfer_limit::table
|
||||||
|
.filter(evm_ether_transfer_limit::id.eq_any(&limit_ids))
|
||||||
|
.select(EvmEtherTransferLimit::as_select())
|
||||||
|
.load(conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let mut targets_by_grant: HashMap<i32, Vec<EvmEtherTransferGrantTarget>> = HashMap::new();
|
||||||
|
for target in all_targets {
|
||||||
|
targets_by_grant
|
||||||
|
.entry(target.grant_id)
|
||||||
|
.or_default()
|
||||||
|
.push(target);
|
||||||
|
}
|
||||||
|
|
||||||
|
let limits_by_id: HashMap<i32, EvmEtherTransferLimit> =
|
||||||
|
all_limits.into_iter().map(|l| (l.id, l)).collect();
|
||||||
|
|
||||||
|
grants
|
||||||
|
.into_iter()
|
||||||
|
.map(|(basic, specific)| {
|
||||||
|
let targets: Vec<Address> = targets_by_grant
|
||||||
|
.get(&specific.id)
|
||||||
|
.map(|v| v.as_slice())
|
||||||
|
.unwrap_or_default()
|
||||||
|
.iter()
|
||||||
|
.filter_map(|t| {
|
||||||
|
let arr: [u8; 20] = t.address.clone().try_into().ok()?;
|
||||||
|
Some(Address::from(arr))
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
let limit = limits_by_id
|
||||||
|
.get(&specific.limit_id)
|
||||||
|
.ok_or(diesel::result::Error::NotFound)?;
|
||||||
|
|
||||||
|
Ok(Grant {
|
||||||
|
id: specific.id,
|
||||||
|
shared_grant_id: specific.basic_grant_id,
|
||||||
|
shared: SharedGrantSettings::try_from_model(basic)?,
|
||||||
|
settings: Settings {
|
||||||
|
target: targets,
|
||||||
|
limit: VolumeRateLimit {
|
||||||
|
max_volume: utils::try_bytes_to_u256(&limit.max_volume).map_err(
|
||||||
|
|e| diesel::result::Error::DeserializationError(Box::new(e)),
|
||||||
|
)?,
|
||||||
|
window: Duration::seconds(limit.window_secs as i64),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
})
|
||||||
|
})
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests;
|
||||||
@@ -0,0 +1,387 @@
|
|||||||
|
use alloy::primitives::{Address, Bytes, U256, address};
|
||||||
|
use chrono::{Duration, Utc};
|
||||||
|
use diesel::{SelectableHelper, insert_into};
|
||||||
|
use diesel_async::RunQueryDsl;
|
||||||
|
|
||||||
|
use crate::db::{
|
||||||
|
self, DatabaseConnection,
|
||||||
|
models::{EvmBasicGrant, NewEvmBasicGrant, NewEvmTransactionLog, SqliteTimestamp},
|
||||||
|
schema::{evm_basic_grant, evm_transaction_log},
|
||||||
|
};
|
||||||
|
use crate::evm::{
|
||||||
|
policies::{
|
||||||
|
EvalContext, EvalViolation, Grant, Policy, SharedGrantSettings, VolumeRateLimit,
|
||||||
|
},
|
||||||
|
utils,
|
||||||
|
};
|
||||||
|
|
||||||
|
use super::{EtherTransfer, Settings};
|
||||||
|
|
||||||
|
const WALLET_ID: i32 = 1;
|
||||||
|
const CLIENT_ID: i32 = 2;
|
||||||
|
const CHAIN_ID: u64 = 1;
|
||||||
|
|
||||||
|
const ALLOWED: Address = address!("1111111111111111111111111111111111111111");
|
||||||
|
const OTHER: Address = address!("2222222222222222222222222222222222222222");
|
||||||
|
|
||||||
|
/// Builds an `EvalContext` for a plain ether transfer from the fixture
/// wallet/client/chain ids: empty calldata, zero gas-fee caps.
fn ctx(to: Address, value: U256) -> EvalContext {
    EvalContext {
        wallet_id: WALLET_ID,
        client_id: CLIENT_ID,
        chain: CHAIN_ID,
        to,
        value,
        calldata: Bytes::new(),
        max_fee_per_gas: 0,
        max_priority_fee_per_gas: 0,
    }
}
|
||||||
|
|
||||||
|
/// Inserts a basic grant row for the fixture wallet/client with no optional
/// restrictions; when `revoked` is true the row is marked revoked as of now.
async fn insert_basic(conn: &mut DatabaseConnection, revoked: bool) -> EvmBasicGrant {
    insert_into(evm_basic_grant::table)
        .values(NewEvmBasicGrant {
            wallet_id: WALLET_ID,
            client_id: CLIENT_ID,
            chain_id: CHAIN_ID as i32,
            valid_from: None,
            valid_until: None,
            max_gas_fee_per_gas: None,
            max_priority_fee_per_gas: None,
            rate_limit_count: None,
            rate_limit_window_secs: None,
            revoked_at: revoked.then(|| SqliteTimestamp(Utc::now())),
        })
        .returning(EvmBasicGrant::as_select())
        .get_result(conn)
        .await
        .unwrap()
}
|
||||||
|
|
||||||
|
/// Ether-transfer settings with the given allow-list and a one-hour rolling
/// volume window capped at `max_volume`.
fn make_settings(targets: Vec<Address>, max_volume: u64) -> Settings {
    Settings {
        target: targets,
        limit: VolumeRateLimit {
            max_volume: U256::from(max_volume),
            window: Duration::hours(1),
        },
    }
}
|
||||||
|
|
||||||
|
/// Shared grant settings for the fixture wallet/chain with every optional
/// restriction (validity window, gas caps, count rate limit) unset.
fn shared() -> SharedGrantSettings {
    SharedGrantSettings {
        wallet_id: WALLET_ID,
        chain: CHAIN_ID,
        valid_from: None,
        valid_until: None,
        max_gas_fee_per_gas: None,
        max_priority_fee_per_gas: None,
        rate_limit: None,
    }
}
|
||||||
|
|
||||||
|
// ── analyze ─────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// Empty calldata is analyzed as an ether transfer carrying `to`/`value`.
#[test]
fn analyze_matches_empty_calldata() {
    let m = EtherTransfer::analyze(&ctx(ALLOWED, U256::from(1_000u64))).unwrap();
    assert_eq!(m.to, ALLOWED);
    assert_eq!(m.value, U256::from(1_000u64));
}
|
||||||
|
|
||||||
|
/// Any non-empty calldata means the transaction is not a plain ether
/// transfer, so `analyze` returns `None`.
#[test]
fn analyze_rejects_nonempty_calldata() {
    let context = EvalContext {
        calldata: Bytes::from(vec![0xde, 0xad, 0xbe, 0xef]),
        ..ctx(ALLOWED, U256::from(1u64))
    };
    assert!(EtherTransfer::analyze(&context).is_none());
}
|
||||||
|
|
||||||
|
// ── evaluate ────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// A transfer to an allow-listed target within the volume limit produces no
/// violations.
#[tokio::test]
async fn evaluate_passes_for_allowed_target() {
    let db = db::create_test_pool().await;
    let mut conn = db.get().await.unwrap();

    // In-memory grant only; no DB rows needed since no logs are queried.
    let grant = Grant {
        id: 999,
        shared_grant_id: 999,
        shared: shared(),
        settings: make_settings(vec![ALLOWED], 1_000_000),
    };
    let context = ctx(ALLOWED, U256::from(100u64));
    let m = EtherTransfer::analyze(&context).unwrap();
    let v = EtherTransfer::evaluate(&context, &m, &grant, &mut *conn)
        .await
        .unwrap();
    assert!(v.is_empty());
}
|
||||||
|
|
||||||
|
/// A transfer to an address outside the grant's allow-list yields an
/// `InvalidTarget` violation.
#[tokio::test]
async fn evaluate_rejects_disallowed_target() {
    let db = db::create_test_pool().await;
    let mut conn = db.get().await.unwrap();

    let grant = Grant {
        id: 999,
        shared_grant_id: 999,
        shared: shared(),
        settings: make_settings(vec![ALLOWED], 1_000_000),
    };
    let context = ctx(OTHER, U256::from(100u64));
    let m = EtherTransfer::analyze(&context).unwrap();
    let v = EtherTransfer::evaluate(&context, &m, &grant, &mut *conn)
        .await
        .unwrap();
    assert!(
        v.iter()
            .any(|e| matches!(e, EvalViolation::InvalidTarget { .. }))
    );
}
|
||||||
|
|
||||||
|
/// Prior volume 500 against a 1_000 cap: a further transfer must not trip
/// the volumetric limit.
#[tokio::test]
async fn evaluate_passes_when_volume_within_limit() {
    let db = db::create_test_pool().await;
    let mut conn = db.get().await.unwrap();

    let basic = insert_basic(&mut conn, false).await;
    let settings = make_settings(vec![ALLOWED], 1_000);
    let grant_id = EtherTransfer::create_grant(&basic, &settings, &mut *conn)
        .await
        .unwrap();

    // Seed a past transfer of 500 inside the window.
    insert_into(evm_transaction_log::table)
        .values(NewEvmTransactionLog {
            grant_id,
            client_id: CLIENT_ID,
            wallet_id: WALLET_ID,
            chain_id: CHAIN_ID as i32,
            eth_value: utils::u256_to_bytes(U256::from(500u64)).to_vec(),
            signed_at: SqliteTimestamp(Utc::now()),
        })
        .execute(&mut *conn)
        .await
        .unwrap();

    let grant = Grant {
        id: grant_id,
        shared_grant_id: basic.id,
        shared: shared(),
        settings,
    };
    let context = ctx(ALLOWED, U256::from(100u64));
    let m = EtherTransfer::analyze(&context).unwrap();
    let v = EtherTransfer::evaluate(&context, &m, &grant, &mut *conn)
        .await
        .unwrap();
    assert!(
        !v.iter()
            .any(|e| matches!(e, EvalViolation::VolumetricLimitExceeded))
    );
}
|
||||||
|
|
||||||
|
/// Prior volume 1_001 against a 1_000 cap: the volumetric limit violation
/// must be reported.
#[tokio::test]
async fn evaluate_rejects_volume_over_limit() {
    let db = db::create_test_pool().await;
    let mut conn = db.get().await.unwrap();

    let basic = insert_basic(&mut conn, false).await;
    let settings = make_settings(vec![ALLOWED], 1_000);
    let grant_id = EtherTransfer::create_grant(&basic, &settings, &mut *conn)
        .await
        .unwrap();

    // Seed a past transfer of 1_001 — already over the cap on its own.
    insert_into(evm_transaction_log::table)
        .values(NewEvmTransactionLog {
            grant_id,
            client_id: CLIENT_ID,
            wallet_id: WALLET_ID,
            chain_id: CHAIN_ID as i32,
            eth_value: utils::u256_to_bytes(U256::from(1_001u64)).to_vec(),
            signed_at: SqliteTimestamp(Utc::now()),
        })
        .execute(&mut *conn)
        .await
        .unwrap();

    let grant = Grant {
        id: grant_id,
        shared_grant_id: basic.id,
        shared: shared(),
        settings,
    };
    let context = ctx(ALLOWED, U256::from(100u64));
    let m = EtherTransfer::analyze(&context).unwrap();
    let v = EtherTransfer::evaluate(&context, &m, &grant, &mut *conn)
        .await
        .unwrap();
    assert!(
        v.iter()
            .any(|e| matches!(e, EvalViolation::VolumetricLimitExceeded))
    );
}
|
||||||
|
|
||||||
|
/// Boundary case: prior volume exactly at the cap does not violate, because
/// the limit comparison is strict (`>`).
#[tokio::test]
async fn evaluate_passes_at_exactly_volume_limit() {
    let db = db::create_test_pool().await;
    let mut conn = db.get().await.unwrap();

    let basic = insert_basic(&mut conn, false).await;
    let settings = make_settings(vec![ALLOWED], 1_000);
    let grant_id = EtherTransfer::create_grant(&basic, &settings, &mut *conn)
        .await
        .unwrap();

    // Exactly at the limit — the check is `>`, so this should not violate
    insert_into(evm_transaction_log::table)
        .values(NewEvmTransactionLog {
            grant_id,
            client_id: CLIENT_ID,
            wallet_id: WALLET_ID,
            chain_id: CHAIN_ID as i32,
            eth_value: utils::u256_to_bytes(U256::from(1_000u64)).to_vec(),
            signed_at: SqliteTimestamp(Utc::now()),
        })
        .execute(&mut *conn)
        .await
        .unwrap();

    let grant = Grant {
        id: grant_id,
        shared_grant_id: basic.id,
        shared: shared(),
        settings,
    };
    let context = ctx(ALLOWED, U256::from(100u64));
    let m = EtherTransfer::analyze(&context).unwrap();
    let v = EtherTransfer::evaluate(&context, &m, &grant, &mut *conn)
        .await
        .unwrap();
    assert!(
        !v.iter()
            .any(|e| matches!(e, EvalViolation::VolumetricLimitExceeded))
    );
}
|
||||||
|
|
||||||
|
// ── try_find_grant ───────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// A grant created via `create_grant` is found again by `try_find_grant`
/// with its targets and volume limit rehydrated intact.
#[tokio::test]
async fn try_find_grant_roundtrip() {
    let db = db::create_test_pool().await;
    let mut conn = db.get().await.unwrap();

    let basic = insert_basic(&mut conn, false).await;
    let settings = make_settings(vec![ALLOWED], 1_000_000);
    EtherTransfer::create_grant(&basic, &settings, &mut *conn)
        .await
        .unwrap();

    let found = EtherTransfer::try_find_grant(&ctx(ALLOWED, U256::from(1u64)), &mut *conn)
        .await
        .unwrap();

    assert!(found.is_some());
    let g = found.unwrap();
    assert_eq!(g.settings.target, vec![ALLOWED]);
    assert_eq!(g.settings.limit.max_volume, U256::from(1_000_000u64));
}
|
||||||
|
|
||||||
|
/// A grant whose basic grant is revoked must not be returned by
/// `try_find_grant`.
#[tokio::test]
async fn try_find_grant_revoked_returns_none() {
    let db = db::create_test_pool().await;
    let mut conn = db.get().await.unwrap();

    let basic = insert_basic(&mut conn, true).await;
    let settings = make_settings(vec![ALLOWED], 1_000_000);
    EtherTransfer::create_grant(&basic, &settings, &mut *conn)
        .await
        .unwrap();

    let found = EtherTransfer::try_find_grant(&ctx(ALLOWED, U256::from(1u64)), &mut *conn)
        .await
        .unwrap();
    assert!(found.is_none());
}
|
||||||
|
|
||||||
|
/// A destination address outside the grant's allow-list must not match any
/// grant.
#[tokio::test]
async fn try_find_grant_wrong_target_returns_none() {
    let db = db::create_test_pool().await;
    let mut conn = db.get().await.unwrap();

    let basic = insert_basic(&mut conn, false).await;
    let settings = make_settings(vec![ALLOWED], 1_000_000);
    EtherTransfer::create_grant(&basic, &settings, &mut *conn)
        .await
        .unwrap();

    let found = EtherTransfer::try_find_grant(&ctx(OTHER, U256::from(1u64)), &mut *conn)
        .await
        .unwrap();
    assert!(found.is_none());
}
|
||||||
|
|
||||||
|
// ── find_all_grants ──────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
/// With no grant rows at all, `find_all_grants` returns an empty vector.
#[tokio::test]
async fn find_all_grants_empty_db() {
    let db = db::create_test_pool().await;
    let mut conn = db.get().await.unwrap();
    let all = EtherTransfer::find_all_grants(&mut *conn).await.unwrap();
    assert!(all.is_empty());
}
|
||||||
|
|
||||||
|
/// Of one active and one revoked grant, only the active one is listed.
#[tokio::test]
async fn find_all_grants_excludes_revoked() {
    let db = db::create_test_pool().await;
    let mut conn = db.get().await.unwrap();

    let settings = make_settings(vec![ALLOWED], 1_000_000);
    let active = insert_basic(&mut conn, false).await;
    EtherTransfer::create_grant(&active, &settings, &mut *conn)
        .await
        .unwrap();
    let revoked = insert_basic(&mut conn, true).await;
    EtherTransfer::create_grant(&revoked, &settings, &mut *conn)
        .await
        .unwrap();

    let all = EtherTransfer::find_all_grants(&mut *conn).await.unwrap();
    assert_eq!(all.len(), 1);
    assert_eq!(all[0].settings.target, vec![ALLOWED]);
}
|
||||||
|
|
||||||
|
/// A single grant with two allow-listed targets comes back as one grant
/// carrying both targets.
#[tokio::test]
async fn find_all_grants_multiple_targets() {
    let db = db::create_test_pool().await;
    let mut conn = db.get().await.unwrap();

    let basic = insert_basic(&mut conn, false).await;
    let settings = make_settings(vec![ALLOWED, OTHER], 1_000_000);
    EtherTransfer::create_grant(&basic, &settings, &mut *conn)
        .await
        .unwrap();

    let all = EtherTransfer::find_all_grants(&mut *conn).await.unwrap();
    assert_eq!(all.len(), 1);
    assert_eq!(all[0].settings.target.len(), 2);
    assert_eq!(all[0].settings.limit.max_volume, U256::from(1_000_000u64));
}
|
||||||
|
|
||||||
|
/// Two independent active grants are both listed.
#[tokio::test]
async fn find_all_grants_multiple_grants() {
    let db = db::create_test_pool().await;
    let mut conn = db.get().await.unwrap();

    let basic1 = insert_basic(&mut conn, false).await;
    EtherTransfer::create_grant(&basic1, &make_settings(vec![ALLOWED], 500), &mut *conn)
        .await
        .unwrap();
    let basic2 = insert_basic(&mut conn, false).await;
    EtherTransfer::create_grant(&basic2, &make_settings(vec![OTHER], 1_000), &mut *conn)
        .await
        .unwrap();

    let all = EtherTransfer::find_all_grants(&mut *conn).await.unwrap();
    assert_eq!(all.len(), 2);
}
|
||||||
@@ -0,0 +1,385 @@
|
|||||||
|
use std::collections::HashMap;
|
||||||
|
|
||||||
|
use alloy::{
|
||||||
|
primitives::{Address, U256},
|
||||||
|
sol_types::SolCall,
|
||||||
|
};
|
||||||
|
use arbiter_tokens_registry::evm::nonfungible::{self, TokenInfo};
|
||||||
|
use chrono::{DateTime, Duration, Utc};
|
||||||
|
use diesel::dsl::{auto_type, insert_into};
|
||||||
|
use diesel::sqlite::Sqlite;
|
||||||
|
use diesel::{ExpressionMethods, prelude::*};
|
||||||
|
use diesel_async::{AsyncConnection, RunQueryDsl};
|
||||||
|
|
||||||
|
use crate::db::models::{
|
||||||
|
EvmBasicGrant, EvmTokenTransferGrant, EvmTokenTransferVolumeLimit, NewEvmTokenTransferGrant,
|
||||||
|
NewEvmTokenTransferLog, NewEvmTokenTransferVolumeLimit, SqliteTimestamp,
|
||||||
|
};
|
||||||
|
use crate::db::schema::{
|
||||||
|
evm_basic_grant, evm_token_transfer_grant, evm_token_transfer_log,
|
||||||
|
evm_token_transfer_volume_limit,
|
||||||
|
};
|
||||||
|
use crate::evm::{
|
||||||
|
abi::IERC20::transferCall,
|
||||||
|
policies::{
|
||||||
|
Grant, Policy, SharedGrantSettings, SpecificGrant, SpecificMeaning, VolumeRateLimit,
|
||||||
|
},
|
||||||
|
utils,
|
||||||
|
};
|
||||||
|
|
||||||
|
use super::{DatabaseID, EvalContext, EvalViolation};
|
||||||
|
|
||||||
|
/// Joined query source: token-transfer grants paired with their owning
/// basic grant row.
#[auto_type]
fn grant_join() -> _ {
    evm_token_transfer_grant::table.inner_join(
        evm_basic_grant::table.on(evm_token_transfer_grant::basic_grant_id.eq(evm_basic_grant::id)),
    )
}
|
||||||
|
|
||||||
|
/// Decoded meaning of an ERC-20 `transfer` call: which registered token is
/// being moved, the recipient, and the amount.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct Meaning {
    /// Static metadata for the token contract, resolved from the registry.
    token: &'static TokenInfo,
    /// Recipient decoded from the calldata (not the transaction's `to`).
    to: Address,
    /// Transfer amount as decoded from the `transfer` call.
    value: U256,
}
|
||||||
|
impl std::fmt::Display for Meaning {
    /// Human-readable summary, e.g. "Transfer of <value> <symbol> to <addr>".
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "Transfer of {} {} to {}",
            self.value, self.token.symbol, self.to
        )
    }
}
|
||||||
|
impl From<Meaning> for SpecificMeaning {
    // Wraps this policy's meaning into the policy-wide enum.
    fn from(val: Meaning) -> SpecificMeaning {
        SpecificMeaning::TokenTransfer(val)
    }
}
|
||||||
|
|
||||||
|
// A grant for token transfers, which can be scoped to specific target addresses and volume limits
pub struct Settings {
    /// Address of the token contract this grant applies to.
    token_contract: Address,
    /// Allowed recipient; `None` means any recipient is permitted.
    target: Option<Address>,
    /// Rolling-window volume caps; exceeding any one is a violation.
    volume_limits: Vec<VolumeRateLimit>,
}
|
||||||
|
impl From<Settings> for SpecificGrant {
    // Wraps this policy's settings into the policy-wide enum.
    fn from(val: Settings) -> SpecificGrant {
        SpecificGrant::TokenTransfer(val)
    }
}
|
||||||
|
|
||||||
|
async fn query_relevant_past_transfers(
|
||||||
|
grant_id: i32,
|
||||||
|
longest_window: Duration,
|
||||||
|
db: &mut impl AsyncConnection<Backend = Sqlite>,
|
||||||
|
) -> QueryResult<Vec<(U256, DateTime<Utc>)>> {
|
||||||
|
let past_logs: Vec<(Vec<u8>, SqliteTimestamp)> = evm_token_transfer_log::table
|
||||||
|
.filter(evm_token_transfer_log::grant_id.eq(grant_id))
|
||||||
|
.filter(
|
||||||
|
evm_token_transfer_log::created_at
|
||||||
|
.ge(SqliteTimestamp(chrono::Utc::now() - longest_window)),
|
||||||
|
)
|
||||||
|
.select((
|
||||||
|
evm_token_transfer_log::value,
|
||||||
|
evm_token_transfer_log::created_at,
|
||||||
|
))
|
||||||
|
.load(db)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let past_transfers: Vec<(U256, DateTime<Utc>)> = past_logs
|
||||||
|
.into_iter()
|
||||||
|
.filter_map(|(value_bytes, timestamp)| {
|
||||||
|
let value = utils::bytes_to_u256(&value_bytes)?;
|
||||||
|
Some((value, timestamp.0))
|
||||||
|
})
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
Ok(past_transfers)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Checks the grant's cumulative past transfer volume against each
/// configured rolling-window limit, returning at most one
/// `VolumetricLimitExceeded` violation.
async fn check_volume_rate_limits(
    grant: &Grant<Settings>,
    db: &mut impl AsyncConnection<Backend = Sqlite>,
) -> QueryResult<Vec<EvalViolation>> {
    let mut violations = Vec::new();

    // No limits configured: nothing to check.
    let Some(longest_window) = grant.settings.volume_limits.iter().map(|l| l.window).max() else {
        return Ok(violations);
    };

    // Fetch once for the widest window; narrower limits filter in memory.
    let past_transfers = query_relevant_past_transfers(grant.id, longest_window, db).await?;

    for limit in &grant.settings.volume_limits {
        let window_start = chrono::Utc::now() - limit.window;
        let cumulative_volume: U256 = past_transfers
            .iter()
            .filter(|(_, timestamp)| timestamp >= &window_start)
            .fold(U256::default(), |acc, (value, _)| acc + *value);

        if cumulative_volume > limit.max_volume {
            violations.push(EvalViolation::VolumetricLimitExceeded);
            // One violation of this kind is enough; stop early.
            break;
        }
    }

    Ok(violations)
}
|
||||||
|
|
||||||
|
/// Policy for ERC-20 `transfer` calls against registry-known token contracts.
pub struct TokenTransfer;
|
||||||
|
impl Policy for TokenTransfer {
|
||||||
|
type Settings = Settings;
|
||||||
|
type Meaning = Meaning;
|
||||||
|
|
||||||
|
fn analyze(context: &EvalContext) -> Option<Self::Meaning> {
|
||||||
|
let token = nonfungible::get_token(context.chain, context.to)?;
|
||||||
|
let decoded = transferCall::abi_decode_raw_validate(&context.calldata).ok()?;
|
||||||
|
|
||||||
|
Some(Meaning {
|
||||||
|
token,
|
||||||
|
to: decoded.to,
|
||||||
|
value: decoded.value,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn evaluate(
|
||||||
|
context: &EvalContext,
|
||||||
|
meaning: &Self::Meaning,
|
||||||
|
grant: &Grant<Self::Settings>,
|
||||||
|
db: &mut impl AsyncConnection<Backend = Sqlite>,
|
||||||
|
) -> QueryResult<Vec<EvalViolation>> {
|
||||||
|
let mut violations = Vec::new();
|
||||||
|
|
||||||
|
// erc20 transfer shouldn't carry eth value
|
||||||
|
if !context.value.is_zero() {
|
||||||
|
violations.push(EvalViolation::InvalidTransactionType);
|
||||||
|
return Ok(violations);
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(allowed) = grant.settings.target
|
||||||
|
&& allowed != meaning.to
|
||||||
|
{
|
||||||
|
violations.push(EvalViolation::InvalidTarget { target: meaning.to });
|
||||||
|
}
|
||||||
|
|
||||||
|
let rate_violations = check_volume_rate_limits(grant, db).await?;
|
||||||
|
violations.extend(rate_violations);
|
||||||
|
|
||||||
|
Ok(violations)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn create_grant(
|
||||||
|
basic: &EvmBasicGrant,
|
||||||
|
grant: &Self::Settings,
|
||||||
|
conn: &mut impl AsyncConnection<Backend = Sqlite>,
|
||||||
|
) -> QueryResult<DatabaseID> {
|
||||||
|
// Store the specific receiver as bytes (None means any receiver is allowed)
|
||||||
|
let receiver: Option<Vec<u8>> = grant.target.map(|addr| addr.to_vec());
|
||||||
|
|
||||||
|
let grant_id: i32 = insert_into(evm_token_transfer_grant::table)
|
||||||
|
.values(NewEvmTokenTransferGrant {
|
||||||
|
basic_grant_id: basic.id,
|
||||||
|
token_contract: grant.token_contract.to_vec(),
|
||||||
|
receiver,
|
||||||
|
})
|
||||||
|
.returning(evm_token_transfer_grant::id)
|
||||||
|
.get_result(conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
for limit in &grant.volume_limits {
|
||||||
|
insert_into(evm_token_transfer_volume_limit::table)
|
||||||
|
.values(NewEvmTokenTransferVolumeLimit {
|
||||||
|
grant_id,
|
||||||
|
window_secs: limit.window.num_seconds() as i32,
|
||||||
|
max_volume: utils::u256_to_bytes(limit.max_volume).to_vec(),
|
||||||
|
})
|
||||||
|
.execute(conn)
|
||||||
|
.await?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(grant_id)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn try_find_grant(
|
||||||
|
context: &EvalContext,
|
||||||
|
conn: &mut impl AsyncConnection<Backend = Sqlite>,
|
||||||
|
) -> QueryResult<Option<Grant<Self::Settings>>> {
|
||||||
|
let token_contract_bytes = context.to.to_vec();
|
||||||
|
|
||||||
|
let grant: Option<(EvmBasicGrant, EvmTokenTransferGrant)> = grant_join()
|
||||||
|
.filter(evm_basic_grant::revoked_at.is_null())
|
||||||
|
.filter(evm_basic_grant::wallet_id.eq(context.wallet_id))
|
||||||
|
.filter(evm_basic_grant::client_id.eq(context.client_id))
|
||||||
|
.filter(evm_token_transfer_grant::token_contract.eq(&token_contract_bytes))
|
||||||
|
.select((
|
||||||
|
EvmBasicGrant::as_select(),
|
||||||
|
EvmTokenTransferGrant::as_select(),
|
||||||
|
))
|
||||||
|
.first(conn)
|
||||||
|
.await
|
||||||
|
.optional()?;
|
||||||
|
|
||||||
|
let Some((basic_grant, token_grant)) = grant else {
|
||||||
|
return Ok(None);
|
||||||
|
};
|
||||||
|
|
||||||
|
let volume_limits_db: Vec<EvmTokenTransferVolumeLimit> =
|
||||||
|
evm_token_transfer_volume_limit::table
|
||||||
|
.filter(evm_token_transfer_volume_limit::grant_id.eq(token_grant.id))
|
||||||
|
.select(EvmTokenTransferVolumeLimit::as_select())
|
||||||
|
.load(conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let volume_limits: Vec<VolumeRateLimit> = volume_limits_db
|
||||||
|
.into_iter()
|
||||||
|
.map(|row| {
|
||||||
|
Ok(VolumeRateLimit {
|
||||||
|
max_volume: utils::try_bytes_to_u256(&row.max_volume).map_err(|err| {
|
||||||
|
diesel::result::Error::DeserializationError(Box::new(err))
|
||||||
|
})?,
|
||||||
|
window: Duration::seconds(row.window_secs as i64),
|
||||||
|
})
|
||||||
|
})
|
||||||
|
.collect::<QueryResult<Vec<_>>>()?;
|
||||||
|
|
||||||
|
let token_contract: [u8; 20] = token_grant.token_contract.try_into().map_err(|_| {
|
||||||
|
diesel::result::Error::DeserializationError(
|
||||||
|
"Invalid token contract address length".into(),
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let target: Option<Address> = match token_grant.receiver {
|
||||||
|
None => None,
|
||||||
|
Some(bytes) => {
|
||||||
|
let arr: [u8; 20] = bytes.try_into().map_err(|_| {
|
||||||
|
diesel::result::Error::DeserializationError(
|
||||||
|
"Invalid receiver address length".into(),
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
Some(Address::from(arr))
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let settings = Settings {
|
||||||
|
token_contract: Address::from(token_contract),
|
||||||
|
target,
|
||||||
|
volume_limits,
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(Some(Grant {
|
||||||
|
id: token_grant.id,
|
||||||
|
shared_grant_id: token_grant.basic_grant_id,
|
||||||
|
shared: SharedGrantSettings::try_from_model(basic_grant)?,
|
||||||
|
settings,
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn record_transaction(
|
||||||
|
context: &EvalContext,
|
||||||
|
meaning: &Self::Meaning,
|
||||||
|
log_id: i32,
|
||||||
|
grant: &Grant<Self::Settings>,
|
||||||
|
conn: &mut impl AsyncConnection<Backend = Sqlite>,
|
||||||
|
) -> QueryResult<()> {
|
||||||
|
insert_into(evm_token_transfer_log::table)
|
||||||
|
.values(NewEvmTokenTransferLog {
|
||||||
|
grant_id: grant.id,
|
||||||
|
log_id,
|
||||||
|
chain_id: context.chain as i32,
|
||||||
|
token_contract: context.to.to_vec(),
|
||||||
|
recipient_address: meaning.to.to_vec(),
|
||||||
|
value: utils::u256_to_bytes(meaning.value).to_vec(),
|
||||||
|
})
|
||||||
|
.execute(conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn find_all_grants(
|
||||||
|
conn: &mut impl AsyncConnection<Backend = Sqlite>,
|
||||||
|
) -> QueryResult<Vec<Grant<Self::Settings>>> {
|
||||||
|
let grants: Vec<(EvmBasicGrant, EvmTokenTransferGrant)> = grant_join()
|
||||||
|
.filter(evm_basic_grant::revoked_at.is_null())
|
||||||
|
.select((
|
||||||
|
EvmBasicGrant::as_select(),
|
||||||
|
EvmTokenTransferGrant::as_select(),
|
||||||
|
))
|
||||||
|
.load(conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
if grants.is_empty() {
|
||||||
|
return Ok(Vec::new());
|
||||||
|
}
|
||||||
|
|
||||||
|
let grant_ids: Vec<i32> = grants.iter().map(|(_, g)| g.id).collect();
|
||||||
|
|
||||||
|
let all_volume_limits: Vec<EvmTokenTransferVolumeLimit> =
|
||||||
|
evm_token_transfer_volume_limit::table
|
||||||
|
.filter(evm_token_transfer_volume_limit::grant_id.eq_any(&grant_ids))
|
||||||
|
.select(EvmTokenTransferVolumeLimit::as_select())
|
||||||
|
.load(conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let mut limits_by_grant: HashMap<i32, Vec<EvmTokenTransferVolumeLimit>> = HashMap::new();
|
||||||
|
for limit in all_volume_limits {
|
||||||
|
limits_by_grant
|
||||||
|
.entry(limit.grant_id)
|
||||||
|
.or_default()
|
||||||
|
.push(limit);
|
||||||
|
}
|
||||||
|
|
||||||
|
grants
|
||||||
|
.into_iter()
|
||||||
|
.map(|(basic, specific)| {
|
||||||
|
let volume_limits: Vec<VolumeRateLimit> = limits_by_grant
|
||||||
|
.get(&specific.id)
|
||||||
|
.map(|v| v.as_slice())
|
||||||
|
.unwrap_or_default()
|
||||||
|
.iter()
|
||||||
|
.map(|row| {
|
||||||
|
Ok(VolumeRateLimit {
|
||||||
|
max_volume: utils::try_bytes_to_u256(&row.max_volume).map_err(|e| {
|
||||||
|
diesel::result::Error::DeserializationError(Box::new(e))
|
||||||
|
})?,
|
||||||
|
window: Duration::seconds(row.window_secs as i64),
|
||||||
|
})
|
||||||
|
})
|
||||||
|
.collect::<QueryResult<Vec<_>>>()?;
|
||||||
|
|
||||||
|
let token_contract: [u8; 20] =
|
||||||
|
specific.token_contract.clone().try_into().map_err(|_| {
|
||||||
|
diesel::result::Error::DeserializationError(
|
||||||
|
"Invalid token contract address length".into(),
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let target: Option<Address> = match &specific.receiver {
|
||||||
|
None => None,
|
||||||
|
Some(bytes) => {
|
||||||
|
let arr: [u8; 20] = bytes.clone().try_into().map_err(|_| {
|
||||||
|
diesel::result::Error::DeserializationError(
|
||||||
|
"Invalid receiver address length".into(),
|
||||||
|
)
|
||||||
|
})?;
|
||||||
|
Some(Address::from(arr))
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(Grant {
|
||||||
|
id: specific.id,
|
||||||
|
shared_grant_id: specific.basic_grant_id,
|
||||||
|
shared: SharedGrantSettings::try_from_model(basic)?,
|
||||||
|
settings: Settings {
|
||||||
|
token_contract: Address::from(token_contract),
|
||||||
|
target,
|
||||||
|
volume_limits,
|
||||||
|
},
|
||||||
|
})
|
||||||
|
})
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests;
|
||||||
@@ -0,0 +1,397 @@
|
|||||||
|
use alloy::primitives::{Address, Bytes, U256, address};
|
||||||
|
use alloy::sol_types::SolCall;
|
||||||
|
use chrono::{Duration, Utc};
|
||||||
|
use diesel::{SelectableHelper, insert_into};
|
||||||
|
use diesel_async::RunQueryDsl;
|
||||||
|
|
||||||
|
use crate::db::{
|
||||||
|
self, DatabaseConnection,
|
||||||
|
models::{EvmBasicGrant, NewEvmBasicGrant, SqliteTimestamp},
|
||||||
|
schema::evm_basic_grant,
|
||||||
|
};
|
||||||
|
use crate::evm::{
|
||||||
|
abi::IERC20::transferCall,
|
||||||
|
policies::{EvalContext, EvalViolation, Grant, Policy, SharedGrantSettings, VolumeRateLimit},
|
||||||
|
utils,
|
||||||
|
};
|
||||||
|
|
||||||
|
use super::{Settings, TokenTransfer};
|
||||||
|
|
||||||
|
// DAI on Ethereum mainnet — present in the static token registry
|
||||||
|
const CHAIN_ID: u64 = 1;
|
||||||
|
const DAI: Address = address!("6B175474E89094C44Da98b954EedeAC495271d0F");
|
||||||
|
|
||||||
|
const WALLET_ID: i32 = 1;
|
||||||
|
const CLIENT_ID: i32 = 2;
|
||||||
|
|
||||||
|
const RECIPIENT: Address = address!("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa");
|
||||||
|
const OTHER: Address = address!("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb");
|
||||||
|
const UNKNOWN_TOKEN: Address = address!("cccccccccccccccccccccccccccccccccccccccc");
|
||||||
|
|
||||||
|
/// Encode `transfer(to, value)` raw params (no 4-byte selector).
|
||||||
|
/// `abi_decode_raw_validate` expects exactly this format.
|
||||||
|
fn transfer_calldata(to: Address, value: U256) -> Bytes {
|
||||||
|
let mut raw = Vec::new();
|
||||||
|
transferCall { to, value }.abi_encode_raw(&mut raw);
|
||||||
|
Bytes::from(raw)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn ctx(to: Address, calldata: Bytes) -> EvalContext {
|
||||||
|
EvalContext {
|
||||||
|
wallet_id: WALLET_ID,
|
||||||
|
client_id: CLIENT_ID,
|
||||||
|
chain: CHAIN_ID,
|
||||||
|
to,
|
||||||
|
value: U256::ZERO,
|
||||||
|
calldata,
|
||||||
|
max_fee_per_gas: 0,
|
||||||
|
max_priority_fee_per_gas: 0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn insert_basic(conn: &mut DatabaseConnection, revoked: bool) -> EvmBasicGrant {
|
||||||
|
insert_into(evm_basic_grant::table)
|
||||||
|
.values(NewEvmBasicGrant {
|
||||||
|
wallet_id: WALLET_ID,
|
||||||
|
client_id: CLIENT_ID,
|
||||||
|
chain_id: CHAIN_ID as i32,
|
||||||
|
valid_from: None,
|
||||||
|
valid_until: None,
|
||||||
|
max_gas_fee_per_gas: None,
|
||||||
|
max_priority_fee_per_gas: None,
|
||||||
|
rate_limit_count: None,
|
||||||
|
rate_limit_window_secs: None,
|
||||||
|
revoked_at: revoked.then(|| SqliteTimestamp(Utc::now())),
|
||||||
|
})
|
||||||
|
.returning(EvmBasicGrant::as_select())
|
||||||
|
.get_result(conn)
|
||||||
|
.await
|
||||||
|
.unwrap()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn make_settings(target: Option<Address>, max_volume: Option<u64>) -> Settings {
|
||||||
|
Settings {
|
||||||
|
token_contract: DAI,
|
||||||
|
target,
|
||||||
|
volume_limits: max_volume
|
||||||
|
.map(|v| {
|
||||||
|
vec![VolumeRateLimit {
|
||||||
|
max_volume: U256::from(v),
|
||||||
|
window: Duration::hours(1),
|
||||||
|
}]
|
||||||
|
})
|
||||||
|
.unwrap_or_default(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn shared() -> SharedGrantSettings {
|
||||||
|
SharedGrantSettings {
|
||||||
|
wallet_id: WALLET_ID,
|
||||||
|
chain: CHAIN_ID,
|
||||||
|
valid_from: None,
|
||||||
|
valid_until: None,
|
||||||
|
max_gas_fee_per_gas: None,
|
||||||
|
max_priority_fee_per_gas: None,
|
||||||
|
rate_limit: None,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── analyze ─────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn analyze_known_token_valid_calldata() {
|
||||||
|
let calldata = transfer_calldata(RECIPIENT, U256::from(100u64));
|
||||||
|
let m = TokenTransfer::analyze(&ctx(DAI, calldata)).unwrap();
|
||||||
|
assert_eq!(m.to, RECIPIENT);
|
||||||
|
assert_eq!(m.value, U256::from(100u64));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn analyze_unknown_token_returns_none() {
|
||||||
|
let calldata = transfer_calldata(RECIPIENT, U256::from(100u64));
|
||||||
|
assert!(TokenTransfer::analyze(&ctx(UNKNOWN_TOKEN, calldata)).is_none());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn analyze_invalid_calldata_returns_none() {
|
||||||
|
let calldata = Bytes::from(vec![0xde, 0xad, 0xbe, 0xef]);
|
||||||
|
assert!(TokenTransfer::analyze(&ctx(DAI, calldata)).is_none());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn analyze_empty_calldata_returns_none() {
|
||||||
|
assert!(TokenTransfer::analyze(&ctx(DAI, Bytes::new())).is_none());
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── evaluate ────────────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn evaluate_rejects_nonzero_eth_value() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
|
||||||
|
let grant = Grant {
|
||||||
|
id: 999,
|
||||||
|
shared_grant_id: 999,
|
||||||
|
shared: shared(),
|
||||||
|
settings: make_settings(None, None),
|
||||||
|
};
|
||||||
|
let calldata = transfer_calldata(RECIPIENT, U256::from(100u64));
|
||||||
|
let mut context = ctx(DAI, calldata);
|
||||||
|
context.value = U256::from(1u64); // ETH attached to an ERC-20 call
|
||||||
|
|
||||||
|
let m = TokenTransfer::analyze(&EvalContext { value: U256::ZERO, ..context.clone() })
|
||||||
|
.unwrap();
|
||||||
|
let v = TokenTransfer::evaluate(&context, &m, &grant, &mut *conn).await.unwrap();
|
||||||
|
assert!(v.iter().any(|e| matches!(e, EvalViolation::InvalidTransactionType)));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn evaluate_passes_any_recipient_when_no_restriction() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
|
||||||
|
let grant = Grant {
|
||||||
|
id: 999,
|
||||||
|
shared_grant_id: 999,
|
||||||
|
shared: shared(),
|
||||||
|
settings: make_settings(None, None),
|
||||||
|
};
|
||||||
|
let calldata = transfer_calldata(RECIPIENT, U256::from(100u64));
|
||||||
|
let context = ctx(DAI, calldata);
|
||||||
|
let m = TokenTransfer::analyze(&context).unwrap();
|
||||||
|
let v = TokenTransfer::evaluate(&context, &m, &grant, &mut *conn).await.unwrap();
|
||||||
|
assert!(v.is_empty());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn evaluate_passes_matching_restricted_recipient() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
|
||||||
|
let grant = Grant {
|
||||||
|
id: 999,
|
||||||
|
shared_grant_id: 999,
|
||||||
|
shared: shared(),
|
||||||
|
settings: make_settings(Some(RECIPIENT), None),
|
||||||
|
};
|
||||||
|
let calldata = transfer_calldata(RECIPIENT, U256::from(100u64));
|
||||||
|
let context = ctx(DAI, calldata);
|
||||||
|
let m = TokenTransfer::analyze(&context).unwrap();
|
||||||
|
let v = TokenTransfer::evaluate(&context, &m, &grant, &mut *conn).await.unwrap();
|
||||||
|
assert!(v.is_empty());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn evaluate_rejects_wrong_restricted_recipient() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
|
||||||
|
let grant = Grant {
|
||||||
|
id: 999,
|
||||||
|
shared_grant_id: 999,
|
||||||
|
shared: shared(),
|
||||||
|
settings: make_settings(Some(RECIPIENT), None),
|
||||||
|
};
|
||||||
|
let calldata = transfer_calldata(OTHER, U256::from(100u64));
|
||||||
|
let context = ctx(DAI, calldata);
|
||||||
|
let m = TokenTransfer::analyze(&context).unwrap();
|
||||||
|
let v = TokenTransfer::evaluate(&context, &m, &grant, &mut *conn).await.unwrap();
|
||||||
|
assert!(v.iter().any(|e| matches!(e, EvalViolation::InvalidTarget { .. })));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn evaluate_passes_volume_within_limit() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
|
||||||
|
let basic = insert_basic(&mut conn, false).await;
|
||||||
|
let settings = make_settings(None, Some(1_000));
|
||||||
|
let grant_id = TokenTransfer::create_grant(&basic, &settings, &mut *conn).await.unwrap();
|
||||||
|
|
||||||
|
// Record a past transfer of 500 (within 1000 limit)
|
||||||
|
use crate::db::{models::NewEvmTokenTransferLog, schema::evm_token_transfer_log};
|
||||||
|
insert_into(evm_token_transfer_log::table)
|
||||||
|
.values(NewEvmTokenTransferLog {
|
||||||
|
grant_id,
|
||||||
|
log_id: 0,
|
||||||
|
chain_id: CHAIN_ID as i32,
|
||||||
|
token_contract: DAI.to_vec(),
|
||||||
|
recipient_address: RECIPIENT.to_vec(),
|
||||||
|
value: utils::u256_to_bytes(U256::from(500u64)).to_vec(),
|
||||||
|
})
|
||||||
|
.execute(&mut *conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let grant = Grant { id: grant_id, shared_grant_id: basic.id, shared: shared(), settings };
|
||||||
|
let calldata = transfer_calldata(RECIPIENT, U256::from(100u64));
|
||||||
|
let context = ctx(DAI, calldata);
|
||||||
|
let m = TokenTransfer::analyze(&context).unwrap();
|
||||||
|
let v = TokenTransfer::evaluate(&context, &m, &grant, &mut *conn).await.unwrap();
|
||||||
|
assert!(!v.iter().any(|e| matches!(e, EvalViolation::VolumetricLimitExceeded)));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn evaluate_rejects_volume_over_limit() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
|
||||||
|
let basic = insert_basic(&mut conn, false).await;
|
||||||
|
let settings = make_settings(None, Some(1_000));
|
||||||
|
let grant_id = TokenTransfer::create_grant(&basic, &settings, &mut *conn).await.unwrap();
|
||||||
|
|
||||||
|
use crate::db::{models::NewEvmTokenTransferLog, schema::evm_token_transfer_log};
|
||||||
|
insert_into(evm_token_transfer_log::table)
|
||||||
|
.values(NewEvmTokenTransferLog {
|
||||||
|
grant_id,
|
||||||
|
log_id: 0,
|
||||||
|
chain_id: CHAIN_ID as i32,
|
||||||
|
token_contract: DAI.to_vec(),
|
||||||
|
recipient_address: RECIPIENT.to_vec(),
|
||||||
|
value: utils::u256_to_bytes(U256::from(1_001u64)).to_vec(),
|
||||||
|
})
|
||||||
|
.execute(&mut *conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let grant = Grant { id: grant_id, shared_grant_id: basic.id, shared: shared(), settings };
|
||||||
|
let calldata = transfer_calldata(RECIPIENT, U256::from(100u64));
|
||||||
|
let context = ctx(DAI, calldata);
|
||||||
|
let m = TokenTransfer::analyze(&context).unwrap();
|
||||||
|
let v = TokenTransfer::evaluate(&context, &m, &grant, &mut *conn).await.unwrap();
|
||||||
|
assert!(v.iter().any(|e| matches!(e, EvalViolation::VolumetricLimitExceeded)));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn evaluate_no_volume_limits_always_passes() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
|
||||||
|
let grant = Grant {
|
||||||
|
id: 999,
|
||||||
|
shared_grant_id: 999,
|
||||||
|
shared: shared(),
|
||||||
|
settings: make_settings(None, None), // no volume limits
|
||||||
|
};
|
||||||
|
let calldata = transfer_calldata(RECIPIENT, U256::from(u64::MAX));
|
||||||
|
let context = ctx(DAI, calldata);
|
||||||
|
let m = TokenTransfer::analyze(&context).unwrap();
|
||||||
|
let v = TokenTransfer::evaluate(&context, &m, &grant, &mut *conn).await.unwrap();
|
||||||
|
assert!(!v.iter().any(|e| matches!(e, EvalViolation::VolumetricLimitExceeded)));
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── try_find_grant ───────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn try_find_grant_roundtrip() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
|
||||||
|
let basic = insert_basic(&mut conn, false).await;
|
||||||
|
let settings = make_settings(Some(RECIPIENT), Some(5_000));
|
||||||
|
TokenTransfer::create_grant(&basic, &settings, &mut *conn).await.unwrap();
|
||||||
|
|
||||||
|
let calldata = transfer_calldata(RECIPIENT, U256::from(100u64));
|
||||||
|
let found = TokenTransfer::try_find_grant(&ctx(DAI, calldata), &mut *conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
assert!(found.is_some());
|
||||||
|
let g = found.unwrap();
|
||||||
|
assert_eq!(g.settings.token_contract, DAI);
|
||||||
|
assert_eq!(g.settings.target, Some(RECIPIENT));
|
||||||
|
assert_eq!(g.settings.volume_limits.len(), 1);
|
||||||
|
assert_eq!(g.settings.volume_limits[0].max_volume, U256::from(5_000u64));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn try_find_grant_revoked_returns_none() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
|
||||||
|
let basic = insert_basic(&mut conn, true).await;
|
||||||
|
let settings = make_settings(None, None);
|
||||||
|
TokenTransfer::create_grant(&basic, &settings, &mut *conn).await.unwrap();
|
||||||
|
|
||||||
|
let calldata = transfer_calldata(RECIPIENT, U256::from(1u64));
|
||||||
|
let found = TokenTransfer::try_find_grant(&ctx(DAI, calldata), &mut *conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert!(found.is_none());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn try_find_grant_unknown_token_returns_none() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
|
||||||
|
let basic = insert_basic(&mut conn, false).await;
|
||||||
|
let settings = make_settings(None, None);
|
||||||
|
TokenTransfer::create_grant(&basic, &settings, &mut *conn).await.unwrap();
|
||||||
|
|
||||||
|
// Query with a different token contract
|
||||||
|
let calldata = transfer_calldata(RECIPIENT, U256::from(1u64));
|
||||||
|
let found = TokenTransfer::try_find_grant(&ctx(UNKNOWN_TOKEN, calldata), &mut *conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert!(found.is_none());
|
||||||
|
}
|
||||||
|
|
||||||
|
// ── find_all_grants ──────────────────────────────────────────────────────
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn find_all_grants_empty_db() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
let all = TokenTransfer::find_all_grants(&mut *conn).await.unwrap();
|
||||||
|
assert!(all.is_empty());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn find_all_grants_excludes_revoked() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
|
||||||
|
let settings = make_settings(None, Some(1_000));
|
||||||
|
let active = insert_basic(&mut conn, false).await;
|
||||||
|
TokenTransfer::create_grant(&active, &settings, &mut *conn).await.unwrap();
|
||||||
|
let revoked = insert_basic(&mut conn, true).await;
|
||||||
|
TokenTransfer::create_grant(&revoked, &settings, &mut *conn).await.unwrap();
|
||||||
|
|
||||||
|
let all = TokenTransfer::find_all_grants(&mut *conn).await.unwrap();
|
||||||
|
assert_eq!(all.len(), 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn find_all_grants_loads_volume_limits() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
|
||||||
|
let basic = insert_basic(&mut conn, false).await;
|
||||||
|
let settings = make_settings(None, Some(9_999));
|
||||||
|
TokenTransfer::create_grant(&basic, &settings, &mut *conn).await.unwrap();
|
||||||
|
|
||||||
|
let all = TokenTransfer::find_all_grants(&mut *conn).await.unwrap();
|
||||||
|
assert_eq!(all.len(), 1);
|
||||||
|
assert_eq!(all[0].settings.volume_limits.len(), 1);
|
||||||
|
assert_eq!(all[0].settings.volume_limits[0].max_volume, U256::from(9_999u64));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn find_all_grants_multiple_grants_batch_loaded() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
|
||||||
|
let b1 = insert_basic(&mut conn, false).await;
|
||||||
|
TokenTransfer::create_grant(&b1, &make_settings(None, Some(1_000)), &mut *conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
let b2 = insert_basic(&mut conn, false).await;
|
||||||
|
TokenTransfer::create_grant(&b2, &make_settings(Some(RECIPIENT), Some(2_000)), &mut *conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let all = TokenTransfer::find_all_grants(&mut *conn).await.unwrap();
|
||||||
|
assert_eq!(all.len(), 2);
|
||||||
|
}
|
||||||
196
server/crates/arbiter-server/src/evm/safe_signer.rs
Normal file
196
server/crates/arbiter-server/src/evm/safe_signer.rs
Normal file
@@ -0,0 +1,196 @@
|
|||||||
|
use std::sync::Mutex;
|
||||||
|
|
||||||
|
use alloy::{
|
||||||
|
consensus::SignableTransaction,
|
||||||
|
network::{TxSigner, TxSignerSync},
|
||||||
|
primitives::{Address, ChainId, Signature, B256},
|
||||||
|
signers::{Error, Result, Signer, SignerSync, utils::secret_key_to_address},
|
||||||
|
};
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use k256::ecdsa::{self, signature::hazmat::PrehashSigner, RecoveryId, SigningKey};
|
||||||
|
use memsafe::MemSafe;
|
||||||
|
|
||||||
|
/// An Ethereum signer that stores its secp256k1 secret key inside a
|
||||||
|
/// hardware-protected [`MemSafe`] cell.
|
||||||
|
///
|
||||||
|
/// The underlying memory page is kept non-readable/non-writable at rest.
|
||||||
|
/// Access is temporarily elevated only for the duration of each signing
|
||||||
|
/// operation, then immediately revoked.
|
||||||
|
///
|
||||||
|
/// Because [`MemSafe::read`] requires `&mut self` while the [`Signer`] trait
|
||||||
|
/// requires `&self`, the cell is wrapped in a [`Mutex`].
|
||||||
|
pub struct SafeSigner {
|
||||||
|
key: Mutex<MemSafe<SigningKey>>,
|
||||||
|
address: Address,
|
||||||
|
chain_id: Option<ChainId>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl std::fmt::Debug for SafeSigner {
|
||||||
|
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||||
|
f.debug_struct("SafeSigner")
|
||||||
|
.field("address", &self.address)
|
||||||
|
.field("chain_id", &self.chain_id)
|
||||||
|
.finish()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Generates a secp256k1 secret key directly inside a [`MemSafe`] cell.
|
||||||
|
///
|
||||||
|
/// Random bytes are written in-place into protected memory, then validated
|
||||||
|
/// as a legal scalar on the secp256k1 curve (the scalar must be in
|
||||||
|
/// `[1, n)` where `n` is the curve order — roughly 1-in-2^128 chance of
|
||||||
|
/// rejection, but we retry to be correct).
|
||||||
|
///
|
||||||
|
/// Returns the protected key bytes and the derived Ethereum address.
|
||||||
|
pub fn generate(rng: &mut impl rand::Rng) -> (MemSafe<[u8; 32]>, Address) {
|
||||||
|
loop {
|
||||||
|
let mut cell = MemSafe::new([0u8; 32]).expect("MemSafe allocation");
|
||||||
|
{
|
||||||
|
let mut w = cell.write().expect("MemSafe write");
|
||||||
|
rng.fill_bytes(w.as_mut());
|
||||||
|
}
|
||||||
|
let reader = cell.read().expect("MemSafe read");
|
||||||
|
if let Ok(sk) = SigningKey::from_slice(reader.as_ref()) {
|
||||||
|
let address = secret_key_to_address(&sk);
|
||||||
|
drop(reader);
|
||||||
|
return (cell, address);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl SafeSigner {
|
||||||
|
/// Reconstructs a `SafeSigner` from key material held in a [`MemSafe`] buffer.
|
||||||
|
///
|
||||||
|
/// The key bytes are read from protected memory, parsed as a secp256k1
|
||||||
|
/// scalar, and immediately moved into a new [`MemSafe`] cell. The raw
|
||||||
|
/// bytes are never exposed outside this function.
|
||||||
|
pub fn from_memsafe(mut cell: MemSafe<Vec<u8>>) -> Result<Self> {
|
||||||
|
let reader = cell.read().map_err(Error::other)?;
|
||||||
|
let sk = SigningKey::from_slice(reader.as_slice()).map_err(Error::other)?;
|
||||||
|
drop(reader);
|
||||||
|
Self::new(sk)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Creates a new `SafeSigner` by moving the signing key into a protected
|
||||||
|
/// memory region.
|
||||||
|
pub fn new(key: SigningKey) -> Result<Self> {
|
||||||
|
let address = secret_key_to_address(&key);
|
||||||
|
let cell = MemSafe::new(key).map_err(Error::other)?;
|
||||||
|
Ok(Self {
|
||||||
|
key: Mutex::new(cell),
|
||||||
|
address,
|
||||||
|
chain_id: None,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn sign_hash_inner(&self, hash: &B256) -> Result<Signature> {
|
||||||
|
let mut cell = self.key.lock().expect("SafeSigner mutex poisoned");
|
||||||
|
let reader = cell.read().map_err(Error::other)?;
|
||||||
|
let sig: (ecdsa::Signature, RecoveryId) = reader.sign_prehash(hash.as_ref())?;
|
||||||
|
Ok(sig.into())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn sign_tx_inner(
|
||||||
|
&self,
|
||||||
|
tx: &mut dyn SignableTransaction<Signature>,
|
||||||
|
) -> Result<Signature> {
|
||||||
|
if let Some(chain_id) = self.chain_id
|
||||||
|
&& !tx.set_chain_id_checked(chain_id)
|
||||||
|
{
|
||||||
|
return Err(Error::TransactionChainIdMismatch {
|
||||||
|
signer: chain_id,
|
||||||
|
tx: tx.chain_id().unwrap(),
|
||||||
|
});
|
||||||
|
}
|
||||||
|
self.sign_hash_inner(&tx.signature_hash()).map_err(Error::other)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl Signer for SafeSigner {
|
||||||
|
#[inline]
|
||||||
|
async fn sign_hash(&self, hash: &B256) -> Result<Signature> {
|
||||||
|
self.sign_hash_inner(hash)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[inline]
|
||||||
|
fn address(&self) -> Address {
|
||||||
|
self.address
|
||||||
|
}
|
||||||
|
|
||||||
|
#[inline]
|
||||||
|
fn chain_id(&self) -> Option<ChainId> {
|
||||||
|
self.chain_id
|
||||||
|
}
|
||||||
|
|
||||||
|
#[inline]
|
||||||
|
fn set_chain_id(&mut self, chain_id: Option<ChainId>) {
|
||||||
|
self.chain_id = chain_id;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl SignerSync for SafeSigner {
|
||||||
|
#[inline]
|
||||||
|
fn sign_hash_sync(&self, hash: &B256) -> Result<Signature> {
|
||||||
|
self.sign_hash_inner(hash)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[inline]
|
||||||
|
fn chain_id_sync(&self) -> Option<ChainId> {
|
||||||
|
self.chain_id
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl TxSigner<Signature> for SafeSigner {
|
||||||
|
fn address(&self) -> Address {
|
||||||
|
self.address
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn sign_transaction(
|
||||||
|
&self,
|
||||||
|
tx: &mut dyn SignableTransaction<Signature>,
|
||||||
|
) -> Result<Signature> {
|
||||||
|
self.sign_tx_inner(tx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TxSignerSync<Signature> for SafeSigner {
|
||||||
|
fn address(&self) -> Address {
|
||||||
|
self.address
|
||||||
|
}
|
||||||
|
|
||||||
|
fn sign_transaction_sync(
|
||||||
|
&self,
|
||||||
|
tx: &mut dyn SignableTransaction<Signature>,
|
||||||
|
) -> Result<Signature> {
|
||||||
|
self.sign_tx_inner(tx)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
use alloy::signers::local::PrivateKeySigner;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn sign_and_recover() {
|
||||||
|
let pk = PrivateKeySigner::random();
|
||||||
|
let key = pk.into_credential();
|
||||||
|
let signer = SafeSigner::new(key).unwrap();
|
||||||
|
let message = b"hello arbiter";
|
||||||
|
let sig = signer.sign_message_sync(message).unwrap();
|
||||||
|
let recovered = sig.recover_address_from_msg(message).unwrap();
|
||||||
|
assert_eq!(recovered, Signer::address(&signer));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn chain_id_roundtrip() {
|
||||||
|
let pk = PrivateKeySigner::random();
|
||||||
|
let key = pk.into_credential();
|
||||||
|
let mut signer = SafeSigner::new(key).unwrap();
|
||||||
|
assert_eq!(Signer::chain_id(&signer), None);
|
||||||
|
signer.set_chain_id(Some(1337));
|
||||||
|
assert_eq!(Signer::chain_id(&signer), Some(1337));
|
||||||
|
}
|
||||||
|
}
|
||||||
26
server/crates/arbiter-server/src/evm/utils.rs
Normal file
26
server/crates/arbiter-server/src/evm/utils.rs
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
use alloy::primitives::U256;
|
||||||
|
|
||||||
|
#[derive(thiserror::Error, Debug)]
|
||||||
|
#[error("Expected {expected} bytes but got {actual} bytes")]
|
||||||
|
pub struct LengthError {
|
||||||
|
pub expected: usize,
|
||||||
|
pub actual: usize,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn u256_to_bytes(value: U256) -> [u8; 32] {
|
||||||
|
value.to_le_bytes()
|
||||||
|
}
|
||||||
|
pub fn bytes_to_u256(bytes: &[u8]) -> Option<U256> {
|
||||||
|
let bytes: [u8; 32] = bytes.try_into().ok()?;
|
||||||
|
Some(U256::from_le_bytes(bytes))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn try_bytes_to_u256(bytes: &[u8]) -> diesel::result::QueryResult<U256> {
|
||||||
|
let bytes: [u8; 32] = bytes.try_into().map_err(|_| {
|
||||||
|
diesel::result::Error::DeserializationError(Box::new(LengthError {
|
||||||
|
expected: 32,
|
||||||
|
actual: bytes.len(),
|
||||||
|
}))
|
||||||
|
})?;
|
||||||
|
Ok(U256::from_le_bytes(bytes))
|
||||||
|
}
|
||||||
@@ -1,62 +1,201 @@
|
|||||||
#![allow(unused)]
|
#![forbid(unsafe_code)]
|
||||||
|
|
||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use arbiter_proto::{
|
use arbiter_proto::{
|
||||||
proto::{ClientRequest, ClientResponse, UserAgentRequest, UserAgentResponse},
|
proto::{
|
||||||
transport::BiStream,
|
client::{ClientRequest, ClientResponse},
|
||||||
|
user_agent::{UserAgentRequest, UserAgentResponse},
|
||||||
|
},
|
||||||
|
transport::{IdentityRecvConverter, SendConverter, grpc},
|
||||||
};
|
};
|
||||||
use async_trait::async_trait;
|
use async_trait::async_trait;
|
||||||
use tokio_stream::wrappers::ReceiverStream;
|
use tokio_stream::wrappers::ReceiverStream;
|
||||||
|
|
||||||
use tokio::sync::mpsc;
|
use tokio::sync::mpsc;
|
||||||
use tonic::{Request, Response, Status};
|
use tonic::{Request, Response, Status};
|
||||||
|
use tracing::info;
|
||||||
|
|
||||||
use crate::{
|
use crate::{
|
||||||
actors::{client::handle_client, user_agent::handle_user_agent},
|
actors::{
|
||||||
|
client::{self, ClientConnection as ClientConnectionProps, ClientError, connect_client},
|
||||||
|
user_agent::{self, TransportResponseError, UserAgentConnection, connect_user_agent},
|
||||||
|
},
|
||||||
context::ServerContext,
|
context::ServerContext,
|
||||||
};
|
};
|
||||||
|
|
||||||
pub mod actors;
|
pub mod actors;
|
||||||
mod context;
|
pub mod context;
|
||||||
mod db;
|
pub mod db;
|
||||||
mod errors;
|
pub mod evm;
|
||||||
|
|
||||||
const DEFAULT_CHANNEL_SIZE: usize = 1000;
|
const DEFAULT_CHANNEL_SIZE: usize = 1000;
|
||||||
|
|
||||||
|
struct UserAgentGrpcSender;
|
||||||
|
|
||||||
|
impl SendConverter for UserAgentGrpcSender {
|
||||||
|
type Input = Result<UserAgentResponse, TransportResponseError>;
|
||||||
|
type Output = Result<UserAgentResponse, Status>;
|
||||||
|
|
||||||
|
fn convert(&self, item: Self::Input) -> Self::Output {
|
||||||
|
match item {
|
||||||
|
Ok(message) => Ok(message),
|
||||||
|
Err(err) => Err(user_agent_error_status(err)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
struct ClientGrpcSender;
|
||||||
|
|
||||||
|
impl SendConverter for ClientGrpcSender {
|
||||||
|
type Input = Result<ClientResponse, ClientError>;
|
||||||
|
type Output = Result<ClientResponse, Status>;
|
||||||
|
|
||||||
|
fn convert(&self, item: Self::Input) -> Self::Output {
|
||||||
|
match item {
|
||||||
|
Ok(message) => Ok(message),
|
||||||
|
Err(err) => Err(client_error_status(err)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn client_error_status(value: ClientError) -> Status {
|
||||||
|
match value {
|
||||||
|
ClientError::MissingRequestPayload | ClientError::UnexpectedRequestPayload => {
|
||||||
|
Status::invalid_argument("Expected message with payload")
|
||||||
|
}
|
||||||
|
ClientError::StateTransitionFailed => Status::internal("State machine error"),
|
||||||
|
ClientError::Auth(ref err) => client_auth_error_status(err),
|
||||||
|
ClientError::ConnectionRegistrationFailed => {
|
||||||
|
Status::internal("Connection registration failed")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn client_auth_error_status(value: &client::auth::Error) -> Status {
|
||||||
|
use client::auth::Error;
|
||||||
|
match value {
|
||||||
|
Error::UnexpectedMessagePayload | Error::InvalidClientPubkeyLength => {
|
||||||
|
Status::invalid_argument(value.to_string())
|
||||||
|
}
|
||||||
|
Error::InvalidAuthPubkeyEncoding => {
|
||||||
|
Status::invalid_argument("Failed to convert pubkey to VerifyingKey")
|
||||||
|
}
|
||||||
|
Error::InvalidChallengeSolution => Status::unauthenticated(value.to_string()),
|
||||||
|
Error::NotRegistered => Status::permission_denied(value.to_string()),
|
||||||
|
Error::Transport => Status::internal("Transport error"),
|
||||||
|
Error::DatabasePoolUnavailable => Status::internal("Database pool error"),
|
||||||
|
Error::DatabaseOperationFailed => Status::internal("Database error"),
|
||||||
|
Error::InternalError => Status::internal("Internal error"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn user_agent_error_status(value: TransportResponseError) -> Status {
|
||||||
|
match value {
|
||||||
|
TransportResponseError::MissingRequestPayload
|
||||||
|
| TransportResponseError::UnexpectedRequestPayload => {
|
||||||
|
Status::invalid_argument("Expected message with payload")
|
||||||
|
}
|
||||||
|
TransportResponseError::InvalidStateForUnsealEncryptedKey => {
|
||||||
|
Status::failed_precondition("Invalid state for unseal encrypted key")
|
||||||
|
}
|
||||||
|
TransportResponseError::InvalidClientPubkeyLength => {
|
||||||
|
Status::invalid_argument("client_pubkey must be 32 bytes")
|
||||||
|
}
|
||||||
|
TransportResponseError::StateTransitionFailed => Status::internal("State machine error"),
|
||||||
|
TransportResponseError::KeyHolderActorUnreachable => {
|
||||||
|
Status::internal("Vault is not available")
|
||||||
|
}
|
||||||
|
TransportResponseError::Auth(ref err) => auth_error_status(err),
|
||||||
|
TransportResponseError::ConnectionRegistrationFailed => {
|
||||||
|
Status::internal("Failed registering connection")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn auth_error_status(value: &user_agent::auth::Error) -> Status {
|
||||||
|
use user_agent::auth::Error;
|
||||||
|
match value {
|
||||||
|
Error::UnexpectedMessagePayload | Error::InvalidClientPubkeyLength => {
|
||||||
|
Status::invalid_argument(value.to_string())
|
||||||
|
}
|
||||||
|
Error::InvalidAuthPubkeyEncoding => {
|
||||||
|
Status::invalid_argument("Failed to convert pubkey to VerifyingKey")
|
||||||
|
}
|
||||||
|
Error::PublicKeyNotRegistered | Error::InvalidChallengeSolution => {
|
||||||
|
Status::unauthenticated(value.to_string())
|
||||||
|
}
|
||||||
|
Error::InvalidBootstrapToken => Status::invalid_argument("Invalid bootstrap token"),
|
||||||
|
Error::Transport => Status::internal("Transport error"),
|
||||||
|
Error::BootstrapperActorUnreachable => {
|
||||||
|
Status::internal("Bootstrap token consumption failed")
|
||||||
|
}
|
||||||
|
Error::DatabasePoolUnavailable => Status::internal("Database pool error"),
|
||||||
|
Error::DatabaseOperationFailed => Status::internal("Database error"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
pub struct Server {
|
pub struct Server {
|
||||||
context: ServerContext,
|
context: ServerContext,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl Server {
|
||||||
|
pub fn new(context: ServerContext) -> Self {
|
||||||
|
Self { context }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl arbiter_proto::proto::arbiter_service_server::ArbiterService for Server {
|
impl arbiter_proto::proto::arbiter_service_server::ArbiterService for Server {
|
||||||
type UserAgentStream = ReceiverStream<Result<UserAgentResponse, Status>>;
|
type UserAgentStream = ReceiverStream<Result<UserAgentResponse, Status>>;
|
||||||
type ClientStream = ReceiverStream<Result<ClientResponse, Status>>;
|
type ClientStream = ReceiverStream<Result<ClientResponse, Status>>;
|
||||||
|
|
||||||
|
#[tracing::instrument(level = "debug", skip(self))]
|
||||||
async fn client(
|
async fn client(
|
||||||
&self,
|
&self,
|
||||||
request: Request<tonic::Streaming<ClientRequest>>,
|
request: Request<tonic::Streaming<ClientRequest>>,
|
||||||
) -> Result<Response<Self::ClientStream>, Status> {
|
) -> Result<Response<Self::ClientStream>, Status> {
|
||||||
let req_stream = request.into_inner();
|
let req_stream = request.into_inner();
|
||||||
let (tx, rx) = mpsc::channel(DEFAULT_CHANNEL_SIZE);
|
let (tx, rx) = mpsc::channel(DEFAULT_CHANNEL_SIZE);
|
||||||
tokio::spawn(handle_client(
|
|
||||||
self.context.clone(),
|
let transport = grpc::GrpcAdapter::new(
|
||||||
BiStream {
|
tx,
|
||||||
request_stream: req_stream,
|
req_stream,
|
||||||
response_sender: tx,
|
IdentityRecvConverter::<ClientRequest>::new(),
|
||||||
},
|
ClientGrpcSender,
|
||||||
));
|
);
|
||||||
|
let props = ClientConnectionProps::new(
|
||||||
|
self.context.db.clone(),
|
||||||
|
Box::new(transport),
|
||||||
|
self.context.actors.clone(),
|
||||||
|
);
|
||||||
|
tokio::spawn(connect_client(props));
|
||||||
|
|
||||||
|
info!(event = "connection established", "grpc.client");
|
||||||
|
|
||||||
Ok(Response::new(ReceiverStream::new(rx)))
|
Ok(Response::new(ReceiverStream::new(rx)))
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[tracing::instrument(level = "debug", skip(self))]
|
||||||
async fn user_agent(
|
async fn user_agent(
|
||||||
&self,
|
&self,
|
||||||
request: Request<tonic::Streaming<UserAgentRequest>>,
|
request: Request<tonic::Streaming<UserAgentRequest>>,
|
||||||
) -> Result<Response<Self::UserAgentStream>, Status> {
|
) -> Result<Response<Self::UserAgentStream>, Status> {
|
||||||
let req_stream = request.into_inner();
|
let req_stream = request.into_inner();
|
||||||
let (tx, rx) = mpsc::channel(DEFAULT_CHANNEL_SIZE);
|
let (tx, rx) = mpsc::channel(DEFAULT_CHANNEL_SIZE);
|
||||||
tokio::spawn(handle_user_agent(self.context.clone(), req_stream, tx));
|
|
||||||
|
let transport = grpc::GrpcAdapter::new(
|
||||||
|
tx,
|
||||||
|
req_stream,
|
||||||
|
IdentityRecvConverter::<UserAgentRequest>::new(),
|
||||||
|
UserAgentGrpcSender,
|
||||||
|
);
|
||||||
|
let props = UserAgentConnection::new(
|
||||||
|
self.context.db.clone(),
|
||||||
|
self.context.actors.clone(),
|
||||||
|
Box::new(transport),
|
||||||
|
);
|
||||||
|
tokio::spawn(connect_user_agent(props));
|
||||||
|
|
||||||
|
info!(event = "connection established", "grpc.user_agent");
|
||||||
|
|
||||||
Ok(Response::new(ReceiverStream::new(rx)))
|
Ok(Response::new(ReceiverStream::new(rx)))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
53
server/crates/arbiter-server/src/main.rs
Normal file
53
server/crates/arbiter-server/src/main.rs
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
use std::net::SocketAddr;
|
||||||
|
|
||||||
|
use arbiter_proto::{proto::arbiter_service_server::ArbiterServiceServer, url::ArbiterUrl};
|
||||||
|
use arbiter_server::{Server, actors::bootstrap::GetToken, context::ServerContext, db};
|
||||||
|
use miette::miette;
|
||||||
|
use tonic::transport::{Identity, ServerTlsConfig};
|
||||||
|
use tracing::info;
|
||||||
|
|
||||||
|
const PORT: u16 = 50051;
|
||||||
|
|
||||||
|
#[tokio::main]
|
||||||
|
async fn main() -> miette::Result<()> {
|
||||||
|
tracing_subscriber::fmt()
|
||||||
|
.with_env_filter(
|
||||||
|
tracing_subscriber::EnvFilter::try_from_default_env()
|
||||||
|
.unwrap_or_else(|_| tracing_subscriber::EnvFilter::new("info")),
|
||||||
|
)
|
||||||
|
.init();
|
||||||
|
|
||||||
|
info!("Starting arbiter server");
|
||||||
|
|
||||||
|
let db = db::create_pool(None).await?;
|
||||||
|
info!("Database ready");
|
||||||
|
|
||||||
|
let context = ServerContext::new(db).await?;
|
||||||
|
|
||||||
|
let addr: SocketAddr = format!("127.0.0.1:{PORT}").parse().expect("valid address");
|
||||||
|
info!(%addr, "Starting gRPC server");
|
||||||
|
|
||||||
|
let url = ArbiterUrl {
|
||||||
|
host: addr.ip().to_string(),
|
||||||
|
port: addr.port(),
|
||||||
|
ca_cert: context.tls.ca_cert().clone().into_owned(),
|
||||||
|
bootstrap_token: context.actors.bootstrapper.ask(GetToken).await.unwrap(),
|
||||||
|
};
|
||||||
|
|
||||||
|
info!(%url, "Server URL");
|
||||||
|
|
||||||
|
let tls = ServerTlsConfig::new().identity(Identity::from_pem(
|
||||||
|
context.tls.cert_pem(),
|
||||||
|
context.tls.key_pem(),
|
||||||
|
));
|
||||||
|
|
||||||
|
tonic::transport::Server::builder()
|
||||||
|
.tls_config(tls)
|
||||||
|
.map_err(|err| miette!("Faild to setup TLS: {err}"))?
|
||||||
|
.add_service(ArbiterServiceServer::new(Server::new(context)))
|
||||||
|
.serve(addr)
|
||||||
|
.await
|
||||||
|
.map_err(|e| miette::miette!("gRPC server error: {e}"))?;
|
||||||
|
|
||||||
|
unreachable!("gRPC server should run indefinitely");
|
||||||
|
}
|
||||||
4
server/crates/arbiter-server/tests/client.rs
Normal file
4
server/crates/arbiter-server/tests/client.rs
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
mod common;
|
||||||
|
|
||||||
|
#[path = "client/auth.rs"]
|
||||||
|
mod auth;
|
||||||
222
server/crates/arbiter-server/tests/client/auth.rs
Normal file
222
server/crates/arbiter-server/tests/client/auth.rs
Normal file
@@ -0,0 +1,222 @@
|
|||||||
|
use alloy::{
|
||||||
|
consensus::TxEip1559,
|
||||||
|
primitives::{Address, Bytes, TxKind, U256},
|
||||||
|
rlp::Encodable,
|
||||||
|
};
|
||||||
|
use arbiter_proto::proto::{
|
||||||
|
client::{
|
||||||
|
AuthChallengeRequest, AuthChallengeSolution, ClientRequest,
|
||||||
|
client_request::Payload as ClientRequestPayload,
|
||||||
|
client_response::Payload as ClientResponsePayload,
|
||||||
|
},
|
||||||
|
evm::EvmSignTransactionRequest,
|
||||||
|
};
|
||||||
|
use arbiter_proto::transport::Bi;
|
||||||
|
use arbiter_server::actors::GlobalActors;
|
||||||
|
use arbiter_server::{
|
||||||
|
actors::client::{ClientConnection, connect_client},
|
||||||
|
db::{self, schema},
|
||||||
|
};
|
||||||
|
use diesel::{ExpressionMethods as _, insert_into};
|
||||||
|
use diesel_async::RunQueryDsl;
|
||||||
|
use ed25519_dalek::Signer as _;
|
||||||
|
|
||||||
|
use super::common::ChannelTransport;
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
pub async fn test_unregistered_pubkey_rejected() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
|
||||||
|
let (server_transport, mut test_transport) = ChannelTransport::new();
|
||||||
|
let actors = GlobalActors::spawn(db.clone()).await.unwrap();
|
||||||
|
let props = ClientConnection::new(db.clone(), Box::new(server_transport), actors);
|
||||||
|
let task = tokio::spawn(connect_client(props));
|
||||||
|
|
||||||
|
let new_key = ed25519_dalek::SigningKey::generate(&mut rand::rng());
|
||||||
|
let pubkey_bytes = new_key.verifying_key().to_bytes().to_vec();
|
||||||
|
|
||||||
|
test_transport
|
||||||
|
.send(ClientRequest {
|
||||||
|
payload: Some(ClientRequestPayload::AuthChallengeRequest(
|
||||||
|
AuthChallengeRequest {
|
||||||
|
pubkey: pubkey_bytes,
|
||||||
|
},
|
||||||
|
)),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
// Auth fails, connect_client returns, transport drops
|
||||||
|
task.await.unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
pub async fn test_challenge_auth() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
|
||||||
|
let new_key = ed25519_dalek::SigningKey::generate(&mut rand::rng());
|
||||||
|
let pubkey_bytes = new_key.verifying_key().to_bytes().to_vec();
|
||||||
|
|
||||||
|
{
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
insert_into(schema::program_client::table)
|
||||||
|
.values(schema::program_client::public_key.eq(pubkey_bytes.clone()))
|
||||||
|
.execute(&mut conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
let (server_transport, mut test_transport) = ChannelTransport::new();
|
||||||
|
let actors = GlobalActors::spawn(db.clone()).await.unwrap();
|
||||||
|
|
||||||
|
let props = ClientConnection::new(db.clone(), Box::new(server_transport), actors);
|
||||||
|
let task = tokio::spawn(connect_client(props));
|
||||||
|
|
||||||
|
// Send challenge request
|
||||||
|
test_transport
|
||||||
|
.send(ClientRequest {
|
||||||
|
payload: Some(ClientRequestPayload::AuthChallengeRequest(
|
||||||
|
AuthChallengeRequest {
|
||||||
|
pubkey: pubkey_bytes,
|
||||||
|
},
|
||||||
|
)),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
// Read the challenge response
|
||||||
|
let response = test_transport
|
||||||
|
.recv()
|
||||||
|
.await
|
||||||
|
.expect("should receive challenge");
|
||||||
|
let challenge = match response {
|
||||||
|
Ok(resp) => match resp.payload {
|
||||||
|
Some(ClientResponsePayload::AuthChallenge(c)) => c,
|
||||||
|
other => panic!("Expected AuthChallenge, got {other:?}"),
|
||||||
|
},
|
||||||
|
Err(err) => panic!("Expected Ok response, got Err({err:?})"),
|
||||||
|
};
|
||||||
|
|
||||||
|
// Sign the challenge and send solution
|
||||||
|
let formatted_challenge = arbiter_proto::format_challenge(challenge.nonce, &challenge.pubkey);
|
||||||
|
let signature = new_key.sign(&formatted_challenge);
|
||||||
|
|
||||||
|
test_transport
|
||||||
|
.send(ClientRequest {
|
||||||
|
payload: Some(ClientRequestPayload::AuthChallengeSolution(
|
||||||
|
AuthChallengeSolution {
|
||||||
|
signature: signature.to_bytes().to_vec(),
|
||||||
|
},
|
||||||
|
)),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
// Auth completes, session spawned
|
||||||
|
task.await.unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
pub async fn test_evm_sign_request_payload_is_handled() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
|
||||||
|
let new_key = ed25519_dalek::SigningKey::generate(&mut rand::rng());
|
||||||
|
let pubkey_bytes = new_key.verifying_key().to_bytes().to_vec();
|
||||||
|
|
||||||
|
{
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
insert_into(schema::program_client::table)
|
||||||
|
.values(schema::program_client::public_key.eq(pubkey_bytes.clone()))
|
||||||
|
.execute(&mut conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
let (server_transport, mut test_transport) = ChannelTransport::new();
|
||||||
|
let actors = GlobalActors::spawn(db.clone()).await.unwrap();
|
||||||
|
|
||||||
|
let props = ClientConnection::new(db.clone(), Box::new(server_transport), actors);
|
||||||
|
let task = tokio::spawn(connect_client(props));
|
||||||
|
|
||||||
|
test_transport
|
||||||
|
.send(ClientRequest {
|
||||||
|
payload: Some(ClientRequestPayload::AuthChallengeRequest(
|
||||||
|
AuthChallengeRequest {
|
||||||
|
pubkey: pubkey_bytes,
|
||||||
|
},
|
||||||
|
)),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let response = test_transport
|
||||||
|
.recv()
|
||||||
|
.await
|
||||||
|
.expect("should receive challenge");
|
||||||
|
let challenge = match response {
|
||||||
|
Ok(resp) => match resp.payload {
|
||||||
|
Some(ClientResponsePayload::AuthChallenge(c)) => c,
|
||||||
|
other => panic!("Expected AuthChallenge, got {other:?}"),
|
||||||
|
},
|
||||||
|
Err(err) => panic!("Expected Ok response, got Err({err:?})"),
|
||||||
|
};
|
||||||
|
|
||||||
|
let formatted_challenge = arbiter_proto::format_challenge(challenge.nonce, &challenge.pubkey);
|
||||||
|
let signature = new_key.sign(&formatted_challenge);
|
||||||
|
|
||||||
|
test_transport
|
||||||
|
.send(ClientRequest {
|
||||||
|
payload: Some(ClientRequestPayload::AuthChallengeSolution(
|
||||||
|
AuthChallengeSolution {
|
||||||
|
signature: signature.to_bytes().to_vec(),
|
||||||
|
},
|
||||||
|
)),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
task.await.unwrap();
|
||||||
|
|
||||||
|
let tx = TxEip1559 {
|
||||||
|
chain_id: 1,
|
||||||
|
nonce: 0,
|
||||||
|
gas_limit: 21_000,
|
||||||
|
max_fee_per_gas: 1,
|
||||||
|
max_priority_fee_per_gas: 1,
|
||||||
|
to: TxKind::Call(Address::from_slice(&[0x11; 20])),
|
||||||
|
value: U256::ZERO,
|
||||||
|
input: Bytes::new(),
|
||||||
|
access_list: Default::default(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut rlp_transaction = Vec::new();
|
||||||
|
tx.encode(&mut rlp_transaction);
|
||||||
|
|
||||||
|
test_transport
|
||||||
|
.send(ClientRequest {
|
||||||
|
payload: Some(ClientRequestPayload::EvmSignTransaction(
|
||||||
|
EvmSignTransactionRequest {
|
||||||
|
wallet_address: [0x22; 20].to_vec(),
|
||||||
|
rlp_transaction,
|
||||||
|
},
|
||||||
|
)),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let response = test_transport
|
||||||
|
.recv()
|
||||||
|
.await
|
||||||
|
.expect("should receive sign response");
|
||||||
|
|
||||||
|
match response {
|
||||||
|
Ok(resp) => match resp.payload {
|
||||||
|
Some(ClientResponsePayload::EvmSignTransaction(_)) => {}
|
||||||
|
other => panic!("Expected EvmSignTransaction response, got {other:?}"),
|
||||||
|
},
|
||||||
|
Err(err) => panic!("Expected Ok response, got Err({err:?})"),
|
||||||
|
}
|
||||||
|
}
|
||||||
73
server/crates/arbiter-server/tests/common/mod.rs
Normal file
73
server/crates/arbiter-server/tests/common/mod.rs
Normal file
@@ -0,0 +1,73 @@
|
|||||||
|
use arbiter_proto::transport::{Bi, Error};
|
||||||
|
use arbiter_server::{
|
||||||
|
actors::keyholder::KeyHolder,
|
||||||
|
db::{self, schema},
|
||||||
|
};
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use diesel::QueryDsl;
|
||||||
|
use diesel_async::RunQueryDsl;
|
||||||
|
use memsafe::MemSafe;
|
||||||
|
use tokio::sync::mpsc;
|
||||||
|
|
||||||
|
#[allow(dead_code)]
|
||||||
|
pub async fn bootstrapped_keyholder(db: &db::DatabasePool) -> KeyHolder {
|
||||||
|
let mut actor = KeyHolder::new(db.clone()).await.unwrap();
|
||||||
|
actor
|
||||||
|
.bootstrap(MemSafe::new(b"test-seal-key".to_vec()).unwrap())
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
actor
|
||||||
|
}
|
||||||
|
|
||||||
|
#[allow(dead_code)]
|
||||||
|
pub async fn root_key_history_id(db: &db::DatabasePool) -> i32 {
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
let id = schema::arbiter_settings::table
|
||||||
|
.select(schema::arbiter_settings::root_key_id)
|
||||||
|
.first::<Option<i32>>(&mut conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
id.expect("root_key_id should be set after bootstrap")
|
||||||
|
}
|
||||||
|
|
||||||
|
#[allow(dead_code)]
|
||||||
|
pub struct ChannelTransport<T, Y> {
|
||||||
|
receiver: mpsc::Receiver<T>,
|
||||||
|
sender: mpsc::Sender<Y>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T, Y> ChannelTransport<T, Y> {
|
||||||
|
#[allow(dead_code)]
|
||||||
|
pub fn new() -> (Self, ChannelTransport<Y, T>) {
|
||||||
|
let (tx1, rx1) = mpsc::channel(10);
|
||||||
|
let (tx2, rx2) = mpsc::channel(10);
|
||||||
|
(
|
||||||
|
Self {
|
||||||
|
receiver: rx1,
|
||||||
|
sender: tx2,
|
||||||
|
},
|
||||||
|
ChannelTransport {
|
||||||
|
receiver: rx2,
|
||||||
|
sender: tx1,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl<T, Y> Bi<T, Y> for ChannelTransport<T, Y>
|
||||||
|
where
|
||||||
|
T: Send + 'static,
|
||||||
|
Y: Send + 'static,
|
||||||
|
{
|
||||||
|
async fn send(&mut self, item: Y) -> Result<(), Error> {
|
||||||
|
self.sender
|
||||||
|
.send(item)
|
||||||
|
.await
|
||||||
|
.map_err(|_| Error::ChannelClosed)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn recv(&mut self) -> Option<T> {
|
||||||
|
self.receiver.recv().await
|
||||||
|
}
|
||||||
|
}
|
||||||
8
server/crates/arbiter-server/tests/keyholder.rs
Normal file
8
server/crates/arbiter-server/tests/keyholder.rs
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
mod common;
|
||||||
|
|
||||||
|
#[path = "keyholder/concurrency.rs"]
|
||||||
|
mod concurrency;
|
||||||
|
#[path = "keyholder/lifecycle.rs"]
|
||||||
|
mod lifecycle;
|
||||||
|
#[path = "keyholder/storage.rs"]
|
||||||
|
mod storage;
|
||||||
173
server/crates/arbiter-server/tests/keyholder/concurrency.rs
Normal file
173
server/crates/arbiter-server/tests/keyholder/concurrency.rs
Normal file
@@ -0,0 +1,173 @@
|
|||||||
|
use std::collections::{HashMap, HashSet};
|
||||||
|
|
||||||
|
use arbiter_server::{
|
||||||
|
actors::keyholder::{CreateNew, Error, KeyHolder},
|
||||||
|
db::{self, models, schema},
|
||||||
|
};
|
||||||
|
use diesel::{ExpressionMethods as _, QueryDsl, SelectableHelper, dsl::sql_query};
|
||||||
|
use diesel_async::RunQueryDsl;
|
||||||
|
use kameo::actor::{ActorRef, Spawn as _};
|
||||||
|
use memsafe::MemSafe;
|
||||||
|
use tokio::task::JoinSet;
|
||||||
|
|
||||||
|
use crate::common;
|
||||||
|
|
||||||
|
async fn write_concurrently(
|
||||||
|
actor: ActorRef<KeyHolder>,
|
||||||
|
prefix: &'static str,
|
||||||
|
count: usize,
|
||||||
|
) -> Vec<(i32, Vec<u8>)> {
|
||||||
|
let mut set = JoinSet::new();
|
||||||
|
for i in 0..count {
|
||||||
|
let actor = actor.clone();
|
||||||
|
set.spawn(async move {
|
||||||
|
let plaintext = format!("{prefix}-{i}").into_bytes();
|
||||||
|
let id = actor
|
||||||
|
.ask(CreateNew {
|
||||||
|
plaintext: MemSafe::new(plaintext.clone()).unwrap(),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
(id, plaintext)
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut out = Vec::with_capacity(count);
|
||||||
|
while let Some(res) = set.join_next().await {
|
||||||
|
out.push(res.unwrap());
|
||||||
|
}
|
||||||
|
out
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
async fn concurrent_create_new_no_duplicate_nonces_() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let actor = KeyHolder::spawn(common::bootstrapped_keyholder(&db).await);
|
||||||
|
|
||||||
|
let writes = write_concurrently(actor, "nonce-unique", 32).await;
|
||||||
|
assert_eq!(writes.len(), 32);
|
||||||
|
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
let rows: Vec<models::AeadEncrypted> = schema::aead_encrypted::table
|
||||||
|
.select(models::AeadEncrypted::as_select())
|
||||||
|
.load(&mut conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(rows.len(), 32);
|
||||||
|
|
||||||
|
let nonces: Vec<&Vec<u8>> = rows.iter().map(|r| &r.current_nonce).collect();
|
||||||
|
let unique: HashSet<&Vec<u8>> = nonces.iter().copied().collect();
|
||||||
|
assert_eq!(nonces.len(), unique.len(), "all nonces must be unique");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
async fn concurrent_create_new_root_nonce_never_moves_backward() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let actor = KeyHolder::spawn(common::bootstrapped_keyholder(&db).await);
|
||||||
|
|
||||||
|
write_concurrently(actor, "root-max", 24).await;
|
||||||
|
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
let rows: Vec<models::AeadEncrypted> = schema::aead_encrypted::table
|
||||||
|
.select(models::AeadEncrypted::as_select())
|
||||||
|
.load(&mut conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
let max_nonce = rows
|
||||||
|
.iter()
|
||||||
|
.map(|r| r.current_nonce.clone())
|
||||||
|
.max()
|
||||||
|
.expect("at least one row");
|
||||||
|
|
||||||
|
let root_row: models::RootKeyHistory = schema::root_key_history::table
|
||||||
|
.select(models::RootKeyHistory::as_select())
|
||||||
|
.first(&mut conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(root_row.data_encryption_nonce, max_nonce);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
async fn insert_failure_does_not_create_partial_row() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut actor = common::bootstrapped_keyholder(&db).await;
|
||||||
|
let root_key_history_id = common::root_key_history_id(&db).await;
|
||||||
|
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
let before_count: i64 = schema::aead_encrypted::table
|
||||||
|
.count()
|
||||||
|
.get_result(&mut conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
let before_root_nonce: Vec<u8> = schema::root_key_history::table
|
||||||
|
.filter(schema::root_key_history::id.eq(root_key_history_id))
|
||||||
|
.select(schema::root_key_history::data_encryption_nonce)
|
||||||
|
.first(&mut conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
sql_query(
|
||||||
|
"CREATE TRIGGER fail_aead_insert BEFORE INSERT ON aead_encrypted BEGIN SELECT RAISE(ABORT, 'forced test failure'); END;",
|
||||||
|
)
|
||||||
|
.execute(&mut conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
drop(conn);
|
||||||
|
|
||||||
|
let err = actor
|
||||||
|
.create_new(MemSafe::new(b"should fail".to_vec()).unwrap())
|
||||||
|
.await
|
||||||
|
.unwrap_err();
|
||||||
|
assert!(matches!(err, Error::DatabaseTransaction(_)));
|
||||||
|
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
sql_query("DROP TRIGGER fail_aead_insert;")
|
||||||
|
.execute(&mut conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let after_count: i64 = schema::aead_encrypted::table
|
||||||
|
.count()
|
||||||
|
.get_result(&mut conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(
|
||||||
|
before_count, after_count,
|
||||||
|
"failed insert must not create row"
|
||||||
|
);
|
||||||
|
|
||||||
|
let after_root_nonce: Vec<u8> = schema::root_key_history::table
|
||||||
|
.filter(schema::root_key_history::id.eq(root_key_history_id))
|
||||||
|
.select(schema::root_key_history::data_encryption_nonce)
|
||||||
|
.first(&mut conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert!(
|
||||||
|
after_root_nonce > before_root_nonce,
|
||||||
|
"current behavior allows nonce gap on failed insert"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
async fn decrypt_roundtrip_after_high_concurrency() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let actor = KeyHolder::spawn(common::bootstrapped_keyholder(&db).await);
|
||||||
|
|
||||||
|
let writes = write_concurrently(actor, "roundtrip", 40).await;
|
||||||
|
let expected: HashMap<i32, Vec<u8>> = writes.into_iter().collect();
|
||||||
|
|
||||||
|
let mut decryptor = KeyHolder::new(db.clone()).await.unwrap();
|
||||||
|
decryptor
|
||||||
|
.try_unseal(MemSafe::new(b"test-seal-key".to_vec()).unwrap())
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
for (id, plaintext) in expected {
|
||||||
|
let mut decrypted = decryptor.decrypt(id).await.unwrap();
|
||||||
|
assert_eq!(*decrypted.read().unwrap(), plaintext);
|
||||||
|
}
|
||||||
|
}
|
||||||
131
server/crates/arbiter-server/tests/keyholder/lifecycle.rs
Normal file
131
server/crates/arbiter-server/tests/keyholder/lifecycle.rs
Normal file
@@ -0,0 +1,131 @@
|
|||||||
|
use arbiter_server::{
|
||||||
|
actors::keyholder::{Error, KeyHolder},
|
||||||
|
db::{self, models, schema},
|
||||||
|
};
|
||||||
|
use diesel::{QueryDsl, SelectableHelper};
|
||||||
|
use diesel_async::RunQueryDsl;
|
||||||
|
use memsafe::MemSafe;
|
||||||
|
|
||||||
|
use crate::common;
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
async fn test_bootstrap() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut actor = KeyHolder::new(db.clone()).await.unwrap();
|
||||||
|
|
||||||
|
let seal_key = MemSafe::new(b"test-seal-key".to_vec()).unwrap();
|
||||||
|
actor.bootstrap(seal_key).await.unwrap();
|
||||||
|
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
let row: models::RootKeyHistory = schema::root_key_history::table
|
||||||
|
.select(models::RootKeyHistory::as_select())
|
||||||
|
.first(&mut conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
assert_eq!(row.schema_version, 1);
|
||||||
|
assert_eq!(
|
||||||
|
row.tag,
|
||||||
|
arbiter_server::actors::keyholder::encryption::v1::ROOT_KEY_TAG
|
||||||
|
);
|
||||||
|
assert!(!row.ciphertext.is_empty());
|
||||||
|
assert!(!row.salt.is_empty());
|
||||||
|
assert_eq!(
|
||||||
|
row.data_encryption_nonce,
|
||||||
|
arbiter_server::actors::keyholder::encryption::v1::Nonce::default().to_vec()
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
async fn test_bootstrap_rejects_double() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut actor = common::bootstrapped_keyholder(&db).await;
|
||||||
|
|
||||||
|
let seal_key2 = MemSafe::new(b"test-seal-key".to_vec()).unwrap();
|
||||||
|
let err = actor.bootstrap(seal_key2).await.unwrap_err();
|
||||||
|
assert!(matches!(err, Error::AlreadyBootstrapped));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
async fn test_create_new_before_bootstrap_fails() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut actor = KeyHolder::new(db).await.unwrap();
|
||||||
|
|
||||||
|
let err = actor
|
||||||
|
.create_new(MemSafe::new(b"data".to_vec()).unwrap())
|
||||||
|
.await
|
||||||
|
.unwrap_err();
|
||||||
|
assert!(matches!(err, Error::NotBootstrapped));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
async fn test_decrypt_before_bootstrap_fails() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut actor = KeyHolder::new(db).await.unwrap();
|
||||||
|
|
||||||
|
let err = actor.decrypt(1).await.unwrap_err();
|
||||||
|
assert!(matches!(err, Error::NotBootstrapped));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
async fn test_new_restores_sealed_state() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let actor = common::bootstrapped_keyholder(&db).await;
|
||||||
|
drop(actor);
|
||||||
|
|
||||||
|
let mut actor2 = KeyHolder::new(db).await.unwrap();
|
||||||
|
let err = actor2.decrypt(1).await.unwrap_err();
|
||||||
|
assert!(matches!(err, Error::NotBootstrapped));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
async fn test_unseal_correct_password() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut actor = common::bootstrapped_keyholder(&db).await;
|
||||||
|
|
||||||
|
let plaintext = b"survive a restart";
|
||||||
|
let aead_id = actor
|
||||||
|
.create_new(MemSafe::new(plaintext.to_vec()).unwrap())
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
drop(actor);
|
||||||
|
|
||||||
|
let mut actor = KeyHolder::new(db.clone()).await.unwrap();
|
||||||
|
let seal_key = MemSafe::new(b"test-seal-key".to_vec()).unwrap();
|
||||||
|
actor.try_unseal(seal_key).await.unwrap();
|
||||||
|
|
||||||
|
let mut decrypted = actor.decrypt(aead_id).await.unwrap();
|
||||||
|
assert_eq!(*decrypted.read().unwrap(), plaintext);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
async fn test_unseal_wrong_then_correct_password() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut actor = common::bootstrapped_keyholder(&db).await;
|
||||||
|
|
||||||
|
let plaintext = b"important data";
|
||||||
|
let aead_id = actor
|
||||||
|
.create_new(MemSafe::new(plaintext.to_vec()).unwrap())
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
drop(actor);
|
||||||
|
|
||||||
|
let mut actor = KeyHolder::new(db.clone()).await.unwrap();
|
||||||
|
|
||||||
|
let bad_key = MemSafe::new(b"wrong-password".to_vec()).unwrap();
|
||||||
|
let err = actor.try_unseal(bad_key).await.unwrap_err();
|
||||||
|
assert!(matches!(err, Error::InvalidKey));
|
||||||
|
|
||||||
|
let good_key = MemSafe::new(b"test-seal-key".to_vec()).unwrap();
|
||||||
|
actor.try_unseal(good_key).await.unwrap();
|
||||||
|
|
||||||
|
let mut decrypted = actor.decrypt(aead_id).await.unwrap();
|
||||||
|
assert_eq!(*decrypted.read().unwrap(), plaintext);
|
||||||
|
}
|
||||||
161
server/crates/arbiter-server/tests/keyholder/storage.rs
Normal file
161
server/crates/arbiter-server/tests/keyholder/storage.rs
Normal file
@@ -0,0 +1,161 @@
|
|||||||
|
use std::collections::HashSet;
|
||||||
|
|
||||||
|
use arbiter_server::{
|
||||||
|
actors::keyholder::{Error, encryption::v1},
|
||||||
|
db::{self, models, schema},
|
||||||
|
};
|
||||||
|
use diesel::{ExpressionMethods as _, QueryDsl, SelectableHelper, dsl::update};
|
||||||
|
use diesel_async::RunQueryDsl;
|
||||||
|
use memsafe::MemSafe;
|
||||||
|
|
||||||
|
use crate::common;
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
async fn test_create_decrypt_roundtrip() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut actor = common::bootstrapped_keyholder(&db).await;
|
||||||
|
|
||||||
|
let plaintext = b"hello arbiter";
|
||||||
|
let aead_id = actor
|
||||||
|
.create_new(MemSafe::new(plaintext.to_vec()).unwrap())
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let mut decrypted = actor.decrypt(aead_id).await.unwrap();
|
||||||
|
assert_eq!(*decrypted.read().unwrap(), plaintext);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
async fn test_decrypt_nonexistent_returns_not_found() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut actor = common::bootstrapped_keyholder(&db).await;
|
||||||
|
|
||||||
|
let err = actor.decrypt(9999).await.unwrap_err();
|
||||||
|
assert!(matches!(err, Error::NotFound));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
async fn test_ciphertext_differs_across_entries() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut actor = common::bootstrapped_keyholder(&db).await;
|
||||||
|
|
||||||
|
let plaintext = b"same content";
|
||||||
|
let id1 = actor
|
||||||
|
.create_new(MemSafe::new(plaintext.to_vec()).unwrap())
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
let id2 = actor
|
||||||
|
.create_new(MemSafe::new(plaintext.to_vec()).unwrap())
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
let row1: models::AeadEncrypted = schema::aead_encrypted::table
|
||||||
|
.filter(schema::aead_encrypted::id.eq(id1))
|
||||||
|
.select(models::AeadEncrypted::as_select())
|
||||||
|
.first(&mut conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
let row2: models::AeadEncrypted = schema::aead_encrypted::table
|
||||||
|
.filter(schema::aead_encrypted::id.eq(id2))
|
||||||
|
.select(models::AeadEncrypted::as_select())
|
||||||
|
.first(&mut conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
assert_ne!(row1.ciphertext, row2.ciphertext);
|
||||||
|
|
||||||
|
let mut d1 = actor.decrypt(id1).await.unwrap();
|
||||||
|
let mut d2 = actor.decrypt(id2).await.unwrap();
|
||||||
|
assert_eq!(*d1.read().unwrap(), plaintext);
|
||||||
|
assert_eq!(*d2.read().unwrap(), plaintext);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
async fn test_nonce_never_reused() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut actor = common::bootstrapped_keyholder(&db).await;
|
||||||
|
|
||||||
|
let n = 5;
|
||||||
|
for i in 0..n {
|
||||||
|
actor
|
||||||
|
.create_new(MemSafe::new(format!("secret {i}").into_bytes()).unwrap())
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
let rows: Vec<models::AeadEncrypted> = schema::aead_encrypted::table
|
||||||
|
.select(models::AeadEncrypted::as_select())
|
||||||
|
.load(&mut conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
assert_eq!(rows.len(), n);
|
||||||
|
|
||||||
|
let nonces: Vec<&Vec<u8>> = rows.iter().map(|r| &r.current_nonce).collect();
|
||||||
|
let unique: HashSet<&Vec<u8>> = nonces.iter().copied().collect();
|
||||||
|
assert_eq!(nonces.len(), unique.len(), "all nonces must be unique");
|
||||||
|
|
||||||
|
for (i, row) in rows.iter().enumerate() {
|
||||||
|
let mut expected = v1::Nonce::default();
|
||||||
|
for _ in 0..=i {
|
||||||
|
expected.increment();
|
||||||
|
}
|
||||||
|
assert_eq!(row.current_nonce, expected.to_vec(), "nonce {i} mismatch");
|
||||||
|
}
|
||||||
|
|
||||||
|
let root_row: models::RootKeyHistory = schema::root_key_history::table
|
||||||
|
.select(models::RootKeyHistory::as_select())
|
||||||
|
.first(&mut conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
let last_nonce = &rows.last().unwrap().current_nonce;
|
||||||
|
assert_eq!(&root_row.data_encryption_nonce, last_nonce);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
async fn broken_db_nonce_format_fails_closed() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut actor = common::bootstrapped_keyholder(&db).await;
|
||||||
|
let root_key_history_id = common::root_key_history_id(&db).await;
|
||||||
|
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
update(
|
||||||
|
schema::root_key_history::table
|
||||||
|
.filter(schema::root_key_history::id.eq(root_key_history_id)),
|
||||||
|
)
|
||||||
|
.set(schema::root_key_history::data_encryption_nonce.eq(vec![1, 2, 3]))
|
||||||
|
.execute(&mut conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
drop(conn);
|
||||||
|
|
||||||
|
let err = actor
|
||||||
|
.create_new(MemSafe::new(b"must fail".to_vec()).unwrap())
|
||||||
|
.await
|
||||||
|
.unwrap_err();
|
||||||
|
assert!(matches!(err, Error::BrokenDatabase));
|
||||||
|
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut actor = common::bootstrapped_keyholder(&db).await;
|
||||||
|
let id = actor
|
||||||
|
.create_new(MemSafe::new(b"decrypt target".to_vec()).unwrap())
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
update(schema::aead_encrypted::table.filter(schema::aead_encrypted::id.eq(id)))
|
||||||
|
.set(schema::aead_encrypted::current_nonce.eq(vec![7, 8]))
|
||||||
|
.execute(&mut conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
drop(conn);
|
||||||
|
|
||||||
|
let err = actor.decrypt(id).await.unwrap_err();
|
||||||
|
assert!(matches!(err, Error::BrokenDatabase));
|
||||||
|
}
|
||||||
8
server/crates/arbiter-server/tests/user_agent.rs
Normal file
8
server/crates/arbiter-server/tests/user_agent.rs
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
mod common;
|
||||||
|
|
||||||
|
#[path = "user_agent/auth.rs"]
|
||||||
|
mod auth;
|
||||||
|
#[path = "user_agent/sdk_client.rs"]
|
||||||
|
mod sdk_client;
|
||||||
|
#[path = "user_agent/unseal.rs"]
|
||||||
|
mod unseal;
|
||||||
168
server/crates/arbiter-server/tests/user_agent/auth.rs
Normal file
168
server/crates/arbiter-server/tests/user_agent/auth.rs
Normal file
@@ -0,0 +1,168 @@
|
|||||||
|
use arbiter_proto::proto::user_agent::{
|
||||||
|
AuthChallengeRequest, AuthChallengeSolution, KeyType as ProtoKeyType, UserAgentRequest,
|
||||||
|
user_agent_request::Payload as UserAgentRequestPayload,
|
||||||
|
user_agent_response::Payload as UserAgentResponsePayload,
|
||||||
|
};
|
||||||
|
use arbiter_proto::transport::Bi;
|
||||||
|
use arbiter_server::{
|
||||||
|
actors::{
|
||||||
|
GlobalActors,
|
||||||
|
bootstrap::GetToken,
|
||||||
|
user_agent::{UserAgentConnection, connect_user_agent},
|
||||||
|
},
|
||||||
|
db::{self, schema},
|
||||||
|
};
|
||||||
|
use diesel::{ExpressionMethods as _, QueryDsl, insert_into};
|
||||||
|
use diesel_async::RunQueryDsl;
|
||||||
|
use ed25519_dalek::Signer as _;
|
||||||
|
|
||||||
|
use super::common::ChannelTransport;
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
pub async fn test_bootstrap_token_auth() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let actors = GlobalActors::spawn(db.clone()).await.unwrap();
|
||||||
|
let token = actors.bootstrapper.ask(GetToken).await.unwrap().unwrap();
|
||||||
|
|
||||||
|
let (server_transport, mut test_transport) = ChannelTransport::new();
|
||||||
|
let props = UserAgentConnection::new(db.clone(), actors, Box::new(server_transport));
|
||||||
|
let task = tokio::spawn(connect_user_agent(props));
|
||||||
|
|
||||||
|
let new_key = ed25519_dalek::SigningKey::generate(&mut rand::rng());
|
||||||
|
let pubkey_bytes = new_key.verifying_key().to_bytes().to_vec();
|
||||||
|
|
||||||
|
test_transport
|
||||||
|
.send(UserAgentRequest {
|
||||||
|
payload: Some(UserAgentRequestPayload::AuthChallengeRequest(
|
||||||
|
AuthChallengeRequest {
|
||||||
|
pubkey: pubkey_bytes,
|
||||||
|
bootstrap_token: Some(token),
|
||||||
|
key_type: ProtoKeyType::Ed25519.into(),
|
||||||
|
},
|
||||||
|
)),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
task.await.unwrap();
|
||||||
|
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
let stored_pubkey: Vec<u8> = schema::useragent_client::table
|
||||||
|
.select(schema::useragent_client::public_key)
|
||||||
|
.first::<Vec<u8>>(&mut conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(stored_pubkey, new_key.verifying_key().to_bytes().to_vec());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
pub async fn test_bootstrap_invalid_token_auth() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let actors = GlobalActors::spawn(db.clone()).await.unwrap();
|
||||||
|
|
||||||
|
let (server_transport, mut test_transport) = ChannelTransport::new();
|
||||||
|
let props = UserAgentConnection::new(db.clone(), actors, Box::new(server_transport));
|
||||||
|
let task = tokio::spawn(connect_user_agent(props));
|
||||||
|
|
||||||
|
let new_key = ed25519_dalek::SigningKey::generate(&mut rand::rng());
|
||||||
|
let pubkey_bytes = new_key.verifying_key().to_bytes().to_vec();
|
||||||
|
|
||||||
|
test_transport
|
||||||
|
.send(UserAgentRequest {
|
||||||
|
payload: Some(UserAgentRequestPayload::AuthChallengeRequest(
|
||||||
|
AuthChallengeRequest {
|
||||||
|
pubkey: pubkey_bytes,
|
||||||
|
bootstrap_token: Some("invalid_token".to_string()),
|
||||||
|
key_type: ProtoKeyType::Ed25519.into(),
|
||||||
|
},
|
||||||
|
)),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
// Auth fails, connect_user_agent returns, transport drops
|
||||||
|
task.await.unwrap();
|
||||||
|
|
||||||
|
// Verify no key was registered
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
let count: i64 = schema::useragent_client::table
|
||||||
|
.count()
|
||||||
|
.get_result::<i64>(&mut conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
assert_eq!(count, 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
pub async fn test_challenge_auth() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let actors = GlobalActors::spawn(db.clone()).await.unwrap();
|
||||||
|
|
||||||
|
let new_key = ed25519_dalek::SigningKey::generate(&mut rand::rng());
|
||||||
|
let pubkey_bytes = new_key.verifying_key().to_bytes().to_vec();
|
||||||
|
|
||||||
|
// Pre-register key with key_type
|
||||||
|
{
|
||||||
|
let mut conn = db.get().await.unwrap();
|
||||||
|
insert_into(schema::useragent_client::table)
|
||||||
|
.values((
|
||||||
|
schema::useragent_client::public_key.eq(pubkey_bytes.clone()),
|
||||||
|
schema::useragent_client::key_type.eq(1i32),
|
||||||
|
))
|
||||||
|
.execute(&mut conn)
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
let (server_transport, mut test_transport) = ChannelTransport::new();
|
||||||
|
let props = UserAgentConnection::new(db.clone(), actors, Box::new(server_transport));
|
||||||
|
let task = tokio::spawn(connect_user_agent(props));
|
||||||
|
|
||||||
|
// Send challenge request
|
||||||
|
test_transport
|
||||||
|
.send(UserAgentRequest {
|
||||||
|
payload: Some(UserAgentRequestPayload::AuthChallengeRequest(
|
||||||
|
AuthChallengeRequest {
|
||||||
|
pubkey: pubkey_bytes,
|
||||||
|
bootstrap_token: None,
|
||||||
|
key_type: ProtoKeyType::Ed25519.into(),
|
||||||
|
},
|
||||||
|
)),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
// Read the challenge response
|
||||||
|
let response = test_transport
|
||||||
|
.recv()
|
||||||
|
.await
|
||||||
|
.expect("should receive challenge");
|
||||||
|
let challenge = match response {
|
||||||
|
Ok(resp) => match resp.payload {
|
||||||
|
Some(UserAgentResponsePayload::AuthChallenge(c)) => c,
|
||||||
|
other => panic!("Expected AuthChallenge, got {other:?}"),
|
||||||
|
},
|
||||||
|
Err(err) => panic!("Expected Ok response, got Err({err:?})"),
|
||||||
|
};
|
||||||
|
|
||||||
|
// Sign the challenge and send solution
|
||||||
|
let formatted_challenge = arbiter_proto::format_challenge(challenge.nonce, &challenge.pubkey);
|
||||||
|
let signature = new_key.sign(&formatted_challenge);
|
||||||
|
|
||||||
|
test_transport
|
||||||
|
.send(UserAgentRequest {
|
||||||
|
payload: Some(UserAgentRequestPayload::AuthChallengeSolution(
|
||||||
|
AuthChallengeSolution {
|
||||||
|
signature: signature.to_bytes().to_vec(),
|
||||||
|
},
|
||||||
|
)),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
// Auth completes, session spawned
|
||||||
|
task.await.unwrap();
|
||||||
|
}
|
||||||
270
server/crates/arbiter-server/tests/user_agent/sdk_client.rs
Normal file
270
server/crates/arbiter-server/tests/user_agent/sdk_client.rs
Normal file
@@ -0,0 +1,270 @@
|
|||||||
|
use arbiter_proto::proto::user_agent::{
|
||||||
|
SdkClientApproveRequest, SdkClientError as ProtoSdkClientError, SdkClientRevokeRequest,
|
||||||
|
UserAgentRequest, sdk_client_approve_response, sdk_client_list_response,
|
||||||
|
sdk_client_revoke_response, user_agent_request::Payload as UserAgentRequestPayload,
|
||||||
|
user_agent_response::Payload as UserAgentResponsePayload,
|
||||||
|
};
|
||||||
|
use arbiter_server::{
|
||||||
|
actors::{GlobalActors, user_agent::session::UserAgentSession},
|
||||||
|
db,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Shared helper: create a session and register a client pubkey via sdk_client_approve.
|
||||||
|
async fn make_session(db: &db::DatabasePool) -> UserAgentSession {
|
||||||
|
let actors = GlobalActors::spawn(db.clone()).await.unwrap();
|
||||||
|
UserAgentSession::new_test(db.clone(), actors)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
async fn test_sdk_client_approve_registers_client() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut session = make_session(&db).await;
|
||||||
|
|
||||||
|
let pubkey = [0x42u8; 32];
|
||||||
|
|
||||||
|
let response = session
|
||||||
|
.process_transport_inbound(UserAgentRequest {
|
||||||
|
payload: Some(UserAgentRequestPayload::SdkClientApprove(
|
||||||
|
SdkClientApproveRequest {
|
||||||
|
pubkey: pubkey.to_vec(),
|
||||||
|
},
|
||||||
|
)),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.expect("handler should succeed");
|
||||||
|
|
||||||
|
let entry = match response.payload.unwrap() {
|
||||||
|
UserAgentResponsePayload::SdkClientApprove(resp) => match resp.result.unwrap() {
|
||||||
|
sdk_client_approve_response::Result::Client(e) => e,
|
||||||
|
sdk_client_approve_response::Result::Error(e) => {
|
||||||
|
panic!("Expected Client, got error {:?}", e)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
other => panic!("Expected SdkClientApprove, got {other:?}"),
|
||||||
|
};
|
||||||
|
|
||||||
|
assert_eq!(entry.pubkey, pubkey.to_vec());
|
||||||
|
assert!(entry.id > 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
async fn test_sdk_client_approve_duplicate_returns_already_exists() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut session = make_session(&db).await;
|
||||||
|
|
||||||
|
let pubkey = [0x11u8; 32];
|
||||||
|
let req = UserAgentRequest {
|
||||||
|
payload: Some(UserAgentRequestPayload::SdkClientApprove(
|
||||||
|
SdkClientApproveRequest {
|
||||||
|
pubkey: pubkey.to_vec(),
|
||||||
|
},
|
||||||
|
)),
|
||||||
|
};
|
||||||
|
|
||||||
|
session
|
||||||
|
.process_transport_inbound(req.clone())
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let response = session
|
||||||
|
.process_transport_inbound(req)
|
||||||
|
.await
|
||||||
|
.expect("second insert should not panic");
|
||||||
|
|
||||||
|
match response.payload.unwrap() {
|
||||||
|
UserAgentResponsePayload::SdkClientApprove(resp) => match resp.result.unwrap() {
|
||||||
|
sdk_client_approve_response::Result::Error(code) => {
|
||||||
|
assert_eq!(code, ProtoSdkClientError::AlreadyExists as i32);
|
||||||
|
}
|
||||||
|
sdk_client_approve_response::Result::Client(_) => {
|
||||||
|
panic!("Expected AlreadyExists error for duplicate pubkey")
|
||||||
|
}
|
||||||
|
},
|
||||||
|
other => panic!("Expected SdkClientApprove, got {other:?}"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
async fn test_sdk_client_list_shows_registered_clients() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut session = make_session(&db).await;
|
||||||
|
|
||||||
|
let pubkey_a = [0x0Au8; 32];
|
||||||
|
let pubkey_b = [0x0Bu8; 32];
|
||||||
|
|
||||||
|
for pubkey in [pubkey_a, pubkey_b] {
|
||||||
|
session
|
||||||
|
.process_transport_inbound(UserAgentRequest {
|
||||||
|
payload: Some(UserAgentRequestPayload::SdkClientApprove(
|
||||||
|
SdkClientApproveRequest {
|
||||||
|
pubkey: pubkey.to_vec(),
|
||||||
|
},
|
||||||
|
)),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
}
|
||||||
|
|
||||||
|
let response = session
|
||||||
|
.process_transport_inbound(UserAgentRequest {
|
||||||
|
payload: Some(UserAgentRequestPayload::SdkClientList(())),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.expect("list should succeed");
|
||||||
|
|
||||||
|
let clients = match response.payload.unwrap() {
|
||||||
|
UserAgentResponsePayload::SdkClientList(resp) => match resp.result.unwrap() {
|
||||||
|
sdk_client_list_response::Result::Clients(list) => list.clients,
|
||||||
|
sdk_client_list_response::Result::Error(e) => {
|
||||||
|
panic!("Expected Clients, got error {:?}", e)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
other => panic!("Expected SdkClientList, got {other:?}"),
|
||||||
|
};
|
||||||
|
|
||||||
|
assert_eq!(clients.len(), 2);
|
||||||
|
let pubkeys: Vec<Vec<u8>> = clients.into_iter().map(|e| e.pubkey).collect();
|
||||||
|
assert!(pubkeys.contains(&pubkey_a.to_vec()));
|
||||||
|
assert!(pubkeys.contains(&pubkey_b.to_vec()));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
async fn test_sdk_client_revoke_removes_client() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut session = make_session(&db).await;
|
||||||
|
|
||||||
|
let pubkey = [0xBBu8; 32];
|
||||||
|
|
||||||
|
// Register a client and get its id
|
||||||
|
let approve_response = session
|
||||||
|
.process_transport_inbound(UserAgentRequest {
|
||||||
|
payload: Some(UserAgentRequestPayload::SdkClientApprove(
|
||||||
|
SdkClientApproveRequest {
|
||||||
|
pubkey: pubkey.to_vec(),
|
||||||
|
},
|
||||||
|
)),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let client_id = match approve_response.payload.unwrap() {
|
||||||
|
UserAgentResponsePayload::SdkClientApprove(resp) => match resp.result.unwrap() {
|
||||||
|
sdk_client_approve_response::Result::Client(e) => e.id,
|
||||||
|
sdk_client_approve_response::Result::Error(e) => panic!("approve failed: {:?}", e),
|
||||||
|
},
|
||||||
|
other => panic!("{other:?}"),
|
||||||
|
};
|
||||||
|
|
||||||
|
// Revoke the client
|
||||||
|
let revoke_response = session
|
||||||
|
.process_transport_inbound(UserAgentRequest {
|
||||||
|
payload: Some(UserAgentRequestPayload::SdkClientRevoke(
|
||||||
|
SdkClientRevokeRequest { client_id },
|
||||||
|
)),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.expect("revoke should succeed");
|
||||||
|
|
||||||
|
match revoke_response.payload.unwrap() {
|
||||||
|
UserAgentResponsePayload::SdkClientRevoke(resp) => match resp.result.unwrap() {
|
||||||
|
sdk_client_revoke_response::Result::Ok(_) => {}
|
||||||
|
sdk_client_revoke_response::Result::Error(e) => {
|
||||||
|
panic!("Expected Ok, got error {:?}", e)
|
||||||
|
}
|
||||||
|
},
|
||||||
|
other => panic!("Expected SdkClientRevoke, got {other:?}"),
|
||||||
|
}
|
||||||
|
|
||||||
|
// List should now be empty
|
||||||
|
let list_response = session
|
||||||
|
.process_transport_inbound(UserAgentRequest {
|
||||||
|
payload: Some(UserAgentRequestPayload::SdkClientList(())),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let clients = match list_response.payload.unwrap() {
|
||||||
|
UserAgentResponsePayload::SdkClientList(resp) => match resp.result.unwrap() {
|
||||||
|
sdk_client_list_response::Result::Clients(list) => list.clients,
|
||||||
|
sdk_client_list_response::Result::Error(e) => panic!("list error: {:?}", e),
|
||||||
|
},
|
||||||
|
other => panic!("{other:?}"),
|
||||||
|
};
|
||||||
|
assert!(clients.is_empty(), "client should be removed after revoke");
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
async fn test_sdk_client_revoke_not_found_returns_error() {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let mut session = make_session(&db).await;
|
||||||
|
|
||||||
|
let response = session
|
||||||
|
.process_transport_inbound(UserAgentRequest {
|
||||||
|
payload: Some(UserAgentRequestPayload::SdkClientRevoke(
|
||||||
|
SdkClientRevokeRequest { client_id: 9999 },
|
||||||
|
)),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
match response.payload.unwrap() {
|
||||||
|
UserAgentResponsePayload::SdkClientRevoke(resp) => match resp.result.unwrap() {
|
||||||
|
sdk_client_revoke_response::Result::Error(code) => {
|
||||||
|
assert_eq!(code, ProtoSdkClientError::NotFound as i32);
|
||||||
|
}
|
||||||
|
sdk_client_revoke_response::Result::Ok(_) => {
|
||||||
|
panic!("Expected NotFound error for missing client_id")
|
||||||
|
}
|
||||||
|
},
|
||||||
|
other => panic!("Expected SdkClientRevoke, got {other:?}"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
async fn test_sdk_client_approve_rejected_client_cannot_auth() {
|
||||||
|
// Verify the core flow: only pre-approved clients can authenticate
|
||||||
|
use arbiter_proto::proto::client::{
|
||||||
|
AuthChallengeRequest, ClientRequest, client_request::Payload as ClientRequestPayload,
|
||||||
|
client_response::Payload as ClientResponsePayload,
|
||||||
|
};
|
||||||
|
use arbiter_proto::transport::Bi as _;
|
||||||
|
use arbiter_server::actors::client::{ClientConnection, connect_client};
|
||||||
|
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let actors = GlobalActors::spawn(db.clone()).await.unwrap();
|
||||||
|
|
||||||
|
let new_key = ed25519_dalek::SigningKey::generate(&mut rand::rng());
|
||||||
|
let pubkey_bytes = new_key.verifying_key().to_bytes().to_vec();
|
||||||
|
|
||||||
|
let (server_transport, mut test_transport) = super::common::ChannelTransport::<_, _>::new();
|
||||||
|
let props = ClientConnection::new(db.clone(), Box::new(server_transport), actors.clone());
|
||||||
|
let task = tokio::spawn(connect_client(props));
|
||||||
|
|
||||||
|
test_transport
|
||||||
|
.send(ClientRequest {
|
||||||
|
payload: Some(ClientRequestPayload::AuthChallengeRequest(
|
||||||
|
AuthChallengeRequest {
|
||||||
|
pubkey: pubkey_bytes.clone(),
|
||||||
|
},
|
||||||
|
)),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let response = test_transport.recv().await.unwrap().unwrap();
|
||||||
|
assert!(
|
||||||
|
matches!(
|
||||||
|
response.payload.unwrap(),
|
||||||
|
ClientResponsePayload::ClientConnectError(_)
|
||||||
|
),
|
||||||
|
"unregistered client should be rejected"
|
||||||
|
);
|
||||||
|
|
||||||
|
task.await.unwrap();
|
||||||
|
}
|
||||||
184
server/crates/arbiter-server/tests/user_agent/unseal.rs
Normal file
184
server/crates/arbiter-server/tests/user_agent/unseal.rs
Normal file
@@ -0,0 +1,184 @@
|
|||||||
|
use arbiter_proto::proto::user_agent::{
|
||||||
|
UnsealEncryptedKey, UnsealResult, UnsealStart, UserAgentRequest,
|
||||||
|
user_agent_request::Payload as UserAgentRequestPayload,
|
||||||
|
user_agent_response::Payload as UserAgentResponsePayload,
|
||||||
|
};
|
||||||
|
use arbiter_server::{
|
||||||
|
actors::{
|
||||||
|
GlobalActors,
|
||||||
|
keyholder::{Bootstrap, Seal},
|
||||||
|
user_agent::session::UserAgentSession,
|
||||||
|
},
|
||||||
|
db,
|
||||||
|
};
|
||||||
|
use chacha20poly1305::{AeadInPlace, XChaCha20Poly1305, XNonce, aead::KeyInit};
|
||||||
|
use memsafe::MemSafe;
|
||||||
|
use x25519_dalek::{EphemeralSecret, PublicKey};
|
||||||
|
|
||||||
|
async fn setup_sealed_user_agent(
|
||||||
|
seal_key: &[u8],
|
||||||
|
) -> (db::DatabasePool, UserAgentSession) {
|
||||||
|
let db = db::create_test_pool().await;
|
||||||
|
let actors = GlobalActors::spawn(db.clone()).await.unwrap();
|
||||||
|
|
||||||
|
actors
|
||||||
|
.key_holder
|
||||||
|
.ask(Bootstrap {
|
||||||
|
seal_key_raw: MemSafe::new(seal_key.to_vec()).unwrap(),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
actors.key_holder.ask(Seal).await.unwrap();
|
||||||
|
|
||||||
|
let session = UserAgentSession::new_test(db.clone(), actors);
|
||||||
|
|
||||||
|
(db, session)
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn client_dh_encrypt(
|
||||||
|
user_agent: &mut UserAgentSession,
|
||||||
|
key_to_send: &[u8],
|
||||||
|
) -> UnsealEncryptedKey {
|
||||||
|
let client_secret = EphemeralSecret::random();
|
||||||
|
let client_public = PublicKey::from(&client_secret);
|
||||||
|
|
||||||
|
let response = user_agent
|
||||||
|
.process_transport_inbound(UserAgentRequest {
|
||||||
|
payload: Some(UserAgentRequestPayload::UnsealStart(UnsealStart {
|
||||||
|
client_pubkey: client_public.as_bytes().to_vec(),
|
||||||
|
})),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let server_pubkey = match response.payload.unwrap() {
|
||||||
|
UserAgentResponsePayload::UnsealStartResponse(resp) => resp.server_pubkey,
|
||||||
|
other => panic!("Expected UnsealStartResponse, got {other:?}"),
|
||||||
|
};
|
||||||
|
let server_public = PublicKey::from(<[u8; 32]>::try_from(server_pubkey.as_slice()).unwrap());
|
||||||
|
|
||||||
|
let shared_secret = client_secret.diffie_hellman(&server_public);
|
||||||
|
let cipher = XChaCha20Poly1305::new(shared_secret.as_bytes().into());
|
||||||
|
let nonce = XNonce::from([0u8; 24]);
|
||||||
|
let associated_data = b"unseal";
|
||||||
|
let mut ciphertext = key_to_send.to_vec();
|
||||||
|
cipher
|
||||||
|
.encrypt_in_place(&nonce, associated_data, &mut ciphertext)
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
UnsealEncryptedKey {
|
||||||
|
nonce: nonce.to_vec(),
|
||||||
|
ciphertext,
|
||||||
|
associated_data: associated_data.to_vec(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn unseal_key_request(req: UnsealEncryptedKey) -> UserAgentRequest {
|
||||||
|
UserAgentRequest {
|
||||||
|
payload: Some(UserAgentRequestPayload::UnsealEncryptedKey(req)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
pub async fn test_unseal_success() {
|
||||||
|
let seal_key = b"test-seal-key";
|
||||||
|
let (_db, mut user_agent) = setup_sealed_user_agent(seal_key).await;
|
||||||
|
|
||||||
|
let encrypted_key = client_dh_encrypt(&mut user_agent, seal_key).await;
|
||||||
|
|
||||||
|
let response = user_agent
|
||||||
|
.process_transport_inbound(unseal_key_request(encrypted_key))
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
response.payload.unwrap(),
|
||||||
|
UserAgentResponsePayload::UnsealResult(UnsealResult::Success.into()),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
pub async fn test_unseal_wrong_seal_key() {
|
||||||
|
let (_db, mut user_agent) = setup_sealed_user_agent(b"correct-key").await;
|
||||||
|
|
||||||
|
let encrypted_key = client_dh_encrypt(&mut user_agent, b"wrong-key").await;
|
||||||
|
|
||||||
|
let response = user_agent
|
||||||
|
.process_transport_inbound(unseal_key_request(encrypted_key))
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
response.payload.unwrap(),
|
||||||
|
UserAgentResponsePayload::UnsealResult(UnsealResult::InvalidKey.into()),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
pub async fn test_unseal_corrupted_ciphertext() {
|
||||||
|
let (_db, mut user_agent) = setup_sealed_user_agent(b"test-key").await;
|
||||||
|
|
||||||
|
let client_secret = EphemeralSecret::random();
|
||||||
|
let client_public = PublicKey::from(&client_secret);
|
||||||
|
|
||||||
|
user_agent
|
||||||
|
.process_transport_inbound(UserAgentRequest {
|
||||||
|
payload: Some(UserAgentRequestPayload::UnsealStart(UnsealStart {
|
||||||
|
client_pubkey: client_public.as_bytes().to_vec(),
|
||||||
|
})),
|
||||||
|
})
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
let response = user_agent
|
||||||
|
.process_transport_inbound(unseal_key_request(UnsealEncryptedKey {
|
||||||
|
nonce: vec![0u8; 24],
|
||||||
|
ciphertext: vec![0u8; 32],
|
||||||
|
associated_data: vec![],
|
||||||
|
}))
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
response.payload.unwrap(),
|
||||||
|
UserAgentResponsePayload::UnsealResult(UnsealResult::InvalidKey.into()),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
#[test_log::test]
|
||||||
|
pub async fn test_unseal_retry_after_invalid_key() {
|
||||||
|
let seal_key = b"real-seal-key";
|
||||||
|
let (_db, mut user_agent) = setup_sealed_user_agent(seal_key).await;
|
||||||
|
|
||||||
|
{
|
||||||
|
let encrypted_key = client_dh_encrypt(&mut user_agent, b"wrong-key").await;
|
||||||
|
|
||||||
|
let response = user_agent
|
||||||
|
.process_transport_inbound(unseal_key_request(encrypted_key))
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
response.payload.unwrap(),
|
||||||
|
UserAgentResponsePayload::UnsealResult(UnsealResult::InvalidKey.into()),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
{
|
||||||
|
let encrypted_key = client_dh_encrypt(&mut user_agent, seal_key).await;
|
||||||
|
|
||||||
|
let response = user_agent
|
||||||
|
.process_transport_inbound(unseal_key_request(encrypted_key))
|
||||||
|
.await
|
||||||
|
.unwrap();
|
||||||
|
|
||||||
|
assert_eq!(
|
||||||
|
response.payload.unwrap(),
|
||||||
|
UserAgentResponsePayload::UnsealResult(UnsealResult::Success.into()),
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
7
server/crates/arbiter-terrors-poc/Cargo.toml
Normal file
7
server/crates/arbiter-terrors-poc/Cargo.toml
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
[package]
|
||||||
|
name = "arbiter-terrors-poc"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition = "2024"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
terrors = "0.3"
|
||||||
139
server/crates/arbiter-terrors-poc/src/auth.rs
Normal file
139
server/crates/arbiter-terrors-poc/src/auth.rs
Normal file
@@ -0,0 +1,139 @@
|
|||||||
|
use crate::errors::{InternalError1, InternalError2, InvalidSignature, NotRegistered};
|
||||||
|
use terrors::OneOf;
|
||||||
|
|
||||||
|
use crate::errors::ProtoError;
|
||||||
|
|
||||||
|
// Each sub-call's error type already implements DrainInto<ProtoError>, so we convert
|
||||||
|
// directly to ProtoError without broaden — no turbofish needed anywhere.
|
||||||
|
//
|
||||||
|
// Call chain:
|
||||||
|
// load_config() → OneOf<(InternalError2,)> → ProtoError::from
|
||||||
|
// get_nonce() → OneOf<(InternalError1, InternalError2)> → ProtoError::from
|
||||||
|
// verify_sig() → OneOf<(InvalidSignature,)> → ProtoError::from
|
||||||
|
pub fn process_request(id: u32, sig: &str) -> Result<String, ProtoError> {
|
||||||
|
if id == 0 {
|
||||||
|
return Err(ProtoError::NotRegistered);
|
||||||
|
}
|
||||||
|
|
||||||
|
let config = load_config(id).map_err(ProtoError::from)?;
|
||||||
|
let nonce = crate::db::get_nonce(id).map_err(ProtoError::from)?;
|
||||||
|
verify_signature(nonce, sig).map_err(ProtoError::from)?;
|
||||||
|
|
||||||
|
Ok(format!("config={config} nonce={nonce} sig={sig}"))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Simulates loading a config value.
|
||||||
|
// id=97 triggers InternalError2 ("config read failed").
|
||||||
|
fn load_config(id: u32) -> Result<String, OneOf<(InternalError2,)>> {
|
||||||
|
if id == 97 {
|
||||||
|
return Err(OneOf::new(InternalError2("config read failed".to_owned())));
|
||||||
|
}
|
||||||
|
Ok(format!("cfg-{id}"))
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn verify_signature(_nonce: u32, sig: &str) -> Result<(), OneOf<(InvalidSignature,)>> {
|
||||||
|
if sig != "ok" {
|
||||||
|
return Err(OneOf::new(InvalidSignature));
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The full error set `authenticate` can produce: its own guard error plus
/// everything bubbled up from `db::get_nonce` and `verify_signature`.
type AuthError = OneOf<(
    NotRegistered,
    InvalidSignature,
    InternalError1,
    InternalError2,
)>;

/// Authenticates `id`/`sig`; on success returns the nonce fetched from the db.
/// id 0 is rejected as NotRegistered before any sub-call runs.
pub fn authenticate(id: u32, sig: &str) -> Result<u32, AuthError> {
    if id == 0 {
        return Err(OneOf::new(NotRegistered));
    }

    // Return type AuthError lets the compiler infer the broaden target.
    let nonce = crate::db::get_nonce(id).map_err(OneOf::broaden)?;
    verify_signature(nonce, sig).map_err(OneOf::broaden)?;

    Ok(nonce)
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    // "ok" is the only accepted signature string.
    #[test]
    fn verify_signature_ok() {
        assert!(verify_signature(42, "ok").is_ok());
    }

    // Any other signature yields InvalidSignature, recoverable via narrow().
    #[test]
    fn verify_signature_bad() {
        let err = verify_signature(42, "bad").unwrap_err();
        assert!(err.narrow::<crate::errors::InvalidSignature, _>().is_ok());
    }

    // db::get_nonce returns the fixed nonce 42 for non-sentinel ids.
    #[test]
    fn authenticate_success() {
        assert_eq!(authenticate(1, "ok").unwrap(), 42);
    }

    // id 0 trips the guard before any sub-call runs.
    #[test]
    fn authenticate_not_registered() {
        let err = authenticate(0, "ok").unwrap_err();
        assert!(err.narrow::<crate::errors::NotRegistered, _>().is_ok());
    }

    #[test]
    fn authenticate_invalid_signature() {
        let err = authenticate(1, "bad").unwrap_err();
        assert!(err.narrow::<crate::errors::InvalidSignature, _>().is_ok());
    }

    // id 99 is db::get_nonce's InternalError1 sentinel.
    #[test]
    fn authenticate_internal_error1() {
        let err = authenticate(99, "ok").unwrap_err();
        assert!(err.narrow::<crate::errors::InternalError1, _>().is_ok());
    }

    // id 98 is db::get_nonce's InternalError2 sentinel.
    #[test]
    fn authenticate_internal_error2() {
        let err = authenticate(98, "ok").unwrap_err();
        assert!(err.narrow::<crate::errors::InternalError2, _>().is_ok());
    }

    #[test]
    fn process_request_success() {
        let result = process_request(1, "ok").unwrap();
        assert!(result.contains("nonce=42"));
    }

    // ProtoError is a plain enum at the wire boundary, so matches! works directly.
    #[test]
    fn process_request_not_registered() {
        let err = process_request(0, "ok").unwrap_err();
        assert!(matches!(err, crate::errors::ProtoError::NotRegistered));
    }

    #[test]
    fn process_request_invalid_signature() {
        let err = process_request(1, "bad").unwrap_err();
        assert!(matches!(err, crate::errors::ProtoError::InvalidSignature));
    }

    // The internal errors' messages must survive the conversion into
    // ProtoError::Internal(..).
    #[test]
    fn process_request_internal_from_config() {
        // id=97 → load_config returns InternalError2
        let err = process_request(97, "ok").unwrap_err();
        assert!(
            matches!(err, crate::errors::ProtoError::Internal(ref msg) if msg == "config read failed")
        );
    }

    #[test]
    fn process_request_internal_from_db() {
        // id=99 → get_nonce returns InternalError1
        let err = process_request(99, "ok").unwrap_err();
        assert!(
            matches!(err, crate::errors::ProtoError::Internal(ref msg) if msg == "db pool unavailable")
        );
    }
}
|
||||||
38
server/crates/arbiter-terrors-poc/src/db.rs
Normal file
38
server/crates/arbiter-terrors-poc/src/db.rs
Normal file
@@ -0,0 +1,38 @@
|
|||||||
|
use crate::errors::{InternalError1, InternalError2};
|
||||||
|
use terrors::OneOf;
|
||||||
|
|
||||||
|
// Simulates fetching a nonce from a database.
|
||||||
|
// id=99 → InternalError1 (pool unavailable)
|
||||||
|
// id=98 → InternalError2 (query timeout)
|
||||||
|
pub fn get_nonce(id: u32) -> Result<u32, OneOf<(InternalError1, InternalError2)>> {
|
||||||
|
match id {
|
||||||
|
99 => Err(OneOf::new(InternalError1("db pool unavailable".to_owned()))),
|
||||||
|
98 => Err(OneOf::new(InternalError2("query timeout".to_owned()))),
|
||||||
|
_ => Ok(42),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn get_nonce_returns_nonce_for_valid_id() {
        assert_eq!(get_nonce(1).unwrap(), 42);
    }

    // narrow() succeeds when the OneOf currently holds the requested type,
    // handing back the inner value.
    #[test]
    fn get_nonce_returns_internal_error1_for_sentinel() {
        let err = get_nonce(99).unwrap_err();
        let internal = err.narrow::<crate::errors::InternalError1, _>().unwrap();
        assert_eq!(internal.0, "db pool unavailable");
    }

    // A failed narrow() hands back the remainder set; with one candidate type
    // left, take() extracts the value infallibly.
    #[test]
    fn get_nonce_returns_internal_error2_for_sentinel() {
        let err = get_nonce(98).unwrap_err();
        let e = err.narrow::<crate::errors::InternalError1, _>().unwrap_err();
        let internal = e.take::<crate::errors::InternalError2>();
        assert_eq!(internal.0, "query timeout");
    }
}
|
||||||
130
server/crates/arbiter-terrors-poc/src/errors.rs
Normal file
130
server/crates/arbiter-terrors-poc/src/errors.rs
Normal file
@@ -0,0 +1,130 @@
|
|||||||
|
use terrors::OneOf;
|
||||||
|
|
||||||
|
// Wire boundary type — what would go into a proto response
#[derive(Debug)]
pub enum ProtoError {
    // Caller id is unknown (the id==0 guard in auth).
    NotRegistered,
    // Signature failed verification.
    InvalidSignature,
    // Any internal failure, carrying its message. Or Box<dyn Error>, who cares?
    Internal(String),
}
|
||||||
|
|
||||||
|
// Internal terrors types
// Unit structs for the recoverable cases; the internal errors carry their
// human-readable failure message in field .0.
#[derive(Debug)]
pub struct NotRegistered;
#[derive(Debug)]
pub struct InvalidSignature;
#[derive(Debug)]
pub struct InternalError1(pub String);
#[derive(Debug)]
pub struct InternalError2(pub String);
|
||||||
|
|
||||||
|
// Errors can be scattered across the codebase as long as they implement Into<ProtoError>
impl From<NotRegistered> for ProtoError {
    fn from(_: NotRegistered) -> Self {
        ProtoError::NotRegistered
    }
}

impl From<InvalidSignature> for ProtoError {
    fn from(_: InvalidSignature) -> Self {
        ProtoError::InvalidSignature
    }
}

// Both internal error flavors collapse into the single Internal wire variant,
// preserving their message.
impl From<InternalError1> for ProtoError {
    fn from(e: InternalError1) -> Self {
        ProtoError::Internal(e.0)
    }
}
impl From<InternalError2> for ProtoError {
    fn from(e: InternalError2) -> Self {
        ProtoError::Internal(e.0)
    }
}
|
||||||
|
|
||||||
|
/// Private helper trait for converting from OneOf<T...> where each T can be converted
/// into the target type `O` by recursively narrowing until a match is found.
///
/// IDK why this isn't already in terrors.
trait DrainInto<O>: terrors::TypeSet + Sized {
    /// Consume the `OneOf` and convert whichever variant it currently holds
    /// into `O`.
    fn drain(e: OneOf<Self>) -> O;
}
|
||||||
|
|
||||||
|
macro_rules! impl_drain_into {
    // Base case: a one-element type set. take() extracts the sole value
    // infallibly, then Into converts it to the target.
    ($head:ident) => {
        impl<$head, O> DrainInto<O> for ($head,)
        where
            $head: Into<O> + 'static,
        {
            fn drain(e: OneOf<($head,)>) -> O {
                e.take().into()
            }
        }
    };
    // Recursive case: try to narrow to $head; on a miss, narrow() returns the
    // remainder set (the tail tuple), which we drain recursively.
    ($head:ident, $($tail:ident),+) => {
        impl<$head, $($tail),+, O> DrainInto<O> for ($head, $($tail),+)
        where
            $head: Into<O> + 'static,
            ($($tail,)+): DrainInto<O>,
        {
            fn drain(e: OneOf<($head, $($tail),+)>) -> O {
                match e.narrow::<$head, _>() {
                    Ok(h) => h.into(),
                    Err(rest) => <($($tail,)+)>::drain(rest),
                }
            }
        }
        // Re-invoke on the tail so every smaller tuple size also gets an impl.
        impl_drain_into!($($tail),+);
    };
}
|
||||||
|
|
||||||
|
// Generates impls for all tuple sizes from 1 up to 9 — nine idents are passed
// below, and each invocation produces one impl then recurses on the tail.
// (The upper bound is constrained by terrors' internal type-set impls.)
impl_drain_into!(A, B, C, D, E, F, G, H, I);
|
||||||
|
|
||||||
|
// Blanket From impl: body delegates to the recursive drain. This is what lets
// callers write `map_err(ProtoError::from)` (or rely on `?`) against any
// OneOf whose member types all convert into ProtoError.
impl<E: DrainInto<ProtoError>> From<OneOf<E>> for ProtoError {
    fn from(e: OneOf<E>) -> Self {
        E::drain(e)
    }
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn not_registered_converts_to_proto() {
        let e: ProtoError = NotRegistered.into();
        assert!(matches!(e, ProtoError::NotRegistered));
    }

    #[test]
    fn invalid_signature_converts_to_proto() {
        let e: ProtoError = InvalidSignature.into();
        assert!(matches!(e, ProtoError::InvalidSignature));
    }

    // The internal error's message survives conversion into Internal(..).
    #[test]
    fn internal_converts_to_proto() {
        let e: ProtoError = InternalError1("boom".into()).into();
        assert!(matches!(e, ProtoError::Internal(msg) if msg == "boom"));
    }

    // The blanket From<OneOf<..>> impl drains whichever variant is present.
    #[test]
    fn one_of_remainder_converts_to_proto_invalid_signature() {
        use terrors::OneOf;
        let e: OneOf<(InvalidSignature, InternalError1)> = OneOf::new(InvalidSignature);
        let proto = ProtoError::from(e);
        assert!(matches!(proto, ProtoError::InvalidSignature));
    }

    #[test]
    fn one_of_remainder_converts_to_proto_internal() {
        use terrors::OneOf;
        let e: OneOf<(InvalidSignature, InternalError1)> =
            OneOf::new(InternalError1("db fail".into()));
        let proto = ProtoError::from(e);
        assert!(matches!(proto, ProtoError::Internal(msg) if msg == "db fail"));
    }
}
|
||||||
43
server/crates/arbiter-terrors-poc/src/main.rs
Normal file
43
server/crates/arbiter-terrors-poc/src/main.rs
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
mod auth;
|
||||||
|
mod db;
|
||||||
|
mod errors;
|
||||||
|
|
||||||
|
use errors::ProtoError;
|
||||||
|
|
||||||
|
fn run(id: u32, sig: &str) {
|
||||||
|
print!("authenticate(id={id}, sig={sig:?}) => ");
|
||||||
|
match auth::authenticate(id, sig) {
|
||||||
|
Ok(nonce) => println!("Ok(nonce={nonce})"),
|
||||||
|
Err(e) => match e.narrow::<errors::NotRegistered, _>() {
|
||||||
|
Ok(_) => println!("Err(NotRegistered) — handled locally"),
|
||||||
|
Err(remaining) => {
|
||||||
|
let proto = ProtoError::from(remaining);
|
||||||
|
println!("Err(ProtoError::{proto:?}) — forwarded to wire");
|
||||||
|
}
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn run_process(id: u32, sig: &str) {
|
||||||
|
print!("process_request(id={id}, sig={sig:?}) => ");
|
||||||
|
match auth::process_request(id, sig) {
|
||||||
|
Ok(s) => println!("Ok({s})"),
|
||||||
|
Err(e) => println!("Err(ProtoError::{e:?})"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Walks both demo paths through every sentinel id:
//   id 0  → NotRegistered (guard, no I/O)
//   id 97 → InternalError2 from load_config (process_request path only)
//   id 98 → InternalError2 from get_nonce
//   id 99 → InternalError1 from get_nonce
//   sig "bad" → InvalidSignature; (1, "ok") → success
fn main() {
    println!("=== authenticate ===");
    let auth_cases = [(0, "ok"), (1, "bad"), (99, "ok"), (98, "ok"), (1, "ok")];
    for (id, sig) in auth_cases {
        run(id, sig);
    }

    println!("\n=== process_request (Try chain) ===");
    let process_cases = [(0, "ok"), (97, "ok"), (99, "ok"), (1, "bad"), (1, "ok")];
    for (id, sig) in process_cases {
        run_process(id, sig);
    }
}
|
||||||
7
server/crates/arbiter-tokens-registry/Cargo.toml
Normal file
7
server/crates/arbiter-tokens-registry/Cargo.toml
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
[package]
|
||||||
|
name = "arbiter-tokens-registry"
|
||||||
|
version = "0.1.0"
|
||||||
|
edition = "2024"
|
||||||
|
|
||||||
|
[dependencies]
|
||||||
|
alloy.workspace = true
|
||||||
1
server/crates/arbiter-tokens-registry/src/evm/mod.rs
Normal file
1
server/crates/arbiter-tokens-registry/src/evm/mod.rs
Normal file
@@ -0,0 +1 @@
|
|||||||
|
pub mod nonfungible;
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user