Compare commits
2 Commits
b22be1627a
...
security-b
| Author | SHA1 | Date | |
|---|---|---|---|
| 075d33219e | |||
| 8cb6f4abe0 |
3
.vscode/settings.json
vendored
@@ -1,3 +0,0 @@
|
|||||||
{
|
|
||||||
"git.enabled": false
|
|
||||||
}
|
|
||||||
@@ -8,7 +8,7 @@ when:
|
|||||||
include: ['.woodpecker/server-*.yaml', 'server/**']
|
include: ['.woodpecker/server-*.yaml', 'server/**']
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: audit
|
- name: test
|
||||||
image: jdxcode/mise:latest
|
image: jdxcode/mise:latest
|
||||||
directory: server
|
directory: server
|
||||||
environment:
|
environment:
|
||||||
|
|||||||
@@ -1,25 +0,0 @@
|
|||||||
when:
|
|
||||||
- event: pull_request
|
|
||||||
path:
|
|
||||||
include: ['.woodpecker/server-*.yaml', 'server/**']
|
|
||||||
- event: push
|
|
||||||
branch: main
|
|
||||||
path:
|
|
||||||
include: ['.woodpecker/server-*.yaml', 'server/**']
|
|
||||||
|
|
||||||
steps:
|
|
||||||
- name: lint
|
|
||||||
image: jdxcode/mise:latest
|
|
||||||
directory: server
|
|
||||||
environment:
|
|
||||||
CARGO_TERM_COLOR: always
|
|
||||||
CARGO_TARGET_DIR: /usr/local/cargo/target
|
|
||||||
CARGO_HOME: /usr/local/cargo/registry
|
|
||||||
volumes:
|
|
||||||
- cargo-target:/usr/local/cargo/target
|
|
||||||
- cargo-registry:/usr/local/cargo/registry
|
|
||||||
commands:
|
|
||||||
- apt-get update && apt-get install -y pkg-config
|
|
||||||
- mise install rust
|
|
||||||
- mise install protoc
|
|
||||||
- mise exec rust -- cargo clippy --all-targets --all-features -- -D warnings
|
|
||||||
@@ -8,7 +8,7 @@ when:
|
|||||||
include: ['.woodpecker/server-*.yaml', 'server/**']
|
include: ['.woodpecker/server-*.yaml', 'server/**']
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: vet
|
- name: test
|
||||||
image: jdxcode/mise:latest
|
image: jdxcode/mise:latest
|
||||||
directory: server
|
directory: server
|
||||||
environment:
|
environment:
|
||||||
|
|||||||
@@ -3,6 +3,7 @@
|
|||||||
Arbiter is a permissioned signing service for cryptocurrency wallets. It runs as a background service on the user's machine with an optional client application for vault management.
|
Arbiter is a permissioned signing service for cryptocurrency wallets. It runs as a background service on the user's machine with an optional client application for vault management.
|
||||||
|
|
||||||
**Core principle:** The vault NEVER exposes key material. It only produces signatures when a request satisfies the configured policies.
|
**Core principle:** The vault NEVER exposes key material. It only produces signatures when a request satisfies the configured policies.
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
## 1. Peer Types
|
## 1. Peer Types
|
||||||
|
|||||||
190
LICENSE
@@ -1,190 +0,0 @@
|
|||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
Copyright 2026 MarketTakers
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
13
README.md
@@ -1,13 +0,0 @@
|
|||||||
# Arbiter
|
|
||||||
> Policy-first multi-client wallet daemon, allowing permissioned transactions across blockchains
|
|
||||||
|
|
||||||
## Security warning
|
|
||||||
Arbiter can't meaningfully protect against host compromise. Potential attack flow:
|
|
||||||
- Attacker steals TLS keys from database
|
|
||||||
- Pretends to be server; just accepts user agent challenge solutions
|
|
||||||
- Pretend to be in sealed state and performing DH with client
|
|
||||||
- Steals user password and derives seal key
|
|
||||||
|
|
||||||
While this attack is highly targetive, it's still possible.
|
|
||||||
|
|
||||||
> This software is experimental. Do not use with funds you cannot afford to lose.
|
|
||||||
0
useragent/.gitignore → app/.gitignore
vendored
|
Before Width: | Height: | Size: 101 KiB After Width: | Height: | Size: 101 KiB |
|
Before Width: | Height: | Size: 5.5 KiB After Width: | Height: | Size: 5.5 KiB |
|
Before Width: | Height: | Size: 520 B After Width: | Height: | Size: 520 B |
|
Before Width: | Height: | Size: 14 KiB After Width: | Height: | Size: 14 KiB |
|
Before Width: | Height: | Size: 1.0 KiB After Width: | Height: | Size: 1.0 KiB |
|
Before Width: | Height: | Size: 36 KiB After Width: | Height: | Size: 36 KiB |
|
Before Width: | Height: | Size: 2.2 KiB After Width: | Height: | Size: 2.2 KiB |
@@ -41,6 +41,14 @@ packages:
|
|||||||
url: "https://pub.dev"
|
url: "https://pub.dev"
|
||||||
source: hosted
|
source: hosted
|
||||||
version: "1.19.1"
|
version: "1.19.1"
|
||||||
|
cupertino_icons:
|
||||||
|
dependency: "direct main"
|
||||||
|
description:
|
||||||
|
name: cupertino_icons
|
||||||
|
sha256: ba631d1c7f7bef6b729a622b7b752645a2d076dba9976925b8f25725a30e1ee6
|
||||||
|
url: "https://pub.dev"
|
||||||
|
source: hosted
|
||||||
|
version: "1.0.8"
|
||||||
fake_async:
|
fake_async:
|
||||||
dependency: transitive
|
dependency: transitive
|
||||||
description:
|
description:
|
||||||
89
app/pubspec.yaml
Normal file
@@ -0,0 +1,89 @@
|
|||||||
|
name: app
|
||||||
|
description: "A new Flutter project."
|
||||||
|
# The following line prevents the package from being accidentally published to
|
||||||
|
# pub.dev using `flutter pub publish`. This is preferred for private packages.
|
||||||
|
publish_to: 'none' # Remove this line if you wish to publish to pub.dev
|
||||||
|
|
||||||
|
# The following defines the version and build number for your application.
|
||||||
|
# A version number is three numbers separated by dots, like 1.2.43
|
||||||
|
# followed by an optional build number separated by a +.
|
||||||
|
# Both the version and the builder number may be overridden in flutter
|
||||||
|
# build by specifying --build-name and --build-number, respectively.
|
||||||
|
# In Android, build-name is used as versionName while build-number used as versionCode.
|
||||||
|
# Read more about Android versioning at https://developer.android.com/studio/publish/versioning
|
||||||
|
# In iOS, build-name is used as CFBundleShortVersionString while build-number is used as CFBundleVersion.
|
||||||
|
# Read more about iOS versioning at
|
||||||
|
# https://developer.apple.com/library/archive/documentation/General/Reference/InfoPlistKeyReference/Articles/CoreFoundationKeys.html
|
||||||
|
# In Windows, build-name is used as the major, minor, and patch parts
|
||||||
|
# of the product and file versions while build-number is used as the build suffix.
|
||||||
|
version: 1.0.0+1
|
||||||
|
|
||||||
|
environment:
|
||||||
|
sdk: ^3.10.8
|
||||||
|
|
||||||
|
# Dependencies specify other packages that your package needs in order to work.
|
||||||
|
# To automatically upgrade your package dependencies to the latest versions
|
||||||
|
# consider running `flutter pub upgrade --major-versions`. Alternatively,
|
||||||
|
# dependencies can be manually updated by changing the version numbers below to
|
||||||
|
# the latest version available on pub.dev. To see which dependencies have newer
|
||||||
|
# versions available, run `flutter pub outdated`.
|
||||||
|
dependencies:
|
||||||
|
flutter:
|
||||||
|
sdk: flutter
|
||||||
|
|
||||||
|
# The following adds the Cupertino Icons font to your application.
|
||||||
|
# Use with the CupertinoIcons class for iOS style icons.
|
||||||
|
cupertino_icons: ^1.0.8
|
||||||
|
|
||||||
|
dev_dependencies:
|
||||||
|
flutter_test:
|
||||||
|
sdk: flutter
|
||||||
|
|
||||||
|
# The "flutter_lints" package below contains a set of recommended lints to
|
||||||
|
# encourage good coding practices. The lint set provided by the package is
|
||||||
|
# activated in the `analysis_options.yaml` file located at the root of your
|
||||||
|
# package. See that file for information about deactivating specific lint
|
||||||
|
# rules and activating additional ones.
|
||||||
|
flutter_lints: ^6.0.0
|
||||||
|
|
||||||
|
# For information on the generic Dart part of this file, see the
|
||||||
|
# following page: https://dart.dev/tools/pub/pubspec
|
||||||
|
|
||||||
|
# The following section is specific to Flutter packages.
|
||||||
|
flutter:
|
||||||
|
|
||||||
|
# The following line ensures that the Material Icons font is
|
||||||
|
# included with your application, so that you can use the icons in
|
||||||
|
# the material Icons class.
|
||||||
|
uses-material-design: true
|
||||||
|
|
||||||
|
# To add assets to your application, add an assets section, like this:
|
||||||
|
# assets:
|
||||||
|
# - images/a_dot_burr.jpeg
|
||||||
|
# - images/a_dot_ham.jpeg
|
||||||
|
|
||||||
|
# An image asset can refer to one or more resolution-specific "variants", see
|
||||||
|
# https://flutter.dev/to/resolution-aware-images
|
||||||
|
|
||||||
|
# For details regarding adding assets from package dependencies, see
|
||||||
|
# https://flutter.dev/to/asset-from-package
|
||||||
|
|
||||||
|
# To add custom fonts to your application, add a fonts section here,
|
||||||
|
# in this "flutter" section. Each entry in this list should have a
|
||||||
|
# "family" key with the font family name, and a "fonts" key with a
|
||||||
|
# list giving the asset and other descriptors for the font. For
|
||||||
|
# example:
|
||||||
|
# fonts:
|
||||||
|
# - family: Schyler
|
||||||
|
# fonts:
|
||||||
|
# - asset: fonts/Schyler-Regular.ttf
|
||||||
|
# - asset: fonts/Schyler-Italic.ttf
|
||||||
|
# style: italic
|
||||||
|
# - family: Trajan Pro
|
||||||
|
# fonts:
|
||||||
|
# - asset: fonts/TrajanPro.ttf
|
||||||
|
# - asset: fonts/TrajanPro_Bold.ttf
|
||||||
|
# weight: 700
|
||||||
|
#
|
||||||
|
# For details regarding fonts from package dependencies,
|
||||||
|
# see https://flutter.dev/to/font-from-package
|
||||||
|
Before Width: | Height: | Size: 33 KiB After Width: | Height: | Size: 33 KiB |
@@ -10,10 +10,6 @@ backend = "cargo:cargo-features"
|
|||||||
version = "0.11.1"
|
version = "0.11.1"
|
||||||
backend = "cargo:cargo-features-manager"
|
backend = "cargo:cargo-features-manager"
|
||||||
|
|
||||||
[[tools."cargo:cargo-insta"]]
|
|
||||||
version = "1.46.3"
|
|
||||||
backend = "cargo:cargo-insta"
|
|
||||||
|
|
||||||
[[tools."cargo:cargo-nextest"]]
|
[[tools."cargo:cargo-nextest"]]
|
||||||
version = "0.9.126"
|
version = "0.9.126"
|
||||||
backend = "cargo:cargo-nextest"
|
backend = "cargo:cargo-nextest"
|
||||||
|
|||||||
@@ -2,10 +2,10 @@
|
|||||||
"cargo:diesel_cli" = { version = "2.3.6", features = "sqlite,sqlite-bundled", default-features = false }
|
"cargo:diesel_cli" = { version = "2.3.6", features = "sqlite,sqlite-bundled", default-features = false }
|
||||||
"cargo:cargo-audit" = "0.22.1"
|
"cargo:cargo-audit" = "0.22.1"
|
||||||
"cargo:cargo-vet" = "0.10.2"
|
"cargo:cargo-vet" = "0.10.2"
|
||||||
|
|
||||||
flutter = "3.38.9-stable"
|
flutter = "3.38.9-stable"
|
||||||
protoc = "29.6"
|
protoc = "29.6"
|
||||||
"rust" = {version = "1.93.0", components = "clippy"}
|
rust = "1.93.1"
|
||||||
"cargo:cargo-features-manager" = "0.11.1"
|
"cargo:cargo-features-manager" = "0.11.1"
|
||||||
"cargo:cargo-nextest" = "0.9.126"
|
"cargo:cargo-nextest" = "0.9.126"
|
||||||
"cargo:cargo-shear" = "latest"
|
"cargo:cargo-shear" = "latest"
|
||||||
"cargo:cargo-insta" = "1.46.3"
|
|
||||||
|
|||||||
@@ -3,14 +3,65 @@ syntax = "proto3";
|
|||||||
package arbiter;
|
package arbiter;
|
||||||
|
|
||||||
import "auth.proto";
|
import "auth.proto";
|
||||||
import "client.proto";
|
|
||||||
import "user_agent.proto";
|
message ClientRequest {
|
||||||
|
oneof payload {
|
||||||
|
arbiter.auth.ClientMessage auth_message = 1;
|
||||||
|
CertRotationAck cert_rotation_ack = 2;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
message ClientResponse {
|
||||||
|
oneof payload {
|
||||||
|
arbiter.auth.ServerMessage auth_message = 1;
|
||||||
|
CertRotationNotification cert_rotation_notification = 2;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
message UserAgentRequest {
|
||||||
|
oneof payload {
|
||||||
|
arbiter.auth.ClientMessage auth_message = 1;
|
||||||
|
CertRotationAck cert_rotation_ack = 2;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
message UserAgentResponse {
|
||||||
|
oneof payload {
|
||||||
|
arbiter.auth.ServerMessage auth_message = 1;
|
||||||
|
CertRotationNotification cert_rotation_notification = 2;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
message ServerInfo {
|
message ServerInfo {
|
||||||
string version = 1;
|
string version = 1;
|
||||||
bytes cert_public_key = 2;
|
bytes cert_public_key = 2;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TLS Certificate Rotation Protocol
|
||||||
|
message CertRotationNotification {
|
||||||
|
// New public certificate (DER-encoded)
|
||||||
|
bytes new_cert = 1;
|
||||||
|
|
||||||
|
// Unix timestamp when rotation will be executed (if all ACKs received)
|
||||||
|
int64 rotation_scheduled_at = 2;
|
||||||
|
|
||||||
|
// Unix timestamp deadline for ACK (7 days from now)
|
||||||
|
int64 ack_deadline = 3;
|
||||||
|
|
||||||
|
// Rotation ID for tracking
|
||||||
|
int32 rotation_id = 4;
|
||||||
|
}
|
||||||
|
|
||||||
|
message CertRotationAck {
|
||||||
|
// Rotation ID (from CertRotationNotification)
|
||||||
|
int32 rotation_id = 1;
|
||||||
|
|
||||||
|
// Client public key for identification
|
||||||
|
bytes client_public_key = 2;
|
||||||
|
|
||||||
|
// Confirmation that client saved the new certificate
|
||||||
|
bool cert_saved = 3;
|
||||||
|
}
|
||||||
|
|
||||||
service ArbiterService {
|
service ArbiterService {
|
||||||
rpc Client(stream ClientRequest) returns (stream ClientResponse);
|
rpc Client(stream ClientRequest) returns (stream ClientResponse);
|
||||||
rpc UserAgent(stream UserAgentRequest) returns (stream UserAgentResponse);
|
rpc UserAgent(stream UserAgentRequest) returns (stream UserAgentResponse);
|
||||||
|
|||||||
@@ -1,17 +0,0 @@
|
|||||||
syntax = "proto3";
|
|
||||||
|
|
||||||
package arbiter;
|
|
||||||
|
|
||||||
import "auth.proto";
|
|
||||||
|
|
||||||
message ClientRequest {
|
|
||||||
oneof payload {
|
|
||||||
arbiter.auth.ClientMessage auth_message = 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
message ClientResponse {
|
|
||||||
oneof payload {
|
|
||||||
arbiter.auth.ServerMessage auth_message = 1;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
46
protobufs/google/protobuf/timestamp.proto
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
// Protocol Buffers - Google's data interchange format
|
||||||
|
// Copyright 2008 Google Inc. All rights reserved.
|
||||||
|
//
|
||||||
|
// Redistribution and use in source and binary forms, with or without
|
||||||
|
// modification, are permitted provided that the following conditions are
|
||||||
|
// met:
|
||||||
|
//
|
||||||
|
// * Redistributions of source code must retain the above copyright
|
||||||
|
// notice, this list of conditions and the following disclaimer.
|
||||||
|
// * Redistributions in binary form must reproduce the above
|
||||||
|
// copyright notice, this list of conditions and the following disclaimer
|
||||||
|
// in the documentation and/or other materials provided with the
|
||||||
|
// distribution.
|
||||||
|
// * Neither the name of Google Inc. nor the names of its
|
||||||
|
// contributors may be used to endorse or promote products derived from
|
||||||
|
// this software without specific prior written permission.
|
||||||
|
|
||||||
|
syntax = "proto3";
|
||||||
|
|
||||||
|
package google.protobuf;
|
||||||
|
|
||||||
|
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
|
||||||
|
option cc_enable_arenas = true;
|
||||||
|
option go_package = "google.golang.org/protobuf/types/known/timestamppb";
|
||||||
|
option java_package = "com.google.protobuf";
|
||||||
|
option java_outer_classname = "TimestampProto";
|
||||||
|
option java_multiple_files = true;
|
||||||
|
option objc_class_prefix = "GPB";
|
||||||
|
|
||||||
|
// A Timestamp represents a point in time independent of any time zone or local
|
||||||
|
// calendar, encoded as a count of seconds and fractions of seconds at
|
||||||
|
// nanosecond resolution. The count is relative to an epoch at UTC midnight on
|
||||||
|
// January 1, 1970, in the proleptic Gregorian calendar which extends the
|
||||||
|
// Gregorian calendar backwards to year one.
|
||||||
|
message Timestamp {
|
||||||
|
// Represents seconds of UTC time since Unix epoch
|
||||||
|
// 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
|
||||||
|
// 9999-12-31T23:59:59Z inclusive.
|
||||||
|
int64 seconds = 1;
|
||||||
|
|
||||||
|
// Non-negative fractions of a second at nanosecond resolution. Negative
|
||||||
|
// second values with fractions must still have non-negative nanos values
|
||||||
|
// that count forward in time. Must be from 0 to 999,999,999
|
||||||
|
// inclusive.
|
||||||
|
int32 nanos = 2;
|
||||||
|
}
|
||||||
14
protobufs/unseal.proto
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
syntax = "proto3";
|
||||||
|
|
||||||
|
package arbiter.unseal;
|
||||||
|
|
||||||
|
message UserAgentKeyRequest {}
|
||||||
|
|
||||||
|
message ServerKeyResponse {
|
||||||
|
bytes pubkey = 1;
|
||||||
|
}
|
||||||
|
message UserAgentSealedKey {
|
||||||
|
bytes sealed_key = 1;
|
||||||
|
bytes pubkey = 2;
|
||||||
|
bytes nonce = 3;
|
||||||
|
}
|
||||||
@@ -1,51 +0,0 @@
|
|||||||
syntax = "proto3";
|
|
||||||
|
|
||||||
package arbiter;
|
|
||||||
|
|
||||||
import "auth.proto";
|
|
||||||
import "google/protobuf/empty.proto";
|
|
||||||
|
|
||||||
message UnsealStart {
|
|
||||||
bytes client_pubkey = 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
message UnsealStartResponse {
|
|
||||||
bytes server_pubkey = 1;
|
|
||||||
}
|
|
||||||
message UnsealEncryptedKey {
|
|
||||||
bytes nonce = 1;
|
|
||||||
bytes ciphertext = 2;
|
|
||||||
bytes associated_data = 3;
|
|
||||||
}
|
|
||||||
|
|
||||||
enum UnsealResult {
|
|
||||||
UNSEAL_RESULT_UNSPECIFIED = 0;
|
|
||||||
UNSEAL_RESULT_SUCCESS = 1;
|
|
||||||
UNSEAL_RESULT_INVALID_KEY = 2;
|
|
||||||
UNSEAL_RESULT_UNBOOTSTRAPPED = 3;
|
|
||||||
}
|
|
||||||
|
|
||||||
enum VaultState {
|
|
||||||
VAULT_STATE_UNSPECIFIED = 0;
|
|
||||||
VAULT_STATE_UNBOOTSTRAPPED = 1;
|
|
||||||
VAULT_STATE_SEALED = 2;
|
|
||||||
VAULT_STATE_UNSEALED = 3;
|
|
||||||
VAULT_STATE_ERROR = 4;
|
|
||||||
}
|
|
||||||
|
|
||||||
message UserAgentRequest {
|
|
||||||
oneof payload {
|
|
||||||
arbiter.auth.ClientMessage auth_message = 1;
|
|
||||||
UnsealStart unseal_start = 2;
|
|
||||||
UnsealEncryptedKey unseal_encrypted_key = 3;
|
|
||||||
google.protobuf.Empty query_vault_state = 4;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
message UserAgentResponse {
|
|
||||||
oneof payload {
|
|
||||||
arbiter.auth.ServerMessage auth_message = 1;
|
|
||||||
UnsealStartResponse unseal_start_response = 2;
|
|
||||||
UnsealResult unseal_result = 3;
|
|
||||||
VaultState vault_state = 4;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
601
server/Cargo.lock
generated
@@ -23,12 +23,3 @@ async-trait = "0.1.89"
|
|||||||
futures = "0.3.31"
|
futures = "0.3.31"
|
||||||
tokio-stream = { version = "0.1.18", features = ["full"] }
|
tokio-stream = { version = "0.1.18", features = ["full"] }
|
||||||
kameo = "0.19.2"
|
kameo = "0.19.2"
|
||||||
x25519-dalek = { version = "2.0.1", features = ["getrandom"] }
|
|
||||||
rstest = "0.26.1"
|
|
||||||
rustls-pki-types = "1.14.0"
|
|
||||||
rcgen = { version = "0.14.7", features = [
|
|
||||||
"aws_lc_rs",
|
|
||||||
"pem",
|
|
||||||
"x509-parser",
|
|
||||||
"zeroize",
|
|
||||||
], default-features = false }
|
|
||||||
|
|||||||
BIN
server/crates/.DS_Store
vendored
@@ -3,6 +3,5 @@ name = "arbiter-client"
|
|||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
edition = "2024"
|
edition = "2024"
|
||||||
repository = "https://git.markettakers.org/MarketTakers/arbiter"
|
repository = "https://git.markettakers.org/MarketTakers/arbiter"
|
||||||
license = "Apache-2.0"
|
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
|
|||||||
@@ -3,31 +3,19 @@ name = "arbiter-proto"
|
|||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
edition = "2024"
|
edition = "2024"
|
||||||
repository = "https://git.markettakers.org/MarketTakers/arbiter"
|
repository = "https://git.markettakers.org/MarketTakers/arbiter"
|
||||||
license = "Apache-2.0"
|
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
tonic.workspace = true
|
tonic.workspace = true
|
||||||
tokio.workspace = true
|
tokio.workspace = true
|
||||||
futures.workspace = true
|
futures.workspace = true
|
||||||
|
hex = "0.4.3"
|
||||||
tonic-prost = "0.14.3"
|
tonic-prost = "0.14.3"
|
||||||
prost = "0.14.3"
|
prost = "0.14.3"
|
||||||
kameo.workspace = true
|
kameo.workspace = true
|
||||||
url = "2.5.8"
|
|
||||||
miette.workspace = true
|
|
||||||
thiserror.workspace = true
|
|
||||||
rustls-pki-types.workspace = true
|
|
||||||
base64 = "0.22.1"
|
|
||||||
tracing.workspace = true
|
|
||||||
|
|
||||||
[build-dependencies]
|
[build-dependencies]
|
||||||
|
prost-build = "0.14.3"
|
||||||
|
serde_json = "1"
|
||||||
tonic-prost-build = "0.14.3"
|
tonic-prost-build = "0.14.3"
|
||||||
|
|
||||||
[dev-dependencies]
|
|
||||||
rstest.workspace = true
|
|
||||||
rand.workspace = true
|
|
||||||
rcgen.workspace = true
|
|
||||||
|
|
||||||
[package.metadata.cargo-shear]
|
|
||||||
ignored = ["tonic-prost", "prost", "kameo"]
|
|
||||||
|
|
||||||
|
|
||||||
|
|||||||
@@ -1,21 +1,15 @@
|
|||||||
use tonic_prost_build::configure;
|
|
||||||
|
|
||||||
static PROTOBUF_DIR: &str = "../../../protobufs";
|
static PROTOBUF_DIR: &str = "../../../protobufs";
|
||||||
|
|
||||||
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||||
|
let proto_files = vec![
|
||||||
println!("cargo::rerun-if-changed={PROTOBUF_DIR}");
|
|
||||||
|
|
||||||
configure()
|
|
||||||
.message_attribute(".", "#[derive(::kameo::Reply)]")
|
|
||||||
.compile_protos(
|
|
||||||
&[
|
|
||||||
format!("{}/arbiter.proto", PROTOBUF_DIR),
|
format!("{}/arbiter.proto", PROTOBUF_DIR),
|
||||||
format!("{}/auth.proto", PROTOBUF_DIR),
|
format!("{}/auth.proto", PROTOBUF_DIR),
|
||||||
],
|
];
|
||||||
&[PROTOBUF_DIR.to_string()],
|
|
||||||
)
|
// Компилируем protobuf (tonic-prost-build автоматически использует prost_types для google.protobuf)
|
||||||
|
tonic_prost_build::configure()
|
||||||
|
.message_attribute(".", "#[derive(::kameo::Reply)]")
|
||||||
|
.compile_protos(&proto_files, &[PROTOBUF_DIR.to_string()])?;
|
||||||
|
|
||||||
.unwrap();
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,8 +1,3 @@
|
|||||||
pub mod transport;
|
|
||||||
pub mod url;
|
|
||||||
|
|
||||||
use base64::{Engine, prelude::BASE64_STANDARD};
|
|
||||||
|
|
||||||
use crate::proto::auth::AuthChallenge;
|
use crate::proto::auth::AuthChallenge;
|
||||||
|
|
||||||
pub mod proto {
|
pub mod proto {
|
||||||
@@ -13,7 +8,9 @@ pub mod proto {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub static BOOTSTRAP_PATH: &str = "bootstrap_token";
|
pub mod transport;
|
||||||
|
|
||||||
|
pub static BOOTSTRAP_TOKEN_PATH: &str = "bootstrap_token";
|
||||||
|
|
||||||
pub fn home_path() -> Result<std::path::PathBuf, std::io::Error> {
|
pub fn home_path() -> Result<std::path::PathBuf, std::io::Error> {
|
||||||
static ARBITER_HOME: &str = ".arbiter";
|
static ARBITER_HOME: &str = ".arbiter";
|
||||||
@@ -29,6 +26,6 @@ pub fn home_path() -> Result<std::path::PathBuf, std::io::Error> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn format_challenge(challenge: &AuthChallenge) -> Vec<u8> {
|
pub fn format_challenge(challenge: &AuthChallenge) -> Vec<u8> {
|
||||||
let concat_form = format!("{}:{}", challenge.nonce, BASE64_STANDARD.encode(&challenge.pubkey));
|
let concat_form = format!("{}:{}", challenge.nonce, hex::encode(&challenge.pubkey));
|
||||||
concat_form.into_bytes().to_vec()
|
concat_form.into_bytes().to_vec()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,125 +1,46 @@
|
|||||||
use std::marker::PhantomData;
|
use futures::{Stream, StreamExt};
|
||||||
|
use tokio::sync::mpsc::{self, error::SendError};
|
||||||
use futures::StreamExt;
|
|
||||||
use tokio::sync::mpsc;
|
|
||||||
use tonic::{Status, Streaming};
|
use tonic::{Status, Streaming};
|
||||||
|
|
||||||
/// Errors returned by transport adapters implementing [`Bi`].
|
|
||||||
pub enum Error {
|
|
||||||
/// The outbound side of the transport is no longer accepting messages.
|
|
||||||
ChannelClosed,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Minimal bidirectional transport abstraction used by protocol code.
|
// Abstraction for stream for sans-io capabilities
|
||||||
///
|
pub trait Bi<T, U>: Stream<Item = Result<T, Status>> + Send + Sync + 'static {
|
||||||
/// `Bi<T, U, E>` models a duplex channel with:
|
type Error;
|
||||||
/// - inbound items of type `T` read via [`Bi::recv`]
|
|
||||||
/// - outbound success items of type `U` or domain errors of type `E` written via [`Bi::send`]
|
|
||||||
///
|
|
||||||
/// The trait intentionally exposes only the operations the protocol layer needs,
|
|
||||||
/// allowing it to work with gRPC streams and other transport implementations.
|
|
||||||
///
|
|
||||||
/// # Stream termination and errors
|
|
||||||
///
|
|
||||||
/// [`Bi::recv`] returns:
|
|
||||||
/// - `Some(item)` when a new inbound message is available
|
|
||||||
/// - `None` when the inbound stream ends or the underlying transport reports an error
|
|
||||||
///
|
|
||||||
/// Implementations may collapse transport-specific receive errors into `None`
|
|
||||||
/// when the protocol does not need to distinguish them from normal stream
|
|
||||||
/// termination.
|
|
||||||
pub trait Bi<T, U, E>: Send + Sync + 'static {
|
|
||||||
/// Sends one outbound result to the peer.
|
|
||||||
fn send(
|
fn send(
|
||||||
&mut self,
|
&mut self,
|
||||||
item: Result<U, E>,
|
item: Result<U, Status>,
|
||||||
) -> impl std::future::Future<Output = Result<(), Error>> + Send;
|
) -> impl std::future::Future<Output = Result<(), Self::Error>> + Send;
|
||||||
|
|
||||||
/// Receives the next inbound item.
|
|
||||||
///
|
|
||||||
/// Returns `None` when the inbound stream is finished or can no longer
|
|
||||||
/// produce items.
|
|
||||||
fn recv(&mut self) -> impl std::future::Future<Output = Option<T>> + Send;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/// [`Bi`] adapter backed by a tonic gRPC bidirectional stream.
|
// Bi-directional stream abstraction for handling gRPC streaming requests and responses
|
||||||
///
|
pub struct BiStream<T, U> {
|
||||||
/// Outbound items are sent through a Tokio MPSC sender, while inbound items are
|
pub request_stream: Streaming<T>,
|
||||||
/// read from tonic [`Streaming`].
|
pub response_sender: mpsc::Sender<Result<U, Status>>,
|
||||||
pub struct GrpcAdapter<Inbound, Outbound, E> {
|
|
||||||
sender: mpsc::Sender<Result<Outbound, Status>>,
|
|
||||||
receiver: Streaming<Inbound>,
|
|
||||||
_error: PhantomData<E>,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
impl<Inbound, Outbound, E> GrpcAdapter<Inbound, Outbound, E> {
|
impl<T, U> Stream for BiStream<T, U>
|
||||||
|
|
||||||
/// Creates a new gRPC-backed [`Bi`] adapter.
|
|
||||||
pub fn new(sender: mpsc::Sender<Result<Outbound, Status>>, receiver: Streaming<Inbound>) -> Self {
|
|
||||||
Self {
|
|
||||||
sender,
|
|
||||||
receiver,
|
|
||||||
_error: PhantomData,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Inbound, Outbound, E> Bi<Inbound, Outbound, E> for GrpcAdapter<Inbound, Outbound, E>
|
|
||||||
where
|
where
|
||||||
Inbound: Send + 'static,
|
T: Send + 'static,
|
||||||
Outbound: Send + 'static,
|
U: Send + 'static,
|
||||||
E: Into<Status> + Send + Sync + 'static,
|
|
||||||
{
|
{
|
||||||
#[tracing::instrument(level = "trace", skip(self, item))]
|
type Item = Result<T, Status>;
|
||||||
async fn send(&mut self, item: Result<Outbound, E>) -> Result<(), Error> {
|
|
||||||
self.sender
|
|
||||||
.send(item.map_err(Into::into))
|
|
||||||
.await
|
|
||||||
.map_err(|_| Error::ChannelClosed)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tracing::instrument(level = "trace", skip(self))]
|
fn poll_next(
|
||||||
async fn recv(&mut self) -> Option<Inbound> {
|
mut self: std::pin::Pin<&mut Self>,
|
||||||
self.receiver.next().await.transpose().ok().flatten()
|
cx: &mut std::task::Context<'_>,
|
||||||
|
) -> std::task::Poll<Option<Self::Item>> {
|
||||||
|
self.request_stream.poll_next_unpin(cx)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/// No-op [`Bi`] transport for tests and manual actor usage.
|
impl<T, U> Bi<T, U> for BiStream<T, U>
|
||||||
///
|
|
||||||
/// `send` drops all items and succeeds. [`Bi::recv`] never resolves and therefore
|
|
||||||
/// does not busy-wait or spuriously close the stream.
|
|
||||||
pub struct DummyTransport<T, U, E> {
|
|
||||||
_marker: PhantomData<(T, U, E)>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T, U, E> DummyTransport<T, U, E> {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
Self {
|
|
||||||
_marker: PhantomData,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T, U, E> Default for DummyTransport<T, U, E> {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self::new()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<T, U, E> Bi<T, U, E> for DummyTransport<T, U, E>
|
|
||||||
where
|
where
|
||||||
T: Send + Sync + 'static,
|
T: Send + 'static,
|
||||||
U: Send + Sync + 'static,
|
U: Send + 'static,
|
||||||
E: Send + Sync + 'static,
|
|
||||||
{
|
{
|
||||||
async fn send(&mut self, _item: Result<U, E>) -> Result<(), Error> {
|
type Error = SendError<Result<U, Status>>;
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn recv(&mut self) -> impl std::future::Future<Output = Option<T>> + Send {
|
async fn send(&mut self, item: Result<U, Status>) -> Result<(), Self::Error> {
|
||||||
async {
|
self.response_sender.send(item).await
|
||||||
std::future::pending::<()>().await;
|
|
||||||
None
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,128 +0,0 @@
|
|||||||
use std::fmt::Display;
|
|
||||||
|
|
||||||
use base64::{Engine as _, prelude::BASE64_URL_SAFE};
|
|
||||||
use rustls_pki_types::CertificateDer;
|
|
||||||
|
|
||||||
const ARBITER_URL_SCHEME: &str = "arbiter";
|
|
||||||
const CERT_QUERY_KEY: &str = "cert";
|
|
||||||
const BOOTSTRAP_TOKEN_QUERY_KEY: &str = "bootstrap_token";
|
|
||||||
|
|
||||||
pub struct ArbiterUrl {
|
|
||||||
pub host: String,
|
|
||||||
pub port: u16,
|
|
||||||
pub ca_cert: CertificateDer<'static>,
|
|
||||||
pub bootstrap_token: Option<String>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Display for ArbiterUrl {
|
|
||||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
|
||||||
let mut base = format!(
|
|
||||||
"{ARBITER_URL_SCHEME}://{}:{}?{CERT_QUERY_KEY}={}",
|
|
||||||
self.host,
|
|
||||||
self.port,
|
|
||||||
BASE64_URL_SAFE.encode(self.ca_cert.to_vec())
|
|
||||||
);
|
|
||||||
if let Some(token) = &self.bootstrap_token {
|
|
||||||
base.push_str(&format!("&{BOOTSTRAP_TOKEN_QUERY_KEY}={}", token));
|
|
||||||
}
|
|
||||||
f.write_str(&base)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, thiserror::Error, miette::Diagnostic)]
|
|
||||||
pub enum Error {
|
|
||||||
#[error("Invalid URL scheme, expected '{ARBITER_URL_SCHEME}://'")]
|
|
||||||
#[diagnostic(
|
|
||||||
code(arbiter::url::invalid_scheme),
|
|
||||||
help("The URL must start with '{ARBITER_URL_SCHEME}://'")
|
|
||||||
)]
|
|
||||||
InvalidScheme,
|
|
||||||
#[error("Missing host in URL")]
|
|
||||||
#[diagnostic(
|
|
||||||
code(arbiter::url::missing_host),
|
|
||||||
help("The URL must include a host, e.g., '{ARBITER_URL_SCHEME}://127.0.0.1:<port>'")
|
|
||||||
)]
|
|
||||||
MissingHost,
|
|
||||||
#[error("Missing port in URL")]
|
|
||||||
#[diagnostic(
|
|
||||||
code(arbiter::url::missing_port),
|
|
||||||
help("The URL must include a port, e.g., '{ARBITER_URL_SCHEME}://127.0.0.1:1234'")
|
|
||||||
)]
|
|
||||||
MissingPort,
|
|
||||||
#[error("Missing 'cert' query parameter in URL")]
|
|
||||||
#[diagnostic(
|
|
||||||
code(arbiter::url::missing_cert),
|
|
||||||
help("The URL must include a 'cert' query parameter")
|
|
||||||
)]
|
|
||||||
MissingCert,
|
|
||||||
#[error("Invalid base64 in 'cert' query parameter: {0}")]
|
|
||||||
#[diagnostic(code(arbiter::url::invalid_cert_base64))]
|
|
||||||
InvalidCertBase64(#[from] base64::DecodeError),
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a> TryFrom<&'a str> for ArbiterUrl {
|
|
||||||
type Error = Error;
|
|
||||||
|
|
||||||
fn try_from(value: &'a str) -> Result<Self, Self::Error> {
|
|
||||||
let url = url::Url::parse(value).map_err(|_| Error::InvalidScheme)?;
|
|
||||||
|
|
||||||
if url.scheme() != ARBITER_URL_SCHEME {
|
|
||||||
return Err(Error::InvalidScheme);
|
|
||||||
}
|
|
||||||
|
|
||||||
let host = url.host_str().ok_or(Error::MissingHost)?.to_string();
|
|
||||||
let port = url.port().ok_or(Error::MissingPort)?;
|
|
||||||
let cert_str = url
|
|
||||||
.query_pairs()
|
|
||||||
.find(|(k, _)| k == CERT_QUERY_KEY)
|
|
||||||
.ok_or(Error::MissingCert)?
|
|
||||||
.1;
|
|
||||||
|
|
||||||
let cert = BASE64_URL_SAFE.decode(cert_str.as_ref())?;
|
|
||||||
let cert = CertificateDer::from_slice(&cert).into_owned();
|
|
||||||
|
|
||||||
let bootstrap_token = url
|
|
||||||
.query_pairs()
|
|
||||||
.find(|(k, _)| k == BOOTSTRAP_TOKEN_QUERY_KEY)
|
|
||||||
.map(|(_, v)| v.to_string());
|
|
||||||
|
|
||||||
Ok(ArbiterUrl {
|
|
||||||
host,
|
|
||||||
port,
|
|
||||||
ca_cert: cert,
|
|
||||||
bootstrap_token,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use rcgen::generate_simple_self_signed;
|
|
||||||
use rstest::rstest;
|
|
||||||
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
#[rstest]
|
|
||||||
|
|
||||||
fn test_parsing_correctness(
|
|
||||||
#[values("127.0.0.1", "localhost", "192.168.1.1", "some.domain.com")] host: &str,
|
|
||||||
|
|
||||||
#[values(None, Some("token123".to_string()))] bootstrap_token: Option<String>,
|
|
||||||
) {
|
|
||||||
let cert = generate_simple_self_signed(&["Arbiter CA".into()]).unwrap();
|
|
||||||
let cert = cert.cert.der();
|
|
||||||
|
|
||||||
let url = ArbiterUrl {
|
|
||||||
host: host.to_string(),
|
|
||||||
port: 1234,
|
|
||||||
ca_cert: cert.clone().into_owned(),
|
|
||||||
bootstrap_token,
|
|
||||||
};
|
|
||||||
let url_str = url.to_string();
|
|
||||||
let parsed_url = ArbiterUrl::try_from(url_str.as_str()).unwrap();
|
|
||||||
assert_eq!(url.host, parsed_url.host);
|
|
||||||
assert_eq!(url.port, parsed_url.port);
|
|
||||||
assert_eq!(url.ca_cert.to_vec(), parsed_url.ca_cert.to_vec());
|
|
||||||
assert_eq!(url.bootstrap_token, parsed_url.bootstrap_token);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
BIN
server/crates/arbiter-server/.DS_Store
vendored
@@ -3,10 +3,15 @@ name = "arbiter-server"
|
|||||||
version = "0.1.0"
|
version = "0.1.0"
|
||||||
edition = "2024"
|
edition = "2024"
|
||||||
repository = "https://git.markettakers.org/MarketTakers/arbiter"
|
repository = "https://git.markettakers.org/MarketTakers/arbiter"
|
||||||
license = "Apache-2.0"
|
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
diesel = { version = "2.3.6", features = ["chrono", "returning_clauses_for_sqlite_3_35", "serde_json", "time", "uuid"] }
|
diesel = { version = "2.3.6", features = [
|
||||||
|
"sqlite",
|
||||||
|
"uuid",
|
||||||
|
"time",
|
||||||
|
"chrono",
|
||||||
|
"serde_json",
|
||||||
|
] }
|
||||||
diesel-async = { version = "0.7.4", features = [
|
diesel-async = { version = "0.7.4", features = [
|
||||||
"bb8",
|
"bb8",
|
||||||
"migrations",
|
"migrations",
|
||||||
@@ -16,9 +21,7 @@ diesel-async = { version = "0.7.4", features = [
|
|||||||
ed25519-dalek.workspace = true
|
ed25519-dalek.workspace = true
|
||||||
arbiter-proto.path = "../arbiter-proto"
|
arbiter-proto.path = "../arbiter-proto"
|
||||||
tracing.workspace = true
|
tracing.workspace = true
|
||||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
|
||||||
tonic.workspace = true
|
tonic.workspace = true
|
||||||
tonic.features = ["tls-aws-lc"]
|
|
||||||
tokio.workspace = true
|
tokio.workspace = true
|
||||||
rustls.workspace = true
|
rustls.workspace = true
|
||||||
smlang.workspace = true
|
smlang.workspace = true
|
||||||
@@ -31,18 +34,19 @@ futures.workspace = true
|
|||||||
tokio-stream.workspace = true
|
tokio-stream.workspace = true
|
||||||
dashmap = "6.1.0"
|
dashmap = "6.1.0"
|
||||||
rand.workspace = true
|
rand.workspace = true
|
||||||
rcgen.workspace = true
|
rcgen = { version = "0.14.7", features = [
|
||||||
|
"aws_lc_rs",
|
||||||
|
"pem",
|
||||||
|
"x509-parser",
|
||||||
|
"zeroize",
|
||||||
|
], default-features = false }
|
||||||
chrono.workspace = true
|
chrono.workspace = true
|
||||||
memsafe = "0.4.0"
|
memsafe = "0.4.0"
|
||||||
zeroize = { version = "1.8.2", features = ["std", "simd"] }
|
zeroize = { version = "1.8.2", features = ["std", "simd"] }
|
||||||
|
argon2 = { version = "0.5", features = ["std"] }
|
||||||
kameo.workspace = true
|
kameo.workspace = true
|
||||||
x25519-dalek.workspace = true
|
hex = "0.4.3"
|
||||||
chacha20poly1305 = { version = "0.10.1", features = ["std"] }
|
chacha20poly1305 = "0.10.1"
|
||||||
argon2 = { version = "0.5.3", features = ["zeroize"] }
|
|
||||||
restructed = "0.2.2"
|
|
||||||
strum = { version = "0.27.2", features = ["derive"] }
|
|
||||||
pem = "3.0.6"
|
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
insta = "1.46.3"
|
|
||||||
test-log = { version = "0.2", default-features = false, features = ["trace"] }
|
test-log = { version = "0.2", default-features = false, features = ["trace"] }
|
||||||
|
|||||||
@@ -0,0 +1,11 @@
|
|||||||
|
-- Rollback TLS rotation tables
|
||||||
|
|
||||||
|
-- Удалить добавленную колонку из arbiter_settings
|
||||||
|
ALTER TABLE arbiter_settings DROP COLUMN current_cert_id;
|
||||||
|
|
||||||
|
-- Удалить таблицы в обратном порядке
|
||||||
|
DROP TABLE IF EXISTS tls_rotation_history;
|
||||||
|
DROP TABLE IF EXISTS rotation_client_acks;
|
||||||
|
DROP TABLE IF EXISTS tls_rotation_state;
|
||||||
|
DROP INDEX IF EXISTS idx_tls_certificates_active;
|
||||||
|
DROP TABLE IF EXISTS tls_certificates;
|
||||||
@@ -0,0 +1,57 @@
|
|||||||
|
-- История всех сертификатов
|
||||||
|
CREATE TABLE IF NOT EXISTS tls_certificates (
|
||||||
|
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
|
||||||
|
cert BLOB NOT NULL, -- DER-encoded
|
||||||
|
cert_key BLOB NOT NULL, -- PEM-encoded
|
||||||
|
not_before INTEGER NOT NULL, -- Unix timestamp
|
||||||
|
not_after INTEGER NOT NULL, -- Unix timestamp
|
||||||
|
created_at INTEGER NOT NULL DEFAULT(unixepoch('now')),
|
||||||
|
is_active BOOLEAN NOT NULL DEFAULT 0 -- Только один active=1
|
||||||
|
) STRICT;
|
||||||
|
|
||||||
|
CREATE INDEX idx_tls_certificates_active ON tls_certificates(is_active, not_after);
|
||||||
|
|
||||||
|
-- Tracking процесса ротации
|
||||||
|
CREATE TABLE IF NOT EXISTS tls_rotation_state (
|
||||||
|
id INTEGER NOT NULL PRIMARY KEY CHECK(id = 1), -- Singleton
|
||||||
|
state TEXT NOT NULL DEFAULT('normal') CHECK(state IN ('normal', 'initiated', 'waiting_acks', 'ready')),
|
||||||
|
new_cert_id INTEGER REFERENCES tls_certificates(id),
|
||||||
|
initiated_at INTEGER,
|
||||||
|
timeout_at INTEGER -- Таймаут для ожидания ACKs (initiated_at + 7 дней)
|
||||||
|
) STRICT;
|
||||||
|
|
||||||
|
-- Tracking ACKs от клиентов
|
||||||
|
CREATE TABLE IF NOT EXISTS rotation_client_acks (
|
||||||
|
rotation_id INTEGER NOT NULL, -- Ссылка на new_cert_id
|
||||||
|
client_key TEXT NOT NULL, -- Публичный ключ клиента (hex)
|
||||||
|
ack_received_at INTEGER NOT NULL DEFAULT(unixepoch('now')),
|
||||||
|
PRIMARY KEY (rotation_id, client_key)
|
||||||
|
) STRICT;
|
||||||
|
|
||||||
|
-- Audit trail событий ротации
|
||||||
|
CREATE TABLE IF NOT EXISTS tls_rotation_history (
|
||||||
|
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT,
|
||||||
|
cert_id INTEGER NOT NULL REFERENCES tls_certificates(id),
|
||||||
|
event_type TEXT NOT NULL CHECK(event_type IN ('created', 'rotation_initiated', 'acks_complete', 'activated', 'timeout')),
|
||||||
|
timestamp INTEGER NOT NULL DEFAULT(unixepoch('now')),
|
||||||
|
details TEXT -- JSON с доп. информацией
|
||||||
|
) STRICT;
|
||||||
|
|
||||||
|
-- Миграция существующего сертификата
|
||||||
|
INSERT INTO tls_certificates (id, cert, cert_key, not_before, not_after, is_active, created_at)
|
||||||
|
SELECT
|
||||||
|
1,
|
||||||
|
cert,
|
||||||
|
cert_key,
|
||||||
|
unixepoch('now') as not_before,
|
||||||
|
unixepoch('now') + (90 * 24 * 60 * 60) as not_after, -- 90 дней
|
||||||
|
1 as is_active,
|
||||||
|
unixepoch('now')
|
||||||
|
FROM arbiter_settings WHERE id = 1;
|
||||||
|
|
||||||
|
-- Инициализация rotation_state
|
||||||
|
INSERT INTO tls_rotation_state (id, state) VALUES (1, 'normal');
|
||||||
|
|
||||||
|
-- Добавить ссылку на текущий сертификат
|
||||||
|
ALTER TABLE arbiter_settings ADD COLUMN current_cert_id INTEGER REFERENCES tls_certificates(id);
|
||||||
|
UPDATE arbiter_settings SET current_cert_id = 1 WHERE id = 1;
|
||||||
@@ -1,47 +1,19 @@
|
|||||||
create table if not exists root_key_history (
|
|
||||||
id INTEGER not null PRIMARY KEY,
|
|
||||||
-- root key stored as aead encrypted artifact, with only difference that it's decrypted by unseal key (derived from user password)
|
|
||||||
root_key_encryption_nonce blob not null default(1), -- if re-encrypted, this should be incremented. Used for encrypting root key
|
|
||||||
data_encryption_nonce blob not null default(1), -- nonce used for encrypting with key itself
|
|
||||||
ciphertext blob not null,
|
|
||||||
tag blob not null,
|
|
||||||
schema_version integer not null default(1), -- server would need to reencrypt, because this means that we have changed algorithm
|
|
||||||
salt blob not null -- for key deriviation
|
|
||||||
) STRICT;
|
|
||||||
|
|
||||||
create table if not exists aead_encrypted (
|
create table if not exists aead_encrypted (
|
||||||
id INTEGER not null PRIMARY KEY,
|
id INTEGER not null PRIMARY KEY,
|
||||||
current_nonce blob not null default(1), -- if re-encrypted, this should be incremented
|
current_nonce integer not null default(1), -- if re-encrypted, this should be incremented
|
||||||
ciphertext blob not null,
|
ciphertext blob not null,
|
||||||
tag blob not null,
|
tag blob not null,
|
||||||
schema_version integer not null default(1), -- server would need to reencrypt, because this means that we have changed algorithm
|
schema_version integer not null default(1) -- server would need to reencrypt, because this means that we have changed algorithm
|
||||||
associated_root_key_id integer not null references root_key_history (id) on delete RESTRICT,
|
|
||||||
created_at integer not null default(unixepoch ('now'))
|
|
||||||
) STRICT;
|
|
||||||
|
|
||||||
create unique index if not exists uniq_nonce_per_root_key on aead_encrypted (
|
|
||||||
current_nonce,
|
|
||||||
associated_root_key_id
|
|
||||||
);
|
|
||||||
|
|
||||||
create table if not exists tls_history (
|
|
||||||
id INTEGER not null PRIMARY KEY,
|
|
||||||
cert text not null,
|
|
||||||
cert_key text not null, -- PEM Encoded private key
|
|
||||||
ca_cert text not null,
|
|
||||||
ca_key text not null, -- PEM Encoded private key
|
|
||||||
created_at integer not null default(unixepoch ('now'))
|
|
||||||
) STRICT;
|
) STRICT;
|
||||||
|
|
||||||
-- This is a singleton
|
-- This is a singleton
|
||||||
create table if not exists arbiter_settings (
|
create table if not exists arbiter_settings (
|
||||||
id INTEGER not null PRIMARY KEY CHECK (id = 1), -- singleton row, id must be 1
|
id INTEGER not null PRIMARY KEY CHECK (id = 1), -- singleton row, id must be 1
|
||||||
root_key_id integer references root_key_history (id) on delete RESTRICT, -- if null, means wasn't bootstrapped yet
|
root_key_id integer references aead_encrypted (id) on delete RESTRICT, -- if null, means wasn't bootstrapped yet
|
||||||
tls_id integer references tls_history (id) on delete RESTRICT
|
cert_key blob not null,
|
||||||
|
cert blob not null
|
||||||
) STRICT;
|
) STRICT;
|
||||||
|
|
||||||
insert into arbiter_settings (id) values (1) on conflict do nothing; -- ensure singleton row exists
|
|
||||||
|
|
||||||
create table if not exists useragent_client (
|
create table if not exists useragent_client (
|
||||||
id integer not null primary key,
|
id integer not null primary key,
|
||||||
nonce integer not null default (1), -- used for auth challenge
|
nonce integer not null default (1), -- used for auth challenge
|
||||||
|
|||||||
@@ -0,0 +1,2 @@
|
|||||||
|
-- Remove argon2_salt column
|
||||||
|
ALTER TABLE aead_encrypted DROP COLUMN argon2_salt;
|
||||||
@@ -0,0 +1,2 @@
|
|||||||
|
-- Add argon2_salt column to store password derivation salt
|
||||||
|
ALTER TABLE aead_encrypted ADD COLUMN argon2_salt TEXT;
|
||||||
BIN
server/crates/arbiter-server/src/.DS_Store
vendored
2
server/crates/arbiter-server/src/actors.rs
Normal file
@@ -0,0 +1,2 @@
|
|||||||
|
pub mod user_agent;
|
||||||
|
pub mod client;
|
||||||
@@ -7,6 +7,6 @@ use crate::ServerContext;
|
|||||||
|
|
||||||
pub(crate) async fn handle_client(
|
pub(crate) async fn handle_client(
|
||||||
_context: ServerContext,
|
_context: ServerContext,
|
||||||
_bistream: impl Bi<ClientRequest, ClientResponse, tonic::Status>,
|
_bistream: impl Bi<ClientRequest, ClientResponse>,
|
||||||
) {
|
) {
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1 +0,0 @@
|
|||||||
pub mod v1;
|
|
||||||
@@ -1,237 +0,0 @@
|
|||||||
use std::ops::Deref as _;
|
|
||||||
|
|
||||||
use argon2::{Algorithm, Argon2, password_hash::Salt as ArgonSalt};
|
|
||||||
use chacha20poly1305::{
|
|
||||||
AeadInPlace, Key, KeyInit as _, XChaCha20Poly1305, XNonce,
|
|
||||||
aead::{AeadMut, Error, Payload},
|
|
||||||
};
|
|
||||||
use memsafe::MemSafe;
|
|
||||||
use rand::{
|
|
||||||
Rng as _, SeedableRng,
|
|
||||||
rngs::{StdRng, SysRng},
|
|
||||||
};
|
|
||||||
|
|
||||||
pub const ROOT_KEY_TAG: &[u8] = "arbiter/seal/v1".as_bytes();
|
|
||||||
pub const TAG: &[u8] = "arbiter/private-key/v1".as_bytes();
|
|
||||||
|
|
||||||
pub const NONCE_LENGTH: usize = 24;
|
|
||||||
|
|
||||||
#[derive(Default)]
|
|
||||||
pub struct Nonce([u8; NONCE_LENGTH]);
|
|
||||||
impl Nonce {
|
|
||||||
pub fn increment(&mut self) {
|
|
||||||
for i in (0..self.0.len()).rev() {
|
|
||||||
if self.0[i] == 0xFF {
|
|
||||||
self.0[i] = 0;
|
|
||||||
} else {
|
|
||||||
self.0[i] += 1;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn to_vec(&self) -> Vec<u8> {
|
|
||||||
self.0.to_vec()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl<'a> TryFrom<&'a [u8]> for Nonce {
|
|
||||||
type Error = ();
|
|
||||||
|
|
||||||
fn try_from(value: &'a [u8]) -> Result<Self, Self::Error> {
|
|
||||||
if value.len() != NONCE_LENGTH {
|
|
||||||
return Err(());
|
|
||||||
}
|
|
||||||
let mut nonce = [0u8; NONCE_LENGTH];
|
|
||||||
nonce.copy_from_slice(value);
|
|
||||||
Ok(Self(nonce))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct KeyCell(pub MemSafe<Key>);
|
|
||||||
impl From<MemSafe<Key>> for KeyCell {
|
|
||||||
fn from(value: MemSafe<Key>) -> Self {
|
|
||||||
Self(value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
impl TryFrom<MemSafe<Vec<u8>>> for KeyCell {
|
|
||||||
type Error = ();
|
|
||||||
|
|
||||||
fn try_from(mut value: MemSafe<Vec<u8>>) -> Result<Self, Self::Error> {
|
|
||||||
let value = value.read().unwrap();
|
|
||||||
if value.len() != size_of::<Key>() {
|
|
||||||
return Err(());
|
|
||||||
}
|
|
||||||
let mut cell = MemSafe::new(Key::default()).unwrap();
|
|
||||||
{
|
|
||||||
let mut cell_write = cell.write().unwrap();
|
|
||||||
let cell_slice: &mut [u8] = cell_write.as_mut();
|
|
||||||
cell_slice.copy_from_slice(&value);
|
|
||||||
}
|
|
||||||
Ok(Self(cell))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl KeyCell {
|
|
||||||
pub fn new_secure_random() -> Self {
|
|
||||||
let mut key = MemSafe::new(Key::default()).unwrap();
|
|
||||||
{
|
|
||||||
let mut key_buffer = key.write().unwrap();
|
|
||||||
let key_buffer: &mut [u8] = key_buffer.as_mut();
|
|
||||||
|
|
||||||
let mut rng = StdRng::try_from_rng(&mut SysRng).unwrap();
|
|
||||||
rng.fill_bytes(key_buffer);
|
|
||||||
}
|
|
||||||
|
|
||||||
key.into()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn encrypt_in_place(
|
|
||||||
&mut self,
|
|
||||||
nonce: &Nonce,
|
|
||||||
associated_data: &[u8],
|
|
||||||
mut buffer: impl AsMut<Vec<u8>>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
let key_reader = self.0.read().unwrap();
|
|
||||||
let key_ref = key_reader.deref();
|
|
||||||
let cipher = XChaCha20Poly1305::new(key_ref);
|
|
||||||
let nonce = XNonce::from_slice(nonce.0.as_ref());
|
|
||||||
let buffer = buffer.as_mut();
|
|
||||||
cipher.encrypt_in_place(nonce, associated_data, buffer)
|
|
||||||
}
|
|
||||||
pub fn decrypt_in_place(
|
|
||||||
&mut self,
|
|
||||||
nonce: &Nonce,
|
|
||||||
associated_data: &[u8],
|
|
||||||
buffer: &mut MemSafe<Vec<u8>>,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
let key_reader = self.0.read().unwrap();
|
|
||||||
let key_ref = key_reader.deref();
|
|
||||||
let cipher = XChaCha20Poly1305::new(key_ref);
|
|
||||||
let nonce = XNonce::from_slice(nonce.0.as_ref());
|
|
||||||
let mut buffer = buffer.write().unwrap();
|
|
||||||
let buffer: &mut Vec<u8> = buffer.as_mut();
|
|
||||||
cipher.decrypt_in_place(nonce, associated_data, buffer)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn encrypt(
|
|
||||||
&mut self,
|
|
||||||
nonce: &Nonce,
|
|
||||||
associated_data: &[u8],
|
|
||||||
plaintext: impl AsRef<[u8]>,
|
|
||||||
) -> Result<Vec<u8>, Error> {
|
|
||||||
let key_reader = self.0.read().unwrap();
|
|
||||||
let key_ref = key_reader.deref();
|
|
||||||
let mut cipher = XChaCha20Poly1305::new(key_ref);
|
|
||||||
let nonce = XNonce::from_slice(nonce.0.as_ref());
|
|
||||||
|
|
||||||
let ciphertext = cipher.encrypt(
|
|
||||||
nonce,
|
|
||||||
Payload {
|
|
||||||
msg: plaintext.as_ref(),
|
|
||||||
aad: associated_data,
|
|
||||||
},
|
|
||||||
)?;
|
|
||||||
Ok(ciphertext)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub type Salt = [u8; ArgonSalt::RECOMMENDED_LENGTH];
|
|
||||||
|
|
||||||
pub fn generate_salt() -> Salt {
|
|
||||||
let mut salt = Salt::default();
|
|
||||||
let mut rng = StdRng::try_from_rng(&mut SysRng).unwrap();
|
|
||||||
rng.fill_bytes(&mut salt);
|
|
||||||
salt
|
|
||||||
}
|
|
||||||
|
|
||||||
/// User password might be of different length, have not enough entropy, etc...
|
|
||||||
/// Derive a fixed-length key from the password using Argon2id, which is designed for password hashing and key derivation.
|
|
||||||
pub fn derive_seal_key(mut password: MemSafe<Vec<u8>>, salt: &Salt) -> KeyCell {
|
|
||||||
let params = argon2::Params::new(262_144, 3, 4, None).unwrap();
|
|
||||||
let hasher = Argon2::new(Algorithm::Argon2id, argon2::Version::V0x13, params);
|
|
||||||
let mut key = MemSafe::new(Key::default()).unwrap();
|
|
||||||
{
|
|
||||||
let password_source = password.read().unwrap();
|
|
||||||
let mut key_buffer = key.write().unwrap();
|
|
||||||
let key_buffer: &mut [u8] = key_buffer.as_mut();
|
|
||||||
|
|
||||||
hasher
|
|
||||||
.hash_password_into(password_source.deref(), salt, key_buffer)
|
|
||||||
.unwrap();
|
|
||||||
}
|
|
||||||
|
|
||||||
key.into()
|
|
||||||
}
|
|
||||||
|
|
||||||
#[cfg(test)]
mod tests {
    use super::*;
    use memsafe::MemSafe;

    // Same password + same salt must always derive the same key.
    #[test]
    pub fn derive_seal_key_deterministic() {
        static PASSWORD: &[u8] = b"password";
        let password = MemSafe::new(PASSWORD.to_vec()).unwrap();
        let password2 = MemSafe::new(PASSWORD.to_vec()).unwrap();
        let salt = generate_salt();

        let mut key1 = derive_seal_key(password, &salt);
        let mut key2 = derive_seal_key(password2, &salt);

        let key1_reader = key1.0.read().unwrap();
        let key2_reader = key2.0.read().unwrap();

        assert_eq!(key1_reader.deref(), key2_reader.deref());
    }

    // Derivation must produce a non-trivial key (not the all-zero default).
    #[test]
    pub fn successful_derive() {
        static PASSWORD: &[u8] = b"password";
        let password = MemSafe::new(PASSWORD.to_vec()).unwrap();
        let salt = generate_salt();

        let mut key = derive_seal_key(password, &salt);
        let key_reader = key.0.read().unwrap();
        let key_ref = key_reader.deref();

        assert_ne!(key_ref.as_slice(), &[0u8; 32][..]);
    }

    // Round-trip: encrypt_in_place then decrypt_in_place restores the plaintext,
    // and the ciphertext actually differs from the plaintext.
    #[test]
    pub fn encrypt_decrypt() {
        static PASSWORD: &[u8] = b"password";
        let password = MemSafe::new(PASSWORD.to_vec()).unwrap();
        let salt = generate_salt();

        let mut key = derive_seal_key(password, &salt);
        let nonce = Nonce(*b"unique nonce 123 1231233"); // 24 bytes for XChaCha20Poly1305
        let associated_data = b"associated data";
        let mut buffer = b"secret data".to_vec();

        key.encrypt_in_place(&nonce, associated_data, &mut buffer)
            .unwrap();
        assert_ne!(buffer, b"secret data");

        let mut buffer = MemSafe::new(buffer).unwrap();

        key.decrypt_in_place(&nonce, associated_data, &mut buffer)
            .unwrap();

        let buffer = buffer.read().unwrap();
        assert_eq!(*buffer, b"secret data");
    }

    #[test]
    // We should fuzz this
    pub fn test_nonce_increment() {
        // Incrementing the all-zero nonce flips only the last byte,
        // i.e. the counter is big-endian.
        let mut nonce = Nonce([0u8; NONCE_LENGTH]);
        nonce.increment();

        assert_eq!(
            nonce.0,
            [
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1
            ]
        );
    }
}
|
|
||||||
@@ -1,407 +0,0 @@
|
|||||||
use diesel::{
|
|
||||||
ExpressionMethods as _, OptionalExtension, QueryDsl, SelectableHelper,
|
|
||||||
dsl::{insert_into, update},
|
|
||||||
};
|
|
||||||
use diesel_async::{AsyncConnection, RunQueryDsl};
|
|
||||||
use kameo::{Actor, Reply, messages};
|
|
||||||
use memsafe::MemSafe;
|
|
||||||
use strum::{EnumDiscriminants, IntoDiscriminant};
|
|
||||||
use tracing::{error, info};
|
|
||||||
|
|
||||||
use crate::db::{
|
|
||||||
self,
|
|
||||||
models::{self, RootKeyHistory},
|
|
||||||
schema::{self},
|
|
||||||
};
|
|
||||||
use encryption::v1::{self, KeyCell, Nonce};
|
|
||||||
|
|
||||||
pub mod encryption;
|
|
||||||
|
|
||||||
/// Lifecycle state of the vault; key material exists only in `Unsealed`.
#[derive(Default, EnumDiscriminants)]
#[strum_discriminants(derive(Reply), vis(pub))]
enum State {
    /// No root key has ever been created for this vault.
    #[default]
    Unbootstrapped,
    /// A root key row exists in the database but has not been decrypted yet.
    Sealed {
        root_key_history_id: i32,
    },
    /// Root key is decrypted and held in protected memory; encrypt/decrypt available.
    Unsealed {
        root_key_history_id: i32,
        root_key: KeyCell,
    },
}
|
|
||||||
|
|
||||||
/// Errors returned by `KeyHolder` operations.
#[derive(Debug, thiserror::Error, miette::Diagnostic)]
pub enum Error {
    #[error("Keyholder is already bootstrapped")]
    #[diagnostic(code(arbiter::keyholder::already_bootstrapped))]
    AlreadyBootstrapped,
    #[error("Keyholder is not bootstrapped")]
    #[diagnostic(code(arbiter::keyholder::not_bootstrapped))]
    NotBootstrapped,
    #[error("Invalid key provided")]
    #[diagnostic(code(arbiter::keyholder::invalid_key))]
    InvalidKey,

    #[error("Requested aead entry not found")]
    #[diagnostic(code(arbiter::keyholder::aead_not_found))]
    NotFound,

    #[error("Encryption error: {0}")]
    #[diagnostic(code(arbiter::keyholder::encryption_error))]
    Encryption(#[from] chacha20poly1305::aead::Error),

    #[error("Database error: {0}")]
    #[diagnostic(code(arbiter::keyholder::database_error))]
    DatabaseConnection(#[from] db::PoolError),

    #[error("Database transaction error: {0}")]
    #[diagnostic(code(arbiter::keyholder::database_transaction_error))]
    DatabaseTransaction(#[from] diesel::result::Error),

    /// A stored invariant (nonce/salt/key size) was violated in the database.
    #[error("Broken database")]
    #[diagnostic(code(arbiter::keyholder::broken_database))]
    BrokenDatabase,
}
|
|
||||||
|
|
||||||
/// Manages vault root key and tracks current state of the vault (bootstrapped/unbootstrapped, sealed/unsealed).
/// Provides API for encrypting and decrypting data using the vault root key.
/// Abstraction over database to make sure nonces are never reused and encryption keys are never exposed in plaintext outside of this actor.
#[derive(Actor)]
pub struct KeyHolder {
    // Connection pool used for all persistence (root key history, aead entries).
    db: db::DatabasePool,
    // Current lifecycle state; see `State`.
    state: State,
}
|
|
||||||
|
|
||||||
#[messages]
impl KeyHolder {
    /// Build a `KeyHolder`, inferring the initial state from the database:
    /// `Sealed` if settings link to a root key row, otherwise `Unbootstrapped`.
    pub async fn new(db: db::DatabasePool) -> Result<Self, Error> {
        let state = {
            let mut conn = db.get().await?;

            // Left join: settings row always exists, root key row may not.
            let (root_key_history,) = schema::arbiter_settings::table
                .left_join(schema::root_key_history::table)
                .select((Option::<RootKeyHistory>::as_select(),))
                .get_result::<(Option<RootKeyHistory>,)>(&mut conn)
                .await?;

            match root_key_history {
                Some(root_key_history) => State::Sealed {
                    root_key_history_id: root_key_history.id,
                },
                None => State::Unbootstrapped,
            }
        };

        Ok(Self { db, state })
    }

    // Exclusive transaction to avoid race conditions if multiple keyholders write:
    // an additional layer of protection against nonce-reuse.
    // Reads the stored counter, increments it, persists it, and returns the new
    // value — so every caller observes a strictly increasing nonce.
    async fn get_new_nonce(pool: &db::DatabasePool, root_key_id: i32) -> Result<Nonce, Error> {
        let mut conn = pool.get().await?;

        let nonce = conn
            .exclusive_transaction(|conn| {
                Box::pin(async move {
                    let current_nonce: Vec<u8> = schema::root_key_history::table
                        .filter(schema::root_key_history::id.eq(root_key_id))
                        .select(schema::root_key_history::data_encryption_nonce)
                        .first(conn)
                        .await?;

                    let mut nonce =
                        v1::Nonce::try_from(current_nonce.as_slice()).map_err(|_| {
                            error!(
                                "Broken database: invalid nonce for root key history id={}",
                                root_key_id
                            );
                            Error::BrokenDatabase
                        })?;
                    nonce.increment();

                    // Persist the incremented counter before handing it out.
                    update(schema::root_key_history::table)
                        .filter(schema::root_key_history::id.eq(root_key_id))
                        .set(schema::root_key_history::data_encryption_nonce.eq(nonce.to_vec()))
                        .execute(conn)
                        .await?;

                    Result::<_, Error>::Ok(nonce)
                })
            })
            .await?;

        Ok(nonce)
    }

    /// One-time vault setup: derive a seal key from the caller's secret, generate
    /// a random root key, encrypt the root key under the seal key, and persist it.
    /// Leaves the actor `Unsealed`. Fails with `AlreadyBootstrapped` otherwise.
    #[message]
    pub async fn bootstrap(&mut self, seal_key_raw: MemSafe<Vec<u8>>) -> Result<(), Error> {
        if !matches!(self.state, State::Unbootstrapped) {
            return Err(Error::AlreadyBootstrapped);
        }
        let salt = v1::generate_salt();
        let mut seal_key = v1::derive_seal_key(seal_key_raw, &salt);
        let mut root_key = KeyCell::new_secure_random();

        // Zero nonces are fine because they are one-time
        // (seal key and root key are freshly generated for this row).
        let root_key_nonce = v1::Nonce::default();
        let data_encryption_nonce = v1::Nonce::default();

        let root_key_ciphertext: Vec<u8> = {
            let root_key_reader = root_key.0.read().unwrap();
            let root_key_reader = root_key_reader.as_slice();
            seal_key
                .encrypt(&root_key_nonce, v1::ROOT_KEY_TAG, root_key_reader)
                .map_err(|err| {
                    error!(?err, "Fatal bootstrap error");
                    Error::Encryption(err)
                })?
        };

        let mut conn = self.db.get().await?;

        let data_encryption_nonce_bytes = data_encryption_nonce.to_vec();
        // Insert the key row and point settings at it atomically.
        let root_key_history_id = conn
            .transaction(|conn| {
                Box::pin(async move {
                    let root_key_history_id: i32 = insert_into(schema::root_key_history::table)
                        .values(&models::NewRootKeyHistory {
                            ciphertext: root_key_ciphertext,
                            tag: v1::ROOT_KEY_TAG.to_vec(),
                            root_key_encryption_nonce: root_key_nonce.to_vec(),
                            data_encryption_nonce: data_encryption_nonce_bytes,
                            schema_version: 1,
                            salt: salt.to_vec(),
                        })
                        .returning(schema::root_key_history::id)
                        .get_result(conn)
                        .await?;

                    update(schema::arbiter_settings::table)
                        .set(schema::arbiter_settings::root_key_id.eq(root_key_history_id))
                        .execute(conn)
                        .await?;

                    Result::<_, diesel::result::Error>::Ok(root_key_history_id)
                })
            })
            .await?;

        self.state = State::Unsealed {
            root_key,
            root_key_history_id,
        };

        info!("Keyholder bootstrapped successfully");

        Ok(())
    }

    /// Attempt to unseal the vault: re-derive the seal key from the supplied
    /// secret and the stored salt, then decrypt the stored root key.
    /// A decryption failure is reported as `InvalidKey` (wrong password).
    #[message]
    pub async fn try_unseal(&mut self, seal_key_raw: MemSafe<Vec<u8>>) -> Result<(), Error> {
        let State::Sealed {
            root_key_history_id,
        } = &self.state
        else {
            return Err(Error::NotBootstrapped);
        };

        // We don't want to hold connection while doing expensive KDF work
        let current_key = {
            let mut conn = self.db.get().await?;
            schema::root_key_history::table
                .filter(schema::root_key_history::id.eq(*root_key_history_id))
                // NOTE(review): the first `.select` below is overridden by the
                // second and appears to be dead code — consider removing it.
                .select(schema::root_key_history::data_encryption_nonce)
                .select(RootKeyHistory::as_select())
                .first(&mut conn)
                .await?
        };

        let salt = &current_key.salt;
        let salt = v1::Salt::try_from(salt.as_slice()).map_err(|_| {
            error!("Broken database: invalid salt for root key");
            Error::BrokenDatabase
        })?;
        let mut seal_key = v1::derive_seal_key(seal_key_raw, &salt);

        let mut root_key = MemSafe::new(current_key.ciphertext.clone()).unwrap();

        let nonce = v1::Nonce::try_from(current_key.root_key_encryption_nonce.as_slice()).map_err(
            |_| {
                error!("Broken database: invalid nonce for root key");
                Error::BrokenDatabase
            },
        )?;

        // AEAD authentication failure here means the derived seal key is wrong.
        seal_key
            .decrypt_in_place(&nonce, v1::ROOT_KEY_TAG, &mut root_key)
            .map_err(|err| {
                error!(?err, "Failed to unseal root key: invalid seal key");
                Error::InvalidKey
            })?;

        self.state = State::Unsealed {
            root_key_history_id: current_key.id,
            root_key: v1::KeyCell::try_from(root_key).map_err(|err| {
                error!(?err, "Broken database: invalid encryption key size");
                Error::BrokenDatabase
            })?,
        };

        info!("Keyholder unsealed successfully");

        Ok(())
    }

    // Decrypts the `aead_encrypted` entry with the given ID and returns the plaintext
    // (in protected memory). Requires the vault to be unsealed.
    #[message]
    pub async fn decrypt(&mut self, aead_id: i32) -> Result<MemSafe<Vec<u8>>, Error> {
        let State::Unsealed { root_key, .. } = &mut self.state else {
            return Err(Error::NotBootstrapped);
        };

        let row: models::AeadEncrypted = {
            let mut conn = self.db.get().await?;
            schema::aead_encrypted::table
                .select(models::AeadEncrypted::as_select())
                .filter(schema::aead_encrypted::id.eq(aead_id))
                .first(&mut conn)
                .await
                .optional()?
                .ok_or(Error::NotFound)?
        };

        let nonce = v1::Nonce::try_from(row.current_nonce.as_slice()).map_err(|_| {
            error!(
                "Broken database: invalid nonce for aead_encrypted id={}",
                aead_id
            );
            Error::BrokenDatabase
        })?;
        let mut output = MemSafe::new(row.ciphertext).unwrap();
        root_key.decrypt_in_place(&nonce, v1::TAG, &mut output)?;
        Ok(output)
    }

    // Creates a new `aead_encrypted` entry in the database and returns its ID.
    // The plaintext buffer is encrypted in place and then moved into the row.
    #[message]
    pub async fn create_new(&mut self, mut plaintext: MemSafe<Vec<u8>>) -> Result<i32, Error> {
        let State::Unsealed {
            root_key,
            root_key_history_id,
        } = &mut self.state
        else {
            return Err(Error::NotBootstrapped);
        };

        // Order matters here - `get_new_nonce` acquires a connection, so we need to call it before the next acquire
        // Borrow checker note: &mut borrow a few lines above is disjoint from this field
        let nonce = Self::get_new_nonce(&self.db, *root_key_history_id).await?;

        let mut ciphertext_buffer = plaintext.write().unwrap();
        let ciphertext_buffer: &mut Vec<u8> = ciphertext_buffer.as_mut();
        root_key.encrypt_in_place(&nonce, v1::TAG, &mut *ciphertext_buffer)?;

        // Move the encrypted bytes out; plaintext buffer is left empty.
        let ciphertext = std::mem::take(ciphertext_buffer);

        let mut conn = self.db.get().await?;
        let aead_id: i32 = insert_into(schema::aead_encrypted::table)
            .values(&models::NewAeadEncrypted {
                ciphertext,
                tag: v1::TAG.to_vec(),
                current_nonce: nonce.to_vec(),
                schema_version: 1,
                associated_root_key_id: *root_key_history_id,
                // NOTE(review): Unix seconds narrowed to i32 — overflows in 2038;
                // consider an i64/timestamp column.
                created_at: chrono::Utc::now().timestamp() as i32,
            })
            .returning(schema::aead_encrypted::id)
            .get_result(&mut conn)
            .await?;

        Ok(aead_id)
    }

    /// Report the current lifecycle state without exposing key material.
    #[message]
    pub fn get_state(&self) -> StateDiscriminants {
        self.state.discriminant()
    }

    /// Drop the in-memory root key, returning to `Sealed`.
    #[message]
    pub fn seal(&mut self) -> Result<(), Error> {
        let State::Unsealed {
            root_key_history_id,
            ..
        } = &self.state
        else {
            return Err(Error::NotBootstrapped);
        };
        self.state = State::Sealed {
            root_key_history_id: *root_key_history_id,
        };
        Ok(())
    }
}
|
|
||||||
|
|
||||||
#[cfg(test)]
mod tests {
    use diesel::SelectableHelper;

    use diesel_async::RunQueryDsl;
    use memsafe::MemSafe;

    use crate::db::{self};

    use super::*;

    // Helper: fresh KeyHolder bootstrapped with a fixed test seal key.
    async fn bootstrapped_actor(db: &db::DatabasePool) -> KeyHolder {
        let mut actor = KeyHolder::new(db.clone()).await.unwrap();
        let seal_key = MemSafe::new(b"test-seal-key".to_vec()).unwrap();
        actor.bootstrap(seal_key).await.unwrap();
        actor
    }

    // Allocating nonces out-of-band (simulating a second keyholder) must still
    // leave the persisted counter monotonic, and the next real write must
    // advance past it.
    #[tokio::test]
    #[test_log::test]
    async fn nonce_monotonic_even_when_nonce_allocation_interleaves() {
        let db = db::create_test_pool().await;
        let mut actor = bootstrapped_actor(&db).await;
        let root_key_history_id = match actor.state {
            State::Unsealed {
                root_key_history_id,
                ..
            } => root_key_history_id,
            _ => panic!("expected unsealed state"),
        };

        let n1 = KeyHolder::get_new_nonce(&db, root_key_history_id)
            .await
            .unwrap();
        let n2 = KeyHolder::get_new_nonce(&db, root_key_history_id)
            .await
            .unwrap();
        assert!(n2.to_vec() > n1.to_vec(), "nonce must increase");

        // The persisted counter must equal the latest allocated nonce.
        let mut conn = db.get().await.unwrap();
        let root_row: models::RootKeyHistory = schema::root_key_history::table
            .select(models::RootKeyHistory::as_select())
            .first(&mut conn)
            .await
            .unwrap();
        assert_eq!(root_row.data_encryption_nonce, n2.to_vec());

        let id = actor
            .create_new(MemSafe::new(b"post-interleave".to_vec()).unwrap())
            .await
            .unwrap();
        let row: models::AeadEncrypted = schema::aead_encrypted::table
            .filter(schema::aead_encrypted::id.eq(id))
            .select(models::AeadEncrypted::as_select())
            .first(&mut conn)
            .await
            .unwrap();
        assert!(
            row.current_nonce > n2.to_vec(),
            "next write must advance nonce"
        );
    }
}
|
|
||||||
@@ -1,40 +0,0 @@
|
|||||||
use kameo::actor::{ActorRef, Spawn};
|
|
||||||
use miette::Diagnostic;
|
|
||||||
use thiserror::Error;
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
actors::{bootstrap::Bootstrapper, keyholder::KeyHolder},
|
|
||||||
db,
|
|
||||||
};
|
|
||||||
|
|
||||||
pub mod bootstrap;
|
|
||||||
pub mod client;
|
|
||||||
pub mod keyholder;
|
|
||||||
pub mod user_agent;
|
|
||||||
|
|
||||||
/// Failures while constructing the global actor set at startup.
#[derive(Error, Debug, Diagnostic)]
pub enum SpawnError {
    #[error("Failed to spawn Bootstrapper actor")]
    #[diagnostic(code(SpawnError::Bootstrapper))]
    Bootstrapper(#[from] bootstrap::Error),

    #[error("Failed to spawn KeyHolder actor")]
    #[diagnostic(code(SpawnError::KeyHolder))]
    KeyHolder(#[from] keyholder::Error),
}
|
|
||||||
|
|
||||||
/// Long-lived actors that are shared across all connections and handle global state and operations
#[derive(Clone)]
pub struct GlobalActors {
    // Holds the vault root key and encrypt/decrypt API.
    pub key_holder: ActorRef<KeyHolder>,
    // Issues/consumes one-time bootstrap tokens for first-time clients.
    pub bootstrapper: ActorRef<Bootstrapper>,
}
|
|
||||||
|
|
||||||
impl GlobalActors {
|
|
||||||
pub async fn spawn(db: db::DatabasePool) -> Result<Self, SpawnError> {
|
|
||||||
Ok(Self {
|
|
||||||
bootstrapper: Bootstrapper::spawn(Bootstrapper::new(&db).await?),
|
|
||||||
key_holder: KeyHolder::spawn(KeyHolder::new(db.clone()).await?),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
374
server/crates/arbiter-server/src/actors/user_agent.rs
Normal file
@@ -0,0 +1,374 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use arbiter_proto::{
|
||||||
|
proto::{
|
||||||
|
UserAgentRequest, UserAgentResponse,
|
||||||
|
auth::{
|
||||||
|
self, AuthChallengeRequest, ClientMessage, ServerMessage as AuthServerMessage,
|
||||||
|
client_message::Payload as ClientAuthPayload,
|
||||||
|
server_message::Payload as ServerAuthPayload,
|
||||||
|
},
|
||||||
|
user_agent_request::Payload as UserAgentRequestPayload,
|
||||||
|
user_agent_response::Payload as UserAgentResponsePayload,
|
||||||
|
},
|
||||||
|
transport::Bi,
|
||||||
|
};
|
||||||
|
use diesel::{ExpressionMethods as _, OptionalExtension as _, QueryDsl, dsl::update};
|
||||||
|
use diesel_async::{AsyncConnection, RunQueryDsl};
|
||||||
|
use ed25519_dalek::VerifyingKey;
|
||||||
|
use futures::StreamExt;
|
||||||
|
use kameo::{
|
||||||
|
Actor,
|
||||||
|
actor::{ActorRef, Spawn},
|
||||||
|
error::SendError,
|
||||||
|
messages,
|
||||||
|
prelude::Context,
|
||||||
|
};
|
||||||
|
use tokio::sync::mpsc;
|
||||||
|
use tokio::sync::mpsc::Sender;
|
||||||
|
use tonic::{Status, transport::Server};
|
||||||
|
use tracing::{debug, error, info};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
ServerContext,
|
||||||
|
actors::user_agent::auth::AuthChallenge,
|
||||||
|
context::bootstrap::{BootstrapActor, ConsumeToken},
|
||||||
|
db::{self, schema},
|
||||||
|
errors::GrpcStatusExt,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// Server-side state for an in-flight auth challenge: the challenge that was
/// sent and the verifying key the solution must be signed with.
#[derive(Debug)]
pub struct ChallengeContext {
    challenge: AuthChallenge,
    key: VerifyingKey,
}
|
||||||
|
|
||||||
|
// Request context with deserialized public key for state machine.
// This intermediate struct is needed because the state machine branches depending on presence of bootstrap token,
// but we want to have the deserialized key in both branches.
#[derive(Clone, Debug)]
pub struct AuthRequestContext {
    pubkey: VerifyingKey,
    // Present only for first-time registration via a one-time bootstrap token.
    bootstrap_token: Option<String>,
}
|
||||||
|
|
||||||
|
// Authentication state machine for a single user-agent connection.
// Flow: Init -> ReceivedAuthRequest -> either the bootstrap-token shortcut to
// Authenticated, or a signed-challenge round trip through
// WaitingForChallengeSolution. A bad solution parks the machine in AuthError.
smlang::statemachine!(
    name: UserAgent,
    derive_states: [Debug],
    custom_error: false,
    transitions: {
        *Init + AuthRequest(AuthRequestContext) / auth_request_context = ReceivedAuthRequest(AuthRequestContext),
        ReceivedAuthRequest(AuthRequestContext) + ReceivedBootstrapToken = Authenticated,

        ReceivedAuthRequest(AuthRequestContext) + SentChallenge(ChallengeContext) / move_challenge = WaitingForChallengeSolution(ChallengeContext),

        WaitingForChallengeSolution(ChallengeContext) + ReceivedGoodSolution = Authenticated,
        WaitingForChallengeSolution(ChallengeContext) + ReceivedBadSolution = AuthError, // block further transitions, but connection should close anyway
    }
);
|
||||||
|
|
||||||
|
/// No-op state machine context: the action callbacks simply pass the event
/// data through into the destination state unchanged.
pub struct DummyContext;
impl UserAgentStateMachineContext for DummyContext {
    #[allow(missing_docs)]
    #[allow(clippy::unused_unit)]
    fn move_challenge(
        &mut self,
        state_data: &AuthRequestContext,
        event_data: ChallengeContext,
    ) -> Result<ChallengeContext, ()> {
        Ok(event_data)
    }

    #[allow(missing_docs)]
    #[allow(clippy::unused_unit)]
    fn auth_request_context(
        &mut self,
        event_data: AuthRequestContext,
    ) -> Result<AuthRequestContext, ()> {
        Ok(event_data)
    }
}
|
||||||
|
|
||||||
|
/// Per-connection actor that drives user-agent authentication.
#[derive(Actor)]
pub struct UserAgentActor {
    db: db::DatabasePool,
    // NOTE(review): field name has a typo ("bootstapper" -> "bootstrapper");
    // renaming would touch every use site in this file.
    bootstapper: ActorRef<BootstrapActor>,
    // Authentication state machine for this connection.
    state: UserAgentStateMachine<DummyContext>,
    // Outbound channel back to the transport layer.
    tx: Sender<Result<UserAgentResponse, Status>>,
}
|
||||||
|
|
||||||
|
impl UserAgentActor {
    /// Build an actor from the shared server context.
    pub(crate) fn new(
        context: ServerContext,
        tx: Sender<Result<UserAgentResponse, Status>>,
    ) -> Self {
        Self {
            db: context.db.clone(),
            bootstapper: context.bootstrapper.clone(),
            state: UserAgentStateMachine::new(DummyContext),
            tx,
        }
    }

    /// Build an actor from explicit dependencies (used by tests).
    pub(crate) fn new_manual(
        db: db::DatabasePool,
        bootstapper: ActorRef<BootstrapActor>,
        tx: Sender<Result<UserAgentResponse, Status>>,
    ) -> Self {
        Self {
            db,
            bootstapper,
            state: UserAgentStateMachine::new(DummyContext),
            tx,
        }
    }

    // Run a state machine transition, mapping any rejection to an internal
    // gRPC error (clients never see machine internals).
    fn transition(&mut self, event: UserAgentEvents) -> Result<(), Status> {
        self.state.process_event(event).map_err(|e| {
            error!(?e, "State transition failed");
            Status::internal("State machine error")
        })?;
        Ok(())
    }

    // First-time registration path: consume the one-time bootstrap token, then
    // register the client's public key with an initial nonce of 1.
    async fn auth_with_bootstrap_token(
        &mut self,
        pubkey: ed25519_dalek::VerifyingKey,
        token: String,
    ) -> Result<UserAgentResponse, Status> {
        let token_ok: bool = self
            .bootstapper
            .ask(ConsumeToken { token })
            .await
            .map_err(|e| {
                error!(?pubkey, "Failed to consume bootstrap token: {e}");
                Status::internal("Bootstrap token consumption failed")
            })?;

        if !token_ok {
            error!(?pubkey, "Invalid bootstrap token provided");
            return Err(Status::invalid_argument("Invalid bootstrap token"));
        }

        {
            let mut conn = self.db.get().await.to_status()?;

            diesel::insert_into(schema::useragent_client::table)
                .values((
                    schema::useragent_client::public_key.eq(pubkey.as_bytes().to_vec()),
                    schema::useragent_client::nonce.eq(1),
                ))
                .execute(&mut conn)
                .await
                .to_status()?;
        }

        self.transition(UserAgentEvents::ReceivedBootstrapToken)?;

        Ok(auth_response(ServerAuthPayload::AuthOk(auth::AuthOk {})))
    }

    // Returning-client path: atomically read-and-increment the client's nonce,
    // then send a challenge to be signed with the registered key.
    async fn auth_with_challenge(&mut self, pubkey: VerifyingKey, pubkey_bytes: Vec<u8>) -> Output {
        let nonce: Option<i32> = {
            let mut db_conn = self.db.get().await.to_status()?;
            db_conn
                .transaction(|conn| {
                    Box::pin(async move {
                        let current_nonce = schema::useragent_client::table
                            .filter(
                                schema::useragent_client::public_key.eq(pubkey.as_bytes().to_vec()),
                            )
                            .select(schema::useragent_client::nonce)
                            .first::<i32>(conn)
                            .await?;

                        // Bump the stored nonce so each challenge is unique.
                        update(schema::useragent_client::table)
                            .filter(
                                schema::useragent_client::public_key.eq(pubkey.as_bytes().to_vec()),
                            )
                            .set(schema::useragent_client::nonce.eq(current_nonce + 1))
                            .execute(conn)
                            .await?;

                        Result::<_, diesel::result::Error>::Ok(current_nonce)
                    })
                })
                .await
                // NotFound from the transaction maps to None: unregistered key.
                .optional()
                .to_status()?
        };

        let Some(nonce) = nonce else {
            error!(?pubkey, "Public key not found in database");
            return Err(Status::unauthenticated("Public key not registered"));
        };

        let challenge = auth::AuthChallenge {
            pubkey: pubkey_bytes,
            nonce,
        };

        self.transition(UserAgentEvents::SentChallenge(ChallengeContext {
            challenge: challenge.clone(),
            key: pubkey,
        }))?;

        info!(
            ?pubkey,
            ?challenge,
            "Sent authentication challenge to client"
        );

        Ok(auth_response(ServerAuthPayload::AuthChallenge(challenge)))
    }

    // Check a signed challenge solution against the challenge we previously
    // sent; only valid while in WaitingForChallengeSolution.
    fn verify_challenge_solution(
        &self,
        solution: &auth::AuthChallengeSolution,
    ) -> Result<(bool, &ChallengeContext), Status> {
        let UserAgentStates::WaitingForChallengeSolution(challenge_context) = self.state.state()
        else {
            error!("Received challenge solution in invalid state");
            return Err(Status::invalid_argument(
                "Invalid state for challenge solution",
            ));
        };
        let formatted_challenge = arbiter_proto::format_challenge(&challenge_context.challenge);

        let signature = solution.signature.as_slice().try_into().map_err(|_| {
            error!(?solution, "Invalid signature length");
            Status::invalid_argument("Invalid signature length")
        })?;

        // verify_strict rejects malleable/weak-key signatures as well.
        let valid = challenge_context
            .key
            .verify_strict(&formatted_challenge, &signature)
            .is_ok();

        Ok((valid, challenge_context))
    }
}
|
||||||
|
|
||||||
|
/// Result type produced by every user-agent message handler.
type Output = Result<UserAgentResponse, Status>;
|
||||||
|
|
||||||
|
fn auth_response(payload: ServerAuthPayload) -> UserAgentResponse {
|
||||||
|
UserAgentResponse {
|
||||||
|
payload: Some(UserAgentResponsePayload::AuthMessage(AuthServerMessage {
|
||||||
|
payload: Some(payload),
|
||||||
|
})),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[messages]
impl UserAgentActor {
    /// Entry point for a client auth request: parse the public key, record the
    /// request in the state machine, then branch on whether a bootstrap token
    /// was supplied.
    #[message(ctx)]
    pub async fn handle_auth_challenge_request(
        &mut self,
        req: AuthChallengeRequest,
        ctx: &mut Context<Self, Output>,
    ) -> Output {
        let pubkey = req.pubkey.as_array().ok_or(Status::invalid_argument(
            "Expected pubkey to have specific length",
        ))?;
        let pubkey = VerifyingKey::from_bytes(pubkey).map_err(|err| {
            // NOTE(review): `err` is captured but not logged here.
            error!(?pubkey, "Failed to convert to VerifyingKey");
            Status::invalid_argument("Failed to convert pubkey to VerifyingKey")
        })?;

        self.transition(UserAgentEvents::AuthRequest(AuthRequestContext {
            pubkey,
            bootstrap_token: req.bootstrap_token.clone(),
        }))?;

        match req.bootstrap_token {
            Some(token) => self.auth_with_bootstrap_token(pubkey, token).await,
            None => self.auth_with_challenge(pubkey, req.pubkey).await,
        }
    }

    /// Verify the client's signed challenge solution and advance the state
    /// machine to Authenticated (good) or AuthError (bad).
    #[message(ctx)]
    pub async fn handle_auth_challenge_solution(
        &mut self,
        solution: auth::AuthChallengeSolution,
        ctx: &mut Context<Self, Output>,
    ) -> Output {
        let (valid, challenge_context) = self.verify_challenge_solution(&solution)?;

        if valid {
            info!(
                ?challenge_context,
                "Client provided valid solution to authentication challenge"
            );
            self.transition(UserAgentEvents::ReceivedGoodSolution)?;
            Ok(auth_response(ServerAuthPayload::AuthOk(auth::AuthOk {})))
        } else {
            error!("Client provided invalid solution to authentication challenge");
            self.transition(UserAgentEvents::ReceivedBadSolution)?;
            Err(Status::unauthenticated("Invalid challenge solution"))
        }
    }
}
|
||||||
|
|
||||||
|
#[cfg(test)]
mod tests {
    use arbiter_proto::proto::{
        UserAgentResponse,
        auth::{AuthChallengeRequest, AuthOk},
        user_agent_response::Payload as UserAgentResponsePayload,
    };
    use kameo::actor::Spawn;

    use crate::{
        actors::user_agent::HandleAuthChallengeRequest, context::bootstrap::BootstrapActor, db,
    };

    use super::UserAgentActor;

    // A fresh key presenting a valid bootstrap token must be accepted and
    // receive AuthOk without going through the challenge round trip.
    #[tokio::test]
    #[test_log::test]
    pub async fn test_bootstrap_token_auth() {
        let db = db::create_test_pool().await;
        // explicitly not installing any user_agent pubkeys
        let bootstrapper = BootstrapActor::new(&db).await.unwrap(); // this will create bootstrap token
        let token = bootstrapper.get_token().unwrap();

        let bootstrapper_ref = BootstrapActor::spawn(bootstrapper);
        let user_agent = UserAgentActor::new_manual(
            db.clone(),
            bootstrapper_ref,
            tokio::sync::mpsc::channel(1).0, // dummy channel, we won't actually send responses in this test
        );
        let user_agent_ref = UserAgentActor::spawn(user_agent);

        // simulate client sending auth request with bootstrap token
        let new_key = ed25519_dalek::SigningKey::generate(&mut rand::rng());
        let pubkey_bytes = new_key.verifying_key().to_bytes().to_vec();

        let result = user_agent_ref
            .ask(HandleAuthChallengeRequest {
                req: AuthChallengeRequest {
                    pubkey: pubkey_bytes,
                    bootstrap_token: Some(token),
                },
            })
            .await
            .expect("Shouldn't fail to send message");

        // auth succeeded
        assert_eq!(
            result,
            UserAgentResponse {
                payload: Some(UserAgentResponsePayload::AuthMessage(
                    arbiter_proto::proto::auth::ServerMessage {
                        payload: Some(arbiter_proto::proto::auth::server_message::Payload::AuthOk(
                            AuthOk {},
                        )),
                    },
                )),
            }
        );
    }
}
|
||||||
|
|
||||||
|
mod transport;
|
||||||
|
pub(crate) use transport::handle_user_agent;
|
||||||
@@ -1,533 +0,0 @@
|
|||||||
use std::{ops::DerefMut, sync::Mutex};
|
|
||||||
|
|
||||||
use arbiter_proto::{
|
|
||||||
proto::{
|
|
||||||
UnsealEncryptedKey, UnsealResult, UnsealStart, UnsealStartResponse, UserAgentRequest,
|
|
||||||
UserAgentResponse,
|
|
||||||
auth::{
|
|
||||||
self, AuthChallengeRequest, AuthOk, ClientMessage as ClientAuthMessage,
|
|
||||||
ServerMessage as AuthServerMessage, client_message::Payload as ClientAuthPayload,
|
|
||||||
server_message::Payload as ServerAuthPayload,
|
|
||||||
},
|
|
||||||
user_agent_request::Payload as UserAgentRequestPayload,
|
|
||||||
user_agent_response::Payload as UserAgentResponsePayload,
|
|
||||||
},
|
|
||||||
transport::{Bi, DummyTransport},
|
|
||||||
};
|
|
||||||
use chacha20poly1305::{AeadInPlace, XChaCha20Poly1305, XNonce, aead::KeyInit};
|
|
||||||
use diesel::{ExpressionMethods as _, OptionalExtension as _, QueryDsl, dsl::update};
|
|
||||||
use diesel_async::RunQueryDsl;
|
|
||||||
use ed25519_dalek::VerifyingKey;
|
|
||||||
use kameo::{Actor, error::SendError};
|
|
||||||
use memsafe::MemSafe;
|
|
||||||
use tokio::select;
|
|
||||||
use tonic::Status;
|
|
||||||
use tracing::{error, info};
|
|
||||||
use x25519_dalek::{EphemeralSecret, PublicKey};
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
ServerContext,
|
|
||||||
actors::{
|
|
||||||
GlobalActors,
|
|
||||||
bootstrap::ConsumeToken,
|
|
||||||
keyholder::{self, TryUnseal},
|
|
||||||
user_agent::state::{
|
|
||||||
ChallengeContext, DummyContext, UnsealContext, UserAgentEvents, UserAgentStateMachine,
|
|
||||||
UserAgentStates,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
db::{self, schema},
|
|
||||||
};
|
|
||||||
|
|
||||||
mod state;
|
|
||||||
|
|
||||||
#[derive(Debug, Clone, PartialEq, Eq, thiserror::Error)]
|
|
||||||
pub enum UserAgentError {
|
|
||||||
#[error("Expected message with payload")]
|
|
||||||
MissingRequestPayload,
|
|
||||||
#[error("Expected message with payload")]
|
|
||||||
UnexpectedRequestPayload,
|
|
||||||
#[error("Invalid state for challenge solution")]
|
|
||||||
InvalidStateForChallengeSolution,
|
|
||||||
#[error("Invalid state for unseal encrypted key")]
|
|
||||||
InvalidStateForUnsealEncryptedKey,
|
|
||||||
#[error("client_pubkey must be 32 bytes")]
|
|
||||||
InvalidClientPubkeyLength,
|
|
||||||
#[error("Expected pubkey to have specific length")]
|
|
||||||
InvalidAuthPubkeyLength,
|
|
||||||
#[error("Failed to convert pubkey to VerifyingKey")]
|
|
||||||
InvalidAuthPubkeyEncoding,
|
|
||||||
#[error("Invalid signature length")]
|
|
||||||
InvalidSignatureLength,
|
|
||||||
#[error("Invalid bootstrap token")]
|
|
||||||
InvalidBootstrapToken,
|
|
||||||
#[error("Public key not registered")]
|
|
||||||
PublicKeyNotRegistered,
|
|
||||||
#[error("Invalid challenge solution")]
|
|
||||||
InvalidChallengeSolution,
|
|
||||||
#[error("State machine error")]
|
|
||||||
StateTransitionFailed,
|
|
||||||
#[error("Bootstrap token consumption failed")]
|
|
||||||
BootstrapperActorUnreachable,
|
|
||||||
#[error("Vault is not available")]
|
|
||||||
KeyHolderActorUnreachable,
|
|
||||||
#[error("Database pool error")]
|
|
||||||
DatabasePoolUnavailable,
|
|
||||||
#[error("Database error")]
|
|
||||||
DatabaseOperationFailed,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<UserAgentError> for Status {
|
|
||||||
fn from(value: UserAgentError) -> Self {
|
|
||||||
match value {
|
|
||||||
UserAgentError::MissingRequestPayload | UserAgentError::UnexpectedRequestPayload => {
|
|
||||||
Status::invalid_argument("Expected message with payload")
|
|
||||||
}
|
|
||||||
UserAgentError::InvalidStateForChallengeSolution => {
|
|
||||||
Status::invalid_argument("Invalid state for challenge solution")
|
|
||||||
}
|
|
||||||
UserAgentError::InvalidStateForUnsealEncryptedKey => {
|
|
||||||
Status::failed_precondition("Invalid state for unseal encrypted key")
|
|
||||||
}
|
|
||||||
UserAgentError::InvalidClientPubkeyLength => {
|
|
||||||
Status::invalid_argument("client_pubkey must be 32 bytes")
|
|
||||||
}
|
|
||||||
UserAgentError::InvalidAuthPubkeyLength => {
|
|
||||||
Status::invalid_argument("Expected pubkey to have specific length")
|
|
||||||
}
|
|
||||||
UserAgentError::InvalidAuthPubkeyEncoding => {
|
|
||||||
Status::invalid_argument("Failed to convert pubkey to VerifyingKey")
|
|
||||||
}
|
|
||||||
UserAgentError::InvalidSignatureLength => {
|
|
||||||
Status::invalid_argument("Invalid signature length")
|
|
||||||
}
|
|
||||||
UserAgentError::InvalidBootstrapToken => {
|
|
||||||
Status::invalid_argument("Invalid bootstrap token")
|
|
||||||
}
|
|
||||||
UserAgentError::PublicKeyNotRegistered => {
|
|
||||||
Status::unauthenticated("Public key not registered")
|
|
||||||
}
|
|
||||||
UserAgentError::InvalidChallengeSolution => {
|
|
||||||
Status::unauthenticated("Invalid challenge solution")
|
|
||||||
}
|
|
||||||
UserAgentError::StateTransitionFailed => Status::internal("State machine error"),
|
|
||||||
UserAgentError::BootstrapperActorUnreachable => {
|
|
||||||
Status::internal("Bootstrap token consumption failed")
|
|
||||||
}
|
|
||||||
UserAgentError::KeyHolderActorUnreachable => Status::internal("Vault is not available"),
|
|
||||||
UserAgentError::DatabasePoolUnavailable => Status::internal("Database pool error"),
|
|
||||||
UserAgentError::DatabaseOperationFailed => Status::internal("Database error"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct UserAgentActor<Transport>
|
|
||||||
where
|
|
||||||
Transport: Bi<UserAgentRequest, UserAgentResponse, UserAgentError>,
|
|
||||||
{
|
|
||||||
db: db::DatabasePool,
|
|
||||||
actors: GlobalActors,
|
|
||||||
state: UserAgentStateMachine<DummyContext>,
|
|
||||||
transport: Transport,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Transport> UserAgentActor<Transport>
|
|
||||||
where
|
|
||||||
Transport: Bi<UserAgentRequest, UserAgentResponse, UserAgentError>,
|
|
||||||
{
|
|
||||||
pub(crate) fn new(context: ServerContext, transport: Transport) -> Self {
|
|
||||||
Self {
|
|
||||||
db: context.db.clone(),
|
|
||||||
actors: context.actors.clone(),
|
|
||||||
state: UserAgentStateMachine::new(DummyContext),
|
|
||||||
transport,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn transition(&mut self, event: UserAgentEvents) -> Result<(), UserAgentError> {
|
|
||||||
self.state.process_event(event).map_err(|e| {
|
|
||||||
error!(?e, "State transition failed");
|
|
||||||
UserAgentError::StateTransitionFailed
|
|
||||||
})?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn process_transport_inbound(&mut self, req: UserAgentRequest) -> Output {
|
|
||||||
let msg = req.payload.ok_or_else(|| {
|
|
||||||
error!(actor = "useragent", "Received message with no payload");
|
|
||||||
UserAgentError::MissingRequestPayload
|
|
||||||
})?;
|
|
||||||
|
|
||||||
match msg {
|
|
||||||
UserAgentRequestPayload::AuthMessage(ClientAuthMessage {
|
|
||||||
payload: Some(ClientAuthPayload::AuthChallengeRequest(req)),
|
|
||||||
}) => self.handle_auth_challenge_request(req).await,
|
|
||||||
UserAgentRequestPayload::AuthMessage(ClientAuthMessage {
|
|
||||||
payload: Some(ClientAuthPayload::AuthChallengeSolution(solution)),
|
|
||||||
}) => self.handle_auth_challenge_solution(solution).await,
|
|
||||||
UserAgentRequestPayload::UnsealStart(unseal_start) => {
|
|
||||||
self.handle_unseal_request(unseal_start).await
|
|
||||||
}
|
|
||||||
UserAgentRequestPayload::UnsealEncryptedKey(unseal_encrypted_key) => {
|
|
||||||
self.handle_unseal_encrypted_key(unseal_encrypted_key).await
|
|
||||||
}
|
|
||||||
_ => Err(UserAgentError::UnexpectedRequestPayload),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn auth_with_bootstrap_token(
|
|
||||||
&mut self,
|
|
||||||
pubkey: ed25519_dalek::VerifyingKey,
|
|
||||||
token: String,
|
|
||||||
) -> Result<UserAgentResponse, UserAgentError> {
|
|
||||||
let token_ok: bool = self
|
|
||||||
.actors
|
|
||||||
.bootstrapper
|
|
||||||
.ask(ConsumeToken { token })
|
|
||||||
.await
|
|
||||||
.map_err(|e| {
|
|
||||||
error!(?pubkey, "Failed to consume bootstrap token: {e}");
|
|
||||||
UserAgentError::BootstrapperActorUnreachable
|
|
||||||
})?;
|
|
||||||
|
|
||||||
if !token_ok {
|
|
||||||
error!(?pubkey, "Invalid bootstrap token provided");
|
|
||||||
return Err(UserAgentError::InvalidBootstrapToken);
|
|
||||||
}
|
|
||||||
|
|
||||||
{
|
|
||||||
let mut conn = self.db.get().await.map_err(|e| {
|
|
||||||
error!(error = ?e, "Database pool error");
|
|
||||||
UserAgentError::DatabasePoolUnavailable
|
|
||||||
})?;
|
|
||||||
|
|
||||||
diesel::insert_into(schema::useragent_client::table)
|
|
||||||
.values((
|
|
||||||
schema::useragent_client::public_key.eq(pubkey.as_bytes().to_vec()),
|
|
||||||
schema::useragent_client::nonce.eq(1),
|
|
||||||
))
|
|
||||||
.execute(&mut conn)
|
|
||||||
.await
|
|
||||||
.map_err(|e| {
|
|
||||||
error!(error = ?e, "Database error");
|
|
||||||
UserAgentError::DatabaseOperationFailed
|
|
||||||
})?;
|
|
||||||
}
|
|
||||||
|
|
||||||
self.transition(UserAgentEvents::ReceivedBootstrapToken)?;
|
|
||||||
|
|
||||||
Ok(auth_response(ServerAuthPayload::AuthOk(AuthOk {})))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn auth_with_challenge(&mut self, pubkey: VerifyingKey, pubkey_bytes: Vec<u8>) -> Output {
|
|
||||||
let nonce: Option<i32> = {
|
|
||||||
let mut db_conn = self.db.get().await.map_err(|e| {
|
|
||||||
error!(error = ?e, "Database pool error");
|
|
||||||
UserAgentError::DatabasePoolUnavailable
|
|
||||||
})?;
|
|
||||||
db_conn
|
|
||||||
.exclusive_transaction(|conn| {
|
|
||||||
Box::pin(async move {
|
|
||||||
let current_nonce = schema::useragent_client::table
|
|
||||||
.filter(
|
|
||||||
schema::useragent_client::public_key.eq(pubkey.as_bytes().to_vec()),
|
|
||||||
)
|
|
||||||
.select(schema::useragent_client::nonce)
|
|
||||||
.first::<i32>(conn)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
update(schema::useragent_client::table)
|
|
||||||
.filter(
|
|
||||||
schema::useragent_client::public_key.eq(pubkey.as_bytes().to_vec()),
|
|
||||||
)
|
|
||||||
.set(schema::useragent_client::nonce.eq(current_nonce + 1))
|
|
||||||
.execute(conn)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
Result::<_, diesel::result::Error>::Ok(current_nonce)
|
|
||||||
})
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
.optional()
|
|
||||||
.map_err(|e| {
|
|
||||||
error!(error = ?e, "Database error");
|
|
||||||
UserAgentError::DatabaseOperationFailed
|
|
||||||
})?
|
|
||||||
};
|
|
||||||
|
|
||||||
let Some(nonce) = nonce else {
|
|
||||||
error!(?pubkey, "Public key not found in database");
|
|
||||||
return Err(UserAgentError::PublicKeyNotRegistered);
|
|
||||||
};
|
|
||||||
|
|
||||||
let challenge = auth::AuthChallenge {
|
|
||||||
pubkey: pubkey_bytes,
|
|
||||||
nonce,
|
|
||||||
};
|
|
||||||
|
|
||||||
self.transition(UserAgentEvents::SentChallenge(ChallengeContext {
|
|
||||||
challenge: challenge.clone(),
|
|
||||||
key: pubkey,
|
|
||||||
}))?;
|
|
||||||
|
|
||||||
info!(
|
|
||||||
?pubkey,
|
|
||||||
?challenge,
|
|
||||||
"Sent authentication challenge to client"
|
|
||||||
);
|
|
||||||
|
|
||||||
Ok(auth_response(ServerAuthPayload::AuthChallenge(challenge)))
|
|
||||||
}
|
|
||||||
|
|
||||||
fn verify_challenge_solution(
|
|
||||||
&self,
|
|
||||||
solution: &auth::AuthChallengeSolution,
|
|
||||||
) -> Result<(bool, &ChallengeContext), UserAgentError> {
|
|
||||||
let UserAgentStates::WaitingForChallengeSolution(challenge_context) = self.state.state()
|
|
||||||
else {
|
|
||||||
error!("Received challenge solution in invalid state");
|
|
||||||
return Err(UserAgentError::InvalidStateForChallengeSolution);
|
|
||||||
};
|
|
||||||
let formatted_challenge = arbiter_proto::format_challenge(&challenge_context.challenge);
|
|
||||||
|
|
||||||
let signature = solution.signature.as_slice().try_into().map_err(|_| {
|
|
||||||
error!(?solution, "Invalid signature length");
|
|
||||||
UserAgentError::InvalidSignatureLength
|
|
||||||
})?;
|
|
||||||
|
|
||||||
let valid = challenge_context
|
|
||||||
.key
|
|
||||||
.verify_strict(&formatted_challenge, &signature)
|
|
||||||
.is_ok();
|
|
||||||
|
|
||||||
Ok((valid, challenge_context))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type Output = Result<UserAgentResponse, UserAgentError>;
|
|
||||||
|
|
||||||
fn auth_response(payload: ServerAuthPayload) -> UserAgentResponse {
|
|
||||||
UserAgentResponse {
|
|
||||||
payload: Some(UserAgentResponsePayload::AuthMessage(AuthServerMessage {
|
|
||||||
payload: Some(payload),
|
|
||||||
})),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn unseal_response(payload: UserAgentResponsePayload) -> UserAgentResponse {
|
|
||||||
UserAgentResponse {
|
|
||||||
payload: Some(payload),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<Transport> UserAgentActor<Transport>
|
|
||||||
where
|
|
||||||
Transport: Bi<UserAgentRequest, UserAgentResponse, UserAgentError>,
|
|
||||||
{
|
|
||||||
async fn handle_unseal_request(&mut self, req: UnsealStart) -> Output {
|
|
||||||
let secret = EphemeralSecret::random();
|
|
||||||
let public_key = PublicKey::from(&secret);
|
|
||||||
|
|
||||||
let client_pubkey_bytes: [u8; 32] = req
|
|
||||||
.client_pubkey
|
|
||||||
.try_into()
|
|
||||||
.map_err(|_| UserAgentError::InvalidClientPubkeyLength)?;
|
|
||||||
|
|
||||||
let client_public_key = PublicKey::from(client_pubkey_bytes);
|
|
||||||
|
|
||||||
self.transition(UserAgentEvents::UnsealRequest(UnsealContext {
|
|
||||||
secret: Mutex::new(Some(secret)),
|
|
||||||
client_public_key,
|
|
||||||
}))?;
|
|
||||||
|
|
||||||
Ok(unseal_response(
|
|
||||||
UserAgentResponsePayload::UnsealStartResponse(UnsealStartResponse {
|
|
||||||
server_pubkey: public_key.as_bytes().to_vec(),
|
|
||||||
}),
|
|
||||||
))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn handle_unseal_encrypted_key(&mut self, req: UnsealEncryptedKey) -> Output {
|
|
||||||
let UserAgentStates::WaitingForUnsealKey(unseal_context) = self.state.state() else {
|
|
||||||
error!("Received unseal encrypted key in invalid state");
|
|
||||||
return Err(UserAgentError::InvalidStateForUnsealEncryptedKey);
|
|
||||||
};
|
|
||||||
let ephemeral_secret = {
|
|
||||||
let mut secret_lock = unseal_context.secret.lock().unwrap();
|
|
||||||
let secret = secret_lock.take();
|
|
||||||
match secret {
|
|
||||||
Some(secret) => secret,
|
|
||||||
None => {
|
|
||||||
drop(secret_lock);
|
|
||||||
error!("Ephemeral secret already taken");
|
|
||||||
self.transition(UserAgentEvents::ReceivedInvalidKey)?;
|
|
||||||
return Ok(unseal_response(UserAgentResponsePayload::UnsealResult(
|
|
||||||
UnsealResult::InvalidKey.into(),
|
|
||||||
)));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let nonce = XNonce::from_slice(&req.nonce);
|
|
||||||
|
|
||||||
let shared_secret = ephemeral_secret.diffie_hellman(&unseal_context.client_public_key);
|
|
||||||
let cipher = XChaCha20Poly1305::new(shared_secret.as_bytes().into());
|
|
||||||
|
|
||||||
let mut seal_key_buffer = MemSafe::new(req.ciphertext.clone()).unwrap();
|
|
||||||
|
|
||||||
let decryption_result = {
|
|
||||||
let mut write_handle = seal_key_buffer.write().unwrap();
|
|
||||||
let write_handle = write_handle.deref_mut();
|
|
||||||
cipher.decrypt_in_place(nonce, &req.associated_data, write_handle)
|
|
||||||
};
|
|
||||||
|
|
||||||
match decryption_result {
|
|
||||||
Ok(_) => {
|
|
||||||
match self
|
|
||||||
.actors
|
|
||||||
.key_holder
|
|
||||||
.ask(TryUnseal {
|
|
||||||
seal_key_raw: seal_key_buffer,
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
{
|
|
||||||
Ok(_) => {
|
|
||||||
info!("Successfully unsealed key with client-provided key");
|
|
||||||
self.transition(UserAgentEvents::ReceivedValidKey)?;
|
|
||||||
Ok(unseal_response(UserAgentResponsePayload::UnsealResult(
|
|
||||||
UnsealResult::Success.into(),
|
|
||||||
)))
|
|
||||||
}
|
|
||||||
Err(SendError::HandlerError(keyholder::Error::InvalidKey)) => {
|
|
||||||
self.transition(UserAgentEvents::ReceivedInvalidKey)?;
|
|
||||||
Ok(unseal_response(UserAgentResponsePayload::UnsealResult(
|
|
||||||
UnsealResult::InvalidKey.into(),
|
|
||||||
)))
|
|
||||||
}
|
|
||||||
Err(SendError::HandlerError(err)) => {
|
|
||||||
error!(?err, "Keyholder failed to unseal key");
|
|
||||||
self.transition(UserAgentEvents::ReceivedInvalidKey)?;
|
|
||||||
Ok(unseal_response(UserAgentResponsePayload::UnsealResult(
|
|
||||||
UnsealResult::InvalidKey.into(),
|
|
||||||
)))
|
|
||||||
}
|
|
||||||
Err(err) => {
|
|
||||||
error!(?err, "Failed to send unseal request to keyholder");
|
|
||||||
self.transition(UserAgentEvents::ReceivedInvalidKey)?;
|
|
||||||
Err(UserAgentError::KeyHolderActorUnreachable)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(err) => {
|
|
||||||
error!(?err, "Failed to decrypt unseal key");
|
|
||||||
self.transition(UserAgentEvents::ReceivedInvalidKey)?;
|
|
||||||
Ok(unseal_response(UserAgentResponsePayload::UnsealResult(
|
|
||||||
UnsealResult::InvalidKey.into(),
|
|
||||||
)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn handle_auth_challenge_request(&mut self, req: AuthChallengeRequest) -> Output {
|
|
||||||
let pubkey = req
|
|
||||||
.pubkey
|
|
||||||
.as_array()
|
|
||||||
.ok_or(UserAgentError::InvalidAuthPubkeyLength)?;
|
|
||||||
let pubkey = VerifyingKey::from_bytes(pubkey).map_err(|_err| {
|
|
||||||
error!(?pubkey, "Failed to convert to VerifyingKey");
|
|
||||||
UserAgentError::InvalidAuthPubkeyEncoding
|
|
||||||
})?;
|
|
||||||
|
|
||||||
self.transition(UserAgentEvents::AuthRequest)?;
|
|
||||||
|
|
||||||
match req.bootstrap_token {
|
|
||||||
Some(token) => self.auth_with_bootstrap_token(pubkey, token).await,
|
|
||||||
None => self.auth_with_challenge(pubkey, req.pubkey).await,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn handle_auth_challenge_solution(
|
|
||||||
&mut self,
|
|
||||||
solution: auth::AuthChallengeSolution,
|
|
||||||
) -> Output {
|
|
||||||
let (valid, challenge_context) = self.verify_challenge_solution(&solution)?;
|
|
||||||
|
|
||||||
if valid {
|
|
||||||
info!(
|
|
||||||
?challenge_context,
|
|
||||||
"Client provided valid solution to authentication challenge"
|
|
||||||
);
|
|
||||||
self.transition(UserAgentEvents::ReceivedGoodSolution)?;
|
|
||||||
Ok(auth_response(ServerAuthPayload::AuthOk(AuthOk {})))
|
|
||||||
} else {
|
|
||||||
error!("Client provided invalid solution to authentication challenge");
|
|
||||||
self.transition(UserAgentEvents::ReceivedBadSolution)?;
|
|
||||||
Err(UserAgentError::InvalidChallengeSolution)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
impl<Transport> Actor for UserAgentActor<Transport>
|
|
||||||
where
|
|
||||||
Transport: Bi<UserAgentRequest, UserAgentResponse, UserAgentError>,
|
|
||||||
{
|
|
||||||
type Args = Self;
|
|
||||||
|
|
||||||
type Error = ();
|
|
||||||
|
|
||||||
async fn on_start(
|
|
||||||
args: Self::Args,
|
|
||||||
_: kameo::prelude::ActorRef<Self>,
|
|
||||||
) -> Result<Self, Self::Error> {
|
|
||||||
Ok(args)
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn next(
|
|
||||||
&mut self,
|
|
||||||
_actor_ref: kameo::prelude::WeakActorRef<Self>,
|
|
||||||
mailbox_rx: &mut kameo::prelude::MailboxReceiver<Self>,
|
|
||||||
) -> Option<kameo::mailbox::Signal<Self>> {
|
|
||||||
loop {
|
|
||||||
select! {
|
|
||||||
signal = mailbox_rx.recv() => {
|
|
||||||
return signal;
|
|
||||||
}
|
|
||||||
msg = self.transport.recv() => {
|
|
||||||
match msg {
|
|
||||||
Some(request) => {
|
|
||||||
match self.process_transport_inbound(request).await {
|
|
||||||
Ok(response) => {
|
|
||||||
if self.transport.send(Ok(response)).await.is_err() {
|
|
||||||
error!(actor = "useragent", reason = "channel closed", "send.failed");
|
|
||||||
return Some(kameo::mailbox::Signal::Stop);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(err) => {
|
|
||||||
let _ = self.transport.send(Err(err)).await;
|
|
||||||
return Some(kameo::mailbox::Signal::Stop);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
None => {
|
|
||||||
info!(actor = "useragent", "transport.closed");
|
|
||||||
return Some(kameo::mailbox::Signal::Stop);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
impl UserAgentActor<DummyTransport<UserAgentRequest, UserAgentResponse, UserAgentError>> {
|
|
||||||
pub fn new_manual(db: db::DatabasePool, actors: GlobalActors) -> Self {
|
|
||||||
Self {
|
|
||||||
db,
|
|
||||||
actors,
|
|
||||||
state: UserAgentStateMachine::new(DummyContext),
|
|
||||||
transport: DummyTransport::new(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,51 +0,0 @@
|
|||||||
use std::sync::Mutex;
|
|
||||||
|
|
||||||
use arbiter_proto::proto::auth::AuthChallenge;
|
|
||||||
use ed25519_dalek::VerifyingKey;
|
|
||||||
use x25519_dalek::{EphemeralSecret, PublicKey};
|
|
||||||
|
|
||||||
/// Context for state machine with validated key and sent challenge
|
|
||||||
/// Challenge is then transformed to bytes using shared function and verified
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
pub struct ChallengeContext {
|
|
||||||
pub challenge: AuthChallenge,
|
|
||||||
pub key: VerifyingKey,
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct UnsealContext {
|
|
||||||
pub client_public_key: PublicKey,
|
|
||||||
pub secret: Mutex<Option<EphemeralSecret>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
smlang::statemachine!(
|
|
||||||
name: UserAgent,
|
|
||||||
custom_error: false,
|
|
||||||
transitions: {
|
|
||||||
*Init + AuthRequest = ReceivedAuthRequest,
|
|
||||||
ReceivedAuthRequest + ReceivedBootstrapToken = Idle,
|
|
||||||
|
|
||||||
ReceivedAuthRequest + SentChallenge(ChallengeContext) / move_challenge = WaitingForChallengeSolution(ChallengeContext),
|
|
||||||
|
|
||||||
WaitingForChallengeSolution(ChallengeContext) + ReceivedGoodSolution = Idle,
|
|
||||||
WaitingForChallengeSolution(ChallengeContext) + ReceivedBadSolution = AuthError, // block further transitions, but connection should close anyway
|
|
||||||
|
|
||||||
Idle + UnsealRequest(UnsealContext) / generate_temp_keypair = WaitingForUnsealKey(UnsealContext),
|
|
||||||
WaitingForUnsealKey(UnsealContext) + ReceivedValidKey = Unsealed,
|
|
||||||
WaitingForUnsealKey(UnsealContext) + ReceivedInvalidKey = Idle,
|
|
||||||
}
|
|
||||||
);
|
|
||||||
|
|
||||||
pub struct DummyContext;
|
|
||||||
impl UserAgentStateMachineContext for DummyContext {
|
|
||||||
#[allow(missing_docs)]
|
|
||||||
#[allow(clippy::unused_unit)]
|
|
||||||
fn generate_temp_keypair(&mut self, event_data: UnsealContext) -> Result<UnsealContext, ()> {
|
|
||||||
Ok(event_data)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[allow(missing_docs)]
|
|
||||||
#[allow(clippy::unused_unit)]
|
|
||||||
fn move_challenge(&mut self, event_data: ChallengeContext) -> Result<ChallengeContext, ()> {
|
|
||||||
Ok(event_data)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -0,0 +1,95 @@
|
|||||||
|
use super::UserAgentActor;
|
||||||
|
use arbiter_proto::proto::{
|
||||||
|
UserAgentRequest, UserAgentResponse,
|
||||||
|
auth::{
|
||||||
|
self, AuthChallenge, AuthChallengeRequest, AuthOk, ClientMessage,
|
||||||
|
ServerMessage as AuthServerMessage, client_message::Payload as ClientAuthPayload,
|
||||||
|
server_message::Payload as ServerAuthPayload,
|
||||||
|
},
|
||||||
|
user_agent_request::Payload as UserAgentRequestPayload,
|
||||||
|
user_agent_response::Payload as UserAgentResponsePayload,
|
||||||
|
};
|
||||||
|
use futures::StreamExt;
|
||||||
|
use kameo::{
|
||||||
|
actor::{ActorRef, Spawn as _},
|
||||||
|
error::SendError,
|
||||||
|
};
|
||||||
|
use tokio::sync::mpsc;
|
||||||
|
use tonic::Status;
|
||||||
|
use tracing::error;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
actors::user_agent::{HandleAuthChallengeRequest, HandleAuthChallengeSolution},
|
||||||
|
context::ServerContext,
|
||||||
|
};
|
||||||
|
|
||||||
|
pub(crate) async fn handle_user_agent(
|
||||||
|
context: ServerContext,
|
||||||
|
mut req_stream: tonic::Streaming<UserAgentRequest>,
|
||||||
|
tx: mpsc::Sender<Result<UserAgentResponse, Status>>,
|
||||||
|
) {
|
||||||
|
let actor = UserAgentActor::spawn(UserAgentActor::new(context, tx.clone()));
|
||||||
|
|
||||||
|
while let Some(Ok(req)) = req_stream.next().await
|
||||||
|
&& actor.is_alive()
|
||||||
|
{
|
||||||
|
match process_message(&actor, req).await {
|
||||||
|
Ok(resp) => {
|
||||||
|
if tx.send(Ok(resp)).await.is_err() {
|
||||||
|
error!(actor = "useragent", "Failed to send response to client");
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(status) => {
|
||||||
|
let _ = tx.send(Err(status)).await;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
actor.kill();
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn process_message(
|
||||||
|
actor: &ActorRef<UserAgentActor>,
|
||||||
|
req: UserAgentRequest,
|
||||||
|
) -> Result<UserAgentResponse, Status> {
|
||||||
|
let msg = req.payload.ok_or_else(|| {
|
||||||
|
error!(actor = "useragent", "Received message with no payload");
|
||||||
|
Status::invalid_argument("Expected message with payload")
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let UserAgentRequestPayload::AuthMessage(ClientMessage {
|
||||||
|
payload: Some(client_message),
|
||||||
|
}) = msg
|
||||||
|
else {
|
||||||
|
error!(
|
||||||
|
actor = "useragent",
|
||||||
|
"Received unexpected message type during authentication"
|
||||||
|
);
|
||||||
|
return Err(Status::invalid_argument(
|
||||||
|
"Expected AuthMessage with ClientMessage payload",
|
||||||
|
));
|
||||||
|
};
|
||||||
|
|
||||||
|
match client_message {
|
||||||
|
ClientAuthPayload::AuthChallengeRequest(req) => actor
|
||||||
|
.ask(HandleAuthChallengeRequest { req })
|
||||||
|
.await
|
||||||
|
.map_err(into_status),
|
||||||
|
ClientAuthPayload::AuthChallengeSolution(solution) => actor
|
||||||
|
.ask(HandleAuthChallengeSolution { solution })
|
||||||
|
.await
|
||||||
|
.map_err(into_status),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn into_status<M>(e: SendError<M, Status>) -> Status {
|
||||||
|
match e {
|
||||||
|
SendError::HandlerError(status) => status,
|
||||||
|
_ => {
|
||||||
|
error!(actor = "useragent", "Failed to send message to actor");
|
||||||
|
Status::internal("session failure")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
402
server/crates/arbiter-server/src/context.rs
Normal file
@@ -0,0 +1,402 @@
|
|||||||
|
use std::collections::HashSet;
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use diesel::OptionalExtension as _;
|
||||||
|
use diesel_async::RunQueryDsl as _;
|
||||||
|
use ed25519_dalek::VerifyingKey;
|
||||||
|
use kameo::actor::{ActorRef, Spawn};
|
||||||
|
use miette::Diagnostic;
|
||||||
|
use rand::rngs::StdRng;
|
||||||
|
use secrecy::{ExposeSecret, SecretBox};
|
||||||
|
use smlang::statemachine;
|
||||||
|
use thiserror::Error;
|
||||||
|
use tokio::sync::{watch, RwLock};
|
||||||
|
use zeroize::Zeroizing;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
context::{
|
||||||
|
bootstrap::{BootstrapActor, generate_token},
|
||||||
|
lease::LeaseHandler,
|
||||||
|
tls::{RotationState, RotationTask, TlsDataRaw, TlsManager},
|
||||||
|
},
|
||||||
|
db::{
|
||||||
|
self,
|
||||||
|
models::ArbiterSetting,
|
||||||
|
schema::{self, arbiter_settings},
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
pub(crate) mod bootstrap;
|
||||||
|
pub(crate) mod lease;
|
||||||
|
pub(crate) mod tls;
|
||||||
|
|
||||||
|
#[derive(Error, Debug, Diagnostic)]
|
||||||
|
pub enum InitError {
|
||||||
|
#[error("Database setup failed: {0}")]
|
||||||
|
#[diagnostic(code(arbiter_server::init::database_setup))]
|
||||||
|
DatabaseSetup(#[from] db::DatabaseSetupError),
|
||||||
|
|
||||||
|
#[error("Connection acquire failed: {0}")]
|
||||||
|
#[diagnostic(code(arbiter_server::init::database_pool))]
|
||||||
|
DatabasePool(#[from] db::PoolError),
|
||||||
|
|
||||||
|
#[error("Database query error: {0}")]
|
||||||
|
#[diagnostic(code(arbiter_server::init::database_query))]
|
||||||
|
DatabaseQuery(#[from] diesel::result::Error),
|
||||||
|
|
||||||
|
#[error("TLS initialization failed: {0}")]
|
||||||
|
#[diagnostic(code(arbiter_server::init::tls_init))]
|
||||||
|
Tls(#[from] tls::TlsInitError),
|
||||||
|
|
||||||
|
#[error("Bootstrap token generation failed: {0}")]
|
||||||
|
#[diagnostic(code(arbiter_server::init::bootstrap_token))]
|
||||||
|
BootstrapToken(#[from] bootstrap::BootstrapError),
|
||||||
|
|
||||||
|
#[error("I/O Error: {0}")]
|
||||||
|
#[diagnostic(code(arbiter_server::init::io))]
|
||||||
|
Io(#[from] std::io::Error),
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Error, Debug, Diagnostic)]
|
||||||
|
pub enum UnsealError {
|
||||||
|
#[error("Database error: {0}")]
|
||||||
|
#[diagnostic(code(arbiter_server::unseal::database_pool))]
|
||||||
|
Database(#[from] db::PoolError),
|
||||||
|
|
||||||
|
#[error("Query error: {0}")]
|
||||||
|
#[diagnostic(code(arbiter_server::unseal::database_query))]
|
||||||
|
Query(#[from] diesel::result::Error),
|
||||||
|
|
||||||
|
#[error("Decryption failed: {0}")]
|
||||||
|
#[diagnostic(code(arbiter_server::unseal::decryption))]
|
||||||
|
DecryptionFailed(#[from] crate::crypto::CryptoError),
|
||||||
|
|
||||||
|
#[error("Invalid state for unseal")]
|
||||||
|
#[diagnostic(code(arbiter_server::unseal::invalid_state))]
|
||||||
|
InvalidState,
|
||||||
|
|
||||||
|
#[error("Missing salt in database")]
|
||||||
|
#[diagnostic(code(arbiter_server::unseal::missing_salt))]
|
||||||
|
MissingSalt,
|
||||||
|
|
||||||
|
#[error("No root key configured in database")]
|
||||||
|
#[diagnostic(code(arbiter_server::unseal::no_root_key))]
|
||||||
|
NoRootKey,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Error, Debug, Diagnostic)]
|
||||||
|
pub enum SealError {
|
||||||
|
#[error("Invalid state for seal")]
|
||||||
|
#[diagnostic(code(arbiter_server::seal::invalid_state))]
|
||||||
|
InvalidState,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Secure in-memory storage for root encryption key
|
||||||
|
///
|
||||||
|
/// Uses `secrecy` crate for automatic zeroization on drop to prevent key material
|
||||||
|
/// from remaining in memory after use. SecretBox provides heap-allocated secret
|
||||||
|
/// storage that implements Send + Sync for safe use in async contexts.
|
||||||
|
pub struct KeyStorage {
|
||||||
|
/// 32-byte root key protected by SecretBox
|
||||||
|
key: SecretBox<[u8; 32]>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl KeyStorage {
|
||||||
|
/// Create new KeyStorage from a 32-byte root key
|
||||||
|
pub fn new(key: [u8; 32]) -> Self {
|
||||||
|
Self {
|
||||||
|
key: SecretBox::new(Box::new(key)),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Access the key for cryptographic operations
|
||||||
|
pub fn key(&self) -> &[u8; 32] {
|
||||||
|
self.key.expose_secret()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Drop автоматически реализован через secrecy::Zeroize
|
||||||
|
// который зануляет память при освобождении
|
||||||
|
|
||||||
|
statemachine! {
|
||||||
|
name: Server,
|
||||||
|
transitions: {
|
||||||
|
*NotBootstrapped + Bootstrapped = Sealed,
|
||||||
|
Sealed + Unsealed(KeyStorage) / move_key = Ready(KeyStorage),
|
||||||
|
Ready(KeyStorage) + Sealed / dispose_key = Sealed,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub struct _Context;
|
||||||
|
impl ServerStateMachineContext for _Context {
|
||||||
|
/// Move key from unseal event into Ready state
|
||||||
|
fn move_key(&mut self, event_data: KeyStorage) -> Result<KeyStorage, ()> {
|
||||||
|
// Просто перемещаем KeyStorage из event в state
|
||||||
|
// Без клонирования - event data consumed
|
||||||
|
Ok(event_data)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Securely dispose of key when sealing
|
||||||
|
#[allow(missing_docs)]
|
||||||
|
#[allow(clippy::unused_unit)]
|
||||||
|
fn dispose_key(&mut self, _state_data: &KeyStorage) -> Result<(), ()> {
|
||||||
|
// KeyStorage будет dropped после state transition
|
||||||
|
// secrecy::Zeroize зануляет память автоматически
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) struct _ServerContextInner {
|
||||||
|
pub db: db::DatabasePool,
|
||||||
|
pub state: RwLock<ServerStateMachine<_Context>>,
|
||||||
|
pub rng: StdRng,
|
||||||
|
pub tls: Arc<TlsManager>,
|
||||||
|
pub bootstrapper: ActorRef<BootstrapActor>,
|
||||||
|
pub rotation_state: RwLock<RotationState>,
|
||||||
|
pub rotation_acks: Arc<RwLock<HashSet<VerifyingKey>>>,
|
||||||
|
pub user_agent_leases: LeaseHandler<VerifyingKey>,
|
||||||
|
pub client_leases: LeaseHandler<VerifyingKey>,
|
||||||
|
}
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub(crate) struct ServerContext(Arc<_ServerContextInner>);
|
||||||
|
|
||||||
|
impl std::ops::Deref for ServerContext {
|
||||||
|
type Target = _ServerContextInner;
|
||||||
|
|
||||||
|
fn deref(&self) -> &Self::Target {
|
||||||
|
&self.0
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ServerContext {
|
||||||
|
/// Check if all active clients have acknowledged the rotation
|
||||||
|
pub async fn check_rotation_ready(&self) -> bool {
|
||||||
|
// TODO: Implement proper rotation readiness check
|
||||||
|
// For now, return false as placeholder
|
||||||
|
false
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn load_tls(
|
||||||
|
db: &db::DatabasePool,
|
||||||
|
settings: Option<&ArbiterSetting>,
|
||||||
|
) -> Result<TlsManager, InitError> {
|
||||||
|
match settings {
|
||||||
|
Some(s) if s.current_cert_id.is_some() => {
|
||||||
|
// Load active certificate from tls_certificates table
|
||||||
|
Ok(TlsManager::load_from_db(
|
||||||
|
db.clone(),
|
||||||
|
s.current_cert_id.unwrap(),
|
||||||
|
)
|
||||||
|
.await?)
|
||||||
|
}
|
||||||
|
Some(s) => {
|
||||||
|
// Legacy migration: extract validity and save to new table
|
||||||
|
let tls_data_raw = TlsDataRaw {
|
||||||
|
cert: s.cert.clone(),
|
||||||
|
key: s.cert_key.clone(),
|
||||||
|
};
|
||||||
|
|
||||||
|
// For legacy certificates, use current time as not_before
|
||||||
|
// and current time + 90 days as not_after
|
||||||
|
let not_before = chrono::Utc::now().timestamp();
|
||||||
|
let not_after = not_before + (90 * 24 * 60 * 60); // 90 days
|
||||||
|
|
||||||
|
Ok(TlsManager::new_from_legacy(
|
||||||
|
db.clone(),
|
||||||
|
tls_data_raw,
|
||||||
|
not_before,
|
||||||
|
not_after,
|
||||||
|
)
|
||||||
|
.await?)
|
||||||
|
}
|
||||||
|
None => {
|
||||||
|
// First startup - generate new certificate
|
||||||
|
Ok(TlsManager::new(db.clone()).await?)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn new(db: db::DatabasePool) -> Result<Self, InitError> {
|
||||||
|
let mut conn = db.get().await?;
|
||||||
|
let rng = rand::make_rng();
|
||||||
|
|
||||||
|
let settings = arbiter_settings::table
|
||||||
|
.first::<ArbiterSetting>(&mut conn)
|
||||||
|
.await
|
||||||
|
.optional()?;
|
||||||
|
|
||||||
|
drop(conn);
|
||||||
|
|
||||||
|
// Load TLS manager
|
||||||
|
let tls = Self::load_tls(&db, settings.as_ref()).await?;
|
||||||
|
|
||||||
|
// Load rotation state from database
|
||||||
|
let rotation_state = RotationState::load_from_db(&db)
|
||||||
|
.await
|
||||||
|
.unwrap_or(RotationState::Normal);
|
||||||
|
|
||||||
|
let bootstrap_token = generate_token().await?;
|
||||||
|
|
||||||
|
let mut state = ServerStateMachine::new(_Context);
|
||||||
|
|
||||||
|
if let Some(settings) = &settings
|
||||||
|
&& settings.root_key_id.is_some()
|
||||||
|
{
|
||||||
|
// TODO: pass the encrypted root key to the state machine and let it handle decryption and transition to Sealed
|
||||||
|
let _ = state.process_event(ServerEvents::Bootstrapped);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create shutdown channel for rotation task
|
||||||
|
let (rotation_shutdown_tx, rotation_shutdown_rx) = watch::channel(false);
|
||||||
|
|
||||||
|
// Initialize bootstrap actor
|
||||||
|
let bootstrapper = BootstrapActor::spawn(BootstrapActor::new(&db).await?);
|
||||||
|
|
||||||
|
let context = Arc::new(_ServerContextInner {
|
||||||
|
db: db.clone(),
|
||||||
|
rng,
|
||||||
|
tls: Arc::new(tls),
|
||||||
|
state: RwLock::new(state),
|
||||||
|
bootstrapper,
|
||||||
|
rotation_state: RwLock::new(rotation_state),
|
||||||
|
rotation_acks: Arc::new(RwLock::new(HashSet::new())),
|
||||||
|
user_agent_leases: Default::default(),
|
||||||
|
client_leases: Default::default(),
|
||||||
|
});
|
||||||
|
|
||||||
|
Ok(Self(context))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Unseal vault with password
|
||||||
|
pub async fn unseal(&self, password: &str) -> Result<(), UnsealError> {
|
||||||
|
use crate::crypto::root_key;
|
||||||
|
use diesel::QueryDsl as _;
|
||||||
|
|
||||||
|
// 1. Get root_key_id from settings
|
||||||
|
let mut conn = self.db.get().await?;
|
||||||
|
|
||||||
|
let settings: db::models::ArbiterSetting = schema::arbiter_settings::table
|
||||||
|
.first(&mut conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let root_key_id = settings.root_key_id.ok_or(UnsealError::NoRootKey)?;
|
||||||
|
|
||||||
|
// 2. Load encrypted root key
|
||||||
|
let encrypted: db::models::AeadEncrypted = schema::aead_encrypted::table
|
||||||
|
.find(root_key_id)
|
||||||
|
.first(&mut conn)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let salt = encrypted
|
||||||
|
.argon2_salt
|
||||||
|
.as_ref()
|
||||||
|
.ok_or(UnsealError::MissingSalt)?;
|
||||||
|
|
||||||
|
drop(conn);
|
||||||
|
|
||||||
|
// 3. Decrypt root key using password
|
||||||
|
let root_key = root_key::decrypt_root_key(&encrypted, password, salt)
|
||||||
|
.map_err(UnsealError::DecryptionFailed)?;
|
||||||
|
|
||||||
|
// 4. Create secure storage
|
||||||
|
let key_storage = KeyStorage::new(root_key);
|
||||||
|
|
||||||
|
// 5. Transition state machine
|
||||||
|
let mut state = self.state.write().await;
|
||||||
|
state
|
||||||
|
.process_event(ServerEvents::Unsealed(key_storage))
|
||||||
|
.map_err(|_| UnsealError::InvalidState)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Seal the server (lock the key)
|
||||||
|
pub async fn seal(&self) -> Result<(), SealError> {
|
||||||
|
let mut state = self.state.write().await;
|
||||||
|
state
|
||||||
|
.process_event(ServerEvents::Sealed)
|
||||||
|
.map_err(|_| SealError::InvalidState)?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_keystorage_creation() {
|
||||||
|
let key = [42u8; 32];
|
||||||
|
let storage = KeyStorage::new(key);
|
||||||
|
assert_eq!(storage.key()[0], 42);
|
||||||
|
assert_eq!(storage.key().len(), 32);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_keystorage_zeroization() {
|
||||||
|
let key = [99u8; 32];
|
||||||
|
{
|
||||||
|
let _storage = KeyStorage::new(key);
|
||||||
|
// storage будет dropped здесь
|
||||||
|
}
|
||||||
|
// После drop SecretBox должен зануляеть память
|
||||||
|
// Это проверяется автоматически через secrecy::Zeroize
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_state_machine_transitions() {
|
||||||
|
let mut state = ServerStateMachine::new(_Context);
|
||||||
|
|
||||||
|
// Начальное состояние
|
||||||
|
assert!(matches!(state.state(), &ServerStates::NotBootstrapped));
|
||||||
|
|
||||||
|
// Bootstrapped transition
|
||||||
|
state.process_event(ServerEvents::Bootstrapped).unwrap();
|
||||||
|
assert!(matches!(state.state(), &ServerStates::Sealed));
|
||||||
|
|
||||||
|
// Unsealed transition
|
||||||
|
let key_storage = KeyStorage::new([1u8; 32]);
|
||||||
|
state
|
||||||
|
.process_event(ServerEvents::Unsealed(key_storage))
|
||||||
|
.unwrap();
|
||||||
|
assert!(matches!(state.state(), &ServerStates::Ready(_)));
|
||||||
|
|
||||||
|
// Sealed transition
|
||||||
|
state.process_event(ServerEvents::Sealed).unwrap();
|
||||||
|
assert!(matches!(state.state(), &ServerStates::Sealed));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_move_key_callback() {
|
||||||
|
let mut ctx = _Context;
|
||||||
|
let key_storage = KeyStorage::new([7u8; 32]);
|
||||||
|
let result = ctx.move_key(key_storage);
|
||||||
|
assert!(result.is_ok());
|
||||||
|
assert_eq!(result.unwrap().key()[0], 7);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_dispose_key_callback() {
|
||||||
|
let mut ctx = _Context;
|
||||||
|
let key_storage = KeyStorage::new([13u8; 32]);
|
||||||
|
let result = ctx.dispose_key(&key_storage);
|
||||||
|
assert!(result.is_ok());
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_invalid_state_transitions() {
|
||||||
|
let mut state = ServerStateMachine::new(_Context);
|
||||||
|
|
||||||
|
// Попытка unseal без bootstrap
|
||||||
|
let key_storage = KeyStorage::new([1u8; 32]);
|
||||||
|
let result = state.process_event(ServerEvents::Unsealed(key_storage));
|
||||||
|
assert!(result.is_err());
|
||||||
|
|
||||||
|
// Правильный путь
|
||||||
|
state.process_event(ServerEvents::Bootstrapped).unwrap();
|
||||||
|
|
||||||
|
// Попытка повторного bootstrap
|
||||||
|
let result = state.process_event(ServerEvents::Bootstrapped);
|
||||||
|
assert!(result.is_err());
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,37 +1,40 @@
|
|||||||
use arbiter_proto::{BOOTSTRAP_PATH, home_path};
|
use arbiter_proto::{BOOTSTRAP_TOKEN_PATH, home_path};
|
||||||
use diesel::QueryDsl;
|
use diesel::{ExpressionMethods, QueryDsl};
|
||||||
use diesel_async::RunQueryDsl;
|
use diesel_async::RunQueryDsl;
|
||||||
use kameo::{Actor, messages};
|
use kameo::{Actor, messages};
|
||||||
|
use memsafe::MemSafe;
|
||||||
use miette::Diagnostic;
|
use miette::Diagnostic;
|
||||||
use rand::{
|
use rand::{RngExt, distr::StandardUniform, make_rng, rngs::StdRng};
|
||||||
RngExt,
|
use secrecy::SecretString;
|
||||||
distr::{Alphanumeric},
|
|
||||||
make_rng,
|
|
||||||
rngs::StdRng,
|
|
||||||
};
|
|
||||||
use thiserror::Error;
|
use thiserror::Error;
|
||||||
|
use tracing::info;
|
||||||
|
use zeroize::{Zeroize, Zeroizing};
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
context::{self, ServerContext},
|
||||||
|
db::{self, DatabasePool, schema},
|
||||||
|
};
|
||||||
|
|
||||||
use crate::db::{self, DatabasePool, schema};
|
|
||||||
const TOKEN_LENGTH: usize = 64;
|
const TOKEN_LENGTH: usize = 64;
|
||||||
|
|
||||||
pub async fn generate_token() -> Result<String, std::io::Error> {
|
pub async fn generate_token() -> Result<String, std::io::Error> {
|
||||||
let rng: StdRng = make_rng();
|
let rng: StdRng = make_rng();
|
||||||
|
|
||||||
let token: String = rng.sample_iter(Alphanumeric).take(TOKEN_LENGTH).fold(
|
let token: String = rng
|
||||||
Default::default(),
|
.sample_iter::<char, _>(StandardUniform)
|
||||||
|mut accum, char| {
|
.take(TOKEN_LENGTH)
|
||||||
|
.fold(Default::default(), |mut accum, char| {
|
||||||
accum += char.to_string().as_str();
|
accum += char.to_string().as_str();
|
||||||
accum
|
accum
|
||||||
},
|
});
|
||||||
);
|
|
||||||
|
|
||||||
tokio::fs::write(home_path()?.join(BOOTSTRAP_PATH), token.as_str()).await?;
|
tokio::fs::write(home_path()?.join(BOOTSTRAP_TOKEN_PATH), token.as_str()).await?;
|
||||||
|
|
||||||
Ok(token)
|
Ok(token)
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Error, Debug, Diagnostic)]
|
#[derive(Error, Debug, Diagnostic)]
|
||||||
pub enum Error {
|
pub enum BootstrapError {
|
||||||
#[error("Database error: {0}")]
|
#[error("Database error: {0}")]
|
||||||
#[diagnostic(code(arbiter_server::bootstrap::database))]
|
#[diagnostic(code(arbiter_server::bootstrap::database))]
|
||||||
Database(#[from] db::PoolError),
|
Database(#[from] db::PoolError),
|
||||||
@@ -46,12 +49,12 @@ pub enum Error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Actor)]
|
#[derive(Actor)]
|
||||||
pub struct Bootstrapper {
|
pub struct BootstrapActor {
|
||||||
token: Option<String>,
|
token: Option<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Bootstrapper {
|
impl BootstrapActor {
|
||||||
pub async fn new(db: &DatabasePool) -> Result<Self, Error> {
|
pub async fn new(db: &DatabasePool) -> Result<Self, BootstrapError> {
|
||||||
let mut conn = db.get().await?;
|
let mut conn = db.get().await?;
|
||||||
|
|
||||||
let row_count: i64 = schema::useragent_client::table
|
let row_count: i64 = schema::useragent_client::table
|
||||||
@@ -61,9 +64,10 @@ impl Bootstrapper {
|
|||||||
|
|
||||||
drop(conn);
|
drop(conn);
|
||||||
|
|
||||||
|
|
||||||
let token = if row_count == 0 {
|
let token = if row_count == 0 {
|
||||||
let token = generate_token().await?;
|
let token = generate_token().await?;
|
||||||
|
info!(%token, "Generated bootstrap token");
|
||||||
|
tokio::fs::write(home_path()?.join(BOOTSTRAP_TOKEN_PATH), token.as_str()).await?;
|
||||||
Some(token)
|
Some(token)
|
||||||
} else {
|
} else {
|
||||||
None
|
None
|
||||||
@@ -71,10 +75,15 @@ impl Bootstrapper {
|
|||||||
|
|
||||||
Ok(Self { token })
|
Ok(Self { token })
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
pub fn get_token(&self) -> Option<String> {
|
||||||
|
self.token.clone()
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[messages]
|
#[messages]
|
||||||
impl Bootstrapper {
|
impl BootstrapActor {
|
||||||
#[message]
|
#[message]
|
||||||
pub fn is_correct_token(&self, token: String) -> bool {
|
pub fn is_correct_token(&self, token: String) -> bool {
|
||||||
match &self.token {
|
match &self.token {
|
||||||
@@ -93,11 +102,3 @@ impl Bootstrapper {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
#[messages]
|
|
||||||
impl Bootstrapper {
|
|
||||||
#[message]
|
|
||||||
pub fn get_token(&self) -> Option<String> {
|
|
||||||
self.token.clone()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
46
server/crates/arbiter-server/src/context/lease.rs
Normal file
@@ -0,0 +1,46 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
|
||||||
|
use dashmap::DashSet;
|
||||||
|
|
||||||
|
#[derive(Clone, Default)]
|
||||||
|
struct LeaseStorage<T: Eq + std::hash::Hash>(Arc<DashSet<T>>);
|
||||||
|
|
||||||
|
// A lease that automatically releases the item when dropped
|
||||||
|
pub struct Lease<T: Clone + std::hash::Hash + Eq> {
|
||||||
|
item: T,
|
||||||
|
storage: LeaseStorage<T>,
|
||||||
|
}
|
||||||
|
impl<T: Clone + std::hash::Hash + Eq> Drop for Lease<T> {
|
||||||
|
fn drop(&mut self) {
|
||||||
|
self.storage.0.remove(&self.item);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Default)]
|
||||||
|
pub struct LeaseHandler<T: Clone + std::hash::Hash + Eq> {
|
||||||
|
storage: LeaseStorage<T>,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: Clone + std::hash::Hash + Eq> LeaseHandler<T> {
|
||||||
|
pub fn new() -> Self {
|
||||||
|
Self {
|
||||||
|
storage: LeaseStorage(Arc::new(DashSet::new())),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn acquire(&self, item: T) -> Result<Lease<T>, ()> {
|
||||||
|
if self.storage.0.insert(item.clone()) {
|
||||||
|
Ok(Lease {
|
||||||
|
item,
|
||||||
|
storage: self.storage.clone(),
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
Err(())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get all currently leased items
|
||||||
|
pub fn get_all(&self) -> Vec<T> {
|
||||||
|
self.storage.0.iter().map(|entry| entry.clone()).collect()
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,65 +0,0 @@
|
|||||||
use std::sync::Arc;
|
|
||||||
|
|
||||||
use miette::Diagnostic;
|
|
||||||
use thiserror::Error;
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
actors::GlobalActors,
|
|
||||||
context::tls::TlsManager,
|
|
||||||
db::{self},
|
|
||||||
};
|
|
||||||
|
|
||||||
pub mod tls;
|
|
||||||
|
|
||||||
#[derive(Error, Debug, Diagnostic)]
|
|
||||||
pub enum InitError {
|
|
||||||
#[error("Database setup failed: {0}")]
|
|
||||||
#[diagnostic(code(arbiter_server::init::database_setup))]
|
|
||||||
DatabaseSetup(#[from] db::DatabaseSetupError),
|
|
||||||
|
|
||||||
#[error("Connection acquire failed: {0}")]
|
|
||||||
#[diagnostic(code(arbiter_server::init::database_pool))]
|
|
||||||
DatabasePool(#[from] db::PoolError),
|
|
||||||
|
|
||||||
#[error("Database query error: {0}")]
|
|
||||||
#[diagnostic(code(arbiter_server::init::database_query))]
|
|
||||||
DatabaseQuery(#[from] diesel::result::Error),
|
|
||||||
|
|
||||||
#[error("TLS initialization failed: {0}")]
|
|
||||||
#[diagnostic(code(arbiter_server::init::tls_init))]
|
|
||||||
Tls(#[from] tls::InitError),
|
|
||||||
|
|
||||||
#[error("Actor spawn failed: {0}")]
|
|
||||||
#[diagnostic(code(arbiter_server::init::actor_spawn))]
|
|
||||||
ActorSpawn(#[from] crate::actors::SpawnError),
|
|
||||||
|
|
||||||
#[error("I/O Error: {0}")]
|
|
||||||
#[diagnostic(code(arbiter_server::init::io))]
|
|
||||||
Io(#[from] std::io::Error),
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct _ServerContextInner {
|
|
||||||
pub db: db::DatabasePool,
|
|
||||||
pub tls: TlsManager,
|
|
||||||
pub actors: GlobalActors,
|
|
||||||
}
|
|
||||||
#[derive(Clone)]
|
|
||||||
pub struct ServerContext(Arc<_ServerContextInner>);
|
|
||||||
|
|
||||||
impl std::ops::Deref for ServerContext {
|
|
||||||
type Target = _ServerContextInner;
|
|
||||||
|
|
||||||
fn deref(&self) -> &Self::Target {
|
|
||||||
&self.0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ServerContext {
|
|
||||||
pub async fn new(db: db::DatabasePool) -> Result<Self, InitError> {
|
|
||||||
Ok(Self(Arc::new(_ServerContextInner {
|
|
||||||
actors: GlobalActors::spawn(db.clone()).await?,
|
|
||||||
tls: TlsManager::new(db.clone()).await?,
|
|
||||||
db,
|
|
||||||
})))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,252 +0,0 @@
|
|||||||
use std::string::FromUtf8Error;
|
|
||||||
|
|
||||||
use diesel::{ExpressionMethods as _, QueryDsl, SelectableHelper as _};
|
|
||||||
use diesel_async::{AsyncConnection, RunQueryDsl};
|
|
||||||
use miette::Diagnostic;
|
|
||||||
use pem::Pem;
|
|
||||||
use rcgen::{
|
|
||||||
BasicConstraints, Certificate, CertificateParams, CertifiedIssuer, DistinguishedName, DnType,
|
|
||||||
IsCa, Issuer, KeyPair, KeyUsagePurpose,
|
|
||||||
};
|
|
||||||
use rustls::pki_types::{pem::PemObject};
|
|
||||||
use thiserror::Error;
|
|
||||||
use tonic::transport::CertificateDer;
|
|
||||||
|
|
||||||
use crate::db::{
|
|
||||||
self,
|
|
||||||
models::{NewTlsHistory, TlsHistory},
|
|
||||||
schema::{
|
|
||||||
arbiter_settings,
|
|
||||||
tls_history::{self},
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
const ENCODE_CONFIG: pem::EncodeConfig = {
|
|
||||||
let line_ending = match cfg!(target_family = "windows") {
|
|
||||||
true => pem::LineEnding::CRLF,
|
|
||||||
false => pem::LineEnding::LF,
|
|
||||||
};
|
|
||||||
pem::EncodeConfig::new().set_line_ending(line_ending)
|
|
||||||
};
|
|
||||||
|
|
||||||
#[derive(Error, Debug, Diagnostic)]
|
|
||||||
pub enum InitError {
|
|
||||||
#[error("Key generation error during TLS initialization: {0}")]
|
|
||||||
#[diagnostic(code(arbiter_server::tls_init::key_generation))]
|
|
||||||
KeyGeneration(#[from] rcgen::Error),
|
|
||||||
|
|
||||||
#[error("Key invalid format: {0}")]
|
|
||||||
#[diagnostic(code(arbiter_server::tls_init::key_invalid_format))]
|
|
||||||
KeyInvalidFormat(#[from] FromUtf8Error),
|
|
||||||
|
|
||||||
#[error("Key deserialization error: {0}")]
|
|
||||||
#[diagnostic(code(arbiter_server::tls_init::key_deserialization))]
|
|
||||||
KeyDeserializationError(rcgen::Error),
|
|
||||||
|
|
||||||
#[error("Database error during TLS initialization: {0}")]
|
|
||||||
#[diagnostic(code(arbiter_server::tls_init::database_error))]
|
|
||||||
DatabaseError(#[from] diesel::result::Error),
|
|
||||||
|
|
||||||
#[error("Pem deserialization error during TLS initialization: {0}")]
|
|
||||||
#[diagnostic(code(arbiter_server::tls_init::pem_deserialization))]
|
|
||||||
PemDeserializationError(#[from] rustls::pki_types::pem::Error),
|
|
||||||
|
|
||||||
#[error("Database pool acquire error during TLS initialization: {0}")]
|
|
||||||
#[diagnostic(code(arbiter_server::tls_init::database_pool_acquire))]
|
|
||||||
DatabasePoolAcquire(#[from] db::PoolError),
|
|
||||||
}
|
|
||||||
|
|
||||||
pub type PemCert = String;
|
|
||||||
|
|
||||||
pub fn encode_cert_to_pem(cert: &CertificateDer) -> PemCert {
|
|
||||||
pem::encode_config(
|
|
||||||
&Pem::new("CERTIFICATE", cert.to_vec()),
|
|
||||||
ENCODE_CONFIG,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[allow(unused)]
|
|
||||||
struct SerializedTls {
|
|
||||||
cert_pem: PemCert,
|
|
||||||
cert_key_pem: String,
|
|
||||||
}
|
|
||||||
|
|
||||||
struct TlsCa {
|
|
||||||
issuer: Issuer<'static, KeyPair>,
|
|
||||||
cert: CertificateDer<'static>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl TlsCa {
|
|
||||||
fn generate() -> Result<Self, InitError> {
|
|
||||||
let keypair = KeyPair::generate()?;
|
|
||||||
let mut params = CertificateParams::new(["Arbiter Instance CA".into()])?;
|
|
||||||
params.is_ca = IsCa::Ca(BasicConstraints::Unconstrained);
|
|
||||||
params.key_usages = vec![
|
|
||||||
KeyUsagePurpose::KeyCertSign,
|
|
||||||
KeyUsagePurpose::CrlSign,
|
|
||||||
KeyUsagePurpose::DigitalSignature,
|
|
||||||
];
|
|
||||||
|
|
||||||
let mut dn = DistinguishedName::new();
|
|
||||||
dn.push(DnType::CommonName, "Arbiter Instance CA");
|
|
||||||
params.distinguished_name = dn;
|
|
||||||
let certified_issuer = CertifiedIssuer::self_signed(params, keypair)?;
|
|
||||||
|
|
||||||
let cert_key_pem = certified_issuer.key().serialize_pem();
|
|
||||||
|
|
||||||
let issuer = Issuer::from_ca_cert_pem(
|
|
||||||
&certified_issuer.pem(),
|
|
||||||
KeyPair::from_pem(cert_key_pem.as_ref()).unwrap(),
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
|
|
||||||
Ok(Self {
|
|
||||||
issuer,
|
|
||||||
cert: certified_issuer.der().clone(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
fn generate_leaf(&self) -> Result<TlsCert, InitError> {
|
|
||||||
let cert_key = KeyPair::generate()?;
|
|
||||||
let mut params = CertificateParams::new(["Arbiter Instance Leaf".into()])?;
|
|
||||||
params.is_ca = IsCa::NoCa;
|
|
||||||
params.key_usages = vec![
|
|
||||||
KeyUsagePurpose::DigitalSignature,
|
|
||||||
KeyUsagePurpose::KeyEncipherment,
|
|
||||||
];
|
|
||||||
|
|
||||||
let mut dn = DistinguishedName::new();
|
|
||||||
dn.push(DnType::CommonName, "Arbiter Instance Leaf");
|
|
||||||
params.distinguished_name = dn;
|
|
||||||
|
|
||||||
let new_cert = params.signed_by(&cert_key, &self.issuer)?;
|
|
||||||
|
|
||||||
Ok(TlsCert {
|
|
||||||
cert: new_cert,
|
|
||||||
cert_key,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
#[allow(unused)]
|
|
||||||
fn serialize(&self) -> Result<SerializedTls, InitError> {
|
|
||||||
let cert_key_pem = self.issuer.key().serialize_pem();
|
|
||||||
Ok(SerializedTls {
|
|
||||||
cert_pem: encode_cert_to_pem(&self.cert),
|
|
||||||
cert_key_pem,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
#[allow(unused)]
|
|
||||||
fn try_deserialize(cert_pem: &str, cert_key_pem: &str) -> Result<Self, InitError> {
|
|
||||||
let keypair =
|
|
||||||
KeyPair::from_pem(cert_key_pem).map_err(InitError::KeyDeserializationError)?;
|
|
||||||
let issuer = Issuer::from_ca_cert_pem(cert_pem, keypair)?;
|
|
||||||
Ok(Self {
|
|
||||||
issuer,
|
|
||||||
cert: CertificateDer::from_pem_slice(cert_pem.as_bytes())?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
struct TlsCert {
|
|
||||||
cert: Certificate,
|
|
||||||
cert_key: KeyPair,
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: Implement cert rotation
|
|
||||||
pub struct TlsManager {
|
|
||||||
cert: CertificateDer<'static>,
|
|
||||||
keypair: KeyPair,
|
|
||||||
ca_cert: CertificateDer<'static>,
|
|
||||||
_db: db::DatabasePool,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl TlsManager {
|
|
||||||
pub async fn generate_new(db: &db::DatabasePool) -> Result<Self, InitError> {
|
|
||||||
let ca = TlsCa::generate()?;
|
|
||||||
let new_cert = ca.generate_leaf()?;
|
|
||||||
|
|
||||||
{
|
|
||||||
let mut conn = db.get().await?;
|
|
||||||
conn.transaction(|conn| {
|
|
||||||
Box::pin(async {
|
|
||||||
let new_tls_history = NewTlsHistory {
|
|
||||||
cert: new_cert.cert.pem(),
|
|
||||||
cert_key: new_cert.cert_key.serialize_pem(),
|
|
||||||
ca_cert: encode_cert_to_pem(&ca.cert),
|
|
||||||
ca_key: ca.issuer.key().serialize_pem(),
|
|
||||||
};
|
|
||||||
|
|
||||||
let inserted_tls_history: i32 = diesel::insert_into(tls_history::table)
|
|
||||||
.values(&new_tls_history)
|
|
||||||
.returning(tls_history::id)
|
|
||||||
.get_result(conn)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
diesel::update(arbiter_settings::table)
|
|
||||||
.set(arbiter_settings::tls_id.eq(inserted_tls_history))
|
|
||||||
.execute(conn)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
Result::<_, diesel::result::Error>::Ok(())
|
|
||||||
})
|
|
||||||
})
|
|
||||||
.await?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(Self {
|
|
||||||
cert: new_cert.cert.der().clone(),
|
|
||||||
keypair: new_cert.cert_key,
|
|
||||||
ca_cert: ca.cert,
|
|
||||||
_db: db.clone(),
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
pub async fn new(db: db::DatabasePool) -> Result<Self, InitError> {
|
|
||||||
let cert_data: Option<TlsHistory> = {
|
|
||||||
let mut conn = db.get().await?;
|
|
||||||
arbiter_settings::table
|
|
||||||
.left_join(tls_history::table)
|
|
||||||
.select(Option::<TlsHistory>::as_select())
|
|
||||||
.first(&mut conn)
|
|
||||||
.await?
|
|
||||||
};
|
|
||||||
|
|
||||||
match cert_data {
|
|
||||||
Some(data) => {
|
|
||||||
let try_load = || -> Result<_, Box<dyn std::error::Error>> {
|
|
||||||
let keypair = KeyPair::from_pem(&data.cert_key)?;
|
|
||||||
let cert = CertificateDer::from_pem_slice(data.cert.as_bytes())?;
|
|
||||||
let ca_cert = CertificateDer::from_pem_slice(data.ca_cert.as_bytes())?;
|
|
||||||
Ok(Self {
|
|
||||||
cert,
|
|
||||||
keypair,
|
|
||||||
ca_cert,
|
|
||||||
_db: db.clone(),
|
|
||||||
})
|
|
||||||
};
|
|
||||||
match try_load() {
|
|
||||||
Ok(manager) => Ok(manager),
|
|
||||||
Err(e) => {
|
|
||||||
eprintln!("Failed to load existing TLS certs: {e}. Generating new ones.");
|
|
||||||
Self::generate_new(&db).await
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
None => Self::generate_new(&db).await,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn cert(&self) -> &CertificateDer<'static> {
|
|
||||||
&self.cert
|
|
||||||
}
|
|
||||||
pub fn ca_cert(&self) -> &CertificateDer<'static> {
|
|
||||||
&self.ca_cert
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn cert_pem(&self) -> PemCert {
|
|
||||||
encode_cert_to_pem(&self.cert)
|
|
||||||
}
|
|
||||||
pub fn key_pem(&self) -> String {
|
|
||||||
self.keypair.serialize_pem()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
192
server/crates/arbiter-server/src/context/tls/mod.rs
Normal file
@@ -0,0 +1,192 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
use std::string::FromUtf8Error;
|
||||||
|
|
||||||
|
use miette::Diagnostic;
|
||||||
|
use rcgen::{Certificate, KeyPair};
|
||||||
|
use rustls::pki_types::CertificateDer;
|
||||||
|
use thiserror::Error;
|
||||||
|
use tokio::sync::RwLock;
|
||||||
|
|
||||||
|
use crate::db;
|
||||||
|
|
||||||
|
pub mod rotation;
|
||||||
|
|
||||||
|
pub use rotation::{RotationError, RotationState, RotationTask};
|
||||||
|
|
||||||
|
#[derive(Error, Debug, Diagnostic)]
|
||||||
|
#[expect(clippy::enum_variant_names)]
|
||||||
|
pub enum TlsInitError {
|
||||||
|
#[error("Key generation error during TLS initialization: {0}")]
|
||||||
|
#[diagnostic(code(arbiter_server::tls_init::key_generation))]
|
||||||
|
KeyGeneration(#[from] rcgen::Error),
|
||||||
|
|
||||||
|
#[error("Key invalid format: {0}")]
|
||||||
|
#[diagnostic(code(arbiter_server::tls_init::key_invalid_format))]
|
||||||
|
KeyInvalidFormat(#[from] FromUtf8Error),
|
||||||
|
|
||||||
|
#[error("Key deserialization error: {0}")]
|
||||||
|
#[diagnostic(code(arbiter_server::tls_init::key_deserialization))]
|
||||||
|
KeyDeserializationError(rcgen::Error),
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct TlsData {
|
||||||
|
pub cert: CertificateDer<'static>,
|
||||||
|
pub keypair: KeyPair,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub struct TlsDataRaw {
|
||||||
|
pub cert: Vec<u8>,
|
||||||
|
pub key: Vec<u8>,
|
||||||
|
}
|
||||||
|
impl TlsDataRaw {
|
||||||
|
pub fn serialize(cert: &TlsData) -> Self {
|
||||||
|
Self {
|
||||||
|
cert: cert.cert.as_ref().to_vec(),
|
||||||
|
key: cert.keypair.serialize_pem().as_bytes().to_vec(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn deserialize(&self) -> Result<TlsData, TlsInitError> {
|
||||||
|
let cert = CertificateDer::from_slice(&self.cert).into_owned();
|
||||||
|
|
||||||
|
let key =
|
||||||
|
String::from_utf8(self.key.clone()).map_err(TlsInitError::KeyInvalidFormat)?;
|
||||||
|
|
||||||
|
let keypair = KeyPair::from_pem(&key).map_err(TlsInitError::KeyDeserializationError)?;
|
||||||
|
|
||||||
|
Ok(TlsData { cert, keypair })
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Metadata about a certificate including validity period
|
||||||
|
pub struct CertificateMetadata {
|
||||||
|
pub cert_id: i32,
|
||||||
|
pub cert: CertificateDer<'static>,
|
||||||
|
pub keypair: Arc<KeyPair>,
|
||||||
|
pub not_before: i64,
|
||||||
|
pub not_after: i64,
|
||||||
|
pub created_at: i64,
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn generate_cert(key: &KeyPair) -> Result<(Certificate, i64, i64), rcgen::Error> {
|
||||||
|
let params = rcgen::CertificateParams::new(vec![
|
||||||
|
"arbiter.local".to_string(),
|
||||||
|
"localhost".to_string(),
|
||||||
|
])?;
|
||||||
|
|
||||||
|
// Set validity period: 90 days from now
|
||||||
|
let not_before = chrono::Utc::now();
|
||||||
|
let not_after = not_before + chrono::Duration::days(90);
|
||||||
|
|
||||||
|
// Note: rcgen doesn't directly expose not_before/not_after setting in all versions
|
||||||
|
// For now, we'll generate the cert and track validity separately
|
||||||
|
let cert = params.self_signed(key)?;
|
||||||
|
|
||||||
|
Ok((cert, not_before.timestamp(), not_after.timestamp()))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Certificate rotation enabled
|
||||||
|
pub(crate) struct TlsManager {
|
||||||
|
// Current active certificate (atomic replacement via RwLock)
|
||||||
|
current_cert: Arc<RwLock<CertificateMetadata>>,
|
||||||
|
|
||||||
|
// Database pool for persistence
|
||||||
|
db: db::DatabasePool,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TlsManager {
|
||||||
|
/// Create new TlsManager with a generated certificate
|
||||||
|
pub async fn new(db: db::DatabasePool) -> Result<Self, TlsInitError> {
|
||||||
|
let keypair = KeyPair::generate()?;
|
||||||
|
let (cert, not_before, not_after) = generate_cert(&keypair)?;
|
||||||
|
let cert_der = cert.der().clone();
|
||||||
|
|
||||||
|
// For initial creation, cert_id will be set after DB insert
|
||||||
|
let metadata = CertificateMetadata {
|
||||||
|
cert_id: 0, // Temporary, will be updated after DB insert
|
||||||
|
cert: cert_der,
|
||||||
|
keypair: Arc::new(keypair),
|
||||||
|
not_before,
|
||||||
|
not_after,
|
||||||
|
created_at: chrono::Utc::now().timestamp(),
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
current_cert: Arc::new(RwLock::new(metadata)),
|
||||||
|
db,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Load TlsManager from database with specific certificate ID
|
||||||
|
pub async fn load_from_db(db: db::DatabasePool, cert_id: i32) -> Result<Self, TlsInitError> {
|
||||||
|
// TODO: Load certificate from database
|
||||||
|
// For now, return error - will be implemented when database access is ready
|
||||||
|
Err(TlsInitError::KeyGeneration(rcgen::Error::CouldNotParseCertificate))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Create from legacy TlsDataRaw format
|
||||||
|
pub async fn new_from_legacy(
|
||||||
|
db: db::DatabasePool,
|
||||||
|
data: TlsDataRaw,
|
||||||
|
not_before: i64,
|
||||||
|
not_after: i64,
|
||||||
|
) -> Result<Self, TlsInitError> {
|
||||||
|
let tls_data = data.deserialize()?;
|
||||||
|
|
||||||
|
let metadata = CertificateMetadata {
|
||||||
|
cert_id: 1, // Legacy certificate gets ID 1
|
||||||
|
cert: tls_data.cert,
|
||||||
|
keypair: Arc::new(tls_data.keypair),
|
||||||
|
not_before,
|
||||||
|
not_after,
|
||||||
|
created_at: chrono::Utc::now().timestamp(),
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(Self {
|
||||||
|
current_cert: Arc::new(RwLock::new(metadata)),
|
||||||
|
db,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get current certificate data
|
||||||
|
pub async fn get_certificate(&self) -> (CertificateDer<'static>, Arc<KeyPair>) {
|
||||||
|
let cert = self.current_cert.read().await;
|
||||||
|
(cert.cert.clone(), cert.keypair.clone())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Replace certificate atomically
|
||||||
|
pub async fn replace_certificate(&self, new_cert: CertificateMetadata) -> Result<(), TlsInitError> {
|
||||||
|
let mut cert = self.current_cert.write().await;
|
||||||
|
*cert = new_cert;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Check if certificate is expiring soon
|
||||||
|
pub async fn check_expiration(&self, threshold_secs: i64) -> bool {
|
||||||
|
let cert = self.current_cert.read().await;
|
||||||
|
let now = chrono::Utc::now().timestamp();
|
||||||
|
cert.not_after - now < threshold_secs
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get certificate metadata for rotation logic
|
||||||
|
pub async fn get_certificate_metadata(&self) -> CertificateMetadata {
|
||||||
|
let cert = self.current_cert.read().await;
|
||||||
|
CertificateMetadata {
|
||||||
|
cert_id: cert.cert_id,
|
||||||
|
cert: cert.cert.clone(),
|
||||||
|
keypair: cert.keypair.clone(),
|
||||||
|
not_before: cert.not_before,
|
||||||
|
not_after: cert.not_after,
|
||||||
|
created_at: cert.created_at,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn bytes(&self) -> TlsDataRaw {
|
||||||
|
// This method is now async-compatible but we keep sync interface
|
||||||
|
// TODO: Make this async or remove if not needed
|
||||||
|
TlsDataRaw {
|
||||||
|
cert: vec![],
|
||||||
|
key: vec![],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||