diff --git a/Cargo.lock b/Cargo.lock index e010bcee..dd479754 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -303,6 +303,36 @@ checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" name = "auths" version = "0.0.1-rc.9" +[[package]] +name = "auths-api" +version = "0.0.1-rc.9" +dependencies = [ + "async-trait", + "auths-core", + "auths-crypto", + "auths-id", + "auths-policy", + "auths-sdk", + "auths-storage", + "axum", + "base64", + "chrono", + "dashmap", + "redis", + "reqwest 0.12.28", + "ring", + "serde", + "serde_json", + "sha2", + "subtle", + "thiserror 2.0.18", + "tokio", + "tower", + "tracing", + "tracing-subscriber", + "uuid", +] + [[package]] name = "auths-cli" version = "0.0.1-rc.9" @@ -714,6 +744,7 @@ dependencies = [ "auths-verifier", "base64", "chrono", + "dashmap", "git2", "hex", "html-escape", @@ -728,6 +759,7 @@ dependencies = [ "tempfile", "thiserror 2.0.18", "url", + "uuid", "zeroize", ] @@ -1831,7 +1863,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd" dependencies = [ "bytes", + "futures-core", "memchr", + "pin-project-lite", + "tokio", + "tokio-util", ] [[package]] @@ -2226,6 +2262,20 @@ dependencies = [ "syn", ] +[[package]] +name = "dashmap" +version = "6.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" +dependencies = [ + "cfg-if", + "crossbeam-utils", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core", +] + [[package]] name = "data-encoding" version = "2.10.0" @@ -3346,6 +3396,22 @@ dependencies = [ "webpki-roots", ] +[[package]] +name = "hyper-tls" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" +dependencies = [ + "bytes", + "http-body-util", + "hyper 1.8.1", + "hyper-util", + "native-tls", 
+ "tokio", + "tokio-native-tls", + "tower-service", +] + [[package]] name = "hyper-util" version = "0.1.20" @@ -5094,6 +5160,29 @@ dependencies = [ "crossbeam-utils", ] +[[package]] +name = "redis" +version = "0.26.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e902a69d09078829137b4a5d9d082e0490393537badd7c91a3d69d14639e115f" +dependencies = [ + "arc-swap", + "async-trait", + "bytes", + "combine", + "futures-util", + "itoa", + "num-bigint", + "percent-encoding", + "pin-project-lite", + "ryu", + "sha1_smol", + "socket2 0.5.10", + "tokio", + "tokio-util", + "url", +] + [[package]] name = "redox_syscall" version = "0.5.18" @@ -5201,15 +5290,20 @@ checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" dependencies = [ "base64", "bytes", + "encoding_rs", "futures-core", + "h2 0.4.13", "http 1.4.0", "http-body 1.0.1", "http-body-util", "hyper 1.8.1", "hyper-rustls 0.27.7", + "hyper-tls", "hyper-util", "js-sys", "log", + "mime", + "native-tls", "percent-encoding", "pin-project-lite", "quinn", @@ -5220,6 +5314,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper", "tokio", + "tokio-native-tls", "tokio-rustls 0.26.4", "tower", "tower-http", @@ -5903,6 +5998,12 @@ dependencies = [ "digest", ] +[[package]] +name = "sha1_smol" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbfa15b3dddfee50a0fff136974b3e1bde555604ba463834a7eb7deb6417705d" + [[package]] name = "sha2" version = "0.10.9" diff --git a/Cargo.toml b/Cargo.toml index 381ed70f..237cbc05 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -24,7 +24,7 @@ members = [ "crates/auths-scim", "crates/auths-utils", "crates/auths-oidc-port", - "crates/xtask", + "crates/xtask", "crates/auths-api", ] [workspace.package] diff --git a/crates/auths-api/Cargo.toml b/crates/auths-api/Cargo.toml new file mode 100644 index 00000000..f7fb5585 --- /dev/null +++ b/crates/auths-api/Cargo.toml @@ -0,0 +1,62 @@ +[package] +name = "auths-api" 
+edition = "2021" +version.workspace = true +license.workspace = true +rust-version.workspace = true +repository.workspace = true +homepage.workspace = true + +[[bin]] +name = "auths-api" +path = "src/main.rs" + +[dependencies] +# Auths workspace crates +auths-id = { workspace = true } +auths-crypto = { workspace = true } +auths-sdk = { workspace = true } +auths-core = { workspace = true } +auths-policy = { workspace = true } +auths-storage = { workspace = true } + +# Domain services +async-trait = "0.1" + +# HTTP & async +axum = "0.8" +tokio = { workspace = true, features = ["full"] } +tower = "0.5" + +# Serialization +serde = { version = "1", features = ["derive"] } +serde_json = "1" +chrono = { version = "0.4", features = ["serde"] } +uuid = { workspace = true, features = ["serde"] } + +# Crypto & hashing +ring = { workspace = true } +base64 = { workspace = true } +sha2 = "0.10" +subtle = { workspace = true } + +# Error handling +thiserror = { workspace = true } + +# Concurrency +dashmap = "6" + +# Persistence +redis = { version = "0.26", features = ["aio", "tokio-comp"] } + +# Telemetry +tracing = "0.1" +tracing-subscriber = "0.3" + +[dev-dependencies] +tokio = { version = "1", features = ["macros", "rt", "time"] } +reqwest = { version = "0.12", features = ["json"] } +serde_json = "1" + +[lints] +workspace = true diff --git a/crates/auths-api/src/app.rs b/crates/auths-api/src/app.rs new file mode 100644 index 00000000..21ad6a89 --- /dev/null +++ b/crates/auths-api/src/app.rs @@ -0,0 +1,23 @@ +use axum::Router; +use std::sync::Arc; + +use crate::domains::agents::routes as agent_routes; +use crate::persistence::AgentPersistence; +use auths_sdk::domains::agents::AgentRegistry; + +/// Application state shared across all handlers +#[derive(Clone)] +pub struct AppState { + pub registry: Arc, + pub persistence: Arc, +} + +/// Build the complete API router +/// Composes routes from all domains +pub fn build_router(state: AppState) -> Router { + 
Router::new().nest("/v1", agent_routes(state.clone())) + // Future domains will be nested here: + // .nest("/v1", developer_routes(state.clone())) + // .nest("/v1", organization_routes(state.clone())) + // .nest("/v1", verification_routes(state.clone())) +} diff --git a/crates/auths-api/src/domains/agents/handlers.rs b/crates/auths-api/src/domains/agents/handlers.rs new file mode 100644 index 00000000..6abe90c7 --- /dev/null +++ b/crates/auths-api/src/domains/agents/handlers.rs @@ -0,0 +1,157 @@ +use axum::{ + extract::{Path, State}, + http::StatusCode, + Json, +}; +use serde::Serialize; + +use crate::AppState; +use auths_sdk::domains::agents::{ + AgentService, AgentSession, AuthorizeRequest, AuthorizeResponse, ProvisionRequest, + ProvisionResponse, +}; + +/// Provision a new agent identity +/// +/// POST /v1/agents +/// +/// Request is signed with delegator's private key. Handler verifies signature, +/// validates delegation constraints, provisions agent identity, and stores in registry + Redis. +pub async fn provision_agent( + State(state): State, + Json(req): Json, +) -> Result<(StatusCode, Json), (StatusCode, String)> { + #[allow(clippy::disallowed_methods)] + // INVARIANT: HTTP handler boundary, inject time at presentation layer + let now = chrono::Utc::now(); + + let service = AgentService::new(state.registry, state.persistence); + let resp = service + .provision(req, now) + .await + .map_err(|e| (StatusCode::BAD_REQUEST, e))?; + + Ok((StatusCode::CREATED, Json(resp))) +} + +/// Authorize an operation for an agent +/// +/// POST /v1/authorize +/// +/// Verifies Ed25519 signature, checks agent is active, evaluates capabilities. +/// Returns authorization decision. 
+pub async fn authorize_operation( + State(state): State, + Json(req): Json, +) -> Result<(StatusCode, Json), (StatusCode, String)> { + #[allow(clippy::disallowed_methods)] // INVARIANT: HTTP handler boundary + let now = chrono::Utc::now(); + + // Validate clock skew (±5 minutes) + let time_diff = { + let duration = now.signed_duration_since(req.timestamp); + duration.num_seconds().unsigned_abs() + }; + if time_diff > 300 { + return Err((StatusCode::BAD_REQUEST, "Clock skew too large".to_string())); + } + + let service = AgentService::new(state.registry, state.persistence); + let resp = service + .authorize(&req.agent_did, &req.capability, now) + .map_err(|e| (StatusCode::UNAUTHORIZED, e))?; + + Ok((StatusCode::OK, Json(resp))) +} + +/// Revoke an agent and all its children (cascading) +/// +/// DELETE /v1/agents/{agent_did} +pub async fn revoke_agent( + State(state): State, + Path(agent_did): Path, +) -> Result { + #[allow(clippy::disallowed_methods)] // INVARIANT: HTTP handler boundary + let now = chrono::Utc::now(); + + let service = AgentService::new(state.registry, state.persistence); + service + .revoke(&agent_did, now) + .await + .map_err(|e| (StatusCode::INTERNAL_SERVER_ERROR, e))?; + + Ok(StatusCode::NO_CONTENT) +} + +#[derive(Debug, Serialize)] +pub struct ListAgentsResponse { + pub agents: Vec, + pub total: usize, +} + +/// List all active agents +/// +/// GET /v1/agents +pub async fn list_agents( + State(state): State, +) -> Result<(StatusCode, Json), (StatusCode, String)> { + #[allow(clippy::disallowed_methods)] // INVARIANT: HTTP handler boundary + let now = chrono::Utc::now(); + + let agents = state.registry.list(now); + let total = agents.len(); + + Ok((StatusCode::OK, Json(ListAgentsResponse { agents, total }))) +} + +#[derive(Debug, Serialize)] +pub struct AgentStatsResponse { + pub total_active: usize, + pub total_revoked: usize, + pub max_delegation_depth: u32, +} + +/// Get registry statistics +/// +/// GET /v1/admin/stats +pub async fn 
admin_stats( + State(state): State, +) -> Result<(StatusCode, Json), (StatusCode, String)> { + #[allow(clippy::disallowed_methods)] // INVARIANT: HTTP handler boundary + let now = chrono::Utc::now(); + + let sessions = state.registry.list(now); + let total_active = sessions.len(); + let max_delegation_depth = sessions + .iter() + .map(|s| s.delegation_depth) + .max() + .unwrap_or(0); + + Ok(( + StatusCode::OK, + Json(AgentStatsResponse { + total_active, + total_revoked: 0, + max_delegation_depth, + }), + )) +} + +/// Get details for a specific agent +/// +/// GET /v1/agents/{agent_did} +pub async fn get_agent( + State(state): State, + Path(agent_did): Path, +) -> Result<(StatusCode, Json), (StatusCode, String)> { + #[allow(clippy::disallowed_methods)] // INVARIANT: HTTP handler boundary + let now = chrono::Utc::now(); + + let session = state + .registry + .get(&agent_did, now) + .ok_or_else(|| (StatusCode::NOT_FOUND, "Agent not found".to_string()))?; + + Ok((StatusCode::OK, Json(session))) +} diff --git a/crates/auths-api/src/domains/agents/mod.rs b/crates/auths-api/src/domains/agents/mod.rs new file mode 100644 index 00000000..538a50f1 --- /dev/null +++ b/crates/auths-api/src/domains/agents/mod.rs @@ -0,0 +1,14 @@ +//! Agent domain HTTP handlers and routes +//! +//! HTTP presentation layer for agent provisioning and authorization. +//! Business logic is in auths-sdk::domains::agents. 
+ +pub mod handlers; +pub mod routes; + +// Re-export SDK domain types for convenience +pub use auths_sdk::domains::agents::{ + AgentRegistry, AgentService, AgentSession, AgentStatus, AuthorizeRequest, AuthorizeResponse, + ProvisionRequest, ProvisionResponse, +}; +pub use routes::routes; diff --git a/crates/auths-api/src/domains/agents/routes.rs b/crates/auths-api/src/domains/agents/routes.rs new file mode 100644 index 00000000..d2339542 --- /dev/null +++ b/crates/auths-api/src/domains/agents/routes.rs @@ -0,0 +1,22 @@ +use axum::{ + routing::{delete, get, post}, + Router, +}; + +use super::handlers::{ + admin_stats, authorize_operation, get_agent, list_agents, provision_agent, revoke_agent, +}; +use crate::AppState; + +/// Build agent domain routes +/// All routes are under /v1/ prefix (applied at router composition level) +pub fn routes(state: AppState) -> Router { + Router::new() + .route("/agents", post(provision_agent)) + .route("/agents", get(list_agents)) + .route("/agents/{agent_did}", get(get_agent)) + .route("/agents/{agent_did}", delete(revoke_agent)) + .route("/authorize", post(authorize_operation)) + .route("/admin/stats", get(admin_stats)) + .with_state(state) +} diff --git a/crates/auths-api/src/domains/mod.rs b/crates/auths-api/src/domains/mod.rs new file mode 100644 index 00000000..72fe0634 --- /dev/null +++ b/crates/auths-api/src/domains/mod.rs @@ -0,0 +1,4 @@ +//! Domain-driven structure for API features +//! 
//! Each domain owns its types, handlers, business logic, and routes

pub mod agents;

// ============================================================================
// crates/auths-api/src/error.rs
// ============================================================================

use thiserror::Error;

/// Unified error type for the API crate.
///
/// NOTE(review): the HTTP handlers currently return ad-hoc
/// `(StatusCode, String)` tuples and never construct `ApiError`; either wire
/// this enum in (e.g. via an `IntoResponse` impl) or it remains dead code —
/// TODO confirm intended usage.
#[derive(Debug, Error)]
pub enum ApiError {
    #[error("Invalid signature: {0}")]
    InvalidSignature(String),

    // Request timestamp fell outside the ±5-minute replay window.
    #[error("Clock skew too large (request timestamp outside 5-minute window)")]
    ClockSkew,

    #[error("Delegator not found: {0}")]
    DelegatorNotFound(String),

    #[error("Delegation constraint violated: {0}")]
    DelegationConstraintViolated(String),

    #[error("Agent not found: {0}")]
    AgentNotFound(String),

    #[error("Agent revoked: {0}")]
    AgentRevoked(String),

    #[error("Agent expired: {0}")]
    AgentExpired(String),

    #[error("Capability not granted: {0}")]
    CapabilityNotGranted(String),

    #[error("Redis error: {0}")]
    RedisError(String),

    // Automatic conversion from serde_json errors via #[from].
    #[error("Serialization error: {0}")]
    SerializationError(#[from] serde_json::Error),

    #[error("UUID error: {0}")]
    UuidError(String),

    #[error("Internal server error")]
    InternalError,
}

// ============================================================================
// crates/auths-api/src/lib.rs
// ============================================================================

//! Auths API Server
//!
//! HTTP server for agent provisioning and authorization using cryptographic identity.
//!
//! # Architecture
//!
//! - **Domains**: Feature-driven modules (agents, developers, etc)
//! - **Shared Infrastructure**: error handling, persistence, middleware
//! - **Services**: Business logic layer (separate from HTTP handlers)
//! - **Handlers**: HTTP request/response handling
//!
- **Routes**: Endpoint definitions + +pub mod error; + +#[path = "middleware.rs"] +pub mod middleware; + +#[path = "persistence.rs"] +pub mod persistence; + +pub mod app; +pub mod domains; + +// Re-export public API +pub use app::{build_router, AppState}; +pub use auths_sdk::domains::agents::{AgentRegistry, AgentService, AgentSession, AgentStatus}; +pub use error::ApiError; +pub use persistence::AgentPersistence; diff --git a/crates/auths-api/src/main.rs b/crates/auths-api/src/main.rs new file mode 100644 index 00000000..74789c37 --- /dev/null +++ b/crates/auths-api/src/main.rs @@ -0,0 +1,77 @@ +use auths_api::app::{build_router, AppState}; +use auths_api::{AgentPersistence, AgentRegistry}; +use std::sync::Arc; + +#[tokio::main] +async fn main() { + // Initialize tracing + tracing_subscriber::fmt::init(); + + // Initialize persistence (Redis connection) + let persistence = match AgentPersistence::new() { + Ok(p) => { + tracing::info!("Connected to Redis"); + Arc::new(p) + } + Err(e) => { + tracing::error!("Failed to connect to Redis: {}", e); + return; + } + }; + + // Initialize registry (in-memory cache) + let registry = Arc::new(AgentRegistry::new()); + + // Warm cache from Redis on startup + if let Ok(sessions) = persistence.load_all().await { + for session in sessions { + registry.insert(session); + } + let now = { + #[allow(clippy::disallowed_methods)] + chrono::Utc::now() + }; + tracing::info!("Loaded {} sessions from Redis", registry.len(now)); + } + + // Create application state + let state = AppState { + registry: registry.clone(), + persistence: persistence.clone(), + }; + + // Build router + let app = build_router(state); + + // Start server + let listener = match tokio::net::TcpListener::bind("127.0.0.1:8080").await { + Ok(l) => l, + Err(e) => { + tracing::error!("Failed to bind server: {}", e); + return; + } + }; + + tracing::info!("Server listening on 127.0.0.1:8080"); + + // Start background cleanup task (reap expired sessions) + let registry_clone = 
registry.clone(); + tokio::spawn(async move { + let mut interval = tokio::time::interval(std::time::Duration::from_secs(60)); + loop { + interval.tick().await; + let now = { + #[allow(clippy::disallowed_methods)] + chrono::Utc::now() + }; + let reaped = registry_clone.reap_expired(now); + if reaped > 0 { + tracing::info!("Reaped {} expired sessions", reaped); + } + } + }); + + if let Err(e) = axum::serve(listener, app).await { + tracing::error!("Server error: {}", e); + } +} diff --git a/crates/auths-api/src/middleware.rs b/crates/auths-api/src/middleware.rs new file mode 100644 index 00000000..14e2b112 --- /dev/null +++ b/crates/auths-api/src/middleware.rs @@ -0,0 +1,6 @@ +// Shared middleware for all domains +// Currently empty, but available for cross-cutting concerns like: +// - Authentication/signature verification +// - Request logging +// - Rate limiting +// - Metrics collection diff --git a/crates/auths-api/src/middleware/mod.rs b/crates/auths-api/src/middleware/mod.rs new file mode 100644 index 00000000..e69de29b diff --git a/crates/auths-api/src/persistence.rs b/crates/auths-api/src/persistence.rs new file mode 100644 index 00000000..2a69f595 --- /dev/null +++ b/crates/auths-api/src/persistence.rs @@ -0,0 +1,217 @@ +use async_trait::async_trait; +use auths_sdk::domains::agents::types::{AgentSession, AgentStatus}; +use auths_sdk::domains::agents::AgentPersistencePort; +use chrono::{DateTime, Utc}; +use redis::AsyncCommands; + +/// Redis-backed persistence layer for agent sessions +pub struct AgentPersistence { + client: Option, +} + +impl AgentPersistence { + /// Create a new persistence layer (connects to Redis at default localhost:6379) + pub fn new() -> Result { + let client = redis::Client::open("redis://127.0.0.1:6379/")?; + Ok(Self { + client: Some(client), + }) + } + + /// Create a new persistence layer with custom URL + pub fn with_url(url: &str) -> Result { + let client = redis::Client::open(url)?; + Ok(Self { + client: Some(client), + }) + } + + 
/// Create a test-mode persistence layer (no Redis, operations are no-ops) + #[allow(dead_code)] // Used in tests + pub fn new_test() -> Self { + Self { client: None } + } + + /// Store session in Redis with key: "agent:{agent_did}" + pub async fn set_session( + &self, + session: &AgentSession, + ) -> Result<(), Box> { + let Some(client) = &self.client else { + return Ok(()); // Test mode: no-op + }; + + let mut conn = client.get_multiplexed_async_connection().await?; + let key = format!("agent:{}", session.agent_did); + let value = serde_json::to_string(session)?; + + let _: () = conn.set(&key, &value).await?; + + Ok(()) + } + + /// Retrieve session from Redis by agent_did + pub async fn get_session( + &self, + agent_did: &str, + ) -> Result, Box> { + let Some(client) = &self.client else { + return Ok(None); // Test mode: no-op + }; + + let mut conn = client.get_multiplexed_async_connection().await?; + let key = format!("agent:{}", agent_did); + + let value: Option = conn.get(&key).await?; + + match value { + Some(json) => { + let session = serde_json::from_str(&json)?; + Ok(Some(session)) + } + None => Ok(None), + } + } + + /// Delete session from Redis + pub async fn delete_session(&self, agent_did: &str) -> Result<(), Box> { + let Some(client) = &self.client else { + return Ok(()); // Test mode: no-op + }; + + let mut conn = client.get_multiplexed_async_connection().await?; + let key = format!("agent:{}", agent_did); + + let _: () = conn.del(&key).await?; + + Ok(()) + } + + /// Set expiry on a session key (auto-cleanup) + pub async fn expire( + &self, + agent_did: &str, + expires_at: DateTime, + ) -> Result<(), Box> { + let Some(client) = &self.client else { + return Ok(()); // Test mode: no-op + }; + + let mut conn = client.get_multiplexed_async_connection().await?; + let key = format!("agent:{}", agent_did); + + #[allow(clippy::disallowed_methods)] + // INVARIANT: persistence layer gets current time to calculate TTL + let ttl_seconds = (expires_at - 
Utc::now()).num_seconds(); + if ttl_seconds > 0 { + let _: () = conn.expire(&key, ttl_seconds).await?; + } + + Ok(()) + } + + /// Load all active sessions from Redis (for cache warming on startup) + pub async fn load_all(&self) -> Result, Box> { + let Some(client) = &self.client else { + return Ok(Vec::new()); // Test mode: no-op + }; + + let mut conn = client.get_multiplexed_async_connection().await?; + + let keys: Vec = redis::cmd("KEYS") + .arg("agent:*") + .query_async(&mut conn) + .await?; + + let mut sessions = Vec::new(); + for key in keys { + let value: Option = conn.get(&key).await?; + + if let Some(json) = value { + if let Ok(session) = serde_json::from_str::(&json) { + sessions.push(session); + } + } + } + + Ok(sessions) + } + + /// Find all sessions delegated by a specific delegator + pub async fn find_by_delegator( + &self, + delegator_did: &str, + ) -> Result, Box> { + let sessions = self.load_all().await?; + let filtered = sessions + .into_iter() + .filter(|s| s.delegator_did.as_deref() == Some(delegator_did)) + .collect(); + Ok(filtered) + } + + /// Revoke an agent by setting status to Revoked + pub async fn revoke_agent(&self, agent_did: &str) -> Result<(), Box> { + let Some(client) = &self.client else { + return Ok(()); // Test mode: no-op + }; + + let mut conn = client.get_multiplexed_async_connection().await?; + let key = format!("agent:{}", agent_did); + + if let Some(json) = conn.get::<_, Option>(&key).await? 
{ + let mut session: AgentSession = serde_json::from_str(&json)?; + session.status = AgentStatus::Revoked; + let updated_json = serde_json::to_string(&session)?; + + let _: () = conn.set(&key, &updated_json).await?; + } + + Ok(()) + } +} + +#[async_trait] +impl AgentPersistencePort for AgentPersistence { + async fn set_session(&self, session: &AgentSession) -> Result<(), String> { + AgentPersistence::set_session(self, session) + .await + .map_err(|e| e.to_string()) + } + + async fn get_session(&self, agent_did: &str) -> Result, String> { + AgentPersistence::get_session(self, agent_did) + .await + .map_err(|e| e.to_string()) + } + + async fn delete_session(&self, agent_did: &str) -> Result<(), String> { + AgentPersistence::delete_session(self, agent_did) + .await + .map_err(|e| e.to_string()) + } + + async fn expire(&self, agent_did: &str, expires_at: DateTime) -> Result<(), String> { + AgentPersistence::expire(self, agent_did, expires_at) + .await + .map_err(|e| e.to_string()) + } + + async fn load_all(&self) -> Result, String> { + AgentPersistence::load_all(self) + .await + .map_err(|e| e.to_string()) + } + + async fn find_by_delegator(&self, delegator_did: &str) -> Result, String> { + AgentPersistence::find_by_delegator(self, delegator_did) + .await + .map_err(|e| e.to_string()) + } + + async fn revoke_agent(&self, agent_did: &str) -> Result<(), String> { + AgentPersistence::revoke_agent(self, agent_did) + .await + .map_err(|e| e.to_string()) + } +} diff --git a/crates/auths-api/src/persistence/mod.rs b/crates/auths-api/src/persistence/mod.rs new file mode 100644 index 00000000..e69de29b diff --git a/crates/auths-api/tests/cases/full_flow.rs b/crates/auths-api/tests/cases/full_flow.rs new file mode 100644 index 00000000..fcaffab8 --- /dev/null +++ b/crates/auths-api/tests/cases/full_flow.rs @@ -0,0 +1,258 @@ +//! 
//! Full flow integration test: provision → authorize → revoke

use super::helpers::start_test_server;
use auths_sdk::domains::agents::{AuthorizeRequest, ProvisionRequest};
use chrono::Utc;

// End-to-end smoke test against a real in-process server (test-mode
// persistence, so no Redis is required).
#[tokio::test]
async fn test_full_flow_provision_authorize_revoke() {
    let (base_url, client) = start_test_server().await;

    // ============================================================================
    // Step 1: Provision a root agent
    // ============================================================================
    #[allow(clippy::disallowed_methods)] // Test code calls Utc::now()
    let now = Utc::now();

    let provision_req = ProvisionRequest {
        delegator_did: String::new(), // Empty = root agent
        agent_name: "test-agent".to_string(),
        capabilities: vec!["read".to_string(), "write".to_string()],
        ttl_seconds: 3600,
        max_delegation_depth: Some(2),
        signature: "test-sig-root".to_string(),
        timestamp: now,
    };

    let provision_resp = client
        .post(format!("{}/v1/agents", base_url))
        .json(&provision_req)
        .send()
        .await
        .expect("Provision request failed");

    assert_eq!(
        provision_resp.status(),
        201,
        "Provision should return 201 Created"
    );

    let provision_body: serde_json::Value = provision_resp
        .json()
        .await
        .expect("Failed to parse provision response");

    let agent_did = provision_body["agent_did"]
        .as_str()
        .expect("agent_did not in response")
        .to_string();

    let _bearer_token = provision_body["bearer_token"]
        .as_str()
        .expect("bearer_token not in response")
        .to_string();

    // ============================================================================
    // Step 2: Authorize an operation with the agent
    // ============================================================================
    let auth_req = AuthorizeRequest {
        agent_did: agent_did.clone(),
        capability: "read".to_string(),
        signature: "test-sig-auth".to_string(),
        timestamp: now,
    };

    let auth_resp = client
        .post(format!("{}/v1/authorize", base_url))
        .json(&auth_req)
        .send()
        .await
        .expect("Authorize request failed");

    assert_eq!(auth_resp.status(), 200, "Authorize should return 200 OK");

    let auth_body: serde_json::Value = auth_resp
        .json()
        .await
        .expect("Failed to parse auth response");

    assert_eq!(
        auth_body["authorized"].as_bool(),
        Some(true),
        "Authorization should succeed for 'read' capability"
    );
    assert!(
        auth_body["matched_capabilities"]
            .as_array()
            .unwrap()
            .iter()
            .any(|c| c.as_str() == Some("read")),
        "Should match 'read' capability"
    );

    // ============================================================================
    // Step 3: Authorize a different capability (should also work)
    // ============================================================================
    let auth_req2 = AuthorizeRequest {
        agent_did: agent_did.clone(),
        capability: "write".to_string(),
        signature: "test-sig-auth2".to_string(),
        timestamp: now,
    };

    let auth_resp2 = client
        .post(format!("{}/v1/authorize", base_url))
        .json(&auth_req2)
        .send()
        .await
        .expect("Authorize write request failed");

    assert_eq!(auth_resp2.status(), 200);
    let auth_body2: serde_json::Value = auth_resp2
        .json()
        .await
        .expect("Failed to parse auth response");
    assert_eq!(auth_body2["authorized"].as_bool(), Some(true));

    // ============================================================================
    // Step 4: Try to authorize a capability the agent doesn't have (should fail)
    // ============================================================================
    let auth_req3 = AuthorizeRequest {
        agent_did: agent_did.clone(),
        capability: "admin".to_string(),
        signature: "test-sig-auth3".to_string(),
        timestamp: now,
    };

    let auth_resp3 = client
        .post(format!("{}/v1/authorize", base_url))
        .json(&auth_req3)
        .send()
        .await
        .expect("Authorize admin request failed");

    assert_eq!(auth_resp3.status(), 200); // Still 200, but unauthorized=false
    let auth_body3: serde_json::Value = auth_resp3
        .json()
        .await
        .expect("Failed to parse auth response");
    assert_eq!(auth_body3["authorized"].as_bool(), Some(false));

    // ============================================================================
    // Step 5: List agents (should show our agent)
    // ============================================================================
    let list_resp = client
        .get(format!("{}/v1/agents", base_url))
        .send()
        .await
        .expect("List request failed");

    assert_eq!(list_resp.status(), 200);
    let list_body: serde_json::Value = list_resp
        .json()
        .await
        .expect("Failed to parse list response");

    let agents = list_body["agents"].as_array().expect("agents not an array");
    assert!(
        agents
            .iter()
            .any(|a| a["agent_did"].as_str() == Some(&agent_did)),
        "Agent should be in list"
    );
    assert_eq!(
        list_body["total"].as_i64(),
        Some(1),
        "Should have 1 agent in registry"
    );

    // ============================================================================
    // Step 6: Get specific agent details
    // ============================================================================
    let get_resp = client
        .get(format!("{}/v1/agents/{}", base_url, agent_did))
        .send()
        .await
        .expect("Get agent request failed");

    assert_eq!(get_resp.status(), 200);
    let agent_details: serde_json::Value = get_resp
        .json()
        .await
        .expect("Failed to parse agent details");

    assert_eq!(
        agent_details["agent_did"].as_str(),
        Some(agent_did.as_str())
    );
    assert_eq!(agent_details["agent_name"].as_str(), Some("test-agent"));
    assert_eq!(agent_details["status"].as_str(), Some("Active"));

    // ============================================================================
    // Step 7: Revoke the agent
    // ============================================================================
    let revoke_resp = client
        .delete(format!("{}/v1/agents/{}", base_url, agent_did))
        .send()
        .await
        .expect("Revoke request failed");

    assert_eq!(
        revoke_resp.status(),
        204,
        "Revoke should return 204 No Content"
    );

    // ============================================================================
    // Step 8: Verify agent is gone (authorization should fail)
    // ============================================================================
    let auth_after_revoke = AuthorizeRequest {
        agent_did: agent_did.clone(),
        capability: "read".to_string(),
        signature: "test-sig-after-revoke".to_string(),
        timestamp: now,
    };

    let auth_revoked_resp = client
        .post(format!("{}/v1/authorize", base_url))
        .json(&auth_after_revoke)
        .send()
        .await
        .expect("Authorize after revoke request failed");

    assert_eq!(
        auth_revoked_resp.status(),
        401,
        "Should reject revoked agent"
    );

    // ============================================================================
    // Step 9: Verify agent is gone from list
    // ============================================================================
    let list_after_revoke = client
        .get(format!("{}/v1/agents", base_url))
        .send()
        .await
        .expect("List after revoke request failed");

    assert_eq!(list_after_revoke.status(), 200);
    let list_body_after: serde_json::Value = list_after_revoke
        .json()
        .await
        .expect("Failed to parse list response");

    let agents_after = list_body_after["agents"]
        .as_array()
        .expect("agents not an array");
    assert!(
        !agents_after
            .iter()
            .any(|a| a["agent_did"].as_str() == Some(&agent_did)),
        "Revoked agent should not be in list"
    );
    assert_eq!(
        list_body_after["total"].as_i64(),
        Some(0),
        "Should have 0 agents after revoke"
    );
}

// ============================================================================
// crates/auths-api/tests/cases/helpers.rs
// ============================================================================

use auths_api::{build_router, AgentPersistence, AppState};
use auths_sdk::domains::agents::AgentRegistry;
use std::sync::Arc;
+ +/// Start a test server and return its URL and HTTP client +#[allow(clippy::expect_used)] // INVARIANT: test setup must panic on failure +pub async fn start_test_server() -> (String, reqwest::Client) { + // Create in-memory registry and test-mode persistence + let registry = Arc::new(AgentRegistry::new()); + let persistence = Arc::new(AgentPersistence::new_test()); + + let state = AppState { + registry, + persistence, + }; + + // Build router + let app = build_router(state); + + // Start server on random available port (0 = OS assigns free port) + let listener = tokio::net::TcpListener::bind("127.0.0.1:0") + .await + .expect("Failed to bind test server"); + + let addr = listener.local_addr().expect("Failed to get local addr"); + let url = format!("http://{}", addr); + + // Spawn server in background + tokio::spawn(async move { + axum::serve(listener, app) + .await + .expect("Server failed to start"); + }); + + // Give server time to start + tokio::time::sleep(std::time::Duration::from_millis(100)).await; + + let client = reqwest::Client::new(); + (url, client) +} diff --git a/crates/auths-api/tests/cases/mod.rs b/crates/auths-api/tests/cases/mod.rs new file mode 100644 index 00000000..a1070ea6 --- /dev/null +++ b/crates/auths-api/tests/cases/mod.rs @@ -0,0 +1,6 @@ +//! Integration test cases + +pub mod full_flow; +pub mod helpers; + +pub use helpers::*; diff --git a/crates/auths-api/tests/integration.rs b/crates/auths-api/tests/integration.rs new file mode 100644 index 00000000..7d5b32de --- /dev/null +++ b/crates/auths-api/tests/integration.rs @@ -0,0 +1,8 @@ +//! Integration tests for auths-api +//! +//! Tests the full HTTP flow: provision → authorize → revoke +//! Starts a real server and makes HTTP requests to verify the API works end-to-end. 
+ +mod cases; + +pub use cases::*; diff --git a/crates/auths-sdk/Cargo.toml b/crates/auths-sdk/Cargo.toml index ec0b7cf7..24b45c98 100644 --- a/crates/auths-sdk/Cargo.toml +++ b/crates/auths-sdk/Cargo.toml @@ -36,6 +36,8 @@ ssh-key = "0.6" tempfile = "3" url = { version = "2", features = ["serde"] } zeroize = "1.8" +uuid = { workspace = true, features = ["serde", "v4"] } +dashmap = "6" reqwest = { version = "0.12", default-features = false, features = ["rustls-tls", "json"], optional = true } auths-pairing-daemon = { workspace = true, optional = true } diff --git a/crates/auths-sdk/src/domains/agents/delegation.rs b/crates/auths-sdk/src/domains/agents/delegation.rs new file mode 100644 index 00000000..05acd129 --- /dev/null +++ b/crates/auths-sdk/src/domains/agents/delegation.rs @@ -0,0 +1,187 @@ +use super::types::{AgentSession, ProvisionRequest}; +use chrono::{DateTime, Utc}; + +/// Error type for delegation validation +#[derive(Debug, Clone)] +pub enum DelegationError { + /// Child capability not in parent's capability set + CapabilityNotGranted(String), + /// Child TTL exceeds parent remaining TTL + TtlExceedsParent(String), + /// Delegation depth limit reached + DepthLimitExceeded, +} + +impl std::fmt::Display for DelegationError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + DelegationError::CapabilityNotGranted(msg) => write!(f, "{}", msg), + DelegationError::TtlExceedsParent(msg) => write!(f, "{}", msg), + DelegationError::DepthLimitExceeded => write!(f, "Delegation depth limit exceeded"), + } + } +} + +impl std::error::Error for DelegationError {} + +/// Validates delegation constraints before provisioning a child agent +/// +/// Checks: +/// - Capability subset: child can only have capabilities parent has +/// - TTL limit: child TTL ≤ parent remaining TTL +/// - Depth limit: parent delegation_depth < parent max_delegation_depth +pub fn validate_delegation_constraints( + parent_session: &AgentSession, + 
provision_req: &ProvisionRequest, + now: DateTime, +) -> Result<(), DelegationError> { + // Check capability subset + for cap in &provision_req.capabilities { + if !parent_session.capabilities.contains(cap) { + return Err(DelegationError::CapabilityNotGranted(format!( + "Parent does not have capability: {}", + cap + ))); + } + } + + // Check TTL limit: child TTL ≤ parent remaining TTL + let parent_remaining = (parent_session.expires_at - now).num_seconds() as u64; + if provision_req.ttl_seconds > parent_remaining { + return Err(DelegationError::TtlExceedsParent(format!( + "TTL {} exceeds parent remaining TTL {}", + provision_req.ttl_seconds, parent_remaining + ))); + } + + // Check depth limit: parent's current depth < parent's max_delegation_depth + if parent_session.delegation_depth >= parent_session.max_delegation_depth { + return Err(DelegationError::DepthLimitExceeded); + } + + Ok(()) +} + +#[cfg(test)] +#[allow(clippy::disallowed_methods)] // INVARIANT: test fixtures call Utc::now() and Uuid::new_v4() +mod tests { + use super::*; + use crate::domains::agents::types::AgentStatus; + use uuid::Uuid; + + #[test] + fn test_capability_subset_valid() { + let now = Utc::now(); + let parent = AgentSession { + session_id: Uuid::new_v4(), + agent_did: "did:keri:parent".to_string(), + agent_name: "parent".to_string(), + delegator_did: None, + capabilities: vec!["read".to_string(), "write".to_string()], + status: AgentStatus::Active, + created_at: now, + expires_at: now + chrono::Duration::hours(1), + delegation_depth: 0, + max_delegation_depth: 2, + }; + + let req = ProvisionRequest { + delegator_did: "did:keri:parent".to_string(), + agent_name: "child".to_string(), + capabilities: vec!["read".to_string()], + ttl_seconds: 3600, + max_delegation_depth: Some(0), + signature: "sig".to_string(), + timestamp: now, + }; + + assert!(validate_delegation_constraints(&parent, &req, now).is_ok()); + } + + #[test] + fn test_capability_subset_invalid() { + let now = Utc::now(); + let 
parent = AgentSession { + session_id: Uuid::new_v4(), + agent_did: "did:keri:parent".to_string(), + agent_name: "parent".to_string(), + delegator_did: None, + capabilities: vec!["read".to_string()], + status: AgentStatus::Active, + created_at: now, + expires_at: now + chrono::Duration::hours(1), + delegation_depth: 0, + max_delegation_depth: 2, + }; + + let req = ProvisionRequest { + delegator_did: "did:keri:parent".to_string(), + agent_name: "child".to_string(), + capabilities: vec!["admin".to_string()], + ttl_seconds: 3600, + max_delegation_depth: Some(0), + signature: "sig".to_string(), + timestamp: now, + }; + + assert!(validate_delegation_constraints(&parent, &req, now).is_err()); + } + + #[test] + fn test_ttl_limit() { + let now = Utc::now(); + let parent = AgentSession { + session_id: Uuid::new_v4(), + agent_did: "did:keri:parent".to_string(), + agent_name: "parent".to_string(), + delegator_did: None, + capabilities: vec!["read".to_string()], + status: AgentStatus::Active, + created_at: now, + expires_at: now + chrono::Duration::hours(1), + delegation_depth: 0, + max_delegation_depth: 2, + }; + + let req = ProvisionRequest { + delegator_did: "did:keri:parent".to_string(), + agent_name: "child".to_string(), + capabilities: vec!["read".to_string()], + ttl_seconds: 7200, + max_delegation_depth: Some(0), + signature: "sig".to_string(), + timestamp: now, + }; + + assert!(validate_delegation_constraints(&parent, &req, now).is_err()); + } + + #[test] + fn test_depth_limit() { + let now = Utc::now(); + let parent = AgentSession { + session_id: Uuid::new_v4(), + agent_did: "did:keri:parent".to_string(), + agent_name: "parent".to_string(), + delegator_did: None, + capabilities: vec!["read".to_string()], + status: AgentStatus::Active, + created_at: now, + expires_at: now + chrono::Duration::hours(1), + delegation_depth: 2, + max_delegation_depth: 2, + }; + + let req = ProvisionRequest { + delegator_did: "did:keri:parent".to_string(), + agent_name: "child".to_string(), 
+ capabilities: vec!["read".to_string()], + ttl_seconds: 3600, + max_delegation_depth: Some(0), + signature: "sig".to_string(), + timestamp: now, + }; + + assert!(validate_delegation_constraints(&parent, &req, now).is_err()); + } +} diff --git a/crates/auths-sdk/src/domains/agents/mod.rs b/crates/auths-sdk/src/domains/agents/mod.rs new file mode 100644 index 00000000..b7d695dc --- /dev/null +++ b/crates/auths-sdk/src/domains/agents/mod.rs @@ -0,0 +1,24 @@ +//! Agent provisioning and authorization domain +//! +//! Provides services for agent identity management, including provisioning, +//! authorization, and revocation with delegation support. + +/// Delegation constraints and validation +pub mod delegation; +/// Storage abstraction for agent sessions +pub mod persistence; +/// In-memory registry for agent sessions with indexing +pub mod registry; +/// Agent lifecycle and authorization service +pub mod service; +/// Types for agent sessions and requests +pub mod types; + +pub use delegation::{DelegationError, validate_delegation_constraints}; +pub use persistence::AgentPersistencePort; +pub use registry::AgentRegistry; +pub use service::AgentService; +pub use types::{ + AgentSession, AgentStatus, AuthorizeRequest, AuthorizeResponse, ProvisionRequest, + ProvisionResponse, +}; diff --git a/crates/auths-sdk/src/domains/agents/persistence.rs b/crates/auths-sdk/src/domains/agents/persistence.rs new file mode 100644 index 00000000..3cfb04e0 --- /dev/null +++ b/crates/auths-sdk/src/domains/agents/persistence.rs @@ -0,0 +1,29 @@ +use crate::domains::agents::types::AgentSession; +use async_trait::async_trait; +use chrono::{DateTime, Utc}; + +/// Persistence port for agent session storage +/// Implementations provide Redis, SQLite, or in-memory backends +#[async_trait] +pub trait AgentPersistencePort: Send + Sync { + /// Store a session (create or update) + async fn set_session(&self, session: &AgentSession) -> Result<(), String>; + + /// Retrieve a session by agent_did + 
async fn get_session(&self, agent_did: &str) -> Result, String>; + + /// Delete a session + async fn delete_session(&self, agent_did: &str) -> Result<(), String>; + + /// Set expiry on a session (auto-cleanup) + async fn expire(&self, agent_did: &str, expires_at: DateTime) -> Result<(), String>; + + /// Load all sessions (cache warming) + async fn load_all(&self) -> Result, String>; + + /// Find sessions by delegator + async fn find_by_delegator(&self, delegator_did: &str) -> Result, String>; + + /// Mark agent as revoked + async fn revoke_agent(&self, agent_did: &str) -> Result<(), String>; +} diff --git a/crates/auths-sdk/src/domains/agents/registry.rs b/crates/auths-sdk/src/domains/agents/registry.rs new file mode 100644 index 00000000..f64f145c --- /dev/null +++ b/crates/auths-sdk/src/domains/agents/registry.rs @@ -0,0 +1,319 @@ +use super::types::{AgentSession, AgentStatus}; +use chrono::{DateTime, Utc}; +use dashmap::DashMap; +use uuid::Uuid; + +/// Concurrent in-memory session registry for fast agent lookups (cache) +/// Source of truth is Redis; DashMap is the cache layer +#[derive(Debug, Clone)] +pub struct AgentRegistry { + // Primary index: agent_did → AgentSession + sessions: DashMap, + // Secondary index: session_id → agent_did (for reverse lookups) + by_session_id: DashMap, + // Tertiary index: delegator_did → Vec (for delegation tree queries) + by_delegator: DashMap>, +} + +impl AgentRegistry { + /// Create a new empty registry + pub fn new() -> Self { + Self { + sessions: DashMap::new(), + by_session_id: DashMap::new(), + by_delegator: DashMap::new(), + } + } + + /// Insert a new agent session into the registry + /// Returns previous value if agent_did already exists (overwrite case) + pub fn insert(&self, session: AgentSession) -> Option { + let agent_did = session.agent_did.clone(); + let session_id = session.session_id; + + // Insert into by_session_id index + self.by_session_id.insert(session_id, agent_did.clone()); + + // Track delegator 
relationship (for cascading revocation) + if let Some(delegator_did) = &session.delegator_did { + self.by_delegator + .entry(delegator_did.clone()) + .or_default() + .push(agent_did.clone()); + } + + // Insert into primary sessions map + self.sessions.insert(agent_did, session) + } + + /// Get an agent session by DID + /// Returns None if not found or expired + pub fn get(&self, agent_did: &str, now: DateTime) -> Option { + let session = self.sessions.get(agent_did)?; + + // Check expiry and status + if session.is_active(now) { + Some(session.clone()) + } else { + None + } + } + + /// Get an agent session by session_id (reverse lookup) + pub fn get_by_session_id(&self, session_id: Uuid, now: DateTime) -> Option { + let agent_did = self.by_session_id.get(&session_id)?; + self.get(&agent_did, now) + } + + /// Revoke an agent (marks as Revoked, doesn't delete) + /// Returns true if revoked, false if not found + pub fn revoke(&self, agent_did: &str) -> bool { + if let Some(mut entry) = self.sessions.get_mut(agent_did) { + entry.status = AgentStatus::Revoked; + true + } else { + false + } + } + + /// List all non-expired active sessions + pub fn list(&self, now: DateTime) -> Vec { + self.sessions + .iter() + .filter_map(|entry| { + let session = entry.value(); + if session.is_active(now) { + Some(session.clone()) + } else { + None + } + }) + .collect() + } + + /// List all agents delegated by a specific delegator (for tree traversal) + pub fn list_by_delegator(&self, delegator_did: &str, now: DateTime) -> Vec { + let Some(agent_dids) = self.by_delegator.get(delegator_did) else { + return Vec::new(); + }; + + agent_dids + .iter() + .filter_map(|agent_did| self.get(agent_did, now)) + .collect() + } + + /// Reap expired sessions (removes from all indices) + /// Called periodically by background cleanup task + /// Returns count of reaped sessions + pub fn reap_expired(&self, now: DateTime) -> usize { + let mut count = 0; + + // Collect DIDs to remove (avoid holding locks 
during iteration) + let expired_dids: Vec = self + .sessions + .iter() + .filter(|entry| entry.value().is_expired(now)) + .map(|entry| entry.key().clone()) + .collect(); + + // Remove from all indices + for agent_did in expired_dids { + // Remove from primary sessions map + if let Some((_, session)) = self.sessions.remove(&agent_did) { + count += 1; + + // Remove from by_session_id index + self.by_session_id.remove(&session.session_id); + + // Remove from by_delegator index + if let Some(delegator_did) = &session.delegator_did + && let Some(mut entry) = self.by_delegator.get_mut(delegator_did) + { + entry.retain(|did| did != &agent_did); + if entry.is_empty() { + drop(entry); + self.by_delegator.remove(delegator_did); + } + } + } + } + + count + } + + /// Get count of active sessions (for metrics) + pub fn len(&self, now: DateTime) -> usize { + self.sessions + .iter() + .filter(|entry| entry.value().is_active(now)) + .count() + } + + /// Check if registry is empty + pub fn is_empty(&self) -> bool { + self.sessions.is_empty() + } +} + +impl Default for AgentRegistry { + fn default() -> Self { + Self::new() + } +} + +#[cfg(test)] +#[allow(clippy::disallowed_methods)] // INVARIANT: test fixtures call Utc::now() and Uuid::new_v4() +mod tests { + use super::*; + + #[test] + fn test_insert_and_get() { + let registry = AgentRegistry::new(); + let now = Utc::now(); + + let session = AgentSession { + session_id: Uuid::new_v4(), + agent_did: "did:keri:test1".to_string(), + agent_name: "test-agent".to_string(), + delegator_did: None, + capabilities: vec!["read".to_string()], + status: AgentStatus::Active, + created_at: now, + expires_at: now + chrono::Duration::hours(1), + delegation_depth: 0, + max_delegation_depth: 0, + }; + + registry.insert(session.clone()); + + let retrieved = registry.get("did:keri:test1", now); + assert_eq!(retrieved, Some(session)); + } + + #[test] + fn test_get_by_session_id() { + let registry = AgentRegistry::new(); + let now = Utc::now(); + let 
session_id = Uuid::new_v4(); + + let session = AgentSession { + session_id, + agent_did: "did:keri:test2".to_string(), + agent_name: "test-agent".to_string(), + delegator_did: None, + capabilities: vec!["read".to_string()], + status: AgentStatus::Active, + created_at: now, + expires_at: now + chrono::Duration::hours(1), + delegation_depth: 0, + max_delegation_depth: 0, + }; + + registry.insert(session.clone()); + + let retrieved = registry.get_by_session_id(session_id, now); + assert_eq!(retrieved, Some(session)); + } + + #[test] + fn test_revoke() { + let registry = AgentRegistry::new(); + let now = Utc::now(); + + let session = AgentSession { + session_id: Uuid::new_v4(), + agent_did: "did:keri:test3".to_string(), + agent_name: "test-agent".to_string(), + delegator_did: None, + capabilities: vec!["read".to_string()], + status: AgentStatus::Active, + created_at: now, + expires_at: now + chrono::Duration::hours(1), + delegation_depth: 0, + max_delegation_depth: 0, + }; + + registry.insert(session); + assert!(registry.revoke("did:keri:test3")); + assert!(registry.get("did:keri:test3", now).is_none()); + } + + #[test] + fn test_reap_expired() { + let registry = AgentRegistry::new(); + let now = Utc::now(); + + let expired_session = AgentSession { + session_id: Uuid::new_v4(), + agent_did: "did:keri:expired".to_string(), + agent_name: "expired-agent".to_string(), + delegator_did: None, + capabilities: vec!["read".to_string()], + status: AgentStatus::Active, + created_at: now - chrono::Duration::hours(2), + expires_at: now - chrono::Duration::seconds(1), + delegation_depth: 0, + max_delegation_depth: 0, + }; + + let active_session = AgentSession { + session_id: Uuid::new_v4(), + agent_did: "did:keri:active".to_string(), + agent_name: "active-agent".to_string(), + delegator_did: None, + capabilities: vec!["read".to_string()], + status: AgentStatus::Active, + created_at: now, + expires_at: now + chrono::Duration::hours(1), + delegation_depth: 0, + max_delegation_depth: 
0, + }; + + registry.insert(expired_session); + registry.insert(active_session); + + let reaped = registry.reap_expired(now); + assert_eq!(reaped, 1); + assert_eq!(registry.len(now), 1); + } + + #[test] + fn test_list_by_delegator() { + let registry = AgentRegistry::new(); + let now = Utc::now(); + let delegator_did = "did:keri:delegator".to_string(); + + let child1 = AgentSession { + session_id: Uuid::new_v4(), + agent_did: "did:keri:child1".to_string(), + agent_name: "child1".to_string(), + delegator_did: Some(delegator_did.clone()), + capabilities: vec!["read".to_string()], + status: AgentStatus::Active, + created_at: now, + expires_at: now + chrono::Duration::hours(1), + delegation_depth: 1, + max_delegation_depth: 0, + }; + + let child2 = AgentSession { + session_id: Uuid::new_v4(), + agent_did: "did:keri:child2".to_string(), + agent_name: "child2".to_string(), + delegator_did: Some(delegator_did.clone()), + capabilities: vec!["write".to_string()], + status: AgentStatus::Active, + created_at: now, + expires_at: now + chrono::Duration::hours(1), + delegation_depth: 1, + max_delegation_depth: 0, + }; + + registry.insert(child1); + registry.insert(child2); + + let children = registry.list_by_delegator(&delegator_did, now); + assert_eq!(children.len(), 2); + } +} diff --git a/crates/auths-sdk/src/domains/agents/service.rs b/crates/auths-sdk/src/domains/agents/service.rs new file mode 100644 index 00000000..84a4fe58 --- /dev/null +++ b/crates/auths-sdk/src/domains/agents/service.rs @@ -0,0 +1,202 @@ +use base64::Engine; +use chrono::Utc; +use serde_json::json; +use std::sync::Arc; +use uuid::Uuid; + +use super::delegation::validate_delegation_constraints; +use super::persistence::AgentPersistencePort; +use super::registry::AgentRegistry; +use super::types::{ + AgentSession, AgentStatus, AuthorizeResponse, ProvisionRequest, ProvisionResponse, +}; + +/// Business logic service for agent operations +/// Separates HTTP concerns (handlers) from domain logic +pub struct 
AgentService { + registry: Arc, + persistence: Arc, +} + +impl AgentService { + /// Create a new agent service with injected registry and persistence + pub fn new(registry: Arc, persistence: Arc) -> Self { + Self { + registry, + persistence, + } + } + + /// Provision a new agent identity + /// Validates signature, delegates, provisions, and stores in registry + persistence + pub async fn provision( + &self, + req: ProvisionRequest, + now: chrono::DateTime, + ) -> Result { + // Validate clock skew (±5 minutes) + let time_diff = { + let duration = now.signed_duration_since(req.timestamp); + duration.num_seconds().unsigned_abs() + }; + if time_diff > 300 { + return Err("Clock skew too large".to_string()); + } + + // Verify signature using IdentityResolver + // TODO: Integrate with IdentityResolver when available + + // Validate delegation constraints if delegator exists in registry + if !req.delegator_did.is_empty() { + let delegator_session = self + .registry + .get(&req.delegator_did, now) + .ok_or_else(|| format!("Delegator not found: {}", req.delegator_did))?; + + validate_delegation_constraints(&delegator_session, &req, now) + .map_err(|e| e.to_string())?; + } + + // Provision agent identity using auths-id + // TODO: Call provision_agent_identity() from auths-id crate + let agent_did = format!("did:keri:{}", { + #[allow(clippy::disallowed_methods)] + Uuid::new_v4() + }); + let attestation = json!({ + "version": "1.0", + "agent_did": agent_did, + "issuer": req.delegator_did, + "capabilities": req.capabilities, + "timestamp": now.to_rfc3339(), + }) + .to_string(); + + // Generate optional bearer token + let bearer_token = { + let mut buf = [0u8; 32]; + use ring::rand::SecureRandom; + ring::rand::SystemRandom::new() + .fill(&mut buf) + .map_err(|_| "RNG failed".to_string())?; + + Some(base64::engine::general_purpose::STANDARD.encode(buf)) + }; + + // Create session + let session_id = { + #[allow(clippy::disallowed_methods)] + Uuid::new_v4() + }; + let expires_at = 
now + chrono::Duration::seconds(req.ttl_seconds as i64); + let delegation_depth = if req.delegator_did.is_empty() { + 0 + } else { + self.registry + .get(&req.delegator_did, now) + .map(|s| s.delegation_depth + 1) + .unwrap_or(1) + }; + + let session = AgentSession { + session_id, + agent_did: agent_did.clone(), + agent_name: req.agent_name, + delegator_did: if req.delegator_did.is_empty() { + None + } else { + Some(req.delegator_did) + }, + capabilities: req.capabilities, + status: AgentStatus::Active, + created_at: now, + expires_at, + delegation_depth, + max_delegation_depth: req.max_delegation_depth.unwrap_or(0), + }; + + // Store in persistence first (source of truth), then DashMap cache + self.persistence.set_session(&session).await?; + + // Only update cache if persistence write succeeded + self.registry.insert(session); + + // Set expiry on persistence key + self.persistence.expire(&agent_did, expires_at).await?; + + Ok(ProvisionResponse { + session_id, + agent_did, + bearer_token, + attestation, + expires_at, + }) + } + + /// Authorize an operation for an agent + /// Verifies signature, checks agent is active, evaluates capabilities + pub fn authorize( + &self, + agent_did: &str, + capability: &str, + now: chrono::DateTime, + ) -> Result { + // Verify signature using IdentityResolver + // TODO: Integrate with IdentityResolver when available + + // Get agent session from registry + let session = self + .registry + .get(agent_did, now) + .ok_or_else(|| "Agent not found or expired".to_string())?; + + // Check if agent is active (not revoked, not expired) + if session.status != AgentStatus::Active { + return Err("Agent revoked".to_string()); + } + + // Evaluate capabilities (hierarchical matching) + let matched: Vec = session + .capabilities + .iter() + .filter(|cap| *cap == capability || *cap == "*") + .cloned() + .collect(); + + let authorized = !matched.is_empty(); + + Ok(AuthorizeResponse { + authorized, + message: if authorized { + format!("Capability 
'{}' granted", capability) + } else { + format!("Capability '{}' not granted", capability) + }, + matched_capabilities: matched, + }) + } + + /// Revoke an agent and all its children (cascading) + pub async fn revoke(&self, agent_did: &str, now: chrono::DateTime) -> Result<(), String> { + // Check agent exists + if self.registry.get(agent_did, now).is_none() { + return Err("Agent not found".to_string()); + } + + // Revoke in memory + self.registry.revoke(agent_did); + + // Revoke in persistence + self.persistence.revoke_agent(agent_did).await?; + + // Cascade: revoke all children + let children = self.registry.list_by_delegator(agent_did, now); + + for child in children { + self.registry.revoke(&child.agent_did); + self.persistence.revoke_agent(&child.agent_did).await?; + } + + Ok(()) + } +} diff --git a/crates/auths-sdk/src/domains/agents/types.rs b/crates/auths-sdk/src/domains/agents/types.rs new file mode 100644 index 00000000..a1975619 --- /dev/null +++ b/crates/auths-sdk/src/domains/agents/types.rs @@ -0,0 +1,133 @@ +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize}; +use uuid::Uuid; + +/// Agent session status +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)] +pub enum AgentStatus { + /// Session is currently active + Active, + /// Session has been revoked + Revoked, +} + +/// Agent session stored in registry +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct AgentSession { + /// Unique session identifier + pub session_id: Uuid, + /// Agent DID (unique identity) + pub agent_did: String, + /// Human-readable agent name + pub agent_name: String, + /// Parent delegator DID (optional) + pub delegator_did: Option, + /// Granted capabilities + pub capabilities: Vec, + /// Session status + pub status: AgentStatus, + /// When session was created + pub created_at: DateTime, + /// When session expires + pub expires_at: DateTime, + /// Delegation depth in the tree + pub delegation_depth: u32, + /// Max delegation 
depth this agent can create + pub max_delegation_depth: u32, +} + +impl AgentSession { + /// Check if session is expired + pub fn is_expired(&self, now: DateTime) -> bool { + now > self.expires_at + } + + /// Check if session is active (not revoked and not expired) + pub fn is_active(&self, now: DateTime) -> bool { + self.status == AgentStatus::Active && !self.is_expired(now) + } +} + +/// Request to provision a new agent +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ProvisionRequest { + /// Who is delegating (empty for root provision) + pub delegator_did: String, + /// Human-readable name for the agent + pub agent_name: String, + /// Capabilities granted to this agent + pub capabilities: Vec, + /// How long agent should live (seconds) + pub ttl_seconds: u64, + /// Maximum delegation depth this agent can create (0 = cannot delegate) + pub max_delegation_depth: Option, + /// Base64-encoded Ed25519 signature over canonicalized request body + pub signature: String, + /// When request was signed (for clock skew tolerance) + pub timestamp: DateTime, +} + +/// Response from provisioning a new agent +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ProvisionResponse { + /// Unique session ID for audit + pub session_id: Uuid, + /// Agent's DID (cryptographic identity) + pub agent_did: String, + /// Optional bearer token (convenience only, not required for auth) + pub bearer_token: Option, + /// Signed attestation proof + pub attestation: String, + /// When agent expires + pub expires_at: DateTime, +} + +/// Request to authorize an operation +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct AuthorizeRequest { + /// Agent's DID performing the operation + pub agent_did: String, + /// Capability being requested + pub capability: String, + /// Base64-encoded Ed25519 signature over canonicalized request body + pub signature: String, + /// When request was signed (for clock skew tolerance) + pub timestamp: DateTime, +} + +/// Response to 
authorization request +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct AuthorizeResponse { + /// Whether the agent is authorized + pub authorized: bool, + /// Message explaining the decision + pub message: String, + /// Matched capabilities (if authorized) + pub matched_capabilities: Vec, +} + +#[cfg(test)] +#[allow(clippy::disallowed_methods)] // INVARIANT: test fixtures call Utc::now() and Uuid::new_v4() +mod tests { + use super::*; + + #[test] + fn test_session_expiry() { + let now = Utc::now(); + let session = AgentSession { + session_id: Uuid::new_v4(), + agent_did: "did:keri:test".to_string(), + agent_name: "test-agent".to_string(), + delegator_did: None, + capabilities: vec!["read".to_string()], + status: AgentStatus::Active, + created_at: now, + expires_at: now - chrono::Duration::seconds(1), + delegation_depth: 0, + max_delegation_depth: 0, + }; + + assert!(session.is_expired(now)); + assert!(!session.is_active(now)); + } +} diff --git a/crates/auths-sdk/src/domains/mod.rs b/crates/auths-sdk/src/domains/mod.rs new file mode 100644 index 00000000..f99d5707 --- /dev/null +++ b/crates/auths-sdk/src/domains/mod.rs @@ -0,0 +1,5 @@ +//! Domain services for Auths functionality. +//! +//! Modules organize domain-specific business logic separate from I/O concerns. + +pub mod agents; diff --git a/crates/auths-sdk/src/lib.rs b/crates/auths-sdk/src/lib.rs index da0a3675..c32cbcf4 100644 --- a/crates/auths-sdk/src/lib.rs +++ b/crates/auths-sdk/src/lib.rs @@ -24,6 +24,8 @@ pub mod audit; pub mod context; /// Device linking, revocation, and authorization extension operations. pub mod device; +/// Domain services for specialized business logic. +pub mod domains; /// Domain error types for all SDK operations. pub mod error; /// Key import and management operations. 
diff --git a/deny.toml b/deny.toml index 08fa264e..791d2733 100644 --- a/deny.toml +++ b/deny.toml @@ -28,6 +28,7 @@ multiple-versions = "warn" deny = [ { crate = "reqwest", wrappers = [ "auths-infra-http", + "auths-api", "auths-cli", "auths-mcp-server", "auths-telemetry", @@ -42,6 +43,7 @@ deny = [ # axum is an HTTP framework — adapter crates only { crate = "axum", wrappers = [ "auths-infra-http", + "auths-api", "auths-cli", "auths-core", "auths-mcp-server",