From 346fdb6351e083e9b5d98532121b90085975cdd8 Mon Sep 17 00:00:00 2001 From: Vasili Pascal Date: Mon, 11 May 2026 15:54:05 +0300 Subject: [PATCH 1/3] service creds implemented --- src/bin/stacker.rs | 44 ++-- src/cli/compose_targets.rs | 309 ++++++++++++++++++++++++++++ src/cli/install_runner.rs | 14 +- src/cli/mod.rs | 1 + src/cli/stacker_client.rs | 50 ++++- src/console/commands/cli/deploy.rs | 42 ++++ src/console/commands/cli/secrets.rs | 161 ++++++++++----- tests/remote_secrets.rs | 84 ++++++++ 8 files changed, 625 insertions(+), 80 deletions(-) create mode 100644 src/cli/compose_targets.rs diff --git a/src/bin/stacker.rs b/src/bin/stacker.rs index 828be3cc..953dcc23 100644 --- a/src/bin/stacker.rs +++ b/src/bin/stacker.rs @@ -609,8 +609,10 @@ enum SecretsCommands { Local .env secret:\n\ stacker secrets set DB_PASSWORD=supersecret\n\ \n\ - Remote service secret (project.identity from stacker.yml):\n\ + Remote deployable service/app target secret (project.identity from stacker.yml):\n\ stacker secrets set S3_SECRET_KEY --scope service --service uploader --body supersecret\n\ +\n\ + Use the target code listed by `stacker secrets apps` for --service.\n\ \n\ Remote server secret from stdin:\n\ cat token.txt | stacker secrets set NPM_TOKEN --scope server --server-id 42")] @@ -630,8 +632,8 @@ enum SecretsCommands { /// Project name or ID for service-scoped secrets (defaults to project.identity in stacker.yml) #[arg(long, value_name = "PROJECT", requires = "scope")] project: Option, - /// App code for service-scoped secrets - #[arg(long, value_name = "APP_CODE", requires = "scope")] + /// Deployable service/app target code listed by `stacker secrets apps` + #[arg(long, value_name = "TARGET_CODE", requires = "scope")] service: Option, /// Server ID for server-scoped secrets #[arg(long, value_name = "SERVER_ID", requires = "scope")] @@ -684,8 +686,8 @@ Remote get is metadata-only in v1 and does not reveal plaintext values.")] /// Project name or ID for service-scoped 
secrets (defaults to project.identity in stacker.yml) #[arg(long, value_name = "PROJECT", requires = "scope")] project: Option, - /// App code for service-scoped secrets - #[arg(long, value_name = "APP_CODE", requires = "scope")] + /// Deployable service/app target code listed by `stacker secrets apps` + #[arg(long, value_name = "TARGET_CODE", requires = "scope")] service: Option, /// Server ID for server-scoped secrets #[arg(long, value_name = "SERVER_ID", requires = "scope")] @@ -723,8 +725,8 @@ Remote get is metadata-only in v1 and does not reveal plaintext values.")] /// Project name or ID for service-scoped secrets (defaults to project.identity in stacker.yml) #[arg(long, value_name = "PROJECT", requires = "scope")] project: Option, - /// App code for service-scoped secrets - #[arg(long, value_name = "APP_CODE", requires = "scope")] + /// Deployable service/app target code listed by `stacker secrets apps` + #[arg(long, value_name = "TARGET_CODE", requires = "scope")] service: Option, /// Server ID for server-scoped secrets #[arg(long, value_name = "SERVER_ID", requires = "scope")] @@ -733,16 +735,16 @@ Remote get is metadata-only in v1 and does not reveal plaintext values.")] #[arg(long, requires = "scope")] json: bool, }, - /// List valid remote app codes for a project + /// List valid remote deployable service/app target codes (`stacker secrets apps`) #[command( visible_alias = "services", after_help = "Examples:\n\ - List remote app codes using project.identity from stacker.yml:\n\ + List remote target codes using project.identity from stacker.yml:\n\ stacker secrets apps\n\ - \n\ - List remote app codes for a project:\n\ + \n\ + List remote target codes for a project:\n\ stacker secrets apps --project blog\n\ - \n\ + \n\ Output app metadata as JSON:\n\ stacker secrets apps --json" )] @@ -780,8 +782,8 @@ Remote get is metadata-only in v1 and does not reveal plaintext values.")] /// Project name or ID for service-scoped secrets (defaults to project.identity 
in stacker.yml) #[arg(long, value_name = "PROJECT", requires = "scope")] project: Option, - /// App code for service-scoped secrets - #[arg(long, value_name = "APP_CODE", requires = "scope")] + /// Deployable service/app target code listed by `stacker secrets apps` + #[arg(long, value_name = "TARGET_CODE", requires = "scope")] service: Option, /// Server ID for server-scoped secrets #[arg(long, value_name = "SERVER_ID", requires = "scope")] @@ -2242,7 +2244,19 @@ mod tests { assert!(help.contains("--scope service")); assert!(help.contains("--scope server")); assert!(help.contains("metadata-only")); - assert!(help.contains("List valid remote app codes for a project")); + assert!(help.contains("List valid remote deployable service/app target codes")); + } + + #[test] + fn test_secrets_help_describes_service_scope_as_deployable_target() { + let mut command = Cli::command(); + let secrets = command + .find_subcommand_mut("secrets") + .expect("secrets subcommand should exist"); + let help = render_command_help(secrets); + + assert!(help.contains("deployable service/app target")); + assert!(help.contains("stacker secrets apps")); } #[test] diff --git a/src/cli/compose_targets.rs b/src/cli/compose_targets.rs new file mode 100644 index 00000000..33d167aa --- /dev/null +++ b/src/cli/compose_targets.rs @@ -0,0 +1,309 @@ +use std::collections::HashSet; +use std::path::{Path, PathBuf}; + +use serde_yaml::{Mapping, Value}; + +use crate::cli::config_parser::{ServiceDefinition, StackerConfig}; +use crate::cli::error::CliError; + +pub fn config_with_compose_secret_target_services( + config: &StackerConfig, + compose_path: &Path, +) -> Result { + let mut config = config.clone(); + let mut existing = config + .services + .iter() + .map(|service| service.name.to_ascii_lowercase()) + .collect::>(); + + for service in extract_compose_secret_target_services(compose_path, &config)? 
{ + if existing.insert(service.name.to_ascii_lowercase()) { + config.services.push(service); + } + } + + Ok(config) +} + +pub fn extract_compose_secret_target_services( + compose_path: &Path, + config: &StackerConfig, +) -> Result, CliError> { + let mut visited = HashSet::new(); + let mut services = Vec::new(); + collect_compose_services(compose_path, config, &mut visited, &mut services)?; + Ok(services) +} + +fn collect_compose_services( + compose_path: &Path, + config: &StackerConfig, + visited: &mut HashSet, + services: &mut Vec, +) -> Result<(), CliError> { + let canonical = compose_path + .canonicalize() + .unwrap_or_else(|_| compose_path.to_path_buf()); + if !visited.insert(canonical) { + return Ok(()); + } + + let content = std::fs::read_to_string(compose_path).map_err(|err| { + CliError::ConfigValidation(format!( + "Failed to read compose file for service target discovery '{}': {}", + compose_path.display(), + err + )) + })?; + let document: Value = serde_yaml::from_str(&content).map_err(|err| { + CliError::ConfigValidation(format!( + "Failed to parse compose file for service target discovery '{}': {}", + compose_path.display(), + err + )) + })?; + + let base_dir = compose_path.parent().unwrap_or_else(|| Path::new(".")); + let existing_config_services = config + .services + .iter() + .map(|service| service.name.to_ascii_lowercase()) + .collect::>(); + let already_extracted = services + .iter() + .map(|service: &ServiceDefinition| service.name.to_ascii_lowercase()) + .collect::>(); + + if let Some(service_map) = document + .get(Value::String("services".to_string())) + .and_then(Value::as_mapping) + { + for (name, definition) in service_map { + let Some(service_name) = name.as_str() else { + continue; + }; + let normalized_name = service_name.to_ascii_lowercase(); + if normalized_name == "app" + || normalized_name == config.name.to_ascii_lowercase() + || existing_config_services.contains(&normalized_name) + || already_extracted.contains(&normalized_name) + { 
+ continue; + } + + let Some(definition) = definition.as_mapping() else { + eprintln!( + " Skipping compose service '{}' as a remote secret target: service definition is not a map.", + service_name + ); + continue; + }; + + if is_platform_managed_compose_service(service_name, definition) { + eprintln!( + " Skipping compose service '{}' as a remote secret target: service is platform-managed.", + service_name + ); + continue; + } + + let Some(image) = mapping_string(definition, "image") else { + eprintln!( + " Skipping compose service '{}' as a remote secret target: image-backed services are required.", + service_name + ); + continue; + }; + + services.push(ServiceDefinition { + name: service_name.to_string(), + image, + ports: mapping_sequence(definition, "ports") + .into_iter() + .filter_map(compose_port_to_string) + .collect(), + environment: compose_environment(definition), + volumes: mapping_sequence(definition, "volumes") + .into_iter() + .filter_map(compose_volume_to_string) + .collect(), + depends_on: compose_depends_on(definition), + }); + } + } + + for include_path in compose_include_paths(&document, base_dir) { + collect_compose_services(&include_path, config, visited, services)?; + } + + Ok(()) +} + +fn mapping_string(mapping: &Mapping, key: &str) -> Option { + mapping + .get(Value::String(key.to_string())) + .and_then(Value::as_str) + .map(ToOwned::to_owned) +} + +fn mapping_sequence<'a>(mapping: &'a Mapping, key: &str) -> Vec<&'a Value> { + mapping + .get(Value::String(key.to_string())) + .and_then(Value::as_sequence) + .map(|values| values.iter().collect()) + .unwrap_or_default() +} + +fn compose_environment(mapping: &Mapping) -> std::collections::HashMap { + let mut environment = std::collections::HashMap::new(); + let Some(value) = mapping.get(Value::String("environment".to_string())) else { + return environment; + }; + + if let Some(map) = value.as_mapping() { + for (key, value) in map { + if let Some(key) = key.as_str() { + 
environment.insert(key.to_string(), yaml_scalar_to_string(value)); + } + } + return environment; + } + + if let Some(sequence) = value.as_sequence() { + for item in sequence { + if let Some(entry) = item.as_str() { + if let Some((key, value)) = entry.split_once('=') { + environment.insert(key.to_string(), value.to_string()); + } + } + } + } + + environment +} + +fn compose_depends_on(mapping: &Mapping) -> Vec { + let Some(value) = mapping.get(Value::String("depends_on".to_string())) else { + return Vec::new(); + }; + + if let Some(sequence) = value.as_sequence() { + return sequence + .iter() + .filter_map(Value::as_str) + .map(ToOwned::to_owned) + .collect(); + } + + value + .as_mapping() + .map(|depends_on| { + depends_on + .keys() + .filter_map(Value::as_str) + .map(ToOwned::to_owned) + .collect() + }) + .unwrap_or_default() +} + +fn compose_port_to_string(value: &Value) -> Option { + if let Some(port) = value.as_str() { + return Some(port.to_string()); + } + + let map = value.as_mapping()?; + let target = mapping_scalar(map, "target")?; + let published = mapping_scalar(map, "published"); + Some(match published { + Some(published) => format!("{published}:{target}"), + None => target, + }) +} + +fn compose_volume_to_string(value: &Value) -> Option { + if let Some(volume) = value.as_str() { + return Some(volume.to_string()); + } + + let map = value.as_mapping()?; + let target = mapping_scalar(map, "target")?; + let source = mapping_scalar(map, "source").unwrap_or_default(); + let read_only = map + .get(Value::String("read_only".to_string())) + .and_then(Value::as_bool) + .unwrap_or(false); + + Some(if read_only { + format!("{source}:{target}:ro") + } else if source.is_empty() { + target + } else { + format!("{source}:{target}") + }) +} + +fn mapping_scalar(mapping: &Mapping, key: &str) -> Option { + mapping + .get(Value::String(key.to_string())) + .map(yaml_scalar_to_string) + .filter(|value| !value.is_empty()) +} + +fn yaml_scalar_to_string(value: &Value) -> 
String { + match value { + Value::Null => String::new(), + Value::Bool(value) => value.to_string(), + Value::Number(value) => value.to_string(), + Value::String(value) => value.clone(), + _ => serde_yaml::to_string(value) + .unwrap_or_default() + .trim() + .to_string(), + } +} + +fn compose_include_paths(document: &Value, base_dir: &Path) -> Vec { + let Some(include) = document.get(Value::String("include".to_string())) else { + return Vec::new(); + }; + + let mut paths = Vec::new(); + collect_include_value(include, base_dir, &mut paths); + paths +} + +fn collect_include_value(value: &Value, base_dir: &Path, paths: &mut Vec) { + if let Some(path) = value.as_str() { + paths.push(base_dir.join(path)); + return; + } + + if let Some(sequence) = value.as_sequence() { + for item in sequence { + collect_include_value(item, base_dir, paths); + } + return; + } + + if let Some(mapping) = value.as_mapping() { + if let Some(path) = mapping_string(mapping, "path") { + paths.push(base_dir.join(path)); + } + } +} + +fn is_platform_managed_compose_service(service_name: &str, definition: &Mapping) -> bool { + let service_name = service_name.to_ascii_lowercase().replace('-', "_"); + let image = mapping_string(definition, "image") + .unwrap_or_default() + .to_ascii_lowercase(); + + service_name == "nginx_proxy_manager" + || service_name == "npm" + || service_name == "statuspanel" + || service_name == "status_panel" + || image.contains("nginx-proxy-manager") + || image.contains("statuspanel") +} diff --git a/src/cli/install_runner.rs b/src/cli/install_runner.rs index 5d4eee13..d1a6eb4a 100644 --- a/src/cli/install_runner.rs +++ b/src/cli/install_runner.rs @@ -1,6 +1,7 @@ use std::path::{Path, PathBuf}; use crate::cli::cloud_env; +use crate::cli::compose_targets; use crate::cli::config_parser::{CloudOrchestrator, DeployTarget, StackerConfig}; use crate::cli::credentials::{CredentialsManager, StoredCredentials}; use crate::cli::error::CliError; @@ -515,7 +516,12 @@ impl DeployStrategy 
for CloudDeploy { // Step 1: Resolve or auto-create project eprintln!(" Resolving project '{}'...", project_name); - let mut project_body = stacker_client::build_project_body(config); + let project_config = + compose_targets::config_with_compose_secret_target_services( + config, + &context.compose_path, + )?; + let mut project_body = stacker_client::build_project_body(&project_config); if let Some(bundle) = &context.config_bundle { stacker_client::attach_config_bundle_to_project_body( &mut project_body, @@ -1371,7 +1377,11 @@ impl DeployStrategy for ServerDeploy { .as_ref() .ok_or(CliError::ServerHostMissing)?; let project_name = resolve_remote_project_name(config, context); - let mut project_body = stacker_client::build_project_body(config); + let project_config = compose_targets::config_with_compose_secret_target_services( + config, + &context.compose_path, + )?; + let mut project_body = stacker_client::build_project_body(&project_config); if let Some(bundle) = &context.config_bundle { stacker_client::attach_config_bundle_to_project_body(&mut project_body, bundle); } diff --git a/src/cli/mod.rs b/src/cli/mod.rs index 9ce615b9..78837467 100644 --- a/src/cli/mod.rs +++ b/src/cli/mod.rs @@ -4,6 +4,7 @@ pub mod ai_pipe_suggest; pub mod ai_scanner; pub mod ci_export; pub mod cloud_env; +pub mod compose_targets; pub mod config_bundle; pub mod config_parser; pub mod credentials; diff --git a/src/cli/stacker_client.rs b/src/cli/stacker_client.rs index 2c3b170e..520ab32a 100644 --- a/src/cli/stacker_client.rs +++ b/src/cli/stacker_client.rs @@ -3033,6 +3033,7 @@ pub fn build_project_body(config: &StackerConfig) -> serde_json::Value { // app format. The main `app` section is the primary web application; // additional `services` are supporting containers. 
let mut web_apps: Vec = Vec::new(); + let mut service_apps: Vec = Vec::new(); // Include the main app (if it has an image) if let Some(main_app) = app_source_to_app_json(config, &network_ids) { @@ -3045,13 +3046,14 @@ pub fn build_project_body(config: &StackerConfig) -> serde_json::Value { | crate::cli::config_parser::ProxyType::NginxProxyManager ); - // Include additional services. Managed proxy services are installed via - // extended_features, not as project apps, to avoid duplicate NPM containers. + // Include additional services as service targets. Managed proxy services + // are installed via extended_features, not as project apps, to avoid + // duplicate NPM containers. for svc in &config.services { if proxy_is_managed && is_nginx_proxy_manager_service(svc) { continue; } - web_apps.push(service_to_app_json(svc, &network_ids)); + service_apps.push(service_to_app_json(svc, &network_ids)); } serde_json::json!({ @@ -3060,7 +3062,7 @@ pub fn build_project_body(config: &StackerConfig) -> serde_json::Value { "project_name": config.name.clone(), "web": web_apps, "feature": [], - "service": [], + "service": service_apps, "networks": [{ "id": network_id, "name": "default_network", @@ -3748,14 +3750,50 @@ mod tests { .unwrap(); let body = build_project_body(&config); - let web = body["custom"]["web"].as_array().unwrap(); - let codes = web + let service = body["custom"]["service"].as_array().unwrap(); + let codes = service .iter() .filter_map(|app| app["code"].as_str()) .collect::>(); assert_eq!(codes, vec!["redis"]); } + #[test] + fn test_scn_001_stacker_yml_service_serializes_as_service_target() { + let upload_service = ServiceDefinition { + name: "upload".to_string(), + image: "ghcr.io/example/upload:1.0".to_string(), + ports: vec!["8081:8080".to_string()], + environment: std::collections::HashMap::new(), + volumes: vec![], + depends_on: vec![], + }; + let config = crate::cli::config_parser::ConfigBuilder::new() + .name("Device API") + .project_identity("device-api") 
+ .app_image("ghcr.io/example/device-api:1.0") + .add_service(upload_service) + .build() + .unwrap(); + + let body = build_project_body(&config); + let web_codes = body["custom"]["web"] + .as_array() + .unwrap() + .iter() + .filter_map(|app| app["code"].as_str()) + .collect::>(); + let service_codes = body["custom"]["service"] + .as_array() + .unwrap() + .iter() + .filter_map(|app| app["code"].as_str()) + .collect::>(); + + assert_eq!(web_codes, vec!["device-api"]); + assert_eq!(service_codes, vec!["upload"]); + } + #[test] fn test_build_project_body_with_status_panel_does_not_add_status_panel_feature() { let config = crate::cli::config_parser::ConfigBuilder::new() diff --git a/src/console/commands/cli/deploy.rs b/src/console/commands/cli/deploy.rs index 5a60c78b..d8823ddd 100644 --- a/src/console/commands/cli/deploy.rs +++ b/src/console/commands/cli/deploy.rs @@ -6,6 +6,8 @@ use crate::cli::ai_client::{ build_prompt, create_provider, ollama_complete_streaming, AiTask, PromptContext, }; use crate::cli::cloud_env; +#[cfg(test)] +use crate::cli::compose_targets::extract_compose_secret_target_services; use crate::cli::config_bundle::build_config_bundle; use crate::cli::config_parser::{ AiProviderType, CloudConfig, CloudOrchestrator, CloudProvider, DeployTarget, RegistryConfig, @@ -3109,6 +3111,46 @@ mod tests { dir } + #[test] + fn test_scn_004_compose_image_services_register_as_remote_secret_targets() { + let dir = setup_local_project(&[( + "docker-compose.yml", + r#" +services: + app: + image: ghcr.io/example/device-api:1.0 + upload: + image: ghcr.io/example/upload:1.0 + ports: + - "8081:8080" + environment: + S3_BUCKET: "${S3_BUCKET}" + worker: + build: . 
+ nginx_proxy_manager: + image: jc21/nginx-proxy-manager:latest +"#, + )]); + let config = StackerConfig { + name: "device-api".to_string(), + ..StackerConfig::default() + }; + + let services = extract_compose_secret_target_services( + dir.path().join("docker-compose.yml").as_path(), + &config, + ) + .unwrap(); + let service_names = services + .iter() + .map(|service| service.name.as_str()) + .collect::>(); + + assert_eq!(service_names, vec!["upload"]); + assert_eq!(services[0].image, "ghcr.io/example/upload:1.0"); + assert_eq!(services[0].ports, vec!["8081:8080"]); + } + fn minimal_config_yaml() -> String { "name: test-app\napp:\n type: static\n path: .\n".to_string() } diff --git a/src/console/commands/cli/secrets.rs b/src/console/commands/cli/secrets.rs index 4ee5f445..5220e4fc 100644 --- a/src/console/commands/cli/secrets.rs +++ b/src/console/commands/cli/secrets.rs @@ -332,29 +332,59 @@ fn resolve_project( .ok_or_else(|| CliError::ConfigValidation(format!("Project '{}' was not found", reference))) } +#[cfg(test)] fn project_app_codes(project: &ProjectInfo) -> Vec { - let mut codes: Vec = project - .metadata - .get("custom") - .and_then(|custom| custom.get("web")) - .and_then(|web| web.as_array()) - .map(|apps| { - apps.iter() - .filter_map(|app| app.get("code").and_then(|code| code.as_str())) - .map(|code| code.to_string()) - .collect() - }) - .unwrap_or_default(); + let mut codes: Vec = Vec::new(); + for group in ["web", "service", "feature"] { + if let Some(apps) = project + .metadata + .get("custom") + .and_then(|custom| custom.get(group)) + .and_then(|web| web.as_array()) + { + codes.extend( + apps.iter() + .filter_map(|app| app.get("code").and_then(|code| code.as_str())) + .map(|code| code.to_string()), + ); + } + } codes.sort(); codes.dedup(); codes } -fn resolve_remote_service_code(project: &ProjectInfo, requested: &str) -> Result { - let available_codes = project_app_codes(project); +fn resolve_remote_service_code( + ctx: &CliRuntime, + project: 
&ProjectInfo, + requested: &str, + operation: &str, +) -> Result { + let apps = ctx + .block_on(ctx.client.list_project_apps(project.id)) + .map_err(|error| remap_remote_secret_error(operation, error))?; + + resolve_remote_service_code_from_apps(&project.name, &apps, requested) +} + +fn resolve_remote_service_code_from_apps( + project_name: &str, + apps: &[ProjectAppInfo], + requested: &str, +) -> Result { + let mut available_codes = apps + .iter() + .map(|app| app.code.clone()) + .collect::>(); + available_codes.sort(); + available_codes.dedup(); + if available_codes.is_empty() { - return Ok(requested.to_string()); + return Err(CliError::ConfigValidation(format!( + "No remote secret targets are registered for project '{}'. Run `stacker deploy --target ` to sync project/service registration, then run `stacker secrets apps` to list valid targets.", + project_name + ))); } let requested_lower = requested.to_lowercase(); @@ -366,9 +396,9 @@ fn resolve_remote_service_code(project: &ProjectInfo, requested: &str) -> Result } Err(CliError::ConfigValidation(format!( - "Service '{}' was not found in project '{}'. Available app codes: {}", + "Unknown remote secret target '{}' for project '{}'. Available targets: {}. 
Run `stacker deploy --target ` to sync project/service registration, then run `stacker secrets apps`.", requested, - project.name, + project_name, available_codes.join(", ") ))) } @@ -385,7 +415,7 @@ fn print_remote_secret(secret: &RemoteSecretMetadataInfo, json: bool) -> Result< println!("Project ID: {}", project_id); } if let Some(app_code) = &secret.app_code { - println!("Service: {}", app_code); + println!("Target: {}", app_code); } if let Some(server_id) = secret.server_id { println!("Server ID: {}", server_id); @@ -422,7 +452,7 @@ fn print_remote_secret_list( println!("{}", "─".repeat(92)); for secret in secrets { let target = if let Some(app_code) = &secret.app_code { - format!("app:{app_code}") + app_code.to_string() } else if let Some(server_id) = secret.server_id { format!("server:{server_id}") } else { @@ -446,13 +476,13 @@ fn print_project_app_list(apps: &[ProjectAppInfo], json: bool) -> Result<(), Cli } if apps.is_empty() { - println!("(no project apps found)"); + println!("(no remote secret targets found)"); return Ok(()); } println!( "{:<24} {:<24} {:<8} {:<12} {}", - "CODE", "NAME", "ENABLED", "PARENT", "IMAGE" + "TARGET", "NAME", "ENABLED", "PARENT", "IMAGE" ); println!("{}", "─".repeat(96)); for app in apps { @@ -562,7 +592,7 @@ impl SecretsSetCommand { "Service-scoped secrets require --service".to_string(), ) })?; - let app_code = resolve_remote_service_code(&project, app_code)?; + let app_code = resolve_remote_service_code(&ctx, &project, app_code, operation)?; let secret = ctx .block_on(ctx.client.set_service_secret( project.id, @@ -694,7 +724,8 @@ impl CallableTrait for SecretsGetCommand { "Service-scoped secrets require --service".to_string(), ) })?; - let app_code = resolve_remote_service_code(&project, app_code)?; + let app_code = + resolve_remote_service_code(&ctx, &project, app_code, operation)?; ctx.block_on(ctx.client.get_service_secret_metadata( project.id, &app_code, @@ -816,7 +847,8 @@ impl CallableTrait for SecretsListCommand { 
"Service-scoped secrets require --service".to_string(), ) })?; - let app_code = resolve_remote_service_code(&project, app_code)?; + let app_code = + resolve_remote_service_code(&ctx, &project, app_code, operation)?; ctx.block_on(ctx.client.list_service_secrets(project.id, &app_code)) .map_err(|error| remap_remote_secret_error(operation, error))? } @@ -958,7 +990,8 @@ impl CallableTrait for SecretsDeleteCommand { "Service-scoped secrets require --service".to_string(), ) })?; - let app_code = resolve_remote_service_code(&project, app_code)?; + let app_code = + resolve_remote_service_code(&ctx, &project, app_code, operation)?; ctx.block_on(ctx.client.delete_service_secret(project.id, &app_code, key)) .map_err(|error| remap_remote_secret_error(operation, error))?; println!("✓ Deleted service secret {} from {}", key, app_code); @@ -1082,6 +1115,19 @@ mod tests { use super::*; use tempfile::TempDir; + fn project_app_info(id: i32, code: &str) -> ProjectAppInfo { + ProjectAppInfo { + id, + project_id: 7, + code: code.to_string(), + name: code.to_string(), + image: "nginx:stable".to_string(), + enabled: true, + deploy_order: None, + parent_app_code: None, + } + } + // ── SECURITY: Path traversal via --file flag ────── // CWE-22: Improper Limitation of a Pathname to a Restricted Directory // @@ -1482,49 +1528,50 @@ mod tests { #[test] fn test_resolve_remote_service_code_matches_case_insensitively() { - let project = ProjectInfo { - id: 7, - name: "syncopia".to_string(), - user_id: "user-1".to_string(), - metadata: serde_json::json!({ - "custom": { - "web": [ - {"code": "device-apis"} - ] - } - }), - created_at: "2026-01-01T00:00:00Z".to_string(), - updated_at: "2026-01-01T00:00:00Z".to_string(), - }; + let apps = vec![project_app_info(1, "device-apis")]; assert_eq!( - resolve_remote_service_code(&project, "Device-APIs").unwrap(), + resolve_remote_service_code_from_apps("syncopia", &apps, "Device-APIs").unwrap(), "device-apis" ); } #[test] fn 
test_resolve_remote_service_code_reports_available_codes() { - let project = ProjectInfo { - id: 7, - name: "syncopia".to_string(), - user_id: "user-1".to_string(), - metadata: serde_json::json!({ - "custom": { - "web": [ - {"code": "app"}, - {"code": "device-apis"} - ] - } - }), - created_at: "2026-01-01T00:00:00Z".to_string(), - updated_at: "2026-01-01T00:00:00Z".to_string(), - }; + let apps = vec![ + project_app_info(1, "app"), + project_app_info(2, "device-apis"), + ]; - let error = resolve_remote_service_code(&project, "device-api") + let error = resolve_remote_service_code_from_apps("syncopia", &apps, "device-api") .unwrap_err() .to_string(); - assert!(error.contains("Available app codes: app, device-apis")); + assert!(error.contains("Available targets: app, device-apis")); + } + + #[test] + fn test_scn_001_remote_target_resolution_accepts_registered_service_code() { + let apps = vec![project_app_info(2, "upload")]; + + let resolved = resolve_remote_service_code_from_apps("syncopia", &apps, "UPLOAD").unwrap(); + + assert_eq!(resolved, "upload"); + } + + #[test] + fn test_scn_007_unknown_remote_target_error_lists_available_targets() { + let apps = vec![ + project_app_info(1, "device-api"), + project_app_info(2, "upload"), + ]; + + let error = resolve_remote_service_code_from_apps("syncopia", &apps, "media") + .unwrap_err() + .to_string(); + + assert!(error.contains("Unknown remote secret target 'media'")); + assert!(error.contains("Available targets: device-api, upload")); + assert!(error.contains("stacker deploy --target")); } #[test] diff --git a/tests/remote_secrets.rs b/tests/remote_secrets.rs index 86049c11..53d8b0a2 100644 --- a/tests/remote_secrets.rs +++ b/tests/remote_secrets.rs @@ -426,6 +426,90 @@ async fn test_render_bundle_merges_service_secrets_and_overrides_plain_env() { assert!(bundle.compose_content.contains("S3_KEY=vault-wins")); } +#[tokio::test] +async fn test_scn_003_service_secret_injects_only_into_registered_service_target() { + let 
Some(app) = common::spawn_app_with_vault().await else { + return; + }; + + let project_id = common::create_test_project(&app.db_pool, common::USER_A_ID).await; + let mut project = db::project::fetch(&app.db_pool, project_id) + .await + .expect("project fetch failed") + .expect("project missing"); + project.request_json = json!({ + "report": { + "deployment_hash": "deploy-hash-upload" + } + }); + + let device_api = create_test_project_app(&app.db_pool, project_id, "device-api").await; + let mut upload = create_test_project_app(&app.db_pool, project_id, "upload").await; + upload.environment = Some(json!({ + "S3_BUCKET": "plain-value" + })); + let upload = db::project_app::update(&app.db_pool, &upload) + .await + .expect("upload app update failed"); + + let vault_path = format!( + "agent/users/{}/projects/{}/apps/{}/secrets/S3_BUCKET", + common::USER_A_ID, + project_id, + upload.code + ); + db::remote_secret::upsert_service_secret( + &app.db_pool, + common::USER_A_ID, + project_id, + &upload.code, + "S3_BUCKET", + &vault_path, + common::USER_A_ID, + "synced", + ) + .await + .expect("service secret metadata insert failed"); + + Mock::given(method("GET")) + .and(path_regex(format!(r"/v1/{}", vault_path))) + .respond_with(ResponseTemplate::new(200).set_body_json(json!({ + "data": { + "data": { + "value": "superbucket" + } + } + }))) + .mount(&app.vault_server) + .await; + + let mut configuration = get_configuration().expect("Failed to get configuration"); + configuration.vault.address = app.vault_server.uri(); + configuration.vault.token = "test-vault-token".to_string(); + configuration.vault.api_prefix = "v1".to_string(); + configuration.vault.agent_path_prefix = "agent".to_string(); + let vault_service = + VaultService::from_settings(&configuration.vault).expect("failed to build vault service"); + let renderer = ConfigRenderer::with_vault(vault_service).expect("renderer init failed"); + + let bundle = renderer + .render_bundle( + &app.db_pool, + &project, + 
&[device_api.clone(), upload.clone()], + "deploy-hash-upload", + ) + .await + .expect("render bundle failed"); + + let upload_env = &bundle.app_configs.get("upload").unwrap().content; + let device_api_env = &bundle.app_configs.get("device-api").unwrap().content; + + assert!(upload_env.contains("S3_BUCKET=superbucket")); + assert!(!upload_env.contains("S3_BUCKET=plain-value")); + assert!(!device_api_env.contains("S3_BUCKET=")); +} + #[tokio::test] async fn test_get_env_vars_includes_remote_secret_placeholders() { let Some(app) = common::spawn_app_with_vault().await else { From bf0b203c57726af3ecaa21a1e140d0de4f43943c Mon Sep 17 00:00:00 2001 From: Vasili Pascal Date: Mon, 11 May 2026 16:32:16 +0300 Subject: [PATCH 2/3] store ip --- src/console/commands/cli/deploy.rs | 29 ++++++ src/console/commands/mq/listener.rs | 133 ++++++++++++++++++++-------- src/helpers/ip.rs | 33 +++++++ src/helpers/mod.rs | 1 + 4 files changed, 161 insertions(+), 35 deletions(-) create mode 100644 src/helpers/ip.rs diff --git a/src/console/commands/cli/deploy.rs b/src/console/commands/cli/deploy.rs index d8823ddd..4b152421 100644 --- a/src/console/commands/cli/deploy.rs +++ b/src/console/commands/cli/deploy.rs @@ -24,6 +24,7 @@ use crate::cli::install_runner::{ use crate::cli::progress; use crate::cli::stacker_client::{self, StackerClient}; use crate::console::commands::CallableTrait; +use crate::helpers::ip::extract_ipv4_from_text; use crate::helpers::ssh_client; /// Default config filename. 
@@ -2697,10 +2698,16 @@ fn fetch_server_for_project( let deploy_poll = Duration::from_secs(10); let deploy_timeout = Duration::from_secs(600); let deploy_start = std::time::Instant::now(); + let mut fallback_server_ip: Option<String> = None; loop { match client.get_deployment_status_by_project(project_id).await { Ok(Some(info)) if is_terminal(&info.status) => { + fallback_server_ip = fallback_server_ip.or_else(|| { + info.status_message + .as_deref() + .and_then(extract_ipv4_from_text) + }); if info.status != "completed" { eprintln!( " Deployment #{} finished with status '{}' — server IP may not be available.", @@ -2710,6 +2717,11 @@ fn fetch_server_for_project( break; } Ok(Some(info)) => { + fallback_server_ip = fallback_server_ip.or_else(|| { + info.status_message + .as_deref() + .and_then(extract_ipv4_from_text) + }); if deploy_start.elapsed() > deploy_timeout { eprintln!( " Deployment #{} still '{}' after extended wait — saving what we have.", @@ -2742,6 +2754,10 @@ fn fetch_server_for_project( Some(ref s) if s.srv_ip.is_some() => { return Ok(server); } + Some(mut s) if fallback_server_ip.is_some() => { + s.srv_ip = fallback_server_ip.clone(); + return Ok(Some(s)); + } Some(_) if attempt < ip_retries - 1 => { eprintln!( " Server found but IP not yet assigned (attempt {}/{}), retrying in {}s...", @@ -3256,6 +3272,19 @@ services: assert_eq!(selected.id, 3); } + #[test] + fn extracts_server_ip_from_deployment_status_message() { + assert_eq!( + extract_ipv4_from_text("178.104.222.170: Copy files is done"), + Some("178.104.222.170".to_string()) + ); + assert_eq!(extract_ipv4_from_text("Deployment still in progress"), None); + assert_eq!( + extract_ipv4_from_text("invalid 999.104.222.170: message"), + None + ); + } + #[test] fn test_deploy_local_dry_run_generates_files() { let dir = setup_local_project(&[ diff --git a/src/console/commands/mq/listener.rs b/src/console/commands/mq/listener.rs index 1b46e85b..daa263ca 100644 --- a/src/console/commands/mq/listener.rs +++
b/src/console/commands/mq/listener.rs @@ -1,5 +1,6 @@ use crate::configuration::get_configuration; use crate::db; +use crate::helpers::ip::extract_ipv4_from_text; use crate::helpers::mq_manager::MqManager; use actix_web::rt; use actix_web::web; @@ -69,6 +70,15 @@ impl ListenCommand { } } +fn progress_message_server_ip(msg: &ProgressMessage) -> Option { + msg.srv_ip + .as_deref() + .map(str::trim) + .filter(|ip| !ip.is_empty()) + .map(ToOwned::to_owned) + .or_else(|| extract_ipv4_from_text(&msg.message)) +} + impl crate::console::commands::CallableTrait for ListenCommand { fn call(&self) -> Result<(), Box> { rt::System::new().block_on(async { @@ -85,18 +95,19 @@ impl crate::console::commands::CallableTrait for ListenCommand { println!("Connecting to RabbitMQ..."); // Try to establish connection with retry - let mq_manager = match Self::connect_with_retry(&settings.amqp.connection_string()).await { - Ok(m) => m, - Err(e) => { - eprintln!("Failed to connect to RabbitMQ after retries: {}", e); - sleep(Duration::from_secs(5)).await; - continue; - } - }; + let mq_manager = + match Self::connect_with_retry(&settings.amqp.connection_string()).await { + Ok(m) => m, + Err(e) => { + eprintln!("Failed to connect to RabbitMQ after retries: {}", e); + sleep(Duration::from_secs(5)).await; + continue; + } + }; let consumer_channel = match mq_manager .consume("install_progress", queue_name, "install.progress.*.*.*") - .await + .await { Ok(c) => c, Err(e) => { @@ -114,7 +125,7 @@ impl crate::console::commands::CallableTrait for ListenCommand { BasicConsumeOptions::default(), FieldTable::default(), ) - .await + .await { Ok(c) => c, Err(e) => { @@ -171,24 +182,37 @@ impl crate::console::commands::CallableTrait for ListenCommand { msg.status.clone() }; // Try to find deployment by deploy_id or deployment_hash - let deployment_result = if let Some(ref deploy_id_str) = msg.deploy_id { + let deployment_result = if let Some(ref deploy_id_str) = + msg.deploy_id + { // Try deploy_id first 
(numeric ID) if let Ok(id) = deploy_id_str.parse::() { deployment::fetch(db_pool.get_ref(), id).await } else if let Some(ref hash) = msg.deployment_hash { // deploy_id might be the hash string - deployment::fetch_by_deployment_hash(db_pool.get_ref(), hash).await + deployment::fetch_by_deployment_hash( + db_pool.get_ref(), + hash, + ) + .await } else { // Try deploy_id as hash - deployment::fetch_by_deployment_hash(db_pool.get_ref(), deploy_id_str).await + deployment::fetch_by_deployment_hash( + db_pool.get_ref(), + deploy_id_str, + ) + .await } } else if let Some(ref hash) = msg.deployment_hash { // Use deployment_hash - deployment::fetch_by_deployment_hash(db_pool.get_ref(), hash).await + deployment::fetch_by_deployment_hash(db_pool.get_ref(), hash) + .await } else { // No identifier available println!("No deploy_id or deployment_hash in message"); - if let Err(ack_err) = delivery.ack(BasicAckOptions::default()).await { + if let Err(ack_err) = + delivery.ack(BasicAckOptions::default()).await + { eprintln!("Failed to ack: {}", ack_err); } continue; @@ -220,25 +244,23 @@ impl crate::console::commands::CallableTrait for ListenCommand { // but the IP is already known after Terraform succeeds // even when the subsequent Ansible step fails (status // "paused" / "failed"). 
- if let Some(ref ip) = msg.srv_ip { - if !ip.is_empty() { - match db::server::update_srv_ip( - db_pool.get_ref(), - row.project_id, - ip, - msg.ssh_port, - ) - .await - { - Ok(s) => println!( - "Updated server {} srv_ip={} for project {}", - s.id, ip, row.project_id - ), - Err(e) => eprintln!( - "Failed to update srv_ip for project {}: {}", - row.project_id, e - ), - } + if let Some(ip) = progress_message_server_ip(&msg) { + match db::server::update_srv_ip( + db_pool.get_ref(), + row.project_id, + &ip, + msg.ssh_port, + ) + .await + { + Ok(s) => println!( + "Updated server {} srv_ip={} for project {}", + s.id, ip, row.project_id + ), + Err(e) => eprintln!( + "Failed to update srv_ip for project {}: {}", + row.project_id, e + ), } } @@ -246,7 +268,9 @@ impl crate::console::commands::CallableTrait for ListenCommand { "Deployment {} updated with status {}", &row.id, &row.status ); - if let Err(e) = deployment::update(db_pool.get_ref(), row).await { + if let Err(e) = + deployment::update(db_pool.get_ref(), row).await + { eprintln!("Failed to update deployment: {}", e); } } @@ -299,3 +323,42 @@ impl ListenCommand { Err(format!("Failed to connect after {} attempts", max_retries)) } } + +#[cfg(test)] +mod tests { + use super::*; + + fn progress_message(message: &str, srv_ip: Option<&str>) -> ProgressMessage { + ProgressMessage { + id: "1".to_string(), + deploy_id: Some("174".to_string()), + deployment_hash: Some("hash".to_string()), + alert: 0, + message: message.to_string(), + status: "paused".to_string(), + progress: "90".to_string(), + srv_ip: srv_ip.map(ToOwned::to_owned), + ssh_port: Some(22), + } + } + + #[test] + fn progress_message_server_ip_prefers_structured_srv_ip() { + let msg = progress_message("178.104.222.170: Copy files is done", Some("203.0.113.42")); + + assert_eq!( + progress_message_server_ip(&msg), + Some("203.0.113.42".to_string()) + ); + } + + #[test] + fn progress_message_server_ip_falls_back_to_message_prefix() { + let msg = 
progress_message("178.104.222.170: Copy files is done", None); + + assert_eq!( + progress_message_server_ip(&msg), + Some("178.104.222.170".to_string()) + ); + } +} diff --git a/src/helpers/ip.rs b/src/helpers/ip.rs new file mode 100644 index 00000000..6b66c7b4 --- /dev/null +++ b/src/helpers/ip.rs @@ -0,0 +1,33 @@ +pub(crate) fn extract_ipv4_from_text(text: &str) -> Option<String> { + text.split(|c: char| !(c.is_ascii_digit() || c == '.')) + .find_map(|candidate| { + let trimmed = candidate.trim_matches('.'); + if trimmed.parse::<std::net::Ipv4Addr>().is_ok() { + Some(trimmed.to_string()) + } else { + None + } + }) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn extracts_ipv4_from_status_message_prefix() { + assert_eq!( + extract_ipv4_from_text("178.104.222.170: Copy files is done"), + Some("178.104.222.170".to_string()) + ); + } + + #[test] + fn ignores_text_without_valid_ipv4() { + assert_eq!(extract_ipv4_from_text("Deployment still in progress"), None); + assert_eq!( + extract_ipv4_from_text("invalid 999.104.222.170: message"), + None + ); + } +} diff --git a/src/helpers/mod.rs b/src/helpers/mod.rs index 3932137b..da6ccadc 100644 --- a/src/helpers/mod.rs +++ b/src/helpers/mod.rs @@ -19,6 +19,7 @@ pub use vault::*; pub(crate) mod cloud; pub(crate) mod compressor; pub mod dockerhub; +pub(crate) mod ip; pub use dockerhub::*; From e5343f759dac416f534908a9e573150a6873dae0 Mon Sep 17 00:00:00 2001 From: Vasili Pascal Date: Mon, 11 May 2026 17:02:27 +0300 Subject: [PATCH 3/3] fix(project-apps): add delete route authorization Add the saved project-app delete endpoint, register it, and add Casbin rules for both project API prefixes.
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- CHANGELOG.md | 1 + README.md | 1 + ...7120017_casbin_project_app_delete.down.sql | 10 +++ ...717120017_casbin_project_app_delete.up.sql | 7 +++ src/routes/project/app.rs | 63 +++++++++++++++++++ src/startup.rs | 1 + 6 files changed, 83 insertions(+) create mode 100644 migrations/20260717120017_casbin_project_app_delete.down.sql create mode 100644 migrations/20260717120017_casbin_project_app_delete.up.sql diff --git a/CHANGELOG.md b/CHANGELOG.md index f61a36f5..ef285305 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -470,6 +470,7 @@ stacker init --with-ai # no Ollama running → template fallback #### REST API Routes (`/project/{id}/apps/*`) - `GET /project/{id}/apps` - List all apps for a project - `GET /project/{id}/apps/{code}` - Get single app details +- `DELETE /project/{id}/apps/{code}` - Delete a saved app from a project - `GET /project/{id}/apps/{code}/config` - Get full app configuration - `GET /project/{id}/apps/{code}/env` - Get environment variables (sensitive values redacted) - `PUT /project/{id}/apps/{code}/env` - Update environment variables diff --git a/README.md b/README.md index c63fcef4..ca16a44b 100644 --- a/README.md +++ b/README.md @@ -290,6 +290,7 @@ cargo run --bin server # http://127.0.0.1:8000 | `POST /project` | Create a project from a stack definition | | `POST /{id}/deploy/{cloud_id}` | Deploy to a cloud provider | | `GET /project/{id}/apps` | List apps in a project | +| `DELETE /project/{id}/apps/{code}` | Remove an app from a project | | `PUT /project/{id}/apps/{code}/env` | Update app environment variables | | `GET /project/{id}/apps/{code}/secrets` | List service-scoped secret metadata for an app | | `PUT /project/{id}/apps/{code}/secrets/{name}` | Create or update a Vault-backed service secret | diff --git a/migrations/20260717120017_casbin_project_app_delete.down.sql b/migrations/20260717120017_casbin_project_app_delete.down.sql new file mode 100644 index 
00000000..d867bdda --- /dev/null +++ b/migrations/20260717120017_casbin_project_app_delete.down.sql @@ -0,0 +1,10 @@ +-- Remove Casbin rules for deleting project apps + +DELETE FROM public.casbin_rule +WHERE ptype = 'p' + AND v0 = 'group_user' + AND v1 IN ( + '/project/:id/apps/:code', + '/api/v1/project/:id/apps/:code' + ) + AND v2 = 'DELETE'; diff --git a/migrations/20260717120017_casbin_project_app_delete.up.sql b/migrations/20260717120017_casbin_project_app_delete.up.sql new file mode 100644 index 00000000..f05fe3a9 --- /dev/null +++ b/migrations/20260717120017_casbin_project_app_delete.up.sql @@ -0,0 +1,7 @@ +-- Add Casbin rules for deleting project apps + +INSERT INTO public.casbin_rule (ptype, v0, v1, v2, v3, v4, v5) +VALUES + ('p', 'group_user', '/project/:id/apps/:code', 'DELETE', '', '', ''), + ('p', 'group_user', '/api/v1/project/:id/apps/:code', 'DELETE', '', '', '') +ON CONFLICT DO NOTHING; diff --git a/src/routes/project/app.rs b/src/routes/project/app.rs index 2f74cfa8..3d75b0d0 100644 --- a/src/routes/project/app.rs +++ b/src/routes/project/app.rs @@ -4,6 +4,7 @@ //! - POST /project/{project_id}/apps - Create or update an app in a project //! - GET /project/{project_id}/apps - List all apps in a project //! - GET /project/{project_id}/apps/{code} - Get a specific app +//! - DELETE /project/{project_id}/apps/{code} - Delete a specific app //! - GET /project/{project_id}/apps/{code}/config - Get app configuration //! - PUT /project/{project_id}/apps/{code}/config - Update app configuration //! 
- GET /project/{project_id}/apps/{code}/env - Get environment variables @@ -140,6 +141,11 @@ pub struct UpdateDomainRequest { pub ssl_enabled: bool, } +#[derive(Debug, Deserialize)] +pub struct DeleteAppQuery { + pub deployment_hash: Option, +} + /// Request to create or update an app in a project #[derive(Debug, Deserialize)] pub struct CreateAppRequest { @@ -340,6 +346,63 @@ pub async fn get_app( Ok(JsonResponse::build().set_item(Some(hydrated)).ok("OK")) } +/// Delete a specific app by code +#[tracing::instrument(name = "Delete project app", skip_all)] +#[delete("/{project_id}/apps/{code}")] +pub async fn delete_app( + user: web::ReqData>, + path: web::Path<(i32, String)>, + query: web::Query, + pg_pool: web::Data, +) -> Result { + let (project_id, code) = path.into_inner(); + + let project = db::project::fetch(pg_pool.get_ref(), project_id) + .await + .map_err(|e| JsonResponse::internal_server_error(e))? + .ok_or_else(|| JsonResponse::not_found("Project not found"))?; + + if project.user_id != user.id { + return Err(JsonResponse::not_found("Project not found")); + } + + let app = db::project_app::fetch_by_project_and_code(pg_pool.get_ref(), project_id, &code) + .await + .map_err(|e| JsonResponse::internal_server_error(e))? 
+ .ok_or_else(|| JsonResponse::not_found("App not found"))?; + + let app_service = if let Some(deployment_hash) = query.deployment_hash.as_deref() { + let service = ProjectAppService::new(Arc::new(pg_pool.get_ref().clone())) + .map_err(|e| JsonResponse::<()>::build().internal_server_error(e))?; + (service, deployment_hash.to_string()) + } else { + let service = ProjectAppService::new_without_sync(Arc::new(pg_pool.get_ref().clone())) + .map_err(|e| JsonResponse::<()>::build().internal_server_error(e))?; + (service, String::new()) + }; + + let deleted = app_service + .0 + .delete(app.id, &app_service.1) + .await + .map_err(|e| JsonResponse::<()>::build().internal_server_error(e.to_string()))?; + + tracing::info!( + user_id = %user.id, + project_id = project_id, + app_code = %code, + deleted = deleted, + "Deleted project app" + ); + + Ok(JsonResponse::build() + .set_item(Some(json!({ + "success": deleted, + "message": if deleted { "App removed from project" } else { "App was not removed" } + }))) + .ok("OK")) +} + /// Get app configuration (env vars, ports, domain, etc.) #[tracing::instrument(name = "Get app config", skip_all)] #[get("/{project_id}/apps/{code}/config")] diff --git a/src/startup.rs b/src/startup.rs index 213fab84..2ec3bda6 100644 --- a/src/startup.rs +++ b/src/startup.rs @@ -34,6 +34,7 @@ fn project_scope(path: &str) -> actix_web::Scope { .service(crate::routes::project::app::list_apps) .service(crate::routes::project::app::create_app) .service(crate::routes::project::app::get_app) + .service(crate::routes::project::app::delete_app) .service(crate::routes::project::app::get_app_config) .service(crate::routes::project::app::get_env_vars) .service(crate::routes::project::app::update_env_vars)