diff --git a/cmd/settings/list.go b/cmd/settings/list.go index 6072e3a..da8c341 100644 --- a/cmd/settings/list.go +++ b/cmd/settings/list.go @@ -5,6 +5,7 @@ import ( "context" "errors" "fmt" + "regexp" "slices" "sort" "strconv" @@ -29,6 +30,7 @@ const ( isMultiPartArchive = false expectedListJobPodCount = 1 expectedListJobContainerCount = 1 + backupFileNameRegex = `^sts-backup-.*\.sty$` ) // Shared flag for --from-old-pvc, used by both list and restore commands @@ -182,7 +184,14 @@ func getBackupListFromS3(appCtx *app.Context) ([]BackupFileInfo, error) { } // Filter objects based on whether the archive is split or not - filteredObjects := s3client.FilterBackupObjects(result.Contents, isMultiPartArchive) + filteredObjects := s3client.FilterMultipartBackupObjects(result.Contents, isMultiPartArchive) + + // Filter to only include direct children of the prefix that match the backup filename pattern, + // and strip the prefix from the key + filteredObjects, err = s3client.FilterByPrefixAndRegex(filteredObjects, prefix, backupFileNameRegex) + if err != nil { + return nil, fmt.Errorf("failed to filter objects: %w", err) + } var backups []BackupFileInfo for _, obj := range filteredObjects { @@ -229,7 +238,12 @@ func getBackupListFromLocalBucket(appCtx *app.Context) ([]BackupFileInfo, error) return nil, fmt.Errorf("failed to list objects in local bucket: %w", err) } - filteredObjects := s3client.FilterBackupObjects(result.Contents, isMultiPartArchive) + filteredObjects := s3client.FilterMultipartBackupObjects(result.Contents, isMultiPartArchive) + + filteredObjects, err = s3client.FilterByPrefixAndRegex(filteredObjects, "", backupFileNameRegex) + if err != nil { + return nil, fmt.Errorf("failed to filter objects: %w", err) + } var backups []BackupFileInfo for _, obj := range filteredObjects { @@ -298,6 +312,9 @@ func getBackupListFromPVC(appCtx *app.Context) ([]BackupFileInfo, error) { return nil, fmt.Errorf("failed to parse list job output: %w", err) } + // Filter by 
backup filename pattern + files = filterBackupsByRegex(files, backupFileNameRegex) + return files, nil } @@ -376,3 +393,15 @@ func ParseListJobOutput(input string) ([]BackupFileInfo, error) { return files, nil } + +// filterBackupsByRegex filters BackupFileInfo by matching filename against a regex pattern +func filterBackupsByRegex(backups []BackupFileInfo, pattern string) []BackupFileInfo { + re := regexp.MustCompile(pattern) + var filtered []BackupFileInfo + for _, b := range backups { + if re.MatchString(b.Filename) { + filtered = append(filtered, b) + } + } + return filtered +} diff --git a/cmd/settings/restore.go b/cmd/settings/restore.go index 905f488..5a8ea32 100644 --- a/cmd/settings/restore.go +++ b/cmd/settings/restore.go @@ -3,6 +3,7 @@ package settings import ( "fmt" "strconv" + "strings" "time" "github.com/spf13/cobra" @@ -28,13 +29,16 @@ var ( useLatest bool background bool skipConfirmation bool + skipStackpacks bool ) func restoreCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command { cmd := &cobra.Command{ Use: "restore", Short: "Restore Settings from a backup archive", - Long: `Restore Settings data from a backup archive stored in S3. Can use --latest or --archive to specify which backup to restore.`, + Long: "Restore Settings data from a backup archive stored in S3. Automatically also restores " + + "Stackpacks backup that was made at the same time, it can be skipped with --skip-stackpacks. 
" + + "Can use --latest or --archive to specify which backup to restore.", Run: func(_ *cobra.Command, _ []string) { cmdutils.Run(globalFlags, runRestore, cmdutils.StorageIsNotRequired) }, @@ -45,6 +49,7 @@ func restoreCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command { cmd.Flags().BoolVar(&background, "background", false, "Run restore job in background without waiting for completion") cmd.Flags().BoolVarP(&skipConfirmation, "yes", "y", false, "Skip confirmation prompt") cmd.Flags().BoolVar(&fromPVC, "from-old-pvc", false, "Restore backup from legacy PVC instead of S3") + cmd.Flags().BoolVar(&skipStackpacks, "skip-stackpacks", false, "Skip restoring stackpacks backup") cmd.MarkFlagsMutuallyExclusive("archive", "latest") cmd.MarkFlagsOneRequired("archive", "latest") @@ -192,12 +197,14 @@ func buildEnvVar(extraEnvVar []corev1.EnvVar, config *config.Config) []corev1.En commonVar := []corev1.EnvVar{ {Name: "BACKUP_CONFIGURATION_BUCKET_NAME", Value: config.Settings.Bucket}, {Name: "BACKUP_CONFIGURATION_S3_PREFIX", Value: config.Settings.S3Prefix}, + {Name: "BACKUP_CONFIGURATION_STACKPACKS_S3_PREFIX", Value: config.Settings.StackpacksS3Prefix}, {Name: "MINIO_ENDPOINT", Value: fmt.Sprintf("%s:%d", storageService.Name, storageService.Port)}, - {Name: "STACKSTATE_BASE_URL", Value: config.Settings.Restore.BaseURL}, - {Name: "RECEIVER_BASE_URL", Value: config.Settings.Restore.ReceiverBaseURL}, - {Name: "PLATFORM_VERSION", Value: config.Settings.Restore.PlatformVersion}, + {Name: "STACKSTATE_BASE_URL", Value: config.GetBaseURL()}, + {Name: "RECEIVER_BASE_URL", Value: config.GetReceiverBaseURL()}, + {Name: "PLATFORM_VERSION", Value: config.GetPlatformVersion()}, {Name: "ZOOKEEPER_QUORUM", Value: config.Settings.Restore.ZookeeperQuorum}, {Name: "BACKUP_CONFIGURATION_UPLOAD_REMOTE", Value: strconv.FormatBool(config.GlobalBackupEnabled())}, + {Name: "SKIP_STACKPACKS", Value: strconv.FormatBool(skipStackpacks)}, } if fromPVC { // Force PVC mode in the shell script, suppress 
local bucket @@ -205,13 +212,16 @@ func buildEnvVar(extraEnvVar []corev1.EnvVar, config *config.Config) []corev1.En } else if config.Settings.LocalBucket != "" { commonVar = append(commonVar, corev1.EnvVar{Name: "BACKUP_CONFIGURATION_LOCAL_BUCKET", Value: config.Settings.LocalBucket}) } + if config.Stackpacks != nil { + commonVar = append(commonVar, corev1.EnvVar{Name: "CONFIG_FORCE_stackstate_stackPacks_localStackPacksUri", Value: config.Stackpacks.LocalStackPacksURI}) + } commonVar = append(commonVar, extraEnvVar...) return commonVar } // buildVolumeMounts constructs volume mounts for the restore job container func buildVolumeMounts(config *config.Config) []corev1.VolumeMount { - mounts := []corev1.VolumeMount{ + volumeMounts := []corev1.VolumeMount{ {Name: "backup-log", MountPath: "/opt/docker/etc_log"}, {Name: "backup-restore-scripts", MountPath: "/backup-restore-scripts"}, {Name: "minio-keys", MountPath: "/aws-keys"}, @@ -219,9 +229,17 @@ func buildVolumeMounts(config *config.Config) []corev1.VolumeMount { } // Mount PVC in legacy mode or when --from-old-pvc is set if config.IsLegacyMode() || fromPVC { - mounts = append(mounts, corev1.VolumeMount{Name: "settings-backup-data", MountPath: "/settings-backup-data"}) + volumeMounts = append(volumeMounts, corev1.VolumeMount{Name: "settings-backup-data", MountPath: "/settings-backup-data"}) + } + + if config.Stackpacks != nil && config.Stackpacks.PVC != "" && strings.HasPrefix(config.Stackpacks.LocalStackPacksURI, "file://") { + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: "stackpacks-local", + MountPath: strings.TrimPrefix(config.Stackpacks.LocalStackPacksURI, "file://"), + }) } - return mounts + + return volumeMounts } // buildVolumes constructs volumes for the restore job pod @@ -274,6 +292,17 @@ func buildVolumes(config *config.Config, defaultMode int32) []corev1.Volume { }, }) } + if config.Stackpacks != nil && config.Stackpacks.PVC != "" { + volumes = append(volumes, corev1.Volume{ + Name: 
"stackpacks-local", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: config.Stackpacks.PVC, + }, + }, + }) + } + return volumes } diff --git a/cmd/settings/restore_test.go b/cmd/settings/restore_test.go new file mode 100644 index 0000000..30233d1 --- /dev/null +++ b/cmd/settings/restore_test.go @@ -0,0 +1,64 @@ +package settings + +import ( + "testing" + + "github.com/stackvista/stackstate-backup-cli/internal/foundation/config" + "github.com/stretchr/testify/assert" +) + +func TestBuildVolumeMounts_StackpacksLocalFileURI(t *testing.T) { + tests := []struct { + name string + stackpacks *config.StackpacksConfig + expectStackpacks bool + expectedMountPath string + }{ + { + name: "no stackpacks config", + stackpacks: nil, + expectStackpacks: false, + }, + { + name: "stackpacks with no PVC", + stackpacks: &config.StackpacksConfig{LocalStackPacksURI: "file:///var/stackpacks_local"}, + expectStackpacks: false, + }, + { + name: "stackpacks with file:// URI and PVC", + stackpacks: &config.StackpacksConfig{LocalStackPacksURI: "file:///var/stackpacks_local", PVC: "stackpacks-pvc"}, + expectStackpacks: true, + expectedMountPath: "/var/stackpacks_local", + }, + { + name: "stackpacks with non-file URI and PVC", + stackpacks: &config.StackpacksConfig{LocalStackPacksURI: "s3://my-bucket/stackpacks", PVC: "stackpacks-pvc"}, + expectStackpacks: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Ensure the package-level fromPVC flag does not interfere + fromPVC = false + + cfg := &config.Config{Stackpacks: tt.stackpacks} + mounts := buildVolumeMounts(cfg) + + var stackpacksMount *struct{ Name, MountPath string } + for _, m := range mounts { + if m.Name == "stackpacks-local" { + stackpacksMount = &struct{ Name, MountPath string }{m.Name, m.MountPath} + break + } + } + + if tt.expectStackpacks { + assert.NotNil(t, stackpacksMount, "expected stackpacks-local volume mount to be 
present") + assert.Equal(t, tt.expectedMountPath, stackpacksMount.MountPath) + } else { + assert.Nil(t, stackpacksMount, "expected stackpacks-local volume mount to be absent") + } + }) + } +} diff --git a/cmd/stackgraph/list.go b/cmd/stackgraph/list.go index 1d169d9..c9b86b7 100644 --- a/cmd/stackgraph/list.go +++ b/cmd/stackgraph/list.go @@ -17,6 +17,10 @@ import ( "github.com/stackvista/stackstate-backup-cli/internal/orchestration/portforward" ) +const ( + backupFileNameRegex = `^sts-backup-.*\.graph$` +) + func listCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command { return &cobra.Command{ Use: "list", @@ -63,7 +67,14 @@ func runList(appCtx *app.Context) error { } // Filter objects based on whether the archive is split or not - filteredObjects := s3client.FilterBackupObjects(result.Contents, multipartArchive) + filteredObjects := s3client.FilterMultipartBackupObjects(result.Contents, multipartArchive) + + // Filter to only include direct children of the prefix that match the backup filename pattern, + // and strip the prefix from the key + filteredObjects, err = s3client.FilterByPrefixAndRegex(filteredObjects, prefix, backupFileNameRegex) + if err != nil { + return fmt.Errorf("failed to filter objects: %w", err) + } // Sort by LastModified time (most recent first) sort.Slice(filteredObjects, func(i, j int) bool { diff --git a/cmd/stackgraph/restore.go b/cmd/stackgraph/restore.go index a56fc82..ee620da 100644 --- a/cmd/stackgraph/restore.go +++ b/cmd/stackgraph/restore.go @@ -35,13 +35,16 @@ var ( useLatest bool background bool skipConfirmation bool + skipStackpacks bool ) func restoreCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command { cmd := &cobra.Command{ Use: "restore", Short: "Restore Stackgraph from a backup archive", - Long: `Restore Stackgraph data from a backup archive stored in S3. Can use --latest or --archive to specify which backup to restore.`, + Long: "Restore Stackgraph data from a backup archive stored in S3. 
Automatically also restores " + + "Stackpacks backup that was made at the same time, it can be skipped with --skip-stackpacks. " + + "Can use --latest or --archive to specify which backup to restore.", Run: func(_ *cobra.Command, _ []string) { cmdutils.Run(globalFlags, runRestore, cmdutils.StorageIsRequired) }, @@ -51,6 +54,7 @@ func restoreCmd(globalFlags *config.CLIGlobalFlags) *cobra.Command { cmd.Flags().BoolVar(&useLatest, "latest", false, "Restore from the most recent backup") cmd.Flags().BoolVar(&background, "background", false, "Run restore job in background without waiting for completion") cmd.Flags().BoolVarP(&skipConfirmation, "yes", "y", false, "Skip confirmation prompt") + cmd.Flags().BoolVar(&skipStackpacks, "skip-stackpacks", false, "Skip restoring stackpacks backup") cmd.MarkFlagsMutuallyExclusive("archive", "latest") cmd.MarkFlagsOneRequired("archive", "latest") @@ -178,7 +182,14 @@ func getLatestBackup(k8sClient *k8s.Client, namespace string, config *config.Con } // Filter objects based on whether the archive is split or not - filteredObjects := s3client.FilterBackupObjects(result.Contents, multipartArchive) + filteredObjects := s3client.FilterMultipartBackupObjects(result.Contents, multipartArchive) + + // Filter to only include direct children of the prefix that match the backup filename pattern, + // and strip the prefix from the key + filteredObjects, err = s3client.FilterByPrefixAndRegex(filteredObjects, prefix, backupFileNameRegex) + if err != nil { + return "", fmt.Errorf("failed to filter objects: %w", err) + } if len(filteredObjects) == 0 { return "", fmt.Errorf("no backups found in bucket %s", bucket) @@ -188,8 +199,7 @@ func getLatestBackup(k8sClient *k8s.Client, namespace string, config *config.Con sort.Slice(filteredObjects, func(i, j int) bool { return filteredObjects[i].LastModified.After(filteredObjects[j].LastModified) }) - latestBackup := strings.TrimPrefix(filteredObjects[0].Key, prefix) - return latestBackup, nil + return 
filteredObjects[0].Key, nil } // buildPVCSpec builds a PVCSpec from configuration @@ -263,25 +273,43 @@ func createRestoreJob(k8sClient *k8s.Client, namespace, jobName, backupFile stri // buildRestoreEnvVars constructs environment variables for the restore job func buildRestoreEnvVars(backupFile string, config *config.Config) []corev1.EnvVar { storageService := config.GetStorageService() - return []corev1.EnvVar{ + env := []corev1.EnvVar{ {Name: "BACKUP_FILE", Value: backupFile}, {Name: "FORCE_DELETE", Value: purgeStackgraphDataFlag}, {Name: "BACKUP_STACKGRAPH_BUCKET_NAME", Value: config.Stackgraph.Bucket}, {Name: "BACKUP_STACKGRAPH_S3_PREFIX", Value: config.Stackgraph.S3Prefix}, + {Name: "BACKUP_STACKGRAPH_STACKPACKS_S3_PREFIX", Value: config.Stackgraph.StackpacksS3Prefix}, {Name: "BACKUP_STACKGRAPH_MULTIPART_ARCHIVE", Value: strconv.FormatBool(config.Stackgraph.MultipartArchive)}, {Name: "MINIO_ENDPOINT", Value: fmt.Sprintf("%s:%d", storageService.Name, storageService.Port)}, + {Name: "STACKSTATE_BASE_URL", Value: config.GetBaseURL()}, + {Name: "RECEIVER_BASE_URL", Value: config.GetReceiverBaseURL()}, + {Name: "PLATFORM_VERSION", Value: config.GetPlatformVersion()}, {Name: "ZOOKEEPER_QUORUM", Value: config.Stackgraph.Restore.ZookeeperQuorum}, + {Name: "SKIP_STACKPACKS", Value: strconv.FormatBool(skipStackpacks)}, + } + if config.Stackpacks != nil { + env = append(env, corev1.EnvVar{Name: "CONFIG_FORCE_stackstate_stackPacks_localStackPacksUri", Value: config.Stackpacks.LocalStackPacksURI}) } + return env } // buildRestoreVolumeMounts constructs volume mounts for the restore job container -func buildRestoreVolumeMounts() []corev1.VolumeMount { - return []corev1.VolumeMount{ +func buildRestoreVolumeMounts(config *config.Config) []corev1.VolumeMount { + volumeMounts := []corev1.VolumeMount{ {Name: "backup-log", MountPath: "/opt/docker/etc_log"}, {Name: "backup-restore-scripts", MountPath: "/backup-restore-scripts"}, {Name: "minio-keys", MountPath: "/aws-keys"}, 
{Name: "tmp-data", MountPath: "/tmp-data"}, } + + if config.Stackpacks != nil && config.Stackpacks.PVC != "" && strings.HasPrefix(config.Stackpacks.LocalStackPacksURI, "file://") { + volumeMounts = append(volumeMounts, corev1.VolumeMount{ + Name: "stackpacks-local", + MountPath: strings.TrimPrefix(config.Stackpacks.LocalStackPacksURI, "file://"), + }) + } + + return volumeMounts } // buildRestoreInitContainers constructs init containers for the restore job @@ -304,7 +332,7 @@ func buildRestoreInitContainers(config *config.Config) []corev1.Container { // buildRestoreVolumes constructs volumes for the restore job pod func buildRestoreVolumes(jobName string, config *config.Config, defaultMode int32) []corev1.Volume { - return []corev1.Volume{ + volumes := []corev1.Volume{ { Name: "backup-log", VolumeSource: corev1.VolumeSource{ @@ -343,6 +371,18 @@ func buildRestoreVolumes(jobName string, config *config.Config, defaultMode int3 }, }, } + if config.Stackpacks != nil && config.Stackpacks.PVC != "" { + volumes = append(volumes, corev1.Volume{ + Name: "stackpacks-local", + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: config.Stackpacks.PVC, + }, + }, + }) + } + + return volumes } // buildRestoreContainers constructs containers for the restore job @@ -356,7 +396,7 @@ func buildRestoreContainers(backupFile string, config *config.Config) []corev1.C Command: []string{"/backup-restore-scripts/restore-stackgraph-backup.sh"}, Env: buildRestoreEnvVars(backupFile, config), Resources: k8s.ConvertResources(config.Stackgraph.Restore.Job.Resources), - VolumeMounts: buildRestoreVolumeMounts(), + VolumeMounts: buildRestoreVolumeMounts(config), }, } } diff --git a/cmd/stackgraph/restore_test.go b/cmd/stackgraph/restore_test.go new file mode 100644 index 0000000..77de1a1 --- /dev/null +++ b/cmd/stackgraph/restore_test.go @@ -0,0 +1,61 @@ +package stackgraph + +import ( + "testing" + + 
"github.com/stackvista/stackstate-backup-cli/internal/foundation/config" + "github.com/stretchr/testify/assert" +) + +func TestBuildRestoreVolumeMounts_StackpacksLocalFileURI(t *testing.T) { + tests := []struct { + name string + stackpacks *config.StackpacksConfig + expectStackpacks bool + expectedMountPath string + }{ + { + name: "no stackpacks config", + stackpacks: nil, + expectStackpacks: false, + }, + { + name: "stackpacks with no PVC", + stackpacks: &config.StackpacksConfig{LocalStackPacksURI: "file:///var/stackpacks_local"}, + expectStackpacks: false, + }, + { + name: "stackpacks with file:// URI and PVC", + stackpacks: &config.StackpacksConfig{LocalStackPacksURI: "file:///var/stackpacks_local", PVC: "stackpacks-pvc"}, + expectStackpacks: true, + expectedMountPath: "/var/stackpacks_local", + }, + { + name: "stackpacks with non-file URI and PVC", + stackpacks: &config.StackpacksConfig{LocalStackPacksURI: "s3://my-bucket/stackpacks", PVC: "stackpacks-pvc"}, + expectStackpacks: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg := &config.Config{Stackpacks: tt.stackpacks} + mounts := buildRestoreVolumeMounts(cfg) + + var stackpacksMount *struct{ Name, MountPath string } + for _, m := range mounts { + if m.Name == "stackpacks-local" { + stackpacksMount = &struct{ Name, MountPath string }{m.Name, m.MountPath} + break + } + } + + if tt.expectStackpacks { + assert.NotNil(t, stackpacksMount, "expected stackpacks-local volume mount to be present") + assert.Equal(t, tt.expectedMountPath, stackpacksMount.MountPath) + } else { + assert.Nil(t, stackpacksMount, "expected stackpacks-local volume mount to be absent") + } + }) + } +} diff --git a/internal/clients/s3/filter.go b/internal/clients/s3/filter.go index 5787352..f44ceb7 100644 --- a/internal/clients/s3/filter.go +++ b/internal/clients/s3/filter.go @@ -1,6 +1,8 @@ package s3 import ( + "fmt" + "regexp" "strings" "time" @@ -19,10 +21,10 @@ type Object struct { Size int64 } -// 
FilterBackupObjects filters S3 objects based on whether the archive is split or not +// FilterMultipartBackupObjects filters S3 objects based on whether the archive is split or not // If it is not multipartArchive, it filters out multipart archives (files ending with .digits) // Otherwise, it groups multipart archives by base name and sums their sizes -func FilterBackupObjects(objects []s3types.Object, multipartArchive bool) []Object { +func FilterMultipartBackupObjects(objects []s3types.Object, multipartArchive bool) []Object { if !multipartArchive { return filterNonMultipart(objects) } @@ -141,6 +143,49 @@ func getBaseName(key string) (string, bool) { return key, false } +// FilterByPrefixAndRegex filters objects to only include direct children of the given prefix +// that match the specified regex pattern. It excludes objects in nested subdirectories and +// strips the prefix from the key, returning just the filename portion. +// +// For example, with prefix "backups/" and pattern `^sts-backup-.*\.graph$`: +// - "backups/sts-backup-20240101.graph" -> included, Key becomes "sts-backup-20240101.graph" +// - "backups/other-file.txt" -> excluded (doesn't match pattern) +// - "backups/subdir/sts-backup-20240101.graph" -> excluded (nested) +func FilterByPrefixAndRegex(objects []Object, prefix string, pattern string) ([]Object, error) { + re, err := regexp.Compile(pattern) + if err != nil { + return nil, fmt.Errorf("invalid regex pattern: %w", err) + } + + var filtered []Object + for _, obj := range objects { + // Strip the prefix from the key + relativePath := strings.TrimPrefix(obj.Key, prefix) + + // Skip if the relative path contains a slash (indicating nested directory) + if strings.Contains(relativePath, "/") { + continue + } + + // Skip empty relative paths (the prefix itself) + if relativePath == "" { + continue + } + + // Check if the filename matches the regex pattern + if !re.MatchString(relativePath) { + continue + } + + filtered = append(filtered, Object{ 
+ Key: relativePath, + LastModified: obj.LastModified, + Size: obj.Size, + }) + } + return filtered, nil +} + func FilterByCommonPrefix(objects []s3types.CommonPrefix) []Object { var filteredObjects []Object diff --git a/internal/clients/s3/filter_test.go b/internal/clients/s3/filter_test.go index 2cf9b86..7880c3a 100644 --- a/internal/clients/s3/filter_test.go +++ b/internal/clients/s3/filter_test.go @@ -79,7 +79,7 @@ func TestFilterBackupObjects_SingleFileMode(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result := FilterBackupObjects(tt.objects, false) + result := FilterMultipartBackupObjects(tt.objects, false) assert.Equal(t, tt.expectedCount, len(result)) @@ -158,7 +158,7 @@ func TestFilterBackupObjects_MultipartMode(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result := FilterBackupObjects(tt.objects, tt.multipartArchive) + result := FilterMultipartBackupObjects(tt.objects, tt.multipartArchive) assert.Equal(t, tt.expectedCount, len(result)) @@ -196,14 +196,14 @@ func TestFilterBackupObjects_ObjectMetadata(t *testing.T) { } // Test single file mode - result := FilterBackupObjects(objects, false) + result := FilterMultipartBackupObjects(objects, false) assert.Equal(t, 1, len(result)) assert.Equal(t, "backup-2024-01-01.tar.gz", result[0].Key) assert.Equal(t, int64(1234567890), result[0].Size) assert.Equal(t, now.Unix(), result[0].LastModified.Unix()) // Test multipart mode - should group parts and sum sizes - result = FilterBackupObjects(objects, true) + result = FilterMultipartBackupObjects(objects, true) assert.Equal(t, 2, len(result)) // tar.gz file + grouped multipart // Find the multipart archive result @@ -275,7 +275,7 @@ func TestFilterBackupObjects_EdgeCases(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result := FilterBackupObjects(tt.objects, tt.multipartArchive) + result := FilterMultipartBackupObjects(tt.objects, tt.multipartArchive) 
assert.Equal(t, tt.expectedCount, len(result)) }) } @@ -331,7 +331,7 @@ func TestFilterBackupObjects_RealWorldScenarios(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result := FilterBackupObjects(tt.objects, tt.multipartArchive) + result := FilterMultipartBackupObjects(tt.objects, tt.multipartArchive) assert.Equal(t, tt.expectedCount, len(result), "Scenario: %s", tt.scenario) }) } @@ -348,7 +348,7 @@ func TestFilterBackupObjects_SizeSummation(t *testing.T) { {Key: aws.String("sts-backup-20251029-0924.graph.01"), Size: aws.Int64(6567239)}, } - result := FilterBackupObjects(objects, true) + result := FilterMultipartBackupObjects(objects, true) // Should have 3 grouped archives assert.Equal(t, 3, len(result)) @@ -364,3 +364,186 @@ func TestFilterBackupObjects_SizeSummation(t *testing.T) { assert.Equal(t, int64(109206155), sizeMap["sts-backup-20251029-0300.graph"]) // 104857600 + 4348555 assert.Equal(t, int64(111424839), sizeMap["sts-backup-20251029-0924.graph"]) // 104857600 + 6567239 } + +// TestFilterByPrefixAndRegex tests the combined filtering by prefix and regex pattern +func TestFilterByPrefixAndRegex(t *testing.T) { //nolint:funlen // Table-driven test + now := time.Now() + + tests := []struct { + name string + objects []Object + prefix string + pattern string + expectedKeys []string + expectError bool + }{ + { + name: "filters stackgraph backups with prefix and .graph extension", + objects: []Object{ + {Key: "backups/sts-backup-20240101.graph", Size: 1000, LastModified: now}, + {Key: "backups/sts-backup-20240102.graph", Size: 2000, LastModified: now}, + {Key: "backups/other-file.txt", Size: 500, LastModified: now}, + {Key: "backups/sts-backup-20240103.tar.gz", Size: 3000, LastModified: now}, + }, + prefix: "backups/", + pattern: `^sts-backup-.*\.graph$`, + expectedKeys: []string{"sts-backup-20240101.graph", "sts-backup-20240102.graph"}, + expectError: false, + }, + { + name: "filters settings backups with .sty extension", + 
objects: []Object{ + {Key: "settings/sts-backup-20240101.sty", Size: 1000, LastModified: now}, + {Key: "settings/sts-backup-20240102.sty", Size: 2000, LastModified: now}, + {Key: "settings/other-file.txt", Size: 500, LastModified: now}, + {Key: "settings/sts-backup-20240103.graph", Size: 3000, LastModified: now}, + }, + prefix: "settings/", + pattern: `^sts-backup-.*\.sty$`, + expectedKeys: []string{"sts-backup-20240101.sty", "sts-backup-20240102.sty"}, + expectError: false, + }, + { + name: "excludes nested files even if they match pattern", + objects: []Object{ + {Key: "backups/sts-backup-20240101.graph", Size: 1000, LastModified: now}, + {Key: "backups/old/sts-backup-20240102.graph", Size: 2000, LastModified: now}, + {Key: "backups/archive/2023/sts-backup-20230101.graph", Size: 3000, LastModified: now}, + }, + prefix: "backups/", + pattern: `^sts-backup-.*\.graph$`, + expectedKeys: []string{"sts-backup-20240101.graph"}, + expectError: false, + }, + { + name: "works with empty prefix", + objects: []Object{ + {Key: "sts-backup-20240101.graph", Size: 1000, LastModified: now}, + {Key: "sts-backup-20240102.graph", Size: 2000, LastModified: now}, + {Key: "subdir/sts-backup-20240103.graph", Size: 3000, LastModified: now}, + {Key: "other-file.txt", Size: 500, LastModified: now}, + }, + prefix: "", + pattern: `^sts-backup-.*\.graph$`, + expectedKeys: []string{"sts-backup-20240101.graph", "sts-backup-20240102.graph"}, + expectError: false, + }, + { + name: "returns empty slice when no matches", + objects: []Object{ + {Key: "backups/other-file.txt", Size: 500, LastModified: now}, + {Key: "backups/another-file.log", Size: 100, LastModified: now}, + }, + prefix: "backups/", + pattern: `^sts-backup-.*\.graph$`, + expectedKeys: []string{}, + expectError: false, + }, + { + name: "handles empty object list", + objects: []Object{}, + prefix: "backups/", + pattern: `^sts-backup-.*\.graph$`, + expectedKeys: []string{}, + expectError: false, + }, + { + name: "returns error for 
invalid regex", + objects: []Object{ + {Key: "backups/sts-backup-20240101.graph", Size: 1000, LastModified: now}, + }, + prefix: "backups/", + pattern: `[invalid`, + expectedKeys: nil, + expectError: true, + }, + { + name: "excludes the prefix directory itself", + objects: []Object{ + {Key: "backups/", Size: 0, LastModified: now}, + {Key: "backups/sts-backup-20240101.graph", Size: 1000, LastModified: now}, + }, + prefix: "backups/", + pattern: `^sts-backup-.*\.graph$`, + expectedKeys: []string{"sts-backup-20240101.graph"}, + expectError: false, + }, + { + name: "returns empty when all files are nested", + objects: []Object{ + {Key: "backups/old/sts-backup-20240101.graph", Size: 1000, LastModified: now}, + {Key: "backups/archive/sts-backup-20240102.graph", Size: 2000, LastModified: now}, + }, + prefix: "backups/", + pattern: `^sts-backup-.*\.graph$`, + expectedKeys: []string{}, + expectError: false, + }, + { + name: "excludes stackpacks backups when listing settings local bucket (empty prefix)", + objects: []Object{ + {Key: "sts-backup-20240101.sty", Size: 1000, LastModified: now}, + {Key: "sts-backup-20240101.sty.stackpacks.zip", Size: 500, LastModified: now}, + {Key: "sts-backup-20240102.sty", Size: 2000, LastModified: now}, + {Key: "sts-backup-20240102.sty.stackpacks.zip", Size: 300, LastModified: now}, + }, + prefix: "", + pattern: `^sts-backup-.*\.sty$`, + expectedKeys: []string{"sts-backup-20240101.sty", "sts-backup-20240102.sty"}, + expectError: false, + }, + { + name: "filters with complex regex pattern", + objects: []Object{ + {Key: "backups/sts-backup-20240101-1200.graph", Size: 1000, LastModified: now}, + {Key: "backups/sts-backup-20240102-1300.graph", Size: 2000, LastModified: now}, + {Key: "backups/sts-backup-invalid.graph", Size: 500, LastModified: now}, + {Key: "backups/sts-backup-20240103.graph", Size: 3000, LastModified: now}, + }, + prefix: "backups/", + pattern: `^sts-backup-\d{8}-\d{4}\.graph$`, + expectedKeys: 
[]string{"sts-backup-20240101-1200.graph", "sts-backup-20240102-1300.graph"}, + expectError: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := FilterByPrefixAndRegex(tt.objects, tt.prefix, tt.pattern) + + if tt.expectError { + assert.Error(t, err) + assert.Nil(t, result) + return + } + + assert.NoError(t, err) + + resultKeys := make([]string, len(result)) + for i, obj := range result { + resultKeys[i] = obj.Key + } + + assert.Equal(t, tt.expectedKeys, resultKeys) + }) + } +} + +// TestFilterByPrefixAndRegex_PreservesMetadata tests that object metadata is preserved after filtering +func TestFilterByPrefixAndRegex_PreservesMetadata(t *testing.T) { + now := time.Now() + + objects := []Object{ + {Key: "backups/sts-backup-20240101.graph", Size: 1234567890, LastModified: now}, + {Key: "backups/other-file.txt", Size: 500, LastModified: now.Add(-24 * time.Hour)}, + {Key: "backups/nested/sts-backup-20240102.graph", Size: 999, LastModified: now}, + } + + result, err := FilterByPrefixAndRegex(objects, "backups/", `^sts-backup-.*\.graph$`) + + assert.NoError(t, err) + assert.Equal(t, 1, len(result)) + assert.Equal(t, "sts-backup-20240101.graph", result[0].Key) + assert.Equal(t, int64(1234567890), result[0].Size) + assert.Equal(t, now.Unix(), result[0].LastModified.Unix()) +} diff --git a/internal/foundation/config/config.go b/internal/foundation/config/config.go index e944301..61d652b 100644 --- a/internal/foundation/config/config.go +++ b/internal/foundation/config/config.go @@ -20,12 +20,52 @@ type Config struct { Elasticsearch ElasticsearchConfig `yaml:"elasticsearch" validate:"required"` Minio MinioConfig `yaml:"minio"` Storage StorageConfig `yaml:"storage"` + Stackpacks *StackpacksConfig `yaml:"stackpacks"` Stackgraph StackgraphConfig `yaml:"stackgraph" validate:"required"` Settings SettingsConfig `yaml:"settings" validate:"required"` VictoriaMetrics VictoriaMetricsConfig `yaml:"victoriaMetrics" validate:"required"` 
Clickhouse ClickhouseConfig `yaml:"clickhouse" validate:"required"` } +// StackpacksConfig holds stackpacks-specific configuration shared across restore operations. +// This section is optional. When present, its values are provided to restore pods/scripts. +// BaseURL, ReceiverBaseURL, and PlatformVersion take precedence over the per-restore-type +// fields in SettingsRestoreConfig when set. +type StackpacksConfig struct { + BaseURL string `yaml:"baseUrl"` + ReceiverBaseURL string `yaml:"receiverBaseUrl"` + PlatformVersion string `yaml:"platformVersion"` + LocalStackPacksURI string `yaml:"localStackPacksUri" validate:"required"` + PVC string `yaml:"pvc"` +} + +// GetBaseURL returns the StackState base URL, preferring the top-level stackpacks section +// over the legacy settings.restore.baseUrl for backward compatibility. +func (c *Config) GetBaseURL() string { + if c.Stackpacks != nil && c.Stackpacks.BaseURL != "" { + return c.Stackpacks.BaseURL + } + return c.Settings.Restore.BaseURL +} + +// GetReceiverBaseURL returns the receiver base URL, preferring the top-level stackpacks section +// over the legacy settings.restore.receiverBaseUrl for backward compatibility. +func (c *Config) GetReceiverBaseURL() string { + if c.Stackpacks != nil && c.Stackpacks.ReceiverBaseURL != "" { + return c.Stackpacks.ReceiverBaseURL + } + return c.Settings.Restore.ReceiverBaseURL +} + +// GetPlatformVersion returns the platform version, preferring the top-level stackpacks section +// over the legacy settings.restore.platformVersion for backward compatibility. +func (c *Config) GetPlatformVersion() string { + if c.Stackpacks != nil && c.Stackpacks.PlatformVersion != "" { + return c.Stackpacks.PlatformVersion + } + return c.Settings.Restore.PlatformVersion +} + // IsLegacyMode returns true when the configuration uses the legacy Minio config. // Legacy mode is detected by the presence of the Minio config with a non-empty service name. 
func (c *Config) IsLegacyMode() bool { @@ -135,10 +175,11 @@ type StorageConfig struct { // StackgraphConfig holds Stackgraph backup-specific configuration type StackgraphConfig struct { - Bucket string `yaml:"bucket" validate:"required"` - S3Prefix string `yaml:"s3Prefix"` - MultipartArchive bool `yaml:"multipartArchive" validate:"boolean"` - Restore StackgraphRestoreConfig `yaml:"restore" validate:"required"` + Bucket string `yaml:"bucket" validate:"required"` + S3Prefix string `yaml:"s3Prefix"` + StackpacksS3Prefix string `yaml:"stackpacksS3Prefix"` + MultipartArchive bool `yaml:"multipartArchive" validate:"boolean"` + Restore StackgraphRestoreConfig `yaml:"restore" validate:"required"` } type VictoriaMetricsConfig struct { @@ -169,18 +210,19 @@ type StackgraphRestoreConfig struct { } type SettingsConfig struct { - Bucket string `yaml:"bucket" validate:"required"` - S3Prefix string `yaml:"s3Prefix"` - LocalBucket string `yaml:"localBucket"` - Restore SettingsRestoreConfig `yaml:"restore" validate:"required"` + Bucket string `yaml:"bucket" validate:"required"` + S3Prefix string `yaml:"s3Prefix"` + StackpacksS3Prefix string `yaml:"stackpacksS3Prefix"` + LocalBucket string `yaml:"localBucket"` + Restore SettingsRestoreConfig `yaml:"restore" validate:"required"` } type SettingsRestoreConfig struct { ScaleDownLabelSelector string `yaml:"scaleDownLabelSelector" validate:"required"` LoggingConfigConfigMapName string `yaml:"loggingConfigConfigMap" validate:"required"` - BaseURL string `yaml:"baseUrl" validate:"required"` - ReceiverBaseURL string `yaml:"receiverBaseUrl" validate:"required"` - PlatformVersion string `yaml:"platformVersion" validate:"required"` + BaseURL string `yaml:"baseUrl"` + ReceiverBaseURL string `yaml:"receiverBaseUrl"` + PlatformVersion string `yaml:"platformVersion"` ZookeeperQuorum string `yaml:"zookeeperQuorum" validate:"required"` Job JobConfig `yaml:"job" validate:"required"` PVC string `yaml:"pvc"` // Required only in legacy mode @@ -388,6 
+430,10 @@ func LoadConfig(clientset kubernetes.Interface, namespace, configMapName, secret
 	}
 
 	// Custom validation: either minio or storage must be configured
+	if config.GetBaseURL() == "" || config.GetReceiverBaseURL() == "" || config.GetPlatformVersion() == "" {
+		return nil, fmt.Errorf("configuration validation failed: baseUrl, receiverBaseUrl, and platformVersion must be set in either 'stackpacks' or 'settings.restore'")
+	}
+
 	if config.Minio.Service.Name == "" && config.Storage.Service.Name == "" {
 		return nil, fmt.Errorf("configuration validation failed: either 'minio' or 'storage' must be configured")
 	}
diff --git a/internal/foundation/config/testdata/validConfigMapConfig.yaml b/internal/foundation/config/testdata/validConfigMapConfig.yaml
index 76aca9a..60efe2a 100644
--- a/internal/foundation/config/testdata/validConfigMapConfig.yaml
+++ b/internal/foundation/config/testdata/validConfigMapConfig.yaml
@@ -59,6 +59,13 @@ elasticsearch:
   # Pattern for indices to restore from snapshot (comma-separated glob patterns)
   indicesPattern: sts*,.ds-sts_k8s_logs*
 
+# StackState platform configuration shared across restore operations
+stackpacks:
+  baseUrl: "http://suse-observability-server:7070"
+  receiverBaseUrl: "http://suse-observability-receiver:7077"
+  platformVersion: "5.2.0"
+  localStackPacksUri: "/var/stackpacks_local"
+
 # Minio configuration for S3-compatible storage
 minio:
   enabled: true
@@ -76,6 +83,8 @@ stackgraph:
   bucket: sts-stackgraph-backup
   # S3 prefix path for backups
   s3Prefix: ""
+  # S3 prefix path for stackpacks backups
+  stackpacksS3Prefix: "stackpacks/"
   # Archive split to multiple parts
   multipartArchive: true
   # Restore configuration
@@ -139,12 +148,10 @@ victoriaMetrics:
 settings:
   bucket: sts-settings-backup
   s3Prefix: ""
+  stackpacksS3Prefix: "stackpacks/"
   restore:
     scaleDownLabelSelector: "observability.suse.com/scalable-during-settings-restore=true"
     loggingConfigConfigMap: suse-observability-logging
-    baseUrl: "http://suse-observability-server:7070"
- receiverBaseUrl: "http://suse-observability-receiver:7077" - platformVersion: "5.2.0" zookeeperQuorum: "suse-observability-zookeeper:2181" pvc: "suse-observability-settings-backup-data" job: diff --git a/internal/foundation/config/testdata/validConfigMapOnly.yaml b/internal/foundation/config/testdata/validConfigMapOnly.yaml index dbf8ea1..323993e 100644 --- a/internal/foundation/config/testdata/validConfigMapOnly.yaml +++ b/internal/foundation/config/testdata/validConfigMapOnly.yaml @@ -66,6 +66,12 @@ elasticsearch: # Pattern for indices to restore from snapshot (comma-separated glob patterns) indicesPattern: sts*,.ds-sts_k8s_logs* +stackpacks: + baseUrl: "http://suse-observability-server:7070" + receiverBaseUrl: "http://suse-observability-receiver:7077" + platformVersion: "5.2.0" + localStackPacksUri: "/var/stackpacks_local" + # Minio configuration for S3-compatible storage minio: enabled: true @@ -132,9 +138,6 @@ settings: restore: scaleDownLabelSelector: "observability.suse.com/scalable-during-settings-restore=true" loggingConfigConfigMap: suse-observability-logging - baseUrl: "http://suse-observability-server:7070" - receiverBaseUrl: "http://suse-observability-receiver:7077" - platformVersion: "5.2.0" zookeeperQuorum: "suse-observability-zookeeper:2181" pvc: "suse-observability-settings-backup-data" job: diff --git a/internal/foundation/config/testdata/validStorageConfigMapConfig.yaml b/internal/foundation/config/testdata/validStorageConfigMapConfig.yaml index 5b928f8..227d8a3 100644 --- a/internal/foundation/config/testdata/validStorageConfigMapConfig.yaml +++ b/internal/foundation/config/testdata/validStorageConfigMapConfig.yaml @@ -61,6 +61,12 @@ elasticsearch: # Pattern for indices to restore from snapshot (comma-separated glob patterns) indicesPattern: sts*,.ds-sts_k8s_logs* +stackpacks: + baseUrl: "http://suse-observability-server:7070" + receiverBaseUrl: "http://suse-observability-receiver:7077" + platformVersion: "5.2.0" + localStackPacksUri: 
"/var/stackpacks_local" + # Storage configuration for S3-compatible storage (new mode, replaces Minio) storage: globalBackupEnabled: true @@ -147,9 +153,6 @@ settings: restore: scaleDownLabelSelector: "observability.suse.com/scalable-during-settings-restore=true" loggingConfigConfigMap: suse-observability-logging - baseUrl: "http://suse-observability-server:7070" - receiverBaseUrl: "http://suse-observability-receiver:7077" - platformVersion: "5.2.0" zookeeperQuorum: "suse-observability-zookeeper:2181" job: labels: diff --git a/internal/foundation/config/testdata/validStorageConfigMapOnly.yaml b/internal/foundation/config/testdata/validStorageConfigMapOnly.yaml index 9498e59..d08832d 100644 --- a/internal/foundation/config/testdata/validStorageConfigMapOnly.yaml +++ b/internal/foundation/config/testdata/validStorageConfigMapOnly.yaml @@ -68,6 +68,12 @@ elasticsearch: # Pattern for indices to restore from snapshot (comma-separated glob patterns) indicesPattern: sts*,.ds-sts_k8s_logs* +stackpacks: + baseUrl: "http://suse-observability-server:7070" + receiverBaseUrl: "http://suse-observability-receiver:7077" + platformVersion: "5.2.0" + localStackPacksUri: "/var/stackpacks_local" + # Storage configuration for S3-compatible storage (new mode, replaces Minio) storage: globalBackupEnabled: true @@ -137,9 +143,6 @@ settings: restore: scaleDownLabelSelector: "observability.suse.com/scalable-during-settings-restore=true" loggingConfigConfigMap: suse-observability-logging - baseUrl: "http://suse-observability-server:7070" - receiverBaseUrl: "http://suse-observability-receiver:7077" - platformVersion: "5.2.0" zookeeperQuorum: "suse-observability-zookeeper:2181" job: labels: diff --git a/internal/scripts/scripts/restore-settings-backup.sh b/internal/scripts/scripts/restore-settings-backup.sh index 68b95c4..0b42ffa 100644 --- a/internal/scripts/scripts/restore-settings-backup.sh +++ b/internal/scripts/scripts/restore-settings-backup.sh @@ -15,8 +15,9 @@ download_from_s3() { 
local bucket="$1" local prefix="$2" local dest="$3" - echo "=== Downloading Settings backup \"${BACKUP_FILE}\" from bucket \"${bucket}\"..." - sts-toolbox aws s3 --endpoint "http://${MINIO_ENDPOINT}" --region minio cp "s3://${bucket}/${prefix}${BACKUP_FILE}" "${dest}/${BACKUP_FILE}" + local backup_file="$4" + echo "=== Downloading Settings backup \"${backup_file}\" from bucket \"${bucket}\"..." + sts-toolbox aws s3 --endpoint "http://${MINIO_ENDPOINT}" --region minio cp "s3://${bucket}/${prefix}${backup_file}" "${dest}/${backup_file}" } RESTORE_FILE="" @@ -28,11 +29,11 @@ elif [ -n "${BACKUP_CONFIGURATION_LOCAL_BUCKET:-}" ]; then # New mode: no PVC, download from local bucket first, fall back to remote bucket setup_aws_credentials - if download_from_s3 "${BACKUP_CONFIGURATION_LOCAL_BUCKET}" "" "${TMP_DIR}"; then + if download_from_s3 "${BACKUP_CONFIGURATION_LOCAL_BUCKET}" "" "${TMP_DIR}" "${BACKUP_FILE}"; then RESTORE_FILE="${TMP_DIR}/${BACKUP_FILE}" elif [ "${BACKUP_CONFIGURATION_UPLOAD_REMOTE}" == "true" ]; then echo "=== Backup not found in local bucket, trying remote bucket..." - if download_from_s3 "${BACKUP_CONFIGURATION_BUCKET_NAME}" "${BACKUP_CONFIGURATION_S3_PREFIX}" "${TMP_DIR}"; then + if download_from_s3 "${BACKUP_CONFIGURATION_BUCKET_NAME}" "${BACKUP_CONFIGURATION_S3_PREFIX}" "${TMP_DIR}" "${BACKUP_FILE}"; then RESTORE_FILE="${TMP_DIR}/${BACKUP_FILE}" fi fi @@ -43,7 +44,7 @@ else if [ "$BACKUP_CONFIGURATION_UPLOAD_REMOTE" == "true" ] && [ ! -f "${RESTORE_FILE}" ]; then setup_aws_credentials - download_from_s3 "${BACKUP_CONFIGURATION_BUCKET_NAME}" "${BACKUP_CONFIGURATION_S3_PREFIX}" "${TMP_DIR}" + download_from_s3 "${BACKUP_CONFIGURATION_BUCKET_NAME}" "${BACKUP_CONFIGURATION_S3_PREFIX}" "${TMP_DIR}" "${BACKUP_FILE}" RESTORE_FILE="${TMP_DIR}/${BACKUP_FILE}" fi fi @@ -55,4 +56,36 @@ fi echo "=== Restoring settings backup from \"${BACKUP_FILE}\"..." 
 /opt/docker/bin/settings-backup -Dlogback.configurationFile=/opt/docker/etc_log/logback.xml -restore "${RESTORE_FILE}"
+echo "=== Settings restore complete"
+
+# === StackPacks Restore ===
+# StackPacks backups are always stored next to the settings backup.
+if [ "${SKIP_STACKPACKS:-false}" == "true" ]; then
+  echo "=== Skipping StackPacks restore (--skip-stackpacks flag set)"
+else
+  # Construct stackpacks backup filename from the original backup file
+  STACKPACKS_FILE="${BACKUP_FILE}.stackpacks.zip"
+  STACKPACKS_RESTORE_FILE=""
+
+  echo "=== Checking for StackPacks backup \"${STACKPACKS_FILE}\" in bucket \"${BACKUP_CONFIGURATION_LOCAL_BUCKET:-}\"..."
+  setup_aws_credentials
+
+  if download_from_s3 "${BACKUP_CONFIGURATION_LOCAL_BUCKET:-}" "${BACKUP_CONFIGURATION_STACKPACKS_S3_PREFIX}" "${TMP_DIR}" "${STACKPACKS_FILE}"; then
+    STACKPACKS_RESTORE_FILE="${TMP_DIR}/${STACKPACKS_FILE}"
+  elif [ "${BACKUP_CONFIGURATION_UPLOAD_REMOTE}" == "true" ]; then
+    echo "=== StackPacks backup not found in kubernetes settings storage, trying main backups storage..."
+    if download_from_s3 "${BACKUP_CONFIGURATION_BUCKET_NAME}" "${BACKUP_CONFIGURATION_STACKPACKS_S3_PREFIX}" "${TMP_DIR}" "${STACKPACKS_FILE}"; then
+      STACKPACKS_RESTORE_FILE="${TMP_DIR}/${STACKPACKS_FILE}"
+    fi
+  fi
+
+  if [ -z "${STACKPACKS_RESTORE_FILE}" ] || [ ! -f "${STACKPACKS_RESTORE_FILE}" ]; then
+    echo "=== WARNING: StackPacks backup \"${STACKPACKS_FILE}\" not found, skipping StackPacks restore"
+    exit 0
+  fi
+
+  echo "=== Restoring StackPacks from \"${STACKPACKS_FILE}\"..."
+ /opt/docker/bin/stack-packs-backup -Dlogback.configurationFile=/opt/docker/etc_log/logback.xml -restore "${STACKPACKS_RESTORE_FILE}" + echo "=== StackPacks restore complete" +fi echo "===" diff --git a/internal/scripts/scripts/restore-stackgraph-backup.sh b/internal/scripts/scripts/restore-stackgraph-backup.sh index 2e89c30..0f3a9c4 100644 --- a/internal/scripts/scripts/restore-stackgraph-backup.sh +++ b/internal/scripts/scripts/restore-stackgraph-backup.sh @@ -30,4 +30,27 @@ fi echo "=== Importing StackGraph data from \"${BACKUP_FILE}\"..." /opt/docker/bin/stackstate-server -Dlogback.configurationFile=/opt/docker/etc_log/logback.xml -import "${TMP_DIR}/${BACKUP_FILE}" "${FORCE_DELETE}" +echo "=== StackGraph restore complete" + +# === StackPacks Restore === +if [ "${SKIP_STACKPACKS:-false}" == "true" ]; then + echo "=== Skipping StackPacks restore (--skip-stackpacks flag set)" +else + # Construct stackpacks backup filename from the original backup file + STACKPACKS_FILE="${BACKUP_FILE}.stackpacks.zip" + + echo "=== Checking for StackPacks backup \"${STACKPACKS_FILE}\" in bucket \"${BACKUP_STACKGRAPH_BUCKET_NAME}\"..." + + # Check if stackpacks backup exists in S3 + if sts-toolbox aws s3 ls --endpoint "http://${MINIO_ENDPOINT}" --region minio --bucket "${BACKUP_STACKGRAPH_BUCKET_NAME}" --prefix "${BACKUP_STACKGRAPH_STACKPACKS_S3_PREFIX}${STACKPACKS_FILE}" 2>/dev/null | grep -q "${STACKPACKS_FILE}"; then + echo "=== Downloading StackPacks backup..." + sts-toolbox aws s3 cp --endpoint "http://${MINIO_ENDPOINT}" --region minio "s3://${BACKUP_STACKGRAPH_BUCKET_NAME}/${BACKUP_STACKGRAPH_STACKPACKS_S3_PREFIX}${STACKPACKS_FILE}" "${TMP_DIR}/${STACKPACKS_FILE}" + + echo "=== Restoring StackPacks from \"${STACKPACKS_FILE}\"..." 
+ /opt/docker/bin/stack-packs-backup -Dlogback.configurationFile=/opt/docker/etc_log/logback.xml -restore "${TMP_DIR}/${STACKPACKS_FILE}" + echo "=== StackPacks restore complete" + else + echo "=== WARNING: StackPacks backup \"${STACKPACKS_FILE}\" not found in S3, skipping StackPacks restore" + fi +fi echo "==="