From 97d0f4ae7bc7188c2821dfc4681ddd30eadd78b3 Mon Sep 17 00:00:00 2001 From: Nicolas De Loof Date: Sun, 15 Mar 2026 20:03:08 +0100 Subject: [PATCH 1/3] re-implement compose logic - build observed state - compute reconciliation plan by comparing observed vs desired state - execute plan this decouples reconciliation (aka "convergence") logic from docker API, and allows writing simpler and more efficient tests to cover various scenarios Signed-off-by: Nicolas De Loof --- cmd/compose/compose.go | 3 +- pkg/compose/convergence.go | 380 --------- pkg/compose/create.go | 128 +-- pkg/compose/observed_state.go | 144 ++++ pkg/compose/observed_state_test.go | 157 ++++ pkg/compose/plan_executor.go | 533 ++++++++++++ pkg/compose/progress.go | 2 +- pkg/compose/publish.go | 6 +- pkg/compose/reconcile.go | 1228 ++++++++++++++++++++++++++++ pkg/compose/reconcile_test.go | 1178 ++++++++++++++++++++++ pkg/compose/run.go | 3 +- pkg/e2e/networks_test.go | 4 + pkg/e2e/orphans_test.go | 2 + pkg/e2e/recreate_no_deps_test.go | 2 + pkg/e2e/scale_test.go | 9 + pkg/e2e/up_test.go | 2 + pkg/e2e/volumes_test.go | 2 + 17 files changed, 3350 insertions(+), 433 deletions(-) create mode 100644 pkg/compose/observed_state.go create mode 100644 pkg/compose/observed_state_test.go create mode 100644 pkg/compose/plan_executor.go create mode 100644 pkg/compose/reconcile.go create mode 100644 pkg/compose/reconcile_test.go diff --git a/cmd/compose/compose.go b/cmd/compose/compose.go index 2b4bcb638ee..10e7ae0753a 100644 --- a/cmd/compose/compose.go +++ b/cmd/compose/compose.go @@ -30,6 +30,7 @@ import ( "syscall" "github.com/compose-spec/compose-go/v2/cli" + "github.com/compose-spec/compose-go/v2/consts" "github.com/compose-spec/compose-go/v2/dotenv" "github.com/compose-spec/compose-go/v2/loader" composepaths "github.com/compose-spec/compose-go/v2/paths" @@ -221,7 +222,7 @@ func makeJSONError(err error) error { } func (o *ProjectOptions) addProjectFlags(f *pflag.FlagSet) { - f.StringArrayVar(&o.Profiles, "profile", 
[]string{}, "Specify a profile to enable") + f.StringArrayVar(&o.Profiles, "profile", defaultStringArrayVar(consts.ComposeProfiles), "Specify a profile to enable") f.StringVarP(&o.ProjectName, "project-name", "p", "", "Project name") f.StringArrayVarP(&o.ConfigPaths, "file", "f", []string{}, "Compose configuration files") f.StringArrayVar(&o.insecureRegistries, "insecure-registry", []string{}, "Use insecure registry to pull Compose OCI artifacts. Doesn't apply to images") diff --git a/pkg/compose/convergence.go b/pkg/compose/convergence.go index b480b6be9d8..ab499c21838 100644 --- a/pkg/compose/convergence.go +++ b/pkg/compose/convergence.go @@ -21,8 +21,6 @@ import ( "errors" "fmt" "maps" - "slices" - "sort" "strconv" "strings" "sync" @@ -35,13 +33,9 @@ import ( "github.com/moby/moby/client" specs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" "golang.org/x/sync/errgroup" - "github.com/docker/compose/v5/internal/tracing" "github.com/docker/compose/v5/pkg/api" - "github.com/docker/compose/v5/pkg/utils" ) const ( @@ -50,207 +44,6 @@ const ( "Remove the custom name to scale the service" ) -// convergence manages service's container lifecycle. -// Based on initially observed state, it reconciles the existing container with desired state, which might include -// re-creating container, adding or removing replicas, or starting stopped containers. -// Cross services dependencies are managed by creating services in expected order and updating `service:xx` reference -// when a service has converged, so dependent ones can be managed with resolved containers references. 
-type convergence struct { - compose *composeService - services map[string]Containers - networks map[string]string - volumes map[string]string - stateMutex sync.Mutex -} - -func (c *convergence) getObservedState(serviceName string) Containers { - c.stateMutex.Lock() - defer c.stateMutex.Unlock() - return c.services[serviceName] -} - -func (c *convergence) setObservedState(serviceName string, containers Containers) { - c.stateMutex.Lock() - defer c.stateMutex.Unlock() - c.services[serviceName] = containers -} - -func newConvergence(services []string, state Containers, networks map[string]string, volumes map[string]string, s *composeService) *convergence { - observedState := map[string]Containers{} - for _, s := range services { - observedState[s] = Containers{} - } - for _, c := range state.filter(isNotOneOff) { - service := c.Labels[api.ServiceLabel] - observedState[service] = append(observedState[service], c) - } - return &convergence{ - compose: s, - services: observedState, - networks: networks, - volumes: volumes, - } -} - -func (c *convergence) apply(ctx context.Context, project *types.Project, options api.CreateOptions) error { - return InDependencyOrder(ctx, project, func(ctx context.Context, name string) error { - service, err := project.GetService(name) - if err != nil { - return err - } - - return tracing.SpanWrapFunc("service/apply", tracing.ServiceOptions(service), func(ctx context.Context) error { - strategy := options.RecreateDependencies - if slices.Contains(options.Services, name) { - strategy = options.Recreate - } - return c.ensureService(ctx, project, service, strategy, options.Inherit, options.Timeout) - })(ctx) - }) -} - -func (c *convergence) ensureService(ctx context.Context, project *types.Project, service types.ServiceConfig, recreate string, inherit bool, timeout *time.Duration) error { //nolint:gocyclo - if service.Provider != nil { - return c.compose.runPlugin(ctx, project, service, "up") - } - expected, err := getScale(service) - if err 
!= nil { - return err - } - containers := c.getObservedState(service.Name) - actual := len(containers) - updated := make(Containers, expected) - - eg, ctx := errgroup.WithContext(ctx) - - err = c.resolveServiceReferences(&service) - if err != nil { - return err - } - - sort.Slice(containers, func(i, j int) bool { - // select obsolete containers first, so they get removed as we scale down - if obsolete, _ := c.mustRecreate(service, containers[i], recreate); obsolete { - // i is obsolete, so must be first in the list - return true - } - if obsolete, _ := c.mustRecreate(service, containers[j], recreate); obsolete { - // j is obsolete, so must be first in the list - return false - } - - // For up-to-date containers, sort by container number to preserve low-values in container numbers - ni, erri := strconv.Atoi(containers[i].Labels[api.ContainerNumberLabel]) - nj, errj := strconv.Atoi(containers[j].Labels[api.ContainerNumberLabel]) - if erri == nil && errj == nil { - return ni > nj - } - - // If we don't get a container number (?) just sort by creation date - return containers[i].Created < containers[j].Created - }) - - slices.Reverse(containers) - for i, ctr := range containers { - if i >= expected { - // Scale Down - // As we sorted containers, obsolete ones and/or highest number will be removed - ctr := ctr - traceOpts := append(tracing.ServiceOptions(service), tracing.ContainerOptions(ctr)...) 
- eg.Go(tracing.SpanWrapFuncForErrGroup(ctx, "service/scale/down", traceOpts, func(ctx context.Context) error { - return c.compose.stopAndRemoveContainer(ctx, ctr, &service, timeout, false) - })) - continue - } - - mustRecreate, err := c.mustRecreate(service, ctr, recreate) - if err != nil { - return err - } - if mustRecreate { - err := c.stopDependentContainers(ctx, project, service) - if err != nil { - return err - } - - i, ctr := i, ctr - eg.Go(tracing.SpanWrapFuncForErrGroup(ctx, "container/recreate", tracing.ContainerOptions(ctr), func(ctx context.Context) error { - recreated, err := c.compose.recreateContainer(ctx, project, service, ctr, inherit, timeout) - updated[i] = recreated - return err - })) - continue - } - - // Enforce non-diverged containers are running - name := getContainerProgressName(ctr) - switch ctr.State { - case container.StateRunning: - c.compose.events.On(runningEvent(name)) - case container.StateCreated: - case container.StateRestarting: - case container.StateExited: - default: - ctr := ctr - eg.Go(tracing.EventWrapFuncForErrGroup(ctx, "service/start", tracing.ContainerOptions(ctr), func(ctx context.Context) error { - return c.compose.startContainer(ctx, ctr) - })) - } - updated[i] = ctr - } - - next := nextContainerNumber(containers) - for i := 0; i < expected-actual; i++ { - // Scale UP - number := next + i - name := getContainerName(project.Name, service, number) - eventOpts := tracing.SpanOptions{trace.WithAttributes(attribute.String("container.name", name))} - eg.Go(tracing.EventWrapFuncForErrGroup(ctx, "service/scale/up", eventOpts, func(ctx context.Context) error { - opts := createOptions{ - AutoRemove: false, - AttachStdin: false, - UseNetworkAliases: true, - Labels: mergeLabels(service.Labels, service.CustomLabels), - } - ctr, err := c.compose.createContainer(ctx, project, service, name, number, opts) - updated[actual+i] = ctr - return err - })) - continue - } - - err = eg.Wait() - c.setObservedState(service.Name, updated) - 
return err -} - -func (c *convergence) stopDependentContainers(ctx context.Context, project *types.Project, service types.ServiceConfig) error { - // Stop dependent containers, so they will be restarted after service is re-created - dependents := project.GetDependentsForService(service, func(dependency types.ServiceDependency) bool { - return dependency.Restart - }) - if len(dependents) == 0 { - return nil - } - err := c.compose.stop(ctx, project.Name, api.StopOptions{ - Services: dependents, - Project: project, - }, nil) - if err != nil { - return err - } - - for _, name := range dependents { - dependentStates := c.getObservedState(name) - for i, dependent := range dependentStates { - dependent.State = container.StateExited - dependentStates[i] = dependent - } - c.setObservedState(name, dependentStates) - } - return nil -} - func getScale(config types.ServiceConfig) (int, error) { scale := config.GetScale() if scale > 1 && config.ContainerName != "" { @@ -261,103 +54,6 @@ func getScale(config types.ServiceConfig) (int, error) { return scale, nil } -// resolveServiceReferences replaces reference to another service with reference to an actual container -func (c *convergence) resolveServiceReferences(service *types.ServiceConfig) error { - err := c.resolveVolumeFrom(service) - if err != nil { - return err - } - - err = c.resolveSharedNamespaces(service) - if err != nil { - return err - } - return nil -} - -func (c *convergence) resolveVolumeFrom(service *types.ServiceConfig) error { - for i, vol := range service.VolumesFrom { - spec := strings.Split(vol, ":") - if len(spec) == 0 { - continue - } - if spec[0] == "container" { - service.VolumesFrom[i] = spec[1] - continue - } - name := spec[0] - dependencies := c.getObservedState(name) - if len(dependencies) == 0 { - return fmt.Errorf("cannot share volume with service %s: container missing", name) - } - service.VolumesFrom[i] = dependencies.sorted()[0].ID - } - return nil -} - -func (c *convergence) 
resolveSharedNamespaces(service *types.ServiceConfig) error { - str := service.NetworkMode - if name := getDependentServiceFromMode(str); name != "" { - dependencies := c.getObservedState(name) - if len(dependencies) == 0 { - return fmt.Errorf("cannot share network namespace with service %s: container missing", name) - } - service.NetworkMode = types.ContainerPrefix + dependencies.sorted()[0].ID - } - - str = service.Ipc - if name := getDependentServiceFromMode(str); name != "" { - dependencies := c.getObservedState(name) - if len(dependencies) == 0 { - return fmt.Errorf("cannot share IPC namespace with service %s: container missing", name) - } - service.Ipc = types.ContainerPrefix + dependencies.sorted()[0].ID - } - - str = service.Pid - if name := getDependentServiceFromMode(str); name != "" { - dependencies := c.getObservedState(name) - if len(dependencies) == 0 { - return fmt.Errorf("cannot share PID namespace with service %s: container missing", name) - } - service.Pid = types.ContainerPrefix + dependencies.sorted()[0].ID - } - - return nil -} - -func (c *convergence) mustRecreate(expected types.ServiceConfig, actual container.Summary, policy string) (bool, error) { - if policy == api.RecreateNever { - return false, nil - } - if policy == api.RecreateForce { - return true, nil - } - configHash, err := ServiceHash(expected) - if err != nil { - return false, err - } - configChanged := actual.Labels[api.ConfigHashLabel] != configHash - imageUpdated := actual.Labels[api.ImageDigestLabel] != expected.CustomLabels[api.ImageDigestLabel] - if configChanged || imageUpdated { - return true, nil - } - - if c.networks != nil && actual.State == "running" { - if checkExpectedNetworks(expected, actual, c.networks) { - return true, nil - } - } - - if c.volumes != nil { - if checkExpectedVolumes(expected, actual, c.volumes) { - return true, nil - } - } - - return false, nil -} - func checkExpectedNetworks(expected types.ServiceConfig, actual container.Summary, networks 
map[string]string) bool { // check the networks container is connected to are the expected ones for net := range expected.Networks { @@ -614,85 +310,9 @@ func (s *composeService) createContainer(ctx context.Context, project *types.Pro return ctr, nil } -func (s *composeService) recreateContainer(ctx context.Context, project *types.Project, service types.ServiceConfig, - replaced container.Summary, inherit bool, timeout *time.Duration, -) (created container.Summary, err error) { - eventName := getContainerProgressName(replaced) - s.events.On(newEvent(eventName, api.Working, "Recreate")) - defer func() { - if err != nil && ctx.Err() == nil { - s.events.On(api.Resource{ - ID: eventName, - Status: api.Error, - Text: err.Error(), - }) - } - }() - - number, err := strconv.Atoi(replaced.Labels[api.ContainerNumberLabel]) - if err != nil { - return created, err - } - - var inherited *container.Summary - if inherit { - inherited = &replaced - } - - replacedContainerName := service.ContainerName - if replacedContainerName == "" { - replacedContainerName = service.Name + api.Separator + strconv.Itoa(number) - } - name := getContainerName(project.Name, service, number) - tmpName := fmt.Sprintf("%s_%s", replaced.ID[:12], name) - opts := createOptions{ - AutoRemove: false, - AttachStdin: false, - UseNetworkAliases: true, - Labels: mergeLabels(service.Labels, service.CustomLabels).Add(api.ContainerReplaceLabel, replacedContainerName), - } - created, err = s.createMobyContainer(ctx, project, service, tmpName, number, inherited, opts) - if err != nil { - return created, err - } - - timeoutInSecond := utils.DurationSecondToInt(timeout) - _, err = s.apiClient().ContainerStop(ctx, replaced.ID, client.ContainerStopOptions{Timeout: timeoutInSecond}) - if err != nil { - return created, err - } - - _, err = s.apiClient().ContainerRemove(ctx, replaced.ID, client.ContainerRemoveOptions{}) - if err != nil { - return created, err - } - - _, err = s.apiClient().ContainerRename(ctx, tmpName, 
client.ContainerRenameOptions{ - NewName: name, - }) - if err != nil { - return created, err - } - - s.events.On(newEvent(eventName, api.Done, "Recreated")) - return created, err -} - // force sequential calls to ContainerStart to prevent race condition in engine assigning ports from ranges var startMx sync.Mutex -func (s *composeService) startContainer(ctx context.Context, ctr container.Summary) error { - s.events.On(newEvent(getContainerProgressName(ctr), api.Working, "Restart")) - startMx.Lock() - defer startMx.Unlock() - _, err := s.apiClient().ContainerStart(ctx, ctr.ID, client.ContainerStartOptions{}) - if err != nil { - return err - } - s.events.On(newEvent(getContainerProgressName(ctr), api.Done, "Restarted")) - return nil -} - func (s *composeService) createMobyContainer(ctx context.Context, project *types.Project, service types.ServiceConfig, name string, number int, inherit *container.Summary, opts createOptions, ) (container.Summary, error) { diff --git a/pkg/compose/create.go b/pkg/compose/create.go index 23cba9f5d19..71ce2eb922b 100644 --- a/pkg/compose/create.go +++ b/pkg/compose/create.go @@ -87,82 +87,116 @@ func (s *composeService) create(ctx context.Context, project *types.Project, opt prepareNetworks(project) - networks, err := s.ensureNetworks(ctx, project) + // Temporary implementation of use_api_socket until we get actual support inside docker engine + project, err = s.useAPISocket(project) if err != nil { return err } - volumes, err := s.ensureProjectVolumes(ctx, project) + // Phase 1: Inspect current state + observed, err := s.InspectState(ctx, project) if err != nil { return err } - var observedState Containers - observedState, err = s.getContainers(ctx, project.Name, oneOffInclude, true) - if err != nil { - return err - } - orphans := observedState.filter(isOrphaned(project)) - if len(orphans) > 0 && !options.IgnoreOrphans { - if options.RemoveOrphans { - err := s.removeContainers(ctx, orphans, nil, nil, false) - if err != nil { - return 
err - } - } else { + // Handle orphan containers + if len(observed.Orphans) > 0 && !options.IgnoreOrphans { + if !options.RemoveOrphans { logrus.Warnf("Found orphan containers (%s) for this project. If "+ "you removed or renamed this service in your compose "+ "file, you can run this command with the "+ - "--remove-orphans flag to clean it up.", orphans.names()) + "--remove-orphans flag to clean it up.", observed.Orphans.names()) } } - // Temporary implementation of use_api_socket until we get actual support inside docker engine - project, err = s.useAPISocket(project) - if err != nil { + // Validate external networks exist before reconciling + if err := s.validateExternalNetworks(ctx, project, options.Services); err != nil { return err } - return newConvergence(options.Services, observedState, networks, volumes, s).apply(ctx, project, options) -} + // Phase 2: Reconcile desired vs observed state (pure function) + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: options.Recreate, + RecreateDependencies: options.RecreateDependencies, + Services: options.Services, + Inherit: options.Inherit, + Timeout: options.Timeout, + RemoveOrphans: options.RemoveOrphans, + }) + if err != nil { + return err + } -func prepareNetworks(project *types.Project) { - for k, nw := range project.Networks { - nw.CustomLabels = nw.CustomLabels. - Add(api.NetworkLabel, k). - Add(api.ProjectLabel, project.Name). 
- Add(api.VersionLabel, api.ComposeVersion) - project.Networks[k] = nw + if plan.IsEmpty() { + return nil } + + s.emitUntouchedContainerEvents(project, observed, plan) + + // Phase 3: Execute the plan + return s.ExecutePlan(ctx, project, plan) } -func (s *composeService) ensureNetworks(ctx context.Context, project *types.Project) (map[string]string, error) { - networks := map[string]string{} - for name, nw := range project.Networks { - id, err := s.ensureNetwork(ctx, project, name, &nw) - if err != nil { - return nil, err +// emitUntouchedContainerEvents emits progress events for containers that are +// already up-to-date and running, so that callers (e.g. scale) can see them. +func (s *composeService) emitUntouchedContainerEvents(project *types.Project, observed *ObservedState, plan *ReconciliationPlan) { + for _, service := range project.Services { + for _, ctr := range observed.Containers[service.Name] { + ctrName := getCanonicalContainerName(ctr) + if _, touched := plan.Operations["stop-container:"+ctrName]; touched { + continue + } + if _, touched := plan.Operations["start-container:"+ctrName]; touched { + continue + } + if _, touched := plan.Operations["create-container:"+ctrName]; touched { + continue + } + if ctr.State == container.StateRunning { + s.events.On(runningEvent(getContainerProgressName(ctr))) + } } - networks[name] = id - project.Networks[name] = nw } - return networks, nil } -func (s *composeService) ensureProjectVolumes(ctx context.Context, project *types.Project) (map[string]string, error) { - ids := map[string]string{} - for k, volume := range project.Volumes { - volume.CustomLabels = volume.CustomLabels.Add(api.VolumeLabel, k) - volume.CustomLabels = volume.CustomLabels.Add(api.ProjectLabel, project.Name) - volume.CustomLabels = volume.CustomLabels.Add(api.VersionLabel, api.ComposeVersion) - id, err := s.ensureVolume(ctx, k, volume, project) +// validateExternalNetworks checks that external networks exist for services +// that are part of 
the current operation. Returns an error if a required +// external network is not found. +func (s *composeService) validateExternalNetworks(ctx context.Context, project *types.Project, services []string) error { + for key, net := range project.Networks { + if !net.External { + continue + } + // Check if any targeted service uses this network + usedByTargetedService := false + for _, service := range project.Services { + if len(services) > 0 && !slices.Contains(services, service.Name) { + continue + } + if _, ok := service.Networks[key]; ok { + usedByTargetedService = true + break + } + } + if !usedByTargetedService { + continue + } + _, err := s.resolveExternalNetwork(ctx, &net) if err != nil { - return nil, err + return err } - ids[k] = id } + return nil +} - return ids, nil +func prepareNetworks(project *types.Project) { + for k, nw := range project.Networks { + nw.CustomLabels = nw.CustomLabels. + Add(api.NetworkLabel, k). + Add(api.ProjectLabel, project.Name). + Add(api.VersionLabel, api.ComposeVersion) + project.Networks[k] = nw + } } //nolint:gocyclo diff --git a/pkg/compose/observed_state.go b/pkg/compose/observed_state.go new file mode 100644 index 00000000000..c0a1fb53e57 --- /dev/null +++ b/pkg/compose/observed_state.go @@ -0,0 +1,144 @@ +/* + Copyright 2020 Docker Compose CLI authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package compose + +import ( + "context" + + "github.com/compose-spec/compose-go/v2/types" + "github.com/moby/moby/client" + "golang.org/x/sync/errgroup" + + "github.com/docker/compose/v5/pkg/api" +) + +// ObservedState captures the current state of a Compose project as seen by the Docker engine. +type ObservedState struct { + ProjectName string + Containers map[string]Containers + Networks map[string]ObservedNetwork + Volumes map[string]ObservedVolume + Orphans Containers +} + +// ObservedNetwork represents a Docker network associated with a Compose project. +type ObservedNetwork struct { + ID string + Name string + Driver string + Labels map[string]string + ConfigHash string +} + +// ObservedVolume represents a Docker volume associated with a Compose project. +type ObservedVolume struct { + Name string + Driver string + Labels map[string]string + ConfigHash string +} + +// allContainers returns all containers from the observed state (across all services and orphans). +func (s *ObservedState) allContainers() Containers { + var result Containers + for _, ctrs := range s.Containers { + result = append(result, ctrs...) + } + result = append(result, s.Orphans...) + return result +} + +// InspectState queries the Docker engine to build an ObservedState for the given project. 
+func (s *composeService) InspectState(ctx context.Context, project *types.Project) (*ObservedState, error) { + var ( + allContainers Containers + networks client.NetworkListResult + volumes client.VolumeListResult + ) + + eg, ctx := errgroup.WithContext(ctx) + + eg.Go(func() error { + var err error + allContainers, err = s.getContainers(ctx, project.Name, oneOffInclude, true) + return err + }) + + eg.Go(func() error { + var err error + networks, err = s.apiClient().NetworkList(ctx, client.NetworkListOptions{ + Filters: projectFilter(project.Name), + }) + return err + }) + + eg.Go(func() error { + var err error + volumes, err = s.apiClient().VolumeList(ctx, client.VolumeListOptions{ + Filters: projectFilter(project.Name), + }) + return err + }) + + if err := eg.Wait(); err != nil { + return nil, err + } + + // Partition containers by service, excluding one-off containers + // (e.g. from `compose run`) which should not affect scale/recreate decisions. + containersByService := map[string]Containers{} + for _, c := range allContainers.filter(isNotOneOff) { + service := c.Labels[api.ServiceLabel] + containersByService[service] = append(containersByService[service], c) + } + + // Identify orphan containers (include one-offs so they get cleaned up) + orphans := allContainers.filter(isOrphaned(project)) + + // Map networks by their Compose network name + observedNetworks := map[string]ObservedNetwork{} + for _, n := range networks.Items { + name := n.Labels[api.NetworkLabel] + observedNetworks[name] = ObservedNetwork{ + ID: n.ID, + Name: n.Name, + Driver: n.Driver, + Labels: n.Labels, + ConfigHash: n.Labels[api.ConfigHashLabel], + } + } + + // Map volumes by their Compose volume name + observedVolumes := map[string]ObservedVolume{} + for _, v := range volumes.Items { + name := v.Labels[api.VolumeLabel] + observedVolumes[name] = ObservedVolume{ + Name: v.Name, + Driver: v.Driver, + Labels: v.Labels, + ConfigHash: v.Labels[api.ConfigHashLabel], + } + } + + return 
&ObservedState{ + ProjectName: project.Name, + Containers: containersByService, + Networks: observedNetworks, + Volumes: observedVolumes, + Orphans: orphans, + }, nil +} diff --git a/pkg/compose/observed_state_test.go b/pkg/compose/observed_state_test.go new file mode 100644 index 00000000000..7e60373be17 --- /dev/null +++ b/pkg/compose/observed_state_test.go @@ -0,0 +1,157 @@ +/* + Copyright 2020 Docker Compose CLI authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package compose + +import ( + "testing" + + "gotest.tools/v3/assert" +) + +func TestObservedStateTypes(t *testing.T) { + net := ObservedNetwork{ + ID: "net123", + Name: "myproject_default", + Driver: "bridge", + Labels: map[string]string{"com.docker.compose.network": "default"}, + ConfigHash: "abc123", + } + assert.Equal(t, net.ID, "net123") + assert.Equal(t, net.Name, "myproject_default") + assert.Equal(t, net.Driver, "bridge") + assert.Equal(t, net.ConfigHash, "abc123") + assert.Equal(t, net.Labels["com.docker.compose.network"], "default") + + vol := ObservedVolume{ + Name: "myproject_data", + Driver: "local", + Labels: map[string]string{"com.docker.compose.volume": "data"}, + ConfigHash: "def456", + } + assert.Equal(t, vol.Name, "myproject_data") + assert.Equal(t, vol.Driver, "local") + assert.Equal(t, vol.ConfigHash, "def456") + assert.Equal(t, vol.Labels["com.docker.compose.volume"], "data") + + state := ObservedState{ + ProjectName: "myproject", + Containers: map[string]Containers{}, + Networks: 
map[string]ObservedNetwork{"default": net}, + Volumes: map[string]ObservedVolume{"data": vol}, + Orphans: Containers{}, + } + assert.Equal(t, state.ProjectName, "myproject") + assert.Equal(t, len(state.Networks), 1) + assert.Equal(t, len(state.Volumes), 1) + assert.Equal(t, state.Networks["default"].ID, "net123") + assert.Equal(t, state.Volumes["data"].Name, "myproject_data") +} + +func TestReconciliationPlanRoots(t *testing.T) { + plan := &ReconciliationPlan{ + Operations: map[string]*Operation{ + "create-network:mynet": { + ID: "create-network:mynet", + Type: OpCreateNetwork, + }, + "create-volume:myvol": { + ID: "create-volume:myvol", + Type: OpCreateVolume, + }, + "create-container:web-1": { + ID: "create-container:web-1", + Type: OpCreateContainer, + DependsOn: []string{"create-network:mynet", "create-volume:myvol"}, + }, + "start-container:db-1": { + ID: "start-container:db-1", + Type: OpStartContainer, + DependsOn: []string{}, + }, + }, + Dependents: map[string][]string{}, + } + + roots := plan.Roots() + // Roots should be the operations with empty DependsOn: network, volume, and start-container + assert.Equal(t, len(roots), 3) + // Roots are sorted by ID + assert.Equal(t, roots[0].ID, "create-network:mynet") + assert.Equal(t, roots[1].ID, "create-volume:myvol") + assert.Equal(t, roots[2].ID, "start-container:db-1") +} + +func TestReconciliationPlanIsEmpty(t *testing.T) { + emptyPlan := &ReconciliationPlan{ + Operations: map[string]*Operation{}, + Dependents: map[string][]string{}, + } + assert.Assert(t, emptyPlan.IsEmpty()) + + nonEmptyPlan := &ReconciliationPlan{ + Operations: map[string]*Operation{ + "create-network:mynet": { + ID: "create-network:mynet", + Type: OpCreateNetwork, + }, + }, + Dependents: map[string][]string{}, + } + assert.Assert(t, !nonEmptyPlan.IsEmpty()) +} + +func TestReconciliationPlanString(t *testing.T) { + emptyPlan := &ReconciliationPlan{ + Operations: map[string]*Operation{}, + Dependents: map[string][]string{}, + } + 
assert.Equal(t, emptyPlan.String(), "(empty plan)") + + plan := &ReconciliationPlan{ + Operations: map[string]*Operation{ + "create-network:mynet": { + ID: "create-network:mynet", + Type: OpCreateNetwork, + Resource: "mynet", + NetworkOp: &NetworkOperation{ + NetworkKey: "default", + }, + Reason: "network does not exist", + }, + "create-container:web-1": { + ID: "create-container:web-1", + Type: OpCreateContainer, + ServiceName: "web", + Resource: "web-1", + ContainerOp: &ContainerOperation{ + ContainerName: "web-1", + ContainerNumber: 1, + }, + DependsOn: []string{"create-network:mynet"}, + Reason: "scale up", + }, + }, + Dependents: map[string][]string{ + "create-network:mynet": {"create-container:web-1"}, + }, + } + expected := ` +1. create network mynet reason: network does not exist +[1] -> 2. create container web-1 reason: scale up +` + assert.Equal(t, plan.String(), expected) +} diff --git a/pkg/compose/plan_executor.go b/pkg/compose/plan_executor.go new file mode 100644 index 00000000000..b2d135ea587 --- /dev/null +++ b/pkg/compose/plan_executor.go @@ -0,0 +1,533 @@ +/* + Copyright 2020 Docker Compose CLI authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package compose + +import ( + "context" + "fmt" + "io" + "slices" + "sort" + "strconv" + "strings" + "sync" + + "github.com/compose-spec/compose-go/v2/types" + containerType "github.com/moby/moby/api/types/container" + "github.com/moby/moby/client" + "golang.org/x/sync/errgroup" + + "github.com/docker/compose/v5/pkg/api" +) + +// executionState tracks the results of operations as they complete, allowing +// dependent operations to resolve service references. +type executionState struct { + mu sync.Mutex + containers map[string]Containers // service name -> containers created/updated + networks map[string]string // network key -> ID + volumes map[string]string // volume key -> ID +} + +func newExecutionState() *executionState { + return &executionState{ + containers: make(map[string]Containers), + networks: make(map[string]string), + volumes: make(map[string]string), + } +} + +// newExecutionStateFrom builds an executionState pre-populated with existing +// containers partitioned by service name. Used by run.go to resolve service +// references without the old convergence struct. 
+func newExecutionStateFrom(containers Containers) *executionState { + es := newExecutionState() + for _, c := range containers.filter(isNotOneOff) { + service := c.Labels[api.ServiceLabel] + es.containers[service] = append(es.containers[service], c) + } + return es +} + +func (es *executionState) addContainer(serviceName string, ctr containerType.Summary) { + es.mu.Lock() + defer es.mu.Unlock() + es.containers[serviceName] = append(es.containers[serviceName], ctr) +} + +func (es *executionState) getContainers(serviceName string) Containers { + es.mu.Lock() + defer es.mu.Unlock() + return slices.Clone(es.containers[serviceName]) +} + +func (es *executionState) setNetworkID(key, id string) { + es.mu.Lock() + defer es.mu.Unlock() + es.networks[key] = id +} + +func (es *executionState) setVolumeID(key, id string) { + es.mu.Lock() + defer es.mu.Unlock() + es.volumes[key] = id +} + +// resolveServiceReferences replaces service references in a ServiceConfig with +// actual container IDs from the execution state. This mirrors the logic in +// convergence.resolveServiceReferences but uses executionState instead. 
+func (es *executionState) resolveServiceReferences(service *types.ServiceConfig) error { + if err := es.resolveVolumeFrom(service); err != nil { + return err + } + return es.resolveSharedNamespaces(service) +} + +func (es *executionState) resolveVolumeFrom(service *types.ServiceConfig) error { + for i, vol := range service.VolumesFrom { + spec := strings.Split(vol, ":") + if len(spec) == 0 { + continue + } + if spec[0] == "container" { + service.VolumesFrom[i] = spec[1] + continue + } + name := spec[0] + dependencies := es.getContainers(name) + if len(dependencies) == 0 { + return fmt.Errorf("cannot share volume with service %s: container missing", name) + } + service.VolumesFrom[i] = dependencies.sorted()[0].ID + } + return nil +} + +func (es *executionState) resolveSharedNamespaces(service *types.ServiceConfig) error { + if name := getDependentServiceFromMode(service.NetworkMode); name != "" { + dependencies := es.getContainers(name) + if len(dependencies) == 0 { + return fmt.Errorf("cannot share network namespace with service %s: container missing", name) + } + service.NetworkMode = types.ContainerPrefix + dependencies.sorted()[0].ID + } + + if name := getDependentServiceFromMode(service.Ipc); name != "" { + dependencies := es.getContainers(name) + if len(dependencies) == 0 { + return fmt.Errorf("cannot share IPC namespace with service %s: container missing", name) + } + service.Ipc = types.ContainerPrefix + dependencies.sorted()[0].ID + } + + if name := getDependentServiceFromMode(service.Pid); name != "" { + dependencies := es.getContainers(name) + if len(dependencies) == 0 { + return fmt.Errorf("cannot share PID namespace with service %s: container missing", name) + } + service.Pid = types.ContainerPrefix + dependencies.sorted()[0].ID + } + + return nil +} + +// ExecutePlan executes a reconciliation plan using DAG traversal similar to +// graphTraversal.visit() in dependencies.go. Operations are executed +// concurrently, respecting dependency ordering. 
+func (s *composeService) ExecutePlan(ctx context.Context, project *types.Project, plan *ReconciliationPlan) error { + if plan.IsEmpty() { + return nil + } + + // Pre-populate execution state with existing containers so that + // resolveServiceReferences can find containers for services not + // included in the plan (e.g. --no-deps scenarios). + allContainers, err := s.getContainers(ctx, project.Name, oneOffExclude, true) + if err != nil { + return err + } + state := newExecutionStateFrom(allContainers) + + // Build dependency count map: number of unsatisfied deps per operation. + // The consumer goroutine is single-threaded, so no mutex is needed for depCount. + depCount := make(map[string]int, len(plan.Operations)) + for _, op := range plan.Operations { + depCount[op.ID] = len(op.DependsOn) + } + + expect := len(plan.Operations) + eg, ctx := errgroup.WithContext(ctx) + opCh := make(chan *Operation, expect) + defer close(opCh) + + // Consumer goroutine: waits for completed ops and enqueues newly-ready dependents + eg.Go(func() error { + for { + select { + case <-ctx.Done(): + return nil + case doneOp := <-opCh: + expect-- + if expect == 0 { + return nil + } + + // Decrement dep count for each dependent; schedule when ready + for _, depID := range plan.Dependents[doneOp.ID] { + depCount[depID]-- + if depCount[depID] == 0 { + depOp := plan.Operations[depID] + eg.Go(func() error { + if err := s.executeOperation(ctx, project, depOp, state); err != nil { + return err + } + opCh <- depOp + return nil + }) + } + } + } + } + }) + + // Launch root operations + for _, op := range plan.Roots() { + eg.Go(func() error { + if err := s.executeOperation(ctx, project, op, state); err != nil { + return err + } + opCh <- op + return nil + }) + } + + return eg.Wait() +} + +func (s *composeService) executeOperation(ctx context.Context, project *types.Project, op *Operation, state *executionState) error { + switch op.Type { + case OpCreateNetwork: + return 
s.executePlanCreateNetwork(ctx, project, op, state) + case OpRemoveNetwork: + return s.executePlanRemoveNetwork(ctx, project, op) + case OpDisconnectNetwork: + return s.executePlanDisconnectNetwork(ctx, op) + case OpConnectNetwork: + return s.executePlanConnectNetwork(ctx, op) + case OpCreateVolume: + return s.executePlanCreateVolume(ctx, project, op, state) + case OpRemoveVolume: + return s.executePlanRemoveVolume(ctx, op) + case OpCreateContainer: + return s.executePlanCreateContainer(ctx, project, op, state) + case OpStartContainer: + return s.executePlanStartContainer(ctx, op) + case OpStopContainer: + return s.executePlanStopContainer(ctx, op) + case OpRemoveContainer: + return s.executePlanRemoveContainer(ctx, op) + case OpRenameContainer: + return s.executePlanRenameContainer(ctx, op) + case OpRunPlugin: + return s.executePlanRunPlugin(ctx, project, op) + default: + return fmt.Errorf("unknown operation type: %d", op.Type) + } +} + +func (s *composeService) executePlanCreateNetwork(ctx context.Context, project *types.Project, op *Operation, state *executionState) error { + id, err := s.ensureNetwork(ctx, project, op.NetworkOp.NetworkKey, op.NetworkOp.Desired) + if err != nil { + return err + } + state.setNetworkID(op.NetworkOp.NetworkKey, id) + return nil +} + +func (s *composeService) executePlanRemoveNetwork(ctx context.Context, project *types.Project, op *Operation) error { + return s.removeNetwork(ctx, op.NetworkOp.NetworkKey, project.Name, op.NetworkOp.Existing.Name) +} + +func (s *composeService) executePlanDisconnectNetwork(ctx context.Context, op *Operation) error { + _, err := s.apiClient().NetworkDisconnect(ctx, op.ContainerNetworkOp.NetworkName, client.NetworkDisconnectOptions{ + Container: op.ContainerNetworkOp.ContainerID, + Force: true, + }) + return err +} + +func (s *composeService) executePlanConnectNetwork(ctx context.Context, op *Operation) error { + _, err := s.apiClient().NetworkConnect(ctx, op.ContainerNetworkOp.NetworkName, 
client.NetworkConnectOptions{ + Container: op.ContainerNetworkOp.ContainerID, + }) + return err +} + +func (s *composeService) executePlanCreateVolume(ctx context.Context, project *types.Project, op *Operation, state *executionState) error { + volume := *op.VolumeOp.Desired + volume.CustomLabels = volume.CustomLabels.Add(api.VolumeLabel, op.VolumeOp.VolumeKey) + volume.CustomLabels = volume.CustomLabels.Add(api.ProjectLabel, project.Name) + volume.CustomLabels = volume.CustomLabels.Add(api.VersionLabel, api.ComposeVersion) + id, err := s.ensureVolume(ctx, op.VolumeOp.VolumeKey, volume, project) + if err != nil { + return err + } + state.setVolumeID(op.VolumeOp.VolumeKey, id) + return nil +} + +func (s *composeService) executePlanRemoveVolume(ctx context.Context, op *Operation) error { + return s.removeVolume(ctx, op.VolumeOp.Existing.Name) +} + +func (s *composeService) executePlanCreateContainer(ctx context.Context, project *types.Project, op *Operation, state *executionState) error { + service := op.ContainerOp.Service + + // Resolve service references using execution state, falling back to + // pre-populated existing containers for --no-deps scenarios. + if err := state.resolveServiceReferences(&service); err != nil { + return err + } + + eventName := "Container " + op.ContainerOp.ContainerName + s.events.On(creatingEvent(eventName)) + + labels := mergeLabels(service.Labels, service.CustomLabels) + + // When Existing is set, this is the "create" step of a recreate chain: + // inherit from old container and add replace label. 
+ var inherited *containerType.Summary + if op.ContainerOp.Existing != nil && op.ContainerOp.Inherit { + inherited = op.ContainerOp.Existing + } + if op.ContainerOp.Existing != nil { + replacedName := service.ContainerName + if replacedName == "" { + replacedName = service.Name + api.Separator + strconv.Itoa(op.ContainerOp.ContainerNumber) + } + labels = labels.Add(api.ContainerReplaceLabel, replacedName) + } + + opts := createOptions{ + AutoRemove: false, + AttachStdin: false, + UseNetworkAliases: true, + Labels: labels, + } + + ctr, err := s.createMobyContainer(ctx, project, service, op.ContainerOp.ContainerName, op.ContainerOp.ContainerNumber, inherited, opts) + if err != nil { + return err + } + s.events.On(createdEvent(eventName)) + + state.addContainer(op.ServiceName, ctr) + return nil +} + +func (s *composeService) executePlanRenameContainer(ctx context.Context, op *Operation) error { + eventName := "Container " + op.RenameOp.NewName + s.events.On(newEvent(eventName, api.Working, "Recreate")) + _, err := s.apiClient().ContainerRename(ctx, op.RenameOp.CurrentName, client.ContainerRenameOptions{ + NewName: op.RenameOp.NewName, + }) + if err != nil { + return err + } + s.events.On(newEvent(eventName, api.Done, "Recreated")) + return nil +} + +func (s *composeService) executePlanStartContainer(ctx context.Context, op *Operation) error { + eventName := "Container " + op.ContainerOp.ContainerName + var containerID string + if op.ContainerOp.Existing != nil { + containerID = op.ContainerOp.Existing.ID + } else { + // Container was just created/renamed; look it up by name + res, err := s.apiClient().ContainerInspect(ctx, op.ContainerOp.ContainerName, client.ContainerInspectOptions{}) + if err != nil { + return fmt.Errorf("cannot start container %s: %w", op.ContainerOp.ContainerName, err) + } + containerID = res.Container.ID + } + s.events.On(startingEvent(eventName)) + startMx.Lock() + _, err := s.apiClient().ContainerStart(ctx, containerID, 
client.ContainerStartOptions{}) + startMx.Unlock() + if err != nil { + return err + } + s.events.On(startedEvent(eventName)) + return nil +} + +func (s *composeService) executePlanStopContainer(ctx context.Context, op *Operation) error { + var svc *types.ServiceConfig + if op.ContainerOp.Service.Name != "" { + s := op.ContainerOp.Service + svc = &s + } + return s.stopContainer(ctx, svc, *op.ContainerOp.Existing, op.ContainerOp.Timeout, nil) +} + +func (s *composeService) executePlanRemoveContainer(ctx context.Context, op *Operation) error { + service := op.ContainerOp.Service + return s.stopAndRemoveContainer(ctx, *op.ContainerOp.Existing, &service, op.ContainerOp.Timeout, false) +} + +func (s *composeService) executePlanRunPlugin(ctx context.Context, project *types.Project, op *Operation) error { + return s.runPlugin(ctx, project, op.PluginOp.Service, op.PluginOp.Action) +} + +// DisplayPlan performs a topological sort of operations and displays them +// grouped by resource type. +func DisplayPlan(plan *ReconciliationPlan, w io.Writer) error { + ops := topologicalSort(plan) + + // Group operations by category + var networkOps, volumeOps []*Operation + serviceOps := make(map[string][]*Operation) + + for _, op := range ops { + switch { + case op.NetworkOp != nil: + networkOps = append(networkOps, op) + case op.ContainerNetworkOp != nil: + networkOps = append(networkOps, op) + case op.VolumeOp != nil: + volumeOps = append(volumeOps, op) + case op.ContainerOp != nil: + serviceOps[op.ServiceName] = append(serviceOps[op.ServiceName], op) + case op.RenameOp != nil: + serviceOps[op.ServiceName] = append(serviceOps[op.ServiceName], op) + case op.PluginOp != nil: + serviceOps[op.ServiceName] = append(serviceOps[op.ServiceName], op) + } + } + + if err := displayOpsSection(w, "Networks:", " ", networkOps); err != nil { + return err + } + if err := displayOpsSection(w, "Volumes:", " ", volumeOps); err != nil { + return err + } + return displayServiceOps(w, serviceOps) +} + 
+func displayOpsSection(w io.Writer, header, indent string, ops []*Operation) error { + if len(ops) == 0 { + return nil + } + if _, err := fmt.Fprintln(w, header); err != nil { + return err + } + for _, op := range ops { + if _, err := fmt.Fprintf(w, "%s[%-10s] %-20s reason: %s\n", indent, opVerb(op.Type), op.Resource, op.Reason); err != nil { + return err + } + } + return nil +} + +func displayServiceOps(w io.Writer, serviceOps map[string][]*Operation) error { + if len(serviceOps) == 0 { + return nil + } + if _, err := fmt.Fprintln(w, "Services:"); err != nil { + return err + } + + // Sort service names for stable output + serviceNames := make([]string, 0, len(serviceOps)) + for name := range serviceOps { + serviceNames = append(serviceNames, name) + } + sort.Strings(serviceNames) + + for _, svcName := range serviceNames { + if _, err := fmt.Fprintf(w, " %s:\n", svcName); err != nil { + return err + } + for _, op := range serviceOps[svcName] { + if _, err := fmt.Fprintf(w, " [%-10s] %-20s reason: %s\n", opVerb(op.Type), op.Resource, op.Reason); err != nil { + return err + } + } + } + return nil +} + +// opVerb returns a short action verb for display purposes. +func opVerb(t OperationType) string { + switch t { + case OpCreateNetwork, OpCreateVolume, OpCreateContainer: + return "create" + case OpRenameContainer: + return "rename" + case OpRemoveNetwork, OpRemoveVolume, OpRemoveContainer: + return "remove" + case OpDisconnectNetwork: + return "disconnect" + case OpConnectNetwork: + return "connect" + case OpStartContainer: + return "start" + case OpStopContainer: + return "stop" + case OpRunPlugin: + return "plugin" + default: + return "unknown" + } +} + +// topologicalSort returns operations in dependency order using Kahn's algorithm. 
+func topologicalSort(plan *ReconciliationPlan) []*Operation { + inDegree := make(map[string]int, len(plan.Operations)) + for _, op := range plan.Operations { + inDegree[op.ID] = len(op.DependsOn) + } + + // Start with nodes that have no dependencies + var queue []string + for _, op := range plan.Operations { + if inDegree[op.ID] == 0 { + queue = append(queue, op.ID) + } + } + sort.Strings(queue) // deterministic ordering + + var sorted []*Operation + for len(queue) > 0 { + id := queue[0] + queue = queue[1:] + sorted = append(sorted, plan.Operations[id]) + + var next []string + for _, depID := range plan.Dependents[id] { + inDegree[depID]-- + if inDegree[depID] == 0 { + next = append(next, depID) + } + } + sort.Strings(next) + queue = append(queue, next...) + } + + return sorted +} diff --git a/pkg/compose/progress.go b/pkg/compose/progress.go index 26f9b5d8590..0d608473758 100644 --- a/pkg/compose/progress.go +++ b/pkg/compose/progress.go @@ -82,7 +82,7 @@ func restartingEvent(id string) api.Resource { return newEvent(id, api.Working, api.StatusRestarting) } -// runningEvent creates a new Running in progress Resource +// runningEvent creates a new Running (done) Resource func runningEvent(id string) api.Resource { return newEvent(id, api.Done, api.StatusRunning) } diff --git a/pkg/compose/publish.go b/pkg/compose/publish.go index fb466607559..2aec424db93 100644 --- a/pkg/compose/publish.go +++ b/pkg/compose/publish.go @@ -332,7 +332,7 @@ func (s *composeService) preChecks(project *types.Project, options api.PublishOp for _, val := range detectedSecrets { b.WriteString(val.Type) b.WriteRune('\n') - b.WriteString(fmt.Sprintf("%q: %s\n", val.Key, val.Value)) + fmt.Fprintf(&b, "%q: %s\n", val.Key, val.Value) } b.WriteString("Are you ok to publish these sensitive data?") confirm, err := s.prompt(b.String(), false) @@ -362,7 +362,7 @@ func (s *composeService) checkEnvironmentVariables(project *types.Project, optio var errorMsg strings.Builder for _, errors := range 
errorList { for _, err := range errors { - errorMsg.WriteString(fmt.Sprintf("%s\n", err)) + fmt.Fprintf(&errorMsg, "%s\n", err) } } return fmt.Errorf("%s%s", errorMsg.String(), errorMsgSuffix) @@ -396,7 +396,7 @@ func (s *composeService) checkOnlyBuildSection(project *types.Project) (bool, er var errMsg strings.Builder errMsg.WriteString("your Compose stack cannot be published as it only contains a build section for service(s):\n") for _, serviceInError := range errorList { - errMsg.WriteString(fmt.Sprintf("- %q\n", serviceInError)) + fmt.Fprintf(&errMsg, "- %q\n", serviceInError) } return false, errors.New(errMsg.String()) } diff --git a/pkg/compose/reconcile.go b/pkg/compose/reconcile.go new file mode 100644 index 00000000000..d2d9909d676 --- /dev/null +++ b/pkg/compose/reconcile.go @@ -0,0 +1,1228 @@ +/* + Copyright 2020 Docker Compose CLI authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package compose + +import ( + "fmt" + "slices" + "sort" + "strconv" + "strings" + "time" + + "github.com/compose-spec/compose-go/v2/types" + "github.com/moby/moby/api/types/container" + mmount "github.com/moby/moby/api/types/mount" + + "github.com/docker/compose/v5/pkg/api" +) + +// OperationType represents the kind of reconciliation operation to perform. 
+type OperationType int + +const ( + OpCreateNetwork OperationType = iota + OpRemoveNetwork + OpDisconnectNetwork + OpConnectNetwork + OpCreateVolume + OpRemoveVolume + OpCreateContainer + OpStartContainer + OpStopContainer + OpRemoveContainer + OpRenameContainer + OpRunPlugin +) + +// String returns a human-readable name for the operation type. +func (o OperationType) String() string { + switch o { + case OpCreateNetwork: + return "create-network" + case OpRemoveNetwork: + return "remove-network" + case OpDisconnectNetwork: + return "disconnect-network" + case OpConnectNetwork: + return "connect-network" + case OpCreateVolume: + return "create-volume" + case OpRemoveVolume: + return "remove-volume" + case OpCreateContainer: + return "create-container" + case OpStartContainer: + return "start-container" + case OpStopContainer: + return "stop-container" + case OpRemoveContainer: + return "remove-container" + case OpRenameContainer: + return "rename-container" + case OpRunPlugin: + return "run-plugin" + default: + return fmt.Sprintf("unknown(%d)", int(o)) + } +} + +// Operation describes a single unit of work produced by the reconciliation algorithm. +type Operation struct { + ID string + Type OperationType + ServiceName string + Resource string + NetworkOp *NetworkOperation + VolumeOp *VolumeOperation + ContainerOp *ContainerOperation + PluginOp *PluginOperation + ContainerNetworkOp *ContainerNetworkOperation + RenameOp *RenameOperation + DependsOn []string + Reason string +} + +// NetworkOperation holds details for network create/recreate/remove operations. +type NetworkOperation struct { + NetworkKey string + Desired *types.NetworkConfig + Existing *ObservedNetwork +} + +// ContainerNetworkOperation holds details for connecting or disconnecting a container from a network. +type ContainerNetworkOperation struct { + NetworkName string + ContainerID string +} + +// VolumeOperation holds details for volume create/recreate/remove operations. 
+type VolumeOperation struct { + VolumeKey string + Desired *types.VolumeConfig + Existing *ObservedVolume +} + +// ContainerOperation holds details for container create/start/stop/remove operations. +type ContainerOperation struct { + Service types.ServiceConfig + ContainerName string + ContainerNumber int + Existing *container.Summary + Inherit bool + Timeout *time.Duration +} + +// RenameOperation holds details for renaming a container. +type RenameOperation struct { + ContainerID string + CurrentName string + NewName string +} + +// PluginOperation holds details for plugin service operations. +type PluginOperation struct { + Service types.ServiceConfig + Action string +} + +// ReconciliationPlan holds the full set of operations and their dependency edges. +type ReconciliationPlan struct { + Operations map[string]*Operation + Dependents map[string][]string // op ID -> IDs of ops that depend on it +} + +// Roots returns all operations that have no dependencies (empty DependsOn). +func (p *ReconciliationPlan) Roots() []*Operation { + var roots []*Operation + for _, op := range p.Operations { + if len(op.DependsOn) == 0 { + roots = append(roots, op) + } + } + sort.Slice(roots, func(i, j int) bool { + return roots[i].ID < roots[j].ID + }) + return roots +} + +// IsEmpty reports whether the plan contains no operations. +func (p *ReconciliationPlan) IsEmpty() bool { + return len(p.Operations) == 0 +} + +// String returns a deterministic, test-friendly dump of the plan. +// +// Operations are listed in topological order, each prefixed by a sequential +// number. Operations that depend on earlier ones show their dependency +// numbers in brackets before an arrow. +// +// Example output: +// +// 1. create network testproject_default reason: network does not exist +// 2. create volume testproject_myvol reason: volume does not exist +// [1,2] -> 3. create container testproject-web-1 reason: scale up +// [3] -> 4. 
start container testproject-web-1 reason: container not running +func (p *ReconciliationPlan) String() string { + if p.IsEmpty() { + return "(empty plan)" + } + + ops := topologicalSort(p) + + // Assign a 1-based index to each operation ID + index := make(map[string]int, len(ops)) + for i, op := range ops { + index[op.ID] = i + 1 + } + + var b strings.Builder + b.WriteByte('\n') + for _, op := range ops { + if len(op.DependsOn) > 0 { + depNums := make([]string, 0, len(op.DependsOn)) + sorted := make([]string, len(op.DependsOn)) + copy(sorted, op.DependsOn) + sort.Strings(sorted) + for _, depID := range sorted { + depNums = append(depNums, strconv.Itoa(index[depID])) + } + fmt.Fprintf(&b, "[%s] -> ", strings.Join(depNums, ",")) + } + fmt.Fprintf(&b, "%d. %s %s %s reason: %s\n", index[op.ID], opVerb(op.Type), opKind(op), op.Resource, op.Reason) + } + return b.String() +} + +// opKind returns a resource-type label for the operation (network, volume, container, plugin). +func opKind(op *Operation) string { + switch { + case op.NetworkOp != nil: + return "network" + case op.ContainerNetworkOp != nil: + return "network" + case op.VolumeOp != nil: + return "volume" + case op.PluginOp != nil: + return "plugin" + default: + return "container" + } +} + +// ReconcileOptions controls the behavior of the Reconcile function. +type ReconcileOptions struct { + Recreate string + RecreateDependencies string + Services []string + Inherit bool + Timeout *time.Duration + RemoveOrphans bool +} + +// Reconcile computes the set of operations needed to bring the observed state +// in line with the desired project configuration. It is a pure function: it +// makes no Docker API calls and has no side effects. 
+func Reconcile(project *types.Project, observed *ObservedState, opts ReconcileOptions) (*ReconciliationPlan, error) { + plan := &ReconciliationPlan{ + Operations: map[string]*Operation{}, + Dependents: map[string][]string{}, + } + + // Expand targeted services to include all transitive dependencies. + // Keep the original list for recreate policy decisions. + targetedServices := opts.Services + if len(opts.Services) > 0 { + opts.Services = expandServiceDependencies(project, opts.Services) + } + + // Step 1 - Networks + if err := reconcileNetworks(project, observed, plan, opts); err != nil { + return nil, err + } + + // Step 2 - Volumes + if err := reconcileVolumes(project, observed, plan); err != nil { + return nil, err + } + + // Collect volume keys being recreated so we can force-recreate containers using them + recreatedVolumes := map[string]bool{} + for _, op := range plan.Operations { + if op.Type == OpRemoveVolume && op.VolumeOp != nil { + recreatedVolumes[op.VolumeOp.VolumeKey] = true + } + } + + // Build network ID map and volume name map for needsRecreate checks + networkIDs := map[string]string{} + for key, n := range observed.Networks { + networkIDs[key] = n.ID + } + volumeNames := map[string]string{} + for key, v := range observed.Volumes { + volumeNames[key] = v.Name + } + + // Populate networkIDs and volumeNames for external resources. + // External networks/volumes lack the project label, so they won't appear + // in observed.Networks/Volumes. We scan containers to find the actual IDs. 
+ resolveExternalNetworkIDs(project, observed, networkIDs) + resolveExternalVolumeNames(project, observed, volumeNames) + + // Step 3 - Containers per service + if err := reconcileServices(project, observed, networkIDs, volumeNames, recreatedVolumes, plan, opts, targetedServices); err != nil { + return nil, err + } + + // Step 4 - Orphans + if opts.RemoveOrphans { + reconcileOrphans(observed.Orphans, plan, opts) + } + + // Step 5 - Cascading restarts: when a service is recreated, stop+start + // dependent services that have restart: true in their dependency config. + addCascadingRestarts(project, observed, plan, opts) + + // Step 6 - Dependencies + buildDependencyEdges(project, plan) + + return plan, nil +} + +// reconcileServices iterates over project services and adds container or plugin +// operations to the plan for each targeted service. +func reconcileServices( + project *types.Project, + observed *ObservedState, + networkIDs, volumeNames map[string]string, + recreatedVolumes map[string]bool, + plan *ReconciliationPlan, + opts ReconcileOptions, + targetedServices []string, +) error { + for _, service := range project.Services { + if len(opts.Services) > 0 && !slices.Contains(opts.Services, service.Name) { + continue + } + + if service.Provider != nil { + id := fmt.Sprintf("run-plugin:%s", service.Name) + plan.Operations[id] = &Operation{ + ID: id, + Type: OpRunPlugin, + ServiceName: service.Name, + Resource: service.Name, + PluginOp: &PluginOperation{ + Service: service, + Action: "up", + }, + Reason: "plugin service", + } + continue + } + + if err := reconcileServiceContainers(project, service, observed.Containers[service.Name], networkIDs, volumeNames, recreatedVolumes, plan, opts, targetedServices); err != nil { + return err + } + } + return nil +} + +// reconcileOrphans adds stop+remove operations for orphan containers. 
+func reconcileOrphans(orphans Containers, plan *ReconciliationPlan, opts ReconcileOptions) { + for _, ctr := range orphans { + ctrName := getCanonicalContainerName(ctr) + stopID := fmt.Sprintf("stop-container:%s", ctrName) + plan.Operations[stopID] = &Operation{ + ID: stopID, + Type: OpStopContainer, + ServiceName: ctr.Labels[api.ServiceLabel], + Resource: ctrName, + ContainerOp: &ContainerOperation{ + ContainerName: ctrName, + Existing: &ctr, + Timeout: opts.Timeout, + }, + Reason: "orphan container", + } + removeID := fmt.Sprintf("remove-container:%s", ctrName) + plan.Operations[removeID] = &Operation{ + ID: removeID, + Type: OpRemoveContainer, + ServiceName: ctr.Labels[api.ServiceLabel], + Resource: ctrName, + ContainerOp: &ContainerOperation{ + ContainerName: ctrName, + Existing: &ctr, + Timeout: opts.Timeout, + }, + DependsOn: []string{stopID}, + Reason: "orphan container", + } + } +} + +// reconcileNetworks adds network create/remove/disconnect operations to the plan. +// When a network must be recreated (config hash diverged), it decomposes the +// recreation into discrete operations: stop containers → disconnect → remove → create, +// each with proper dependency edges. +func reconcileNetworks(project *types.Project, observed *ObservedState, plan *ReconciliationPlan, opts ReconcileOptions) error { + for key, net := range project.Networks { + if net.External { + continue + } + n := net + existing, found := observed.Networks[key] + if !found { + id := fmt.Sprintf("create-network:%s", n.Name) + plan.Operations[id] = &Operation{ + ID: id, + Type: OpCreateNetwork, + Resource: n.Name, + NetworkOp: &NetworkOperation{ + NetworkKey: key, + Desired: &n, + }, + Reason: "network does not exist", + } + continue + } + desiredHash, err := NetworkHash(&n) + if err != nil { + return fmt.Errorf("hashing network %q: %w", key, err) + } + if existing.ConfigHash == desiredHash { + continue + } + + // Network config has diverged — decompose recreation into: + // 1. 
Stop containers connected to this network + // 2. Disconnect containers from the network + // 3. Remove the old network + // 4. Create the new network + + // Find all containers connected to this network + connectedContainers := findContainersOnNetwork(observed, n.Name, existing.ID) + + var disconnectIDs []string + for _, ctr := range connectedContainers { + ctrName := getCanonicalContainerName(ctr) + + // Stop the container (needed before disconnect) + stopID := fmt.Sprintf("stop-container:%s", ctrName) + if _, exists := plan.Operations[stopID]; !exists { + plan.Operations[stopID] = &Operation{ + ID: stopID, + Type: OpStopContainer, + ServiceName: ctr.Labels[api.ServiceLabel], + Resource: ctrName, + ContainerOp: &ContainerOperation{ + ContainerName: ctrName, + Existing: &ctr, + Timeout: opts.Timeout, + }, + Reason: fmt.Sprintf("network %q is being recreated", n.Name), + } + } + + // Disconnect the container from the network + disconnectID := fmt.Sprintf("disconnect-network:%s/%s", n.Name, ctrName) + plan.Operations[disconnectID] = &Operation{ + ID: disconnectID, + Type: OpDisconnectNetwork, + Resource: fmt.Sprintf("%s from %s", ctrName, n.Name), + ContainerNetworkOp: &ContainerNetworkOperation{ + NetworkName: n.Name, + ContainerID: ctr.ID, + }, + DependsOn: []string{stopID}, + Reason: fmt.Sprintf("network %q is being recreated", n.Name), + } + disconnectIDs = append(disconnectIDs, disconnectID) + } + + // Remove the old network (depends on all disconnects) + removeID := fmt.Sprintf("remove-network:%s", n.Name) + plan.Operations[removeID] = &Operation{ + ID: removeID, + Type: OpRemoveNetwork, + Resource: n.Name, + NetworkOp: &NetworkOperation{ + NetworkKey: key, + Existing: &existing, + }, + DependsOn: disconnectIDs, + Reason: "config hash changed", + } + + // Create the new network (depends on remove) + createID := fmt.Sprintf("create-network:%s", n.Name) + plan.Operations[createID] = &Operation{ + ID: createID, + Type: OpCreateNetwork, + Resource: n.Name, + 
NetworkOp: &NetworkOperation{ + NetworkKey: key, + Desired: &n, + }, + DependsOn: []string{removeID}, + Reason: "config hash changed", + } + + // Reconnect and restart containers + for _, ctr := range connectedContainers { + ctrName := getCanonicalContainerName(ctr) + + // Connect the container to the new network + connectID := fmt.Sprintf("connect-network:%s/%s", n.Name, ctrName) + plan.Operations[connectID] = &Operation{ + ID: connectID, + Type: OpConnectNetwork, + Resource: fmt.Sprintf("%s to %s", ctrName, n.Name), + ContainerNetworkOp: &ContainerNetworkOperation{ + NetworkName: n.Name, + ContainerID: ctr.ID, + }, + DependsOn: []string{createID}, + Reason: fmt.Sprintf("network %q has been recreated", n.Name), + } + + // Start the container (depends on connect) + startID := fmt.Sprintf("start-container:%s", ctrName) + if _, exists := plan.Operations[startID]; !exists { + plan.Operations[startID] = &Operation{ + ID: startID, + Type: OpStartContainer, + ServiceName: ctr.Labels[api.ServiceLabel], + Resource: ctrName, + ContainerOp: &ContainerOperation{ + ContainerName: ctrName, + Existing: &ctr, + }, + DependsOn: []string{connectID}, + Reason: fmt.Sprintf("network %q has been recreated", n.Name), + } + } + } + } + return nil +} + +// findContainersOnNetwork returns all containers from the observed state +// that are connected to the given network (by name or ID). +func findContainersOnNetwork(observed *ObservedState, networkName string, networkID string) []container.Summary { + var result []container.Summary + allContainers := observed.allContainers() + for _, ctr := range allContainers { + if ctr.NetworkSettings == nil { + continue + } + for name, ep := range ctr.NetworkSettings.Networks { + if ep != nil && ((networkID != "" && ep.NetworkID == networkID) || (networkName != "" && name == networkName)) { + result = append(result, ctr) + break + } + } + } + return result +} + +// reconcileVolumes adds volume create/remove operations to the plan. 
+// When a volume must be recreated (config hash diverged), it decomposes the +// recreation into discrete operations: stop containers → remove containers → remove volume → create volume, +// each with proper dependency edges. +func reconcileVolumes(project *types.Project, observed *ObservedState, plan *ReconciliationPlan) error { + for key, vol := range project.Volumes { + if vol.External { + continue + } + v := vol + existing, found := observed.Volumes[key] + if !found { + id := fmt.Sprintf("create-volume:%s", v.Name) + plan.Operations[id] = &Operation{ + ID: id, + Type: OpCreateVolume, + Resource: v.Name, + VolumeOp: &VolumeOperation{ + VolumeKey: key, + Desired: &v, + }, + Reason: "volume does not exist", + } + continue + } + desiredHash, err := VolumeHash(v) + if err != nil { + return fmt.Errorf("hashing volume %q: %w", key, err) + } + if existing.ConfigHash == desiredHash { + continue + } + + // Volume config has diverged — decompose recreation into: + // 1. Stop containers using this volume + // 2. Remove containers (required before volume can be removed) + // 3. Remove the old volume + // 4. 
Create the new volume + + connectedContainers := findContainersUsingVolume(project, observed, key) + + var removeContainerIDs []string + for _, ctr := range connectedContainers { + ctrName := getCanonicalContainerName(ctr) + + // Stop the container + stopID := fmt.Sprintf("stop-container:%s", ctrName) + if _, exists := plan.Operations[stopID]; !exists { + plan.Operations[stopID] = &Operation{ + ID: stopID, + Type: OpStopContainer, + ServiceName: ctr.Labels[api.ServiceLabel], + Resource: ctrName, + ContainerOp: &ContainerOperation{ + ContainerName: ctrName, + Existing: &ctr, + }, + Reason: fmt.Sprintf("volume %q is being recreated", v.Name), + } + } + + // Remove the container (volumes require container removal, not just stop) + removeID := fmt.Sprintf("remove-container:%s", ctrName) + if _, exists := plan.Operations[removeID]; !exists { + plan.Operations[removeID] = &Operation{ + ID: removeID, + Type: OpRemoveContainer, + ServiceName: ctr.Labels[api.ServiceLabel], + Resource: ctrName, + ContainerOp: &ContainerOperation{ + ContainerName: ctrName, + Existing: &ctr, + }, + DependsOn: []string{stopID}, + Reason: fmt.Sprintf("volume %q is being recreated", v.Name), + } + } + removeContainerIDs = append(removeContainerIDs, removeID) + } + + // Remove the old volume (depends on all container removals) + removeID := fmt.Sprintf("remove-volume:%s", v.Name) + plan.Operations[removeID] = &Operation{ + ID: removeID, + Type: OpRemoveVolume, + Resource: v.Name, + VolumeOp: &VolumeOperation{ + VolumeKey: key, + Existing: &existing, + }, + DependsOn: removeContainerIDs, + Reason: "config hash changed", + } + + // Create the new volume (depends on remove) + createID := fmt.Sprintf("create-volume:%s", v.Name) + plan.Operations[createID] = &Operation{ + ID: createID, + Type: OpCreateVolume, + Resource: v.Name, + VolumeOp: &VolumeOperation{ + VolumeKey: key, + Desired: &v, + }, + DependsOn: []string{removeID}, + Reason: "config hash changed", + } + } + return nil +} + +// 
findContainersUsingVolume returns all containers from the observed state +// that mount the given volume key. +func findContainersUsingVolume(project *types.Project, observed *ObservedState, volumeKey string) []container.Summary { + // Find services that use this volume + var result []container.Summary + for _, service := range project.Services { + usesVolume := false + for _, vol := range service.Volumes { + if vol.Type == string(mmount.TypeVolume) && vol.Source == volumeKey { + usesVolume = true + break + } + } + if !usesVolume { + continue + } + if ctrs, ok := observed.Containers[service.Name]; ok { + result = append(result, ctrs...) + } + } + return result +} + +// resolveExternalNetworkIDs populates networkIDs with IDs for external networks +// by scanning observed containers' network settings. +func resolveExternalNetworkIDs(project *types.Project, observed *ObservedState, networkIDs map[string]string) { + for key, net := range project.Networks { + if !net.External || networkIDs[key] != "" { + continue + } + for _, ctrs := range observed.Containers { + for _, ctr := range ctrs { + if ctr.NetworkSettings == nil { + continue + } + for netName, ep := range ctr.NetworkSettings.Networks { + if ep != nil && netName == net.Name { + networkIDs[key] = ep.NetworkID + } + } + } + } + } +} + +// resolveExternalVolumeNames populates volumeNames with names for external volumes +// by scanning observed containers' mounts. 
+func resolveExternalVolumeNames(project *types.Project, observed *ObservedState, volumeNames map[string]string) { + for key, vol := range project.Volumes { + if !vol.External || volumeNames[key] != "" { + continue + } + for _, ctrs := range observed.Containers { + for _, ctr := range ctrs { + for _, mount := range ctr.Mounts { + if mount.Type == mmount.TypeVolume && mount.Name == vol.Name { + volumeNames[key] = vol.Name + } + } + } + } + } +} + +// reconcileServiceContainers computes plan operations for a single service's +// containers: scale down, recreate, start stopped, and scale up. +func reconcileServiceContainers( + project *types.Project, + service types.ServiceConfig, + containers []container.Summary, + networkIDs map[string]string, + volumeNames map[string]string, + recreatedVolumes map[string]bool, + plan *ReconciliationPlan, + opts ReconcileOptions, + targetedServices []string, +) error { + expected, err := getScale(service) + if err != nil { + return err + } + + // Determine recreate policy for this service. + // Use the original targeted services list (before dependency expansion) + // so that dependencies get the RecreateDependencies policy, not the main Recreate policy. + policy := opts.RecreateDependencies + if len(targetedServices) == 0 || slices.Contains(targetedServices, service.Name) { + policy = opts.Recreate + } + + // Precompute which containers are obsolete to avoid repeated hashing in the sort comparator. 
+ obsolete := make(map[string]bool, len(containers)) + for _, ctr := range containers { + recreate, _, _ := needsRecreate(service, ctr, networkIDs, volumeNames, policy) + obsolete[ctr.ID] = recreate + } + + // Sort containers: obsolete first, then by container number ascending after reverse + sort.Slice(containers, func(i, j int) bool { + oi, oj := obsolete[containers[i].ID], obsolete[containers[j].ID] + if oi != oj { + return oi + } + + ni, erri := strconv.Atoi(containers[i].Labels[api.ContainerNumberLabel]) + nj, errj := strconv.Atoi(containers[j].Labels[api.ContainerNumberLabel]) + if erri == nil && errj == nil { + return ni > nj + } + return containers[i].Created < containers[j].Created + }) + slices.Reverse(containers) + + actual := len(containers) + for i, ctr := range containers { + if i >= expected { + // Scale down: stop + remove + ctrName := getCanonicalContainerName(ctr) + stopID := fmt.Sprintf("stop-container:%s", ctrName) + plan.Operations[stopID] = &Operation{ + ID: stopID, + Type: OpStopContainer, + ServiceName: service.Name, + Resource: ctrName, + ContainerOp: &ContainerOperation{ + Service: service, + ContainerName: ctrName, + Existing: &ctr, + Timeout: opts.Timeout, + }, + Reason: "scale down", + } + removeID := fmt.Sprintf("remove-container:%s", ctrName) + plan.Operations[removeID] = &Operation{ + ID: removeID, + Type: OpRemoveContainer, + ServiceName: service.Name, + Resource: ctrName, + ContainerOp: &ContainerOperation{ + Service: service, + ContainerName: ctrName, + Existing: &ctr, + Timeout: opts.Timeout, + }, + DependsOn: []string{stopID}, + Reason: "scale down", + } + continue + } + + // Check if the service uses a volume that is being recreated. + // In that case, ensureVolume will stop+remove the container internally, + // so we emit OpCreateContainer since the old + // container will be gone by the time we execute. 
+ if volReason := serviceUsesRecreatedVolume(service, recreatedVolumes); volReason != "" { + ctrName := getCanonicalContainerName(ctr) + number, _ := strconv.Atoi(ctr.Labels[api.ContainerNumberLabel]) + id := fmt.Sprintf("create-container:%s", ctrName) + plan.Operations[id] = &Operation{ + ID: id, + Type: OpCreateContainer, + ServiceName: service.Name, + Resource: ctrName, + ContainerOp: &ContainerOperation{ + Service: service, + ContainerName: ctrName, + ContainerNumber: number, + Inherit: opts.Inherit, + Timeout: opts.Timeout, + }, + Reason: volReason, + } + continue + } + + recreate, reason, err := needsRecreate(service, ctr, networkIDs, volumeNames, policy) + if err != nil { + return err + } + if recreate { + ctrName := getCanonicalContainerName(ctr) + number, _ := strconv.Atoi(ctr.Labels[api.ContainerNumberLabel]) + idPrefix := ctr.ID + if len(idPrefix) > 12 { + idPrefix = idPrefix[:12] + } + tmpName := fmt.Sprintf("%s_%s", idPrefix, ctrName) + + // 1. Create new container with temp name (inheriting from old if needed) + createID := fmt.Sprintf("create-container:%s", tmpName) + plan.Operations[createID] = &Operation{ + ID: createID, + Type: OpCreateContainer, + ServiceName: service.Name, + Resource: tmpName, + ContainerOp: &ContainerOperation{ + Service: service, + ContainerName: tmpName, + ContainerNumber: number, + Existing: &ctr, + Inherit: opts.Inherit, + }, + Reason: reason, + } + + // 2. Stop old container (depends on create being ready) + stopID := fmt.Sprintf("stop-container:%s", ctrName) + plan.Operations[stopID] = &Operation{ + ID: stopID, + Type: OpStopContainer, + ServiceName: service.Name, + Resource: ctrName, + ContainerOp: &ContainerOperation{ + Service: service, + ContainerName: ctrName, + Existing: &ctr, + Timeout: opts.Timeout, + }, + DependsOn: []string{createID}, + Reason: reason, + } + + // 3. 
Remove old container (depends on stop) + removeID := fmt.Sprintf("remove-container:%s", ctrName) + plan.Operations[removeID] = &Operation{ + ID: removeID, + Type: OpRemoveContainer, + ServiceName: service.Name, + Resource: ctrName, + ContainerOp: &ContainerOperation{ + Service: service, + ContainerName: ctrName, + Existing: &ctr, + Timeout: opts.Timeout, + }, + DependsOn: []string{stopID}, + Reason: reason, + } + + // 4. Rename new container to final name (depends on remove) + renameID := fmt.Sprintf("rename-container:%s", ctrName) + plan.Operations[renameID] = &Operation{ + ID: renameID, + Type: OpRenameContainer, + ServiceName: service.Name, + Resource: ctrName, + RenameOp: &RenameOperation{ + CurrentName: tmpName, + NewName: ctrName, + }, + DependsOn: []string{removeID}, + Reason: reason, + } + + // 5. Start the new container (depends on rename) + startID := fmt.Sprintf("start-container:%s", ctrName) + plan.Operations[startID] = &Operation{ + ID: startID, + Type: OpStartContainer, + ServiceName: service.Name, + Resource: ctrName, + ContainerOp: &ContainerOperation{ + Service: service, + ContainerName: ctrName, + // Existing is nil here; executePlanStartContainer will + // look up the container by name after rename. 
+ }, + DependsOn: []string{renameID}, + Reason: reason, + } + continue + } + + // Container is up-to-date; check if it needs starting + switch ctr.State { + case container.StateRunning, container.StateCreated, container.StateRestarting, container.StateExited: + // no action needed + default: + ctrName := getCanonicalContainerName(ctr) + id := fmt.Sprintf("start-container:%s", ctrName) + plan.Operations[id] = &Operation{ + ID: id, + Type: OpStartContainer, + ServiceName: service.Name, + Resource: ctrName, + ContainerOp: &ContainerOperation{ + Service: service, + ContainerName: ctrName, + Existing: &ctr, + }, + Reason: fmt.Sprintf("container not running (state: %s)", ctr.State), + } + } + } + + // Scale up: create missing containers + next := nextContainerNumber(containers) + for i := 0; i < expected-actual; i++ { + number := next + i + name := getContainerName(project.Name, service, number) + id := fmt.Sprintf("create-container:%s", name) + plan.Operations[id] = &Operation{ + ID: id, + Type: OpCreateContainer, + ServiceName: service.Name, + Resource: name, + ContainerOp: &ContainerOperation{ + Service: service, + ContainerName: name, + ContainerNumber: number, + Inherit: opts.Inherit, + Timeout: opts.Timeout, + }, + Reason: "scale up", + } + } + + return nil +} + +// buildDependencyEdges wires up DependsOn and Dependents for container operations +// based on service dependencies, network dependencies, and volume dependencies. +func buildDependencyEdges(project *types.Project, plan *ReconciliationPlan) { + readyOpsByService := indexReadyOps(plan) + + for _, op := range plan.Operations { + if (op.Type == OpCreateContainer || op.Type == OpStartContainer) && op.ContainerOp != nil { + addOperationDependencies(project, plan, op, readyOpsByService) + } + } + + buildReverseEdges(plan) +} + +// indexReadyOps indexes the "service ready" operation for each service name. 
+// This is the last operation that must complete before dependents can proceed: +// - Recreate chain: the start after rename (create → stop → remove → rename → start) +// - Fresh create: the OpCreateContainer itself +// - Plugin: the OpRunPlugin +func indexReadyOps(plan *ReconciliationPlan) map[string][]string { + readyOpsByService := map[string][]string{} + for id, op := range plan.Operations { + switch op.Type { + case OpStartContainer: + if len(op.DependsOn) == 1 { + if dep := plan.Operations[op.DependsOn[0]]; dep != nil && dep.Type == OpRenameContainer { + readyOpsByService[op.ServiceName] = append(readyOpsByService[op.ServiceName], id) + } + } + case OpCreateContainer: + if op.ContainerOp != nil && op.ContainerOp.Existing == nil { + readyOpsByService[op.ServiceName] = append(readyOpsByService[op.ServiceName], id) + } + case OpRunPlugin: + readyOpsByService[op.ServiceName] = append(readyOpsByService[op.ServiceName], id) + } + } + return readyOpsByService +} + +// buildReverseEdges populates plan.Dependents from each operation's DependsOn list. +func buildReverseEdges(plan *ReconciliationPlan) { + for _, op := range plan.Operations { + sort.Strings(op.DependsOn) + for _, depID := range op.DependsOn { + plan.Dependents[depID] = append(plan.Dependents[depID], op.ID) + } + } + for depID := range plan.Dependents { + sort.Strings(plan.Dependents[depID]) + } +} + +// addOperationDependencies adds service, network, and volume dependency edges +// to a single container operation. 
+func addOperationDependencies(project *types.Project, plan *ReconciliationPlan, op *Operation, createOpsByService map[string][]string) { + service := op.ContainerOp.Service + + // Depend on create ops for dependency services + for _, depName := range service.GetDependencies() { + for _, depOpID := range createOpsByService[depName] { + if !slices.Contains(op.DependsOn, depOpID) { + op.DependsOn = append(op.DependsOn, depOpID) + } + } + } + + // Depend on network create ops for networks used by this service + for net := range service.Networks { + networkConfig, ok := project.Networks[net] + if !ok { + continue + } + netOpID := "create-network:" + networkConfig.Name + if _, exists := plan.Operations[netOpID]; exists { + if !slices.Contains(op.DependsOn, netOpID) { + op.DependsOn = append(op.DependsOn, netOpID) + } + } + } + + // Depend on volume create ops for volumes used by this service + for _, vol := range service.Volumes { + if vol.Type != string(mmount.TypeVolume) { + continue + } + if vol.Source == "" { + continue + } + volConfig, ok := project.Volumes[vol.Source] + if !ok { + continue + } + volOpID := "create-volume:" + volConfig.Name + if _, exists := plan.Operations[volOpID]; exists { + if !slices.Contains(op.DependsOn, volOpID) { + op.DependsOn = append(op.DependsOn, volOpID) + } + } + } +} + +// needsRecreate determines whether a container must be recreated based on the +// service configuration, observed state, and recreate policy. It returns whether +// recreation is needed, the reason string, and any error. 
+func needsRecreate(expected types.ServiceConfig, actual container.Summary, networks map[string]string, volumes map[string]string, policy string) (bool, string, error) { + if policy == api.RecreateNever { + return false, "", nil + } + if policy == api.RecreateForce { + return true, "force recreate", nil + } + + configHash, err := ServiceHash(expected) + if err != nil { + return false, "", err + } + if actual.Labels[api.ConfigHashLabel] != configHash { + return true, "config hash changed", nil + } + + if actual.Labels[api.ImageDigestLabel] != expected.CustomLabels[api.ImageDigestLabel] { + return true, "image digest changed", nil + } + + if networks != nil && actual.State == container.StateRunning { + if checkExpectedNetworks(expected, actual, networks) { + return true, "network configuration changed", nil + } + } + + if volumes != nil { + if checkExpectedVolumes(expected, actual, volumes) { + return true, "volume configuration changed", nil + } + } + + return false, "", nil +} + +// expandServiceDependencies returns a list that includes all services in +// `services` plus all their transitive dependencies found in the project. +func expandServiceDependencies(project *types.Project, services []string) []string { + seen := map[string]bool{} + var walk func(name string) + walk = func(name string) { + if seen[name] { + return + } + seen[name] = true + svc, ok := project.Services[name] + if !ok { + return + } + for _, dep := range svc.GetDependencies() { + walk(dep) + } + } + for _, s := range services { + walk(s) + } + result := make([]string, 0, len(seen)) + for name := range seen { + result = append(result, name) + } + sort.Strings(result) + return result +} + +// addCascadingRestarts adds stop+start operations for services whose +// dependencies are being recreated and that have restart: true in their +// depends_on config. 
+func addCascadingRestarts(project *types.Project, observed *ObservedState, plan *ReconciliationPlan, opts ReconcileOptions) { + // Collect services being recreated (rename ops indicate a recreate chain) + recreatedServices := map[string]bool{} + for _, op := range plan.Operations { + if op.Type == OpRenameContainer { + recreatedServices[op.ServiceName] = true + } + } + if len(recreatedServices) == 0 { + return + } + + // For each service in the project, check if it depends on a recreated service with restart: true + for _, service := range project.Services { + for depName, dep := range service.DependsOn { + if !dep.Restart || !recreatedServices[depName] { + continue + } + // This service's dependency is being recreated and has restart: true. + // Add stop+start ops for its running containers (if not already being recreated/stopped). + for _, ctr := range observed.Containers[service.Name] { + ctrName := getCanonicalContainerName(ctr) + // Skip if we already have operations for this container + if _, exists := plan.Operations["recreate-container:"+ctrName]; exists { + continue + } + if _, exists := plan.Operations["stop-container:"+ctrName]; exists { + continue + } + if ctr.State != container.StateRunning { + continue + } + + stopID := fmt.Sprintf("stop-container:%s", ctrName) + plan.Operations[stopID] = &Operation{ + ID: stopID, + Type: OpStopContainer, + ServiceName: service.Name, + Resource: ctrName, + ContainerOp: &ContainerOperation{ + Service: service, + ContainerName: ctrName, + Existing: &ctr, + Timeout: opts.Timeout, + }, + Reason: fmt.Sprintf("dependency %q is being recreated (restart: true)", depName), + } + + startID := fmt.Sprintf("start-container:%s", ctrName) + plan.Operations[startID] = &Operation{ + ID: startID, + Type: OpStartContainer, + ServiceName: service.Name, + Resource: ctrName, + ContainerOp: &ContainerOperation{ + Service: service, + ContainerName: ctrName, + Existing: &ctr, + }, + DependsOn: []string{stopID}, + Reason: 
fmt.Sprintf("restart after dependency %q recreated", depName), + } + } + break // Only need to process once per service + } + } +} + +// serviceUsesRecreatedVolume checks if a service mounts any volume that is +// being recreated, and returns a reason string if so. +func serviceUsesRecreatedVolume(service types.ServiceConfig, recreatedVolumes map[string]bool) string { + for _, vol := range service.Volumes { + if vol.Type != string(mmount.TypeVolume) { + continue + } + if recreatedVolumes[vol.Source] { + return fmt.Sprintf("volume %q is being recreated", vol.Source) + } + } + return "" +} diff --git a/pkg/compose/reconcile_test.go b/pkg/compose/reconcile_test.go new file mode 100644 index 00000000000..299ec5c3024 --- /dev/null +++ b/pkg/compose/reconcile_test.go @@ -0,0 +1,1178 @@ +/* + Copyright 2020 Docker Compose CLI authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package compose + +import ( + "fmt" + "testing" + + "github.com/compose-spec/compose-go/v2/types" + "github.com/moby/moby/api/types/container" + "github.com/moby/moby/api/types/network" + "gotest.tools/v3/assert" + + "github.com/docker/compose/v5/pkg/api" +) + +// --------------------------------------------------------------------------- +// needsRecreate tests +// --------------------------------------------------------------------------- + +func TestNeedsRecreateNeverPolicy(t *testing.T) { + service := types.ServiceConfig{Name: "web", Image: "nginx"} + ctr := container.Summary{ + ID: "abc123", + Names: []string{"/testproject-web-1"}, + Labels: map[string]string{}, + State: container.StateRunning, + } + + recreate, reason, err := needsRecreate(service, ctr, nil, nil, api.RecreateNever) + assert.NilError(t, err) + assert.Assert(t, !recreate) + assert.Equal(t, reason, "") +} + +func TestNeedsRecreateForcePolicy(t *testing.T) { + service := types.ServiceConfig{Name: "web", Image: "nginx"} + ctr := container.Summary{ + ID: "abc123", + Names: []string{"/testproject-web-1"}, + Labels: map[string]string{}, + State: container.StateRunning, + } + + recreate, reason, err := needsRecreate(service, ctr, nil, nil, api.RecreateForce) + assert.NilError(t, err) + assert.Assert(t, recreate) + assert.Equal(t, reason, "force recreate") +} + +func TestNeedsRecreateConfigHashChanged(t *testing.T) { + service := types.ServiceConfig{Name: "web", Image: "nginx"} + ctr := container.Summary{ + ID: "abc123", + Names: []string{"/testproject-web-1"}, + Labels: map[string]string{ + api.ConfigHashLabel: "stale-hash-value", + }, + State: container.StateRunning, + } + + recreate, reason, err := needsRecreate(service, ctr, nil, nil, api.RecreateDiverged) + assert.NilError(t, err) + assert.Assert(t, recreate) + assert.Equal(t, reason, "config hash changed") +} + +func TestNeedsRecreateUpToDate(t *testing.T) { + service := types.ServiceConfig{Name: "web", Image: "nginx"} + hash, err := 
ServiceHash(service) + assert.NilError(t, err) + + ctr := container.Summary{ + ID: "abc123", + Names: []string{"/testproject-web-1"}, + Labels: map[string]string{ + api.ConfigHashLabel: hash, + api.ImageDigestLabel: "", // matches zero-value in CustomLabels + }, + State: container.StateRunning, + } + + recreate, reason, err := needsRecreate(service, ctr, nil, nil, api.RecreateDiverged) + assert.NilError(t, err) + assert.Assert(t, !recreate) + assert.Equal(t, reason, "") +} + +// --------------------------------------------------------------------------- +// Reconcile tests +// --------------------------------------------------------------------------- + +func TestReconcileCreateMissingNetwork(t *testing.T) { + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "web": types.ServiceConfig{ + Name: "web", + Image: "nginx", + Networks: map[string]*types.ServiceNetworkConfig{ + "default": nil, + }, + }, + }, + Networks: types.Networks{ + "default": types.NetworkConfig{Name: "testproject_default"}, + }, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{}, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. create network testproject_default reason: network does not exist +[1] -> 2. 
create container testproject-web-1 reason: scale up +`) +} + +func TestReconcileSkipUpToDateNetwork(t *testing.T) { + net := types.NetworkConfig{Name: "testproject_default"} + hash, err := NetworkHash(&net) + assert.NilError(t, err) + + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "web": types.ServiceConfig{Name: "web", Image: "nginx"}, + }, + Networks: types.Networks{ + "default": net, + }, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{}, + Networks: map[string]ObservedNetwork{ + "default": { + ID: "net123", + Name: "testproject_default", + ConfigHash: hash, + }, + }, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + + for _, op := range plan.Operations { + if op.Type == OpCreateNetwork || op.Type == OpRemoveNetwork { + t.Fatalf("unexpected network operation: %s", op.ID) + } + } +} + +func TestReconcileRecreateChangedNetwork(t *testing.T) { + service := types.ServiceConfig{ + Name: "web", + Image: "nginx", + Networks: map[string]*types.ServiceNetworkConfig{ + "default": nil, + }, + } + configHash, err := ServiceHash(service) + assert.NilError(t, err) + + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "web": service, + }, + Networks: types.Networks{ + "default": types.NetworkConfig{Name: "testproject_default"}, + }, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{ + "web": { + { + ID: "ctr1", + Names: []string{"/testproject-web-1"}, + State: container.StateRunning, + Labels: map[string]string{ + api.ServiceLabel: "web", + api.ContainerNumberLabel: "1", + api.ProjectLabel: "testproject", + api.ConfigHashLabel: configHash, + }, + NetworkSettings: &container.NetworkSettingsSummary{ + Networks: 
map[string]*network.EndpointSettings{ + "testproject_default": {NetworkID: "net123"}, + }, + }, + }, + }, + }, + Networks: map[string]ObservedNetwork{ + "default": { + ID: "net123", + Name: "testproject_default", + ConfigHash: "outdated-hash", + }, + }, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. stop container testproject-web-1 reason: network "testproject_default" is being recreated +[1] -> 2. disconnect network testproject-web-1 from testproject_default reason: network "testproject_default" is being recreated +[2] -> 3. remove network testproject_default reason: config hash changed +[3] -> 4. create network testproject_default reason: config hash changed +[4] -> 5. connect network testproject-web-1 to testproject_default reason: network "testproject_default" has been recreated +[5] -> 6. start container testproject-web-1 reason: network "testproject_default" has been recreated +`) +} + +func TestReconcileCreateMissingVolume(t *testing.T) { + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "web": types.ServiceConfig{ + Name: "web", + Image: "nginx", + Volumes: []types.ServiceVolumeConfig{ + {Type: "volume", Source: "data", Target: "/data"}, + }, + }, + }, + Volumes: types.Volumes{ + "data": types.VolumeConfig{Name: "testproject_data"}, + }, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{}, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. 
create volume testproject_data reason: volume does not exist +[1] -> 2. create container testproject-web-1 reason: scale up +`) +} + +func TestReconcileScaleUp(t *testing.T) { + service := types.ServiceConfig{ + Name: "web", + Image: "nginx", + Scale: intPtr(2), + } + // Compute hash before project is built, since ServiceHash strips Scale + hash, err := ServiceHash(service) + assert.NilError(t, err) + + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "web": service, + }, + } + + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{ + "web": { + { + ID: "abc123", + Names: []string{"/testproject-web-1"}, + Labels: map[string]string{ + api.ServiceLabel: "web", + api.ContainerNumberLabel: "1", + api.ConfigHashLabel: hash, + api.ProjectLabel: "testproject", + }, + State: container.StateRunning, + }, + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. 
create container testproject-web-2 reason: scale up +`) +} + +func TestReconcileScaleDown(t *testing.T) { + service := types.ServiceConfig{Name: "web", Image: "nginx"} + hash, err := ServiceHash(service) + assert.NilError(t, err) + + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "web": service, + }, + } + + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{ + "web": { + { + ID: "abc123", + Names: []string{"/testproject-web-1"}, + Labels: map[string]string{ + api.ServiceLabel: "web", + api.ContainerNumberLabel: "1", + api.ConfigHashLabel: hash, + api.ProjectLabel: "testproject", + }, + State: container.StateRunning, + }, + { + ID: "def456", + Names: []string{"/testproject-web-2"}, + Labels: map[string]string{ + api.ServiceLabel: "web", + api.ContainerNumberLabel: "2", + api.ConfigHashLabel: hash, + api.ProjectLabel: "testproject", + }, + State: container.StateRunning, + }, + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. stop container testproject-web-2 reason: scale down +[1] -> 2. 
remove container testproject-web-2 reason: scale down +`) +} + +func TestReconcileRecreateContainer(t *testing.T) { + service := types.ServiceConfig{Name: "web", Image: "nginx"} + + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "web": service, + }, + } + + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{ + "web": { + { + ID: "abc123def456", + Names: []string{"/testproject-web-1"}, + Labels: map[string]string{ + api.ServiceLabel: "web", + api.ContainerNumberLabel: "1", + api.ConfigHashLabel: "stale-hash", + api.ProjectLabel: "testproject", + }, + State: container.StateRunning, + }, + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. create container abc123def456_testproject-web-1 reason: config hash changed +[1] -> 2. stop container testproject-web-1 reason: config hash changed +[2] -> 3. remove container testproject-web-1 reason: config hash changed +[3] -> 4. rename container testproject-web-1 reason: config hash changed +[4] -> 5. 
start container testproject-web-1 reason: config hash changed +`) +} + +func TestReconcileNoChanges(t *testing.T) { + service := types.ServiceConfig{Name: "web", Image: "nginx"} + hash, err := ServiceHash(service) + assert.NilError(t, err) + + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "web": service, + }, + } + + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{ + "web": { + { + ID: "abc123", + Names: []string{"/testproject-web-1"}, + Labels: map[string]string{ + api.ServiceLabel: "web", + api.ContainerNumberLabel: "1", + api.ConfigHashLabel: hash, + api.ProjectLabel: "testproject", + }, + State: container.StateRunning, + }, + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Assert(t, plan.IsEmpty(), "expected empty plan but got:\n%s", plan.String()) +} + +func TestReconcileOrphans(t *testing.T) { + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "web": types.ServiceConfig{Name: "web", Image: "nginx"}, + }, + } + + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{}, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{ + { + ID: "orphan1", + Names: []string{"/testproject-old-1"}, + Labels: map[string]string{ + api.ServiceLabel: "old", + api.ContainerNumberLabel: "1", + api.ProjectLabel: "testproject", + }, + State: container.StateRunning, + }, + }, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + RemoveOrphans: true, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. 
create container testproject-web-1 reason: scale up +2. stop container testproject-old-1 reason: orphan container +[2] -> 3. remove container testproject-old-1 reason: orphan container +`) +} + +func TestReconcilePluginService(t *testing.T) { + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "plugin-svc": types.ServiceConfig{ + Name: "plugin-svc", + Provider: &types.ServiceProviderConfig{ + Type: "aws", + }, + }, + }, + } + + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{}, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. plugin plugin plugin-svc reason: plugin service +`) +} + +func TestReconcileDependencyEdges(t *testing.T) { + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "web": types.ServiceConfig{ + Name: "web", + Image: "nginx", + DependsOn: types.DependsOnConfig{ + "db": types.ServiceDependency{Condition: "service_started"}, + }, + }, + "db": types.ServiceConfig{ + Name: "db", + Image: "postgres", + }, + }, + } + + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{}, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. create container testproject-db-1 reason: scale up +[1] -> 2. 
create container testproject-web-1 reason: scale up +`) +} + +// --------------------------------------------------------------------------- +// Tests mirroring e2e scenarios — pure Reconcile tests that cover the same +// decision logic as the corresponding e2e/integration tests. +// --------------------------------------------------------------------------- + +// TestReconcileScaleUpMultipleServices mirrors e2e TestScaleBasicCases: +// scaling up 2 services simultaneously should produce create ops for each. +func TestReconcileScaleUpMultipleServices(t *testing.T) { + frontSvc := types.ServiceConfig{Name: "front", Image: "nginx", Scale: intPtr(3)} + backSvc := types.ServiceConfig{Name: "back", Image: "nginx", Scale: intPtr(2)} + frontHash, err := ServiceHash(frontSvc) + assert.NilError(t, err) + backHash, err := ServiceHash(backSvc) + assert.NilError(t, err) + + project := &types.Project{ + Name: "scale-basic-tests", + Services: types.Services{ + "front": frontSvc, + "back": backSvc, + }, + } + observed := &ObservedState{ + ProjectName: "scale-basic-tests", + Containers: map[string]Containers{ + "front": { + makeContainer("scale-basic-tests", "front", 1, frontHash), + }, + "back": { + makeContainer("scale-basic-tests", "back", 1, backHash), + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. create container scale-basic-tests-back-2 reason: scale up +2. create container scale-basic-tests-front-2 reason: scale up +3. create container scale-basic-tests-front-3 reason: scale up +`) +} + +// TestReconcileScaleDownMultipleServices mirrors e2e TestScaleBasicCases: +// scaling down 2 services simultaneously should produce stop+remove ops. 
+func TestReconcileScaleDownMultipleServices(t *testing.T) { + frontSvc := types.ServiceConfig{Name: "front", Image: "nginx", Scale: intPtr(2)} + backSvc := types.ServiceConfig{Name: "back", Image: "nginx", Scale: intPtr(1)} + frontHash, err := ServiceHash(frontSvc) + assert.NilError(t, err) + backHash, err := ServiceHash(backSvc) + assert.NilError(t, err) + + project := &types.Project{ + Name: "scale-basic-tests", + Services: types.Services{ + "front": frontSvc, + "back": backSvc, + }, + } + observed := &ObservedState{ + ProjectName: "scale-basic-tests", + Containers: map[string]Containers{ + "front": { + makeContainer("scale-basic-tests", "front", 1, frontHash), + makeContainer("scale-basic-tests", "front", 2, frontHash), + makeContainer("scale-basic-tests", "front", 3, frontHash), + }, + "back": { + makeContainer("scale-basic-tests", "back", 1, backHash), + makeContainer("scale-basic-tests", "back", 2, backHash), + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. stop container scale-basic-tests-back-2 reason: scale down +2. stop container scale-basic-tests-front-3 reason: scale down +[1] -> 3. remove container scale-basic-tests-back-2 reason: scale down +[2] -> 4. remove container scale-basic-tests-front-3 reason: scale down +`) +} + +// TestReconcileScaleToZero mirrors part of e2e TestScaleBasicCases: +// scaling a service to 0 should stop+remove all its containers. 
+func TestReconcileScaleToZero(t *testing.T) { + svc := types.ServiceConfig{Name: "dbadmin", Image: "nginx", Scale: intPtr(0)} + hash, err := ServiceHash(svc) + assert.NilError(t, err) + + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "dbadmin": svc, + }, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{ + "dbadmin": { + makeContainer("testproject", "dbadmin", 1, hash), + makeContainer("testproject", "dbadmin", 2, hash), + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. stop container testproject-dbadmin-1 reason: scale down +2. stop container testproject-dbadmin-2 reason: scale down +[1] -> 3. remove container testproject-dbadmin-1 reason: scale down +[2] -> 4. remove container testproject-dbadmin-2 reason: scale down +`) +} + +// TestReconcileScaleDownRemovesObsoleteFirst mirrors e2e TestScaleDownRemovesObsolete: +// when scaling down and some containers are obsolete (stale hash), the obsolete +// ones should be removed first (via stop+remove), keeping the up-to-date ones. +func TestReconcileScaleDownRemovesObsoleteFirst(t *testing.T) { + svc := types.ServiceConfig{Name: "db", Image: "postgres"} + currentHash, err := ServiceHash(svc) + assert.NilError(t, err) + + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "db": svc, + }, + } + + // Container 1 has a stale hash (obsolete), container 2 is up-to-date. + // Sorting puts obsolete containers first for removal during scale down. 
+ observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{ + "db": { + makeContainer("testproject", "db", 1, "stale-hash-obsolete"), + makeContainer("testproject", "db", 2, currentHash), + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + // Obsolete container (db-1) is removed first, up-to-date one (db-2) stays + assert.Equal(t, plan.String(), ` +1. stop container testproject-db-1 reason: scale down +[1] -> 2. remove container testproject-db-1 reason: scale down +`) +} + +// TestReconcileScaleUpNoRecreate mirrors e2e TestScaleDoesntRecreate and +// TestScaleDownNoRecreate: scaling up with --no-recreate should only create +// new containers, not recreate existing ones even if their image has changed. +func TestReconcileScaleUpNoRecreate(t *testing.T) { + svc := types.ServiceConfig{Name: "test", Image: "nginx", Scale: intPtr(4)} + + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "test": svc, + }, + } + + // 2 existing containers with a stale hash (image was rebuilt) + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{ + "test": { + makeContainer("testproject", "test", 1, "old-hash-before-rebuild"), + makeContainer("testproject", "test", 2, "old-hash-before-rebuild"), + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateNever, // --no-recreate + RecreateDependencies: api.RecreateNever, + }) + assert.NilError(t, err) + // Only new containers created, no recreates despite stale hash + assert.Equal(t, plan.String(), ` +1. 
create container testproject-test-3 reason: scale up +2. create container testproject-test-4 reason: scale up +`) +} + +// TestReconcileForceRecreateNoDeps mirrors e2e TestRecreateWithNoDeps: +// --force-recreate with --no-deps on a single service should only recreate +// that service, not its dependencies. +func TestReconcileForceRecreateNoDeps(t *testing.T) { + mySvc := types.ServiceConfig{ + Name: "my-service", + Image: "nginx", + DependsOn: types.DependsOnConfig{ + "db": types.ServiceDependency{Condition: "service_started"}, + }, + } + dbSvc := types.ServiceConfig{Name: "db", Image: "postgres"} + myHash, err := ServiceHash(mySvc) + assert.NilError(t, err) + dbHash, err := ServiceHash(dbSvc) + assert.NilError(t, err) + + project := &types.Project{ + Name: "recreate-no-deps", + Services: types.Services{ + "my-service": mySvc, + "db": dbSvc, + }, + } + + observed := &ObservedState{ + ProjectName: "recreate-no-deps", + Containers: map[string]Containers{ + "my-service": {makeContainer("recreate-no-deps", "my-service", 1, myHash)}, + "db": {makeContainer("recreate-no-deps", "db", 1, dbHash)}, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + // Only target "my-service" with force recreate; deps get "never" + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateForce, + RecreateDependencies: api.RecreateNever, + Services: []string{"my-service"}, + }) + assert.NilError(t, err) + // Only my-service is recreated, db is left untouched + assert.Equal(t, plan.String(), ` +1. create container recreate-no-_recreate-no-deps-my-service-1 reason: force recreate +[1] -> 2. stop container recreate-no-deps-my-service-1 reason: force recreate +[2] -> 3. remove container recreate-no-deps-my-service-1 reason: force recreate +[3] -> 4. rename container recreate-no-deps-my-service-1 reason: force recreate +[4] -> 5. 
start container recreate-no-deps-my-service-1 reason: force recreate +`) +} + +// TestReconcileNetworkConfigChanged mirrors e2e TestNetworkConfigChanged: +// when a network's configuration changes (e.g., subnet), the plan should +// include a recreate-network operation. +func TestReconcileNetworkConfigChanged(t *testing.T) { + originalNet := types.NetworkConfig{ + Name: "testproject_mynet", + Ipam: types.IPAMConfig{ + Config: []*types.IPAMPool{{Subnet: "172.99.0.0/16"}}, + }, + } + originalHash, err := NetworkHash(&originalNet) + assert.NilError(t, err) + + // Now the desired config has a different subnet + updatedNet := types.NetworkConfig{ + Name: "testproject_mynet", + Ipam: types.IPAMConfig{ + Config: []*types.IPAMPool{{Subnet: "192.168.0.0/16"}}, + }, + } + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "test": types.ServiceConfig{Name: "test", Image: "nginx"}, + }, + Networks: types.Networks{ + "mynet": updatedNet, + }, + } + + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{}, + Networks: map[string]ObservedNetwork{ + "mynet": { + ID: "net-old-id", + Name: "testproject_mynet", + ConfigHash: originalHash, + }, + }, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. create container testproject-test-1 reason: scale up +2. remove network testproject_mynet reason: config hash changed +[2] -> 3. create network testproject_mynet reason: config hash changed +`) +} + +// TestReconcileVolumeConfigChanged mirrors e2e TestUpRecreateVolumes: +// when a volume's config (e.g., labels) changes, the plan should include +// a recreate-volume operation. 
+func TestReconcileVolumeConfigChanged(t *testing.T) { + originalVol := types.VolumeConfig{ + Name: "testproject_my_vol", + Labels: types.Labels{"foo": "bar"}, + } + originalHash, err := VolumeHash(originalVol) + assert.NilError(t, err) + + // Updated config with different label + updatedVol := types.VolumeConfig{ + Name: "testproject_my_vol", + Labels: types.Labels{"foo": "zot"}, + } + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "app": types.ServiceConfig{Name: "app", Image: "nginx"}, + }, + Volumes: types.Volumes{ + "my_vol": updatedVol, + }, + } + + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{}, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{ + "my_vol": { + Name: "testproject_my_vol", + Driver: "local", + ConfigHash: originalHash, + }, + }, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. create container testproject-app-1 reason: scale up +2. remove volume testproject_my_vol reason: config hash changed +[2] -> 3. create volume testproject_my_vol reason: config hash changed +`) +} + +// TestReconcileExternalNetworkSkipped verifies that external networks are +// never created or recreated, matching the behavior tested in e2e network tests. 
+func TestReconcileExternalNetworkSkipped(t *testing.T) { + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "web": types.ServiceConfig{Name: "web", Image: "nginx"}, + }, + Networks: types.Networks{ + "ext": types.NetworkConfig{Name: "external_net", External: true}, + }, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{}, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + // External network is not created — only the container + assert.Equal(t, plan.String(), ` +1. create container testproject-web-1 reason: scale up +`) +} + +// TestReconcileExternalVolumeSkipped verifies that external volumes are never +// created or recreated. +func TestReconcileExternalVolumeSkipped(t *testing.T) { + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "web": types.ServiceConfig{Name: "web", Image: "nginx"}, + }, + Volumes: types.Volumes{ + "ext": types.VolumeConfig{Name: "external_vol", External: true}, + }, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{}, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + // External volume is not created — only the container + assert.Equal(t, plan.String(), ` +1. create container testproject-web-1 reason: scale up +`) +} + +// TestReconcileOrphansNotRemovedByDefault mirrors e2e TestRemoveOrphans: +// orphan containers should NOT be removed unless RemoveOrphans is set. 
+func TestReconcileOrphansNotRemovedByDefault(t *testing.T) { + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "web": types.ServiceConfig{Name: "web", Image: "nginx"}, + }, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{}, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{ + makeContainer("testproject", "old-service", 1, "some-hash"), + }, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + RemoveOrphans: false, + }) + assert.NilError(t, err) + // Orphan is ignored — only the web container is created + assert.Equal(t, plan.String(), ` +1. create container testproject-web-1 reason: scale up +`) +} + +// TestReconcileContainerCreateDependsOnNetworkAndVolume mirrors e2e +// TestUpWithAllResources: when a service uses a network and a volume, +// its create-container ops should depend on the network and volume create ops. 
+func TestReconcileContainerCreateDependsOnNetworkAndVolume(t *testing.T) { + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "app": types.ServiceConfig{ + Name: "app", + Image: "nginx", + Networks: map[string]*types.ServiceNetworkConfig{ + "mynet": {}, + }, + Volumes: []types.ServiceVolumeConfig{ + {Type: "volume", Source: "myvol", Target: "/data"}, + }, + }, + }, + Networks: types.Networks{ + "mynet": types.NetworkConfig{Name: "testproject_mynet"}, + }, + Volumes: types.Volumes{ + "myvol": types.VolumeConfig{Name: "testproject_myvol"}, + }, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{}, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. create network testproject_mynet reason: network does not exist +2. create volume testproject_myvol reason: volume does not exist +[1,2] -> 3. create container testproject-app-1 reason: scale up +`) +} + +// TestReconcileImageDigestChanged mirrors the behavior tested in e2e +// volume/build tests where a container is recreated because the image +// digest has changed (e.g., after a rebuild). 
+func TestReconcileImageDigestChanged(t *testing.T) { + svc := types.ServiceConfig{ + Name: "web", + Image: "nginx", + CustomLabels: types.Labels{ + api.ImageDigestLabel: "sha256:newdigest", + }, + } + hash, err := ServiceHash(svc) + assert.NilError(t, err) + + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "web": svc, + }, + } + + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{ + "web": { + { + ID: "ctr1", + Names: []string{"/testproject-web-1"}, + Labels: map[string]string{ + api.ServiceLabel: "web", + api.ContainerNumberLabel: "1", + api.ConfigHashLabel: hash, + api.ImageDigestLabel: "sha256:olddigest", + api.ProjectLabel: "testproject", + }, + State: container.StateRunning, + }, + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. create container ctr1_testproject-web-1 reason: image digest changed +[1] -> 2. stop container testproject-web-1 reason: image digest changed +[2] -> 3. remove container testproject-web-1 reason: image digest changed +[3] -> 4. rename container testproject-web-1 reason: image digest changed +[4] -> 5. 
start container testproject-web-1 reason: image digest changed +`) +} + +// --------------------------------------------------------------------------- +// Test helpers +// --------------------------------------------------------------------------- + +func makeContainer(projectName, serviceName string, number int, configHash string) container.Summary { + name := projectName + "-" + serviceName + "-" + fmt.Sprintf("%d", number) + return container.Summary{ + ID: fmt.Sprintf("%s-%s-%d", projectName, serviceName, number), + Names: []string{"/" + name}, + Labels: map[string]string{ + api.ServiceLabel: serviceName, + api.ContainerNumberLabel: fmt.Sprintf("%d", number), + api.ConfigHashLabel: configHash, + api.ProjectLabel: projectName, + }, + State: container.StateRunning, + } +} diff --git a/pkg/compose/run.go b/pkg/compose/run.go index 7ca080264da..cacc874196c 100644 --- a/pkg/compose/run.go +++ b/pkg/compose/run.go @@ -181,7 +181,8 @@ func (s *composeService) prepareRun(ctx context.Context, project *types.Project, Labels: mergeLabels(service.Labels, service.CustomLabels), } - err = newConvergence(project.ServiceNames(), observedState, nil, nil, s).resolveServiceReferences(&service) + es := newExecutionStateFrom(observedState) + err = es.resolveServiceReferences(&service) if err != nil { return prepareRunResult{}, err } diff --git a/pkg/e2e/networks_test.go b/pkg/e2e/networks_test.go index f556681e664..8ff9187b0dd 100644 --- a/pkg/e2e/networks_test.go +++ b/pkg/e2e/networks_test.go @@ -145,6 +145,8 @@ func TestNetworkModes(t *testing.T) { }) } +// TODO: network config change detection logic is also covered by pure Reconcile unit test: +// TestReconcileNetworkConfigChanged in pkg/compose/reconcile_test.go func TestNetworkConfigChanged(t *testing.T) { t.Skip("unstable") // fixture is shared with TestNetworks and is not safe to run concurrently @@ -200,6 +202,8 @@ func TestInterfaceName(t *testing.T) { res.Assert(t, icmd.Expected{Out: "foobar@"}) } +// TODO: network 
recreation detection logic is also covered by pure Reconcile unit test: +// TestReconcileNetworkConfigChanged in pkg/compose/reconcile_test.go func TestNetworkRecreate(t *testing.T) { c := NewCLI(t) const projectName = "network_recreate" diff --git a/pkg/e2e/orphans_test.go b/pkg/e2e/orphans_test.go index e721e7a540b..6d47b857649 100644 --- a/pkg/e2e/orphans_test.go +++ b/pkg/e2e/orphans_test.go @@ -23,6 +23,8 @@ import ( "gotest.tools/v3/assert" ) +// TODO: orphan removal decision logic is also covered by pure Reconcile unit tests: +// TestReconcileOrphans, TestReconcileOrphansNotRemovedByDefault in pkg/compose/reconcile_test.go func TestRemoveOrphans(t *testing.T) { c := NewCLI(t) diff --git a/pkg/e2e/recreate_no_deps_test.go b/pkg/e2e/recreate_no_deps_test.go index 2b32e0d5bc3..6cf39cfb268 100644 --- a/pkg/e2e/recreate_no_deps_test.go +++ b/pkg/e2e/recreate_no_deps_test.go @@ -22,6 +22,8 @@ import ( "gotest.tools/v3/icmd" ) +// TODO: force-recreate with no-deps decision logic is also covered by pure Reconcile unit test: +// TestReconcileForceRecreateNoDeps in pkg/compose/reconcile_test.go func TestRecreateWithNoDeps(t *testing.T) { c := NewParallelCLI(t, WithEnv( "COMPOSE_PROJECT_NAME=recreate-no-deps", diff --git a/pkg/e2e/scale_test.go b/pkg/e2e/scale_test.go index 0e16c7afb3e..b6f613edf31 100644 --- a/pkg/e2e/scale_test.go +++ b/pkg/e2e/scale_test.go @@ -27,6 +27,9 @@ import ( const NO_STATE_TO_CHECK = "" +// TODO: decision logic (which ops to generate for scale up/down) is also covered by +// pure Reconcile unit tests: TestReconcileScaleUpMultipleServices, TestReconcileScaleDownMultipleServices, +// TestReconcileScaleToZero in pkg/compose/reconcile_test.go func TestScaleBasicCases(t *testing.T) { c := NewCLI(t, WithEnv( "COMPOSE_PROJECT_NAME=scale-basic-tests")) @@ -95,6 +98,8 @@ func TestScaleWithDepsCases(t *testing.T) { checkServiceContainer(t, res.Combined(), "scale-deps-tests-db", NO_STATE_TO_CHECK, 1) } +// TODO: scale up/down decision logic is also 
covered by pure Reconcile unit tests: +// TestReconcileScaleUp, TestReconcileScaleDown in pkg/compose/reconcile_test.go func TestScaleUpAndDownPreserveContainerNumber(t *testing.T) { const projectName = "scale-up-down-test" @@ -129,6 +134,8 @@ func TestScaleUpAndDownPreserveContainerNumber(t *testing.T) { assert.Equal(t, strings.TrimSpace(res.Stdout()), projectName+"-db-1\n"+projectName+"-db-2") } +// TODO: obsolete-first removal logic is also covered by pure Reconcile unit test: +// TestReconcileScaleDownRemovesObsoleteFirst in pkg/compose/reconcile_test.go func TestScaleDownRemovesObsolete(t *testing.T) { const projectName = "scale-down-obsolete-test" c := NewCLI(t, WithEnv( @@ -185,6 +192,8 @@ func checkServiceContainer(t *testing.T, stdout, containerName, containerState s testify.Fail(t, errMessage, stdout) } +// TODO: no-recreate scale logic is also covered by pure Reconcile unit test: +// TestReconcileScaleUpNoRecreate in pkg/compose/reconcile_test.go func TestScaleDownNoRecreate(t *testing.T) { const projectName = "scale-down-recreated-test" c := NewCLI(t, WithEnv( diff --git a/pkg/e2e/up_test.go b/pkg/e2e/up_test.go index d34f2061e25..f31d82d1642 100644 --- a/pkg/e2e/up_test.go +++ b/pkg/e2e/up_test.go @@ -141,6 +141,8 @@ func TestUpWithDependencyExit(t *testing.T) { }) } +// TODO: scale-without-recreate decision logic is also covered by pure Reconcile unit test: +// TestReconcileScaleUpNoRecreate in pkg/compose/reconcile_test.go func TestScaleDoesntRecreate(t *testing.T) { c := NewCLI(t) const projectName = "compose-e2e-scale" diff --git a/pkg/e2e/volumes_test.go b/pkg/e2e/volumes_test.go index d3e5787fa02..e2869b1a9c8 100644 --- a/pkg/e2e/volumes_test.go +++ b/pkg/e2e/volumes_test.go @@ -142,6 +142,8 @@ func TestUpSwitchVolumes(t *testing.T) { res.Assert(t, icmd.Expected{Out: "test_external_volume_2"}) } +// TODO: volume recreation decision logic is also covered by pure Reconcile unit test: +// TestReconcileVolumeConfigChanged in 
pkg/compose/reconcile_test.go func TestUpRecreateVolumes(t *testing.T) { c := NewCLI(t) const projectName = "compose-e2e-recreate-volumes" From 137c312ac8c35bb6808d8543fbd400cbef8b66f4 Mon Sep 17 00:00:00 2001 From: Nicolas De Loof Date: Thu, 19 Mar 2026 08:17:28 +0100 Subject: [PATCH 2/3] more tests Signed-off-by: Nicolas De Loof --- pkg/compose/reconcile_test.go | 1033 +++++++++++++++++++++++++++++++++ 1 file changed, 1033 insertions(+) diff --git a/pkg/compose/reconcile_test.go b/pkg/compose/reconcile_test.go index 299ec5c3024..47ff241b402 100644 --- a/pkg/compose/reconcile_test.go +++ b/pkg/compose/reconcile_test.go @@ -1158,6 +1158,1033 @@ func TestReconcileImageDigestChanged(t *testing.T) { `) } +// --------------------------------------------------------------------------- +// 1. Dead container gets started +// --------------------------------------------------------------------------- + +func TestReconcileDeadContainerGetsStarted(t *testing.T) { + service := types.ServiceConfig{Name: "web", Image: "nginx"} + hash, err := ServiceHash(service) + assert.NilError(t, err) + + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "web": service, + }, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{ + "web": { + { + ID: "abc123", + Names: []string{"/testproject-web-1"}, + Labels: map[string]string{ + api.ServiceLabel: "web", + api.ContainerNumberLabel: "1", + api.ConfigHashLabel: hash, + api.ProjectLabel: "testproject", + }, + State: container.StateDead, + }, + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1.
start container testproject-web-1 reason: container not running (state: dead) +`) +} + +// --------------------------------------------------------------------------- +// 2. Exited container is left alone +// --------------------------------------------------------------------------- + +func TestReconcileExitedContainerNoOps(t *testing.T) { + service := types.ServiceConfig{Name: "web", Image: "nginx"} + hash, err := ServiceHash(service) + assert.NilError(t, err) + + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "web": service, + }, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{ + "web": { + { + ID: "abc123", + Names: []string{"/testproject-web-1"}, + Labels: map[string]string{ + api.ServiceLabel: "web", + api.ContainerNumberLabel: "1", + api.ConfigHashLabel: hash, + api.ProjectLabel: "testproject", + }, + State: container.StateExited, + }, + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Assert(t, plan.IsEmpty(), "expected empty plan but got:\n%s", plan.String()) +} + +// --------------------------------------------------------------------------- +// 3. 
Force recreate on up-to-date container produces full chain +// --------------------------------------------------------------------------- + +func TestReconcileForceRecreateUpToDate(t *testing.T) { + service := types.ServiceConfig{Name: "web", Image: "nginx"} + hash, err := ServiceHash(service) + assert.NilError(t, err) + + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "web": service, + }, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{ + "web": { + { + ID: "abc123def456", + Names: []string{"/testproject-web-1"}, + Labels: map[string]string{ + api.ServiceLabel: "web", + api.ContainerNumberLabel: "1", + api.ConfigHashLabel: hash, + api.ProjectLabel: "testproject", + }, + State: container.StateRunning, + }, + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateForce, + RecreateDependencies: api.RecreateForce, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. create container abc123def456_testproject-web-1 reason: force recreate +[1] -> 2. stop container testproject-web-1 reason: force recreate +[2] -> 3. remove container testproject-web-1 reason: force recreate +[3] -> 4. rename container testproject-web-1 reason: force recreate +[4] -> 5. start container testproject-web-1 reason: force recreate +`) +} + +// --------------------------------------------------------------------------- +// 4. 
RecreateNever with stale containers — no ops +// --------------------------------------------------------------------------- + +func TestReconcileNeverRecreateStaleContainers(t *testing.T) { + service := types.ServiceConfig{Name: "web", Image: "nginx"} + + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "web": service, + }, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{ + "web": { + makeContainer("testproject", "web", 1, "stale-hash"), + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateNever, + RecreateDependencies: api.RecreateNever, + }) + assert.NilError(t, err) + assert.Assert(t, plan.IsEmpty(), "expected empty plan but got:\n%s", plan.String()) +} + +// --------------------------------------------------------------------------- +// 5. Network recreate with multiple connected containers +// --------------------------------------------------------------------------- + +func TestReconcileNetworkRecreateMultipleContainers(t *testing.T) { + webSvc := types.ServiceConfig{ + Name: "web", + Image: "nginx", + Networks: map[string]*types.ServiceNetworkConfig{ + "default": nil, + }, + } + workerSvc := types.ServiceConfig{ + Name: "worker", + Image: "worker:latest", + Networks: map[string]*types.ServiceNetworkConfig{ + "default": nil, + }, + } + webHash, err := ServiceHash(webSvc) + assert.NilError(t, err) + workerHash, err := ServiceHash(workerSvc) + assert.NilError(t, err) + + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "web": webSvc, + "worker": workerSvc, + }, + Networks: types.Networks{ + "default": types.NetworkConfig{Name: "testproject_default"}, + }, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{ + "web": { + { + ID: "ctr-web", + Names: 
[]string{"/testproject-web-1"}, + State: container.StateRunning, + Labels: map[string]string{ + api.ServiceLabel: "web", + api.ContainerNumberLabel: "1", + api.ProjectLabel: "testproject", + api.ConfigHashLabel: webHash, + }, + NetworkSettings: &container.NetworkSettingsSummary{ + Networks: map[string]*network.EndpointSettings{ + "testproject_default": {NetworkID: "net123"}, + }, + }, + }, + }, + "worker": { + { + ID: "ctr-worker", + Names: []string{"/testproject-worker-1"}, + State: container.StateRunning, + Labels: map[string]string{ + api.ServiceLabel: "worker", + api.ContainerNumberLabel: "1", + api.ProjectLabel: "testproject", + api.ConfigHashLabel: workerHash, + }, + NetworkSettings: &container.NetworkSettingsSummary{ + Networks: map[string]*network.EndpointSettings{ + "testproject_default": {NetworkID: "net123"}, + }, + }, + }, + }, + }, + Networks: map[string]ObservedNetwork{ + "default": { + ID: "net123", + Name: "testproject_default", + ConfigHash: "outdated-hash", + }, + }, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. stop container testproject-web-1 reason: network "testproject_default" is being recreated +2. stop container testproject-worker-1 reason: network "testproject_default" is being recreated +[1] -> 3. disconnect network testproject-web-1 from testproject_default reason: network "testproject_default" is being recreated +[2] -> 4. disconnect network testproject-worker-1 from testproject_default reason: network "testproject_default" is being recreated +[3,4] -> 5. remove network testproject_default reason: config hash changed +[5] -> 6. create network testproject_default reason: config hash changed +[6] -> 7. 
connect network testproject-web-1 to testproject_default reason: network "testproject_default" has been recreated +[6] -> 8. connect network testproject-worker-1 to testproject_default reason: network "testproject_default" has been recreated +[7] -> 9. start container testproject-web-1 reason: network "testproject_default" has been recreated +[8] -> 10. start container testproject-worker-1 reason: network "testproject_default" has been recreated +`) +} + +// --------------------------------------------------------------------------- +// 6. Container matched by network name (not ID) +// --------------------------------------------------------------------------- + +func TestReconcileNetworkMatchByName(t *testing.T) { + webSvc := types.ServiceConfig{ + Name: "web", + Image: "nginx", + Networks: map[string]*types.ServiceNetworkConfig{ + "default": nil, + }, + } + webHash, err := ServiceHash(webSvc) + assert.NilError(t, err) + + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "web": webSvc, + }, + Networks: types.Networks{ + "default": types.NetworkConfig{Name: "testproject_default"}, + }, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{ + "web": { + { + ID: "ctr-web", + Names: []string{"/testproject-web-1"}, + // StateCreated avoids checkExpectedNetworks (only runs for running containers), + // isolating the name-based matching in findContainersOnNetwork. 
+ State: container.StateCreated, + Labels: map[string]string{ + api.ServiceLabel: "web", + api.ContainerNumberLabel: "1", + api.ProjectLabel: "testproject", + api.ConfigHashLabel: webHash, + }, + // NetworkID is empty — findContainersOnNetwork can only find this by name match + NetworkSettings: &container.NetworkSettingsSummary{ + Networks: map[string]*network.EndpointSettings{ + "testproject_default": {NetworkID: ""}, + }, + }, + }, + }, + }, + Networks: map[string]ObservedNetwork{ + "default": { + ID: "net123", + Name: "testproject_default", + ConfigHash: "outdated-hash", + }, + }, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. stop container testproject-web-1 reason: network "testproject_default" is being recreated +[1] -> 2. disconnect network testproject-web-1 from testproject_default reason: network "testproject_default" is being recreated +[2] -> 3. remove network testproject_default reason: config hash changed +[3] -> 4. create network testproject_default reason: config hash changed +[4] -> 5. connect network testproject-web-1 to testproject_default reason: network "testproject_default" has been recreated +[5] -> 6. start container testproject-web-1 reason: network "testproject_default" has been recreated +`) +} + +// --------------------------------------------------------------------------- +// 7. 
Service references network not in project.Networks +// --------------------------------------------------------------------------- + +func TestReconcileServiceWithUnknownNetwork(t *testing.T) { + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "web": types.ServiceConfig{ + Name: "web", + Image: "nginx", + Networks: map[string]*types.ServiceNetworkConfig{ + "nonexistent": nil, + }, + }, + }, + Networks: types.Networks{}, // no networks defined + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{}, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + // Should still produce a create container op without panicking + assert.Equal(t, plan.String(), ` +1. create container testproject-web-1 reason: scale up +`) +} + +// --------------------------------------------------------------------------- +// 8. 
Volume recreate with connected containers +// --------------------------------------------------------------------------- + +func TestReconcileVolumeRecreateWithContainers(t *testing.T) { + svc := types.ServiceConfig{ + Name: "app", + Image: "nginx", + Volumes: []types.ServiceVolumeConfig{ + {Type: "volume", Source: "data", Target: "/data"}, + }, + } + svcHash, err := ServiceHash(svc) + assert.NilError(t, err) + + originalVol := types.VolumeConfig{Name: "testproject_data"} + originalHash, err := VolumeHash(originalVol) + assert.NilError(t, err) + + updatedVol := types.VolumeConfig{ + Name: "testproject_data", + Labels: types.Labels{"version": "2"}, + } + + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "app": svc, + }, + Volumes: types.Volumes{ + "data": updatedVol, + }, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{ + "app": { + { + ID: "ctr-app", + Names: []string{"/testproject-app-1"}, + State: container.StateRunning, + Labels: map[string]string{ + api.ServiceLabel: "app", + api.ContainerNumberLabel: "1", + api.ProjectLabel: "testproject", + api.ConfigHashLabel: svcHash, + }, + Mounts: []container.MountPoint{ + {Type: "volume", Name: "testproject_data"}, + }, + }, + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{ + "data": { + Name: "testproject_data", + Driver: "local", + ConfigHash: originalHash, + }, + }, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. stop container testproject-app-1 reason: volume "testproject_data" is being recreated +[1] -> 2. remove container testproject-app-1 reason: volume "testproject_data" is being recreated +[2] -> 3. remove volume testproject_data reason: config hash changed +[3] -> 4. 
create volume testproject_data reason: config hash changed +[4] -> 5. create container testproject-app-1 reason: volume "data" is being recreated +`) +} + +// --------------------------------------------------------------------------- +// 9. Bind mount does not trigger volume reconciliation +// --------------------------------------------------------------------------- + +func TestReconcileBindMountNotAffectedByVolumeReconcile(t *testing.T) { + svc := types.ServiceConfig{ + Name: "app", + Image: "nginx", + Volumes: []types.ServiceVolumeConfig{ + {Type: "bind", Source: "/host/path", Target: "/data"}, + }, + } + hash, err := ServiceHash(svc) + assert.NilError(t, err) + + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "app": svc, + }, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{ + "app": { + makeContainerWithHash("testproject", "app", 1, hash), + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Assert(t, plan.IsEmpty(), "expected empty plan but got:\n%s", plan.String()) +} + +// --------------------------------------------------------------------------- +// 10. 
Diamond dependency: D ← B,C ← A +// --------------------------------------------------------------------------- + +func TestReconcileDiamondDependency(t *testing.T) { + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "a": types.ServiceConfig{ + Name: "a", + Image: "nginx", + DependsOn: types.DependsOnConfig{ + "b": types.ServiceDependency{Condition: "service_started"}, + "c": types.ServiceDependency{Condition: "service_started"}, + }, + }, + "b": types.ServiceConfig{ + Name: "b", + Image: "nginx", + DependsOn: types.DependsOnConfig{ + "d": types.ServiceDependency{Condition: "service_started"}, + }, + }, + "c": types.ServiceConfig{ + Name: "c", + Image: "nginx", + DependsOn: types.DependsOnConfig{ + "d": types.ServiceDependency{Condition: "service_started"}, + }, + }, + "d": types.ServiceConfig{ + Name: "d", + Image: "nginx", + }, + }, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{}, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. create container testproject-d-1 reason: scale up +[1] -> 2. create container testproject-b-1 reason: scale up +[1] -> 3. create container testproject-c-1 reason: scale up +[2,3] -> 4. create container testproject-a-1 reason: scale up +`) +} + +// --------------------------------------------------------------------------- +// 11. 
Cascading restart when dependency is recreated (restart: true) +// --------------------------------------------------------------------------- + +func TestReconcileCascadingRestart(t *testing.T) { + dbSvc := types.ServiceConfig{Name: "db", Image: "postgres"} + webSvc := types.ServiceConfig{ + Name: "web", + Image: "nginx", + DependsOn: types.DependsOnConfig{ + "db": types.ServiceDependency{ + Condition: "service_started", + Restart: true, + }, + }, + } + webHash, err := ServiceHash(webSvc) + assert.NilError(t, err) + + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "db": dbSvc, + "web": webSvc, + }, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{ + "db": { + makeContainer("testproject", "db", 1, "stale-hash"), + }, + "web": { + makeContainer("testproject", "web", 1, webHash), + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. create container testproject-_testproject-db-1 reason: config hash changed +2. stop container testproject-web-1 reason: dependency "db" is being recreated (restart: true) +[1] -> 3. stop container testproject-db-1 reason: config hash changed +[3] -> 4. remove container testproject-db-1 reason: config hash changed +[4] -> 5. rename container testproject-db-1 reason: config hash changed +[5] -> 6. start container testproject-db-1 reason: config hash changed +[6,2] -> 7. start container testproject-web-1 reason: restart after dependency "db" recreated +`) +} + +// --------------------------------------------------------------------------- +// 12. 
No cascading restart when restart: false +// --------------------------------------------------------------------------- + +func TestReconcileNoCascadingRestartWhenFalse(t *testing.T) { + dbSvc := types.ServiceConfig{Name: "db", Image: "postgres"} + webSvc := types.ServiceConfig{ + Name: "web", + Image: "nginx", + DependsOn: types.DependsOnConfig{ + "db": types.ServiceDependency{ + Condition: "service_started", + Restart: false, + }, + }, + } + webHash, err := ServiceHash(webSvc) + assert.NilError(t, err) + + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "db": dbSvc, + "web": webSvc, + }, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{ + "db": { + makeContainer("testproject", "db", 1, "stale-hash"), + }, + "web": { + makeContainer("testproject", "web", 1, webHash), + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. create container testproject-_testproject-db-1 reason: config hash changed +[1] -> 2. stop container testproject-db-1 reason: config hash changed +[2] -> 3. remove container testproject-db-1 reason: config hash changed +[3] -> 4. rename container testproject-db-1 reason: config hash changed +[4] -> 5. start container testproject-db-1 reason: config hash changed +`) +} + +// --------------------------------------------------------------------------- +// 13. 
Scale up + config change simultaneously +// --------------------------------------------------------------------------- + +func TestReconcileScaleUpWithConfigChange(t *testing.T) { + svc := types.ServiceConfig{Name: "web", Image: "nginx", Scale: intPtr(3)} + + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "web": svc, + }, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{ + "web": { + makeContainer("testproject", "web", 1, "stale-hash"), + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. create container testproject-_testproject-web-1 reason: config hash changed +2. create container testproject-web-2 reason: scale up +3. create container testproject-web-3 reason: scale up +[1] -> 4. stop container testproject-web-1 reason: config hash changed +[4] -> 5. remove container testproject-web-1 reason: config hash changed +[5] -> 6. rename container testproject-web-1 reason: config hash changed +[6] -> 7. start container testproject-web-1 reason: config hash changed +`) +} + +// --------------------------------------------------------------------------- +// 14. 
Scale down + config change +// --------------------------------------------------------------------------- + +func TestReconcileScaleDownWithConfigChange(t *testing.T) { + svc := types.ServiceConfig{Name: "web", Image: "nginx"} + + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "web": svc, + }, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{ + "web": { + makeContainer("testproject", "web", 1, "stale-hash-1"), + makeContainer("testproject", "web", 2, "stale-hash-2"), + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. create container testproject-_testproject-web-1 reason: config hash changed +2. stop container testproject-web-2 reason: scale down +[1] -> 3. stop container testproject-web-1 reason: config hash changed +[2] -> 4. remove container testproject-web-2 reason: scale down +[3] -> 5. remove container testproject-web-1 reason: config hash changed +[5] -> 6. rename container testproject-web-1 reason: config hash changed +[6] -> 7. start container testproject-web-1 reason: config hash changed +`) +} + +// --------------------------------------------------------------------------- +// 15. 
Custom container_name with scale > 1 returns error +// --------------------------------------------------------------------------- + +func TestReconcileCustomContainerNameScaleError(t *testing.T) { + svc := types.ServiceConfig{ + Name: "web", + Image: "nginx", + Scale: intPtr(2), + ContainerName: "my-custom-name", + } + + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "web": svc, + }, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{}, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + _, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.Assert(t, err != nil, "expected error for custom container_name with scale > 1") +} + +// --------------------------------------------------------------------------- +// 16. Targeted service with dependency — dep uses RecreateDependencies policy +// --------------------------------------------------------------------------- + +func TestReconcileTargetedServiceDependencyPolicy(t *testing.T) { + dbSvc := types.ServiceConfig{Name: "db", Image: "postgres"} + webSvc := types.ServiceConfig{ + Name: "web", + Image: "nginx", + DependsOn: types.DependsOnConfig{ + "db": types.ServiceDependency{Condition: "service_started"}, + }, + } + + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "db": dbSvc, + "web": webSvc, + }, + } + + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{ + "db": { + makeContainer("testproject", "db", 1, "stale-hash"), + }, + "web": { + makeContainer("testproject", "web", 1, "stale-hash"), + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + // Target only "web" with force-recreate; deps get "never" — db is NOT recreated + plan, err 
:= Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateForce, + RecreateDependencies: api.RecreateNever, + Services: []string{"web"}, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. create container testproject-_testproject-web-1 reason: force recreate +[1] -> 2. stop container testproject-web-1 reason: force recreate +[2] -> 3. remove container testproject-web-1 reason: force recreate +[3] -> 4. rename container testproject-web-1 reason: force recreate +[4] -> 5. start container testproject-web-1 reason: force recreate +`) + + // Same setup but deps get "diverged" — db IS stale so it gets recreated too + plan2, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateForce, + RecreateDependencies: api.RecreateDiverged, + Services: []string{"web"}, + }) + assert.NilError(t, err) + assert.Equal(t, plan2.String(), ` +1. create container testproject-_testproject-db-1 reason: config hash changed +[1] -> 2. stop container testproject-db-1 reason: config hash changed +[2] -> 3. remove container testproject-db-1 reason: config hash changed +[3] -> 4. rename container testproject-db-1 reason: config hash changed +[4] -> 5. start container testproject-db-1 reason: config hash changed +[5] -> 6. create container testproject-_testproject-web-1 reason: force recreate +[6] -> 7. stop container testproject-web-1 reason: force recreate +[7] -> 8. remove container testproject-web-1 reason: force recreate +[8] -> 9. rename container testproject-web-1 reason: force recreate +[9,5] -> 10. start container testproject-web-1 reason: force recreate +`) +} + +// --------------------------------------------------------------------------- +// 17. 
Non-targeted service is skipped entirely +// --------------------------------------------------------------------------- + +func TestReconcileNonTargetedServiceSkipped(t *testing.T) { + webSvc := types.ServiceConfig{Name: "web", Image: "nginx"} + workerSvc := types.ServiceConfig{Name: "worker", Image: "worker:latest"} + + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "web": webSvc, + "worker": workerSvc, + }, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{ + "web": { + makeContainer("testproject", "web", 1, "stale-hash"), + }, + "worker": { + makeContainer("testproject", "worker", 1, "stale-hash"), + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + Services: []string{"web"}, + }) + assert.NilError(t, err) + // Only web is recreated — worker is completely skipped + assert.Equal(t, plan.String(), ` +1. create container testproject-_testproject-web-1 reason: config hash changed +[1] -> 2. stop container testproject-web-1 reason: config hash changed +[2] -> 3. remove container testproject-web-1 reason: config hash changed +[3] -> 4. rename container testproject-web-1 reason: config hash changed +[4] -> 5. start container testproject-web-1 reason: config hash changed +`) +} + +// --------------------------------------------------------------------------- +// 18. 
Empty project produces empty plan +// --------------------------------------------------------------------------- + +func TestReconcileEmptyProject(t *testing.T) { + project := &types.Project{ + Name: "testproject", + Services: types.Services{}, + Networks: types.Networks{}, + Volumes: types.Volumes{}, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{}, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Assert(t, plan.IsEmpty(), "expected empty plan but got:\n%s", plan.String()) +} + +// --------------------------------------------------------------------------- +// 19. Multiple orphans with RemoveOrphans: true +// --------------------------------------------------------------------------- + +func TestReconcileMultipleOrphans(t *testing.T) { + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "web": types.ServiceConfig{Name: "web", Image: "nginx"}, + }, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{}, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{ + makeContainer("testproject", "old-a", 1, "hash-a"), + makeContainer("testproject", "old-b", 1, "hash-b"), + makeContainer("testproject", "old-c", 1, "hash-c"), + }, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + RemoveOrphans: true, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. create container testproject-web-1 reason: scale up +2. stop container testproject-old-a-1 reason: orphan container +3. stop container testproject-old-b-1 reason: orphan container +4. 
stop container testproject-old-c-1 reason: orphan container +[2] -> 5. remove container testproject-old-a-1 reason: orphan container +[3] -> 6. remove container testproject-old-b-1 reason: orphan container +[4] -> 7. remove container testproject-old-c-1 reason: orphan container +`) +} + +// --------------------------------------------------------------------------- +// 20. Plugin service unaffected by recreate policies +// --------------------------------------------------------------------------- + +func TestReconcilePluginServiceIgnoresRecreatePolicy(t *testing.T) { + project := &types.Project{ + Name: "testproject", + Services: types.Services{ + "plugin-svc": types.ServiceConfig{ + Name: "plugin-svc", + Provider: &types.ServiceProviderConfig{ + Type: "aws", + }, + }, + }, + } + observed := &ObservedState{ + ProjectName: "testproject", + Containers: map[string]Containers{}, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + // Test with RecreateNever — plugin should still get an op + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateNever, + RecreateDependencies: api.RecreateNever, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. plugin plugin plugin-svc reason: plugin service +`) + + // Test with RecreateForce — same result + plan2, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateForce, + RecreateDependencies: api.RecreateForce, + }) + assert.NilError(t, err) + assert.Equal(t, plan2.String(), ` +1. 
plugin plugin plugin-svc reason: plugin service +`) +} + // --------------------------------------------------------------------------- // Test helpers // --------------------------------------------------------------------------- @@ -1176,3 +2203,9 @@ func makeContainer(projectName, serviceName string, number int, configHash strin State: container.StateRunning, } } + +// makeContainerWithHash is like makeContainer but returns a container +// with the given hash precomputed (alias for readability). +func makeContainerWithHash(projectName, serviceName string, number int, configHash string) container.Summary { + return makeContainer(projectName, serviceName, number, configHash) +} From 428b02015f98fe33c977599c78d2974ba3ac44db Mon Sep 17 00:00:00 2001 From: Nicolas De Loof Date: Thu, 19 Mar 2026 11:07:18 +0100 Subject: [PATCH 3/3] more tests Signed-off-by: Nicolas De Loof --- pkg/compose/reconcile.go | 2 +- pkg/compose/reconcile_test.go | 1798 +++++++++++++++++++++++++++++++++ 2 files changed, 1799 insertions(+), 1 deletion(-) diff --git a/pkg/compose/reconcile.go b/pkg/compose/reconcile.go index d2d9909d676..91f73e7c126 100644 --- a/pkg/compose/reconcile.go +++ b/pkg/compose/reconcile.go @@ -1098,7 +1098,7 @@ func needsRecreate(expected types.ServiceConfig, actual container.Summary, netwo return true, "image digest changed", nil } - if networks != nil && actual.State == container.StateRunning { + if networks != nil && actual.State == container.StateRunning && actual.NetworkSettings != nil { if checkExpectedNetworks(expected, actual, networks) { return true, "network configuration changed", nil } diff --git a/pkg/compose/reconcile_test.go b/pkg/compose/reconcile_test.go index 47ff241b402..f4cdbd3835c 100644 --- a/pkg/compose/reconcile_test.go +++ b/pkg/compose/reconcile_test.go @@ -19,9 +19,11 @@ package compose import ( "fmt" "testing" + "time" "github.com/compose-spec/compose-go/v2/types" "github.com/moby/moby/api/types/container" + mmount 
"github.com/moby/moby/api/types/mount" "github.com/moby/moby/api/types/network" "gotest.tools/v3/assert" @@ -2185,6 +2187,1802 @@ func TestReconcilePluginServiceIgnoresRecreatePolicy(t *testing.T) { `) } +// --------------------------------------------------------------------------- +// Corner-case tests +// --------------------------------------------------------------------------- + +// TestReconcileNonContiguousScaleDown verifies that when scaling down with +// non-sequential container numbers (gaps), the highest numbers are removed first. +func TestReconcileNonContiguousScaleDown(t *testing.T) { + svc := types.ServiceConfig{Name: "web", Image: "nginx"} + hash, err := ServiceHash(svc) + assert.NilError(t, err) + + project := &types.Project{ + Name: "tp", + Services: types.Services{ + "web": svc, + }, + } + observed := &ObservedState{ + ProjectName: "tp", + Containers: map[string]Containers{ + "web": { + makeContainer("tp", "web", 1, hash), + makeContainer("tp", "web", 3, hash), + makeContainer("tp", "web", 5, hash), + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. stop container tp-web-3 reason: scale down +2. stop container tp-web-5 reason: scale down +[1] -> 3. remove container tp-web-3 reason: scale down +[2] -> 4. remove container tp-web-5 reason: scale down +`) +} + +// TestReconcileScaleUpFillsAfterMax verifies that new containers are numbered +// after the current maximum, not filling gaps. 
+func TestReconcileScaleUpFillsAfterMax(t *testing.T) { + svc := types.ServiceConfig{Name: "web", Image: "nginx", Scale: intPtr(3)} + hash, err := ServiceHash(svc) + assert.NilError(t, err) + + project := &types.Project{ + Name: "tp", + Services: types.Services{ + "web": svc, + }, + } + observed := &ObservedState{ + ProjectName: "tp", + Containers: map[string]Containers{ + "web": { + makeContainer("tp", "web", 1, hash), + makeContainer("tp", "web", 3, hash), + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + // New container numbered 4 (max+1), not 2 (the gap) + assert.Equal(t, plan.String(), ` +1. create container tp-web-4 reason: scale up +`) +} + +// TestReconcileInvalidContainerNumberFallback verifies that containers with +// invalid ContainerNumberLabel fall back to Created-timestamp-based sorting. 
+func TestReconcileInvalidContainerNumberFallback(t *testing.T) { + svc := types.ServiceConfig{Name: "web", Image: "nginx"} + hash, err := ServiceHash(svc) + assert.NilError(t, err) + + project := &types.Project{ + Name: "tp", + Services: types.Services{ + "web": svc, + }, + } + observed := &ObservedState{ + ProjectName: "tp", + Containers: map[string]Containers{ + "web": { + { + ID: "c1", + Names: []string{"/tp-web-1"}, + State: container.StateRunning, + Labels: map[string]string{ + api.ServiceLabel: "web", + api.ContainerNumberLabel: "abc", // invalid + api.ConfigHashLabel: hash, + api.ProjectLabel: "tp", + }, + Created: 100, + }, + { + ID: "c2", + Names: []string{"/tp-web-2"}, + State: container.StateRunning, + Labels: map[string]string{ + api.ServiceLabel: "web", + api.ContainerNumberLabel: "2", + api.ConfigHashLabel: hash, + api.ProjectLabel: "tp", + }, + Created: 200, + }, + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + // Container with invalid number (older Created) is removed via scale down + assert.Equal(t, plan.String(), ` +1. stop container tp-web-1 reason: scale down +[1] -> 2. remove container tp-web-1 reason: scale down +`) +} + +// TestReconcilePausedContainerGetsStarted verifies that paused containers +// trigger a start operation (paused is NOT in the "no action" list). 
+func TestReconcilePausedContainerGetsStarted(t *testing.T) { + svc := types.ServiceConfig{Name: "web", Image: "nginx"} + hash, err := ServiceHash(svc) + assert.NilError(t, err) + + ctr := makeContainer("tp", "web", 1, hash) + ctr.State = container.StatePaused + + project := &types.Project{ + Name: "tp", + Services: types.Services{ + "web": svc, + }, + } + observed := &ObservedState{ + ProjectName: "tp", + Containers: map[string]Containers{"web": {ctr}}, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. start container tp-web-1 reason: container not running (state: paused) +`) +} + +// TestReconcileRestartingContainerNoOps verifies that a restarting container +// with matching config produces no operations. +func TestReconcileRestartingContainerNoOps(t *testing.T) { + svc := types.ServiceConfig{Name: "web", Image: "nginx"} + hash, err := ServiceHash(svc) + assert.NilError(t, err) + + ctr := makeContainer("tp", "web", 1, hash) + ctr.State = container.StateRestarting + + project := &types.Project{ + Name: "tp", + Services: types.Services{ + "web": svc, + }, + } + observed := &ObservedState{ + ProjectName: "tp", + Containers: map[string]Containers{"web": {ctr}}, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Assert(t, plan.IsEmpty(), "expected empty plan but got:\n%s", plan.String()) +} + +// TestReconcileNetworkCheckSkippedNonRunning verifies that checkExpectedNetworks +// is NOT called for non-running containers (only gated on StateRunning). 
+func TestReconcileNetworkCheckSkippedNonRunning(t *testing.T) { + svc := types.ServiceConfig{ + Name: "web", + Image: "nginx", + Networks: map[string]*types.ServiceNetworkConfig{ + "mynet": nil, + }, + } + hash, err := ServiceHash(svc) + assert.NilError(t, err) + + netCfg := types.NetworkConfig{Name: "tp_mynet"} + netHash, err := NetworkHash(&netCfg) + assert.NilError(t, err) + + project := &types.Project{ + Name: "tp", + Services: types.Services{ + "web": svc, + }, + Networks: types.Networks{ + "mynet": netCfg, + }, + } + observed := &ObservedState{ + ProjectName: "tp", + Containers: map[string]Containers{ + "web": { + { + ID: "c1", + Names: []string{"/tp-web-1"}, + // StateCreated → checkExpectedNetworks skipped + State: container.StateCreated, + Labels: map[string]string{ + api.ServiceLabel: "web", + api.ContainerNumberLabel: "1", + api.ConfigHashLabel: hash, + api.ProjectLabel: "tp", + }, + // Connected to a different network ID — would trigger recreate if running + NetworkSettings: &container.NetworkSettingsSummary{ + Networks: map[string]*network.EndpointSettings{ + "tp_mynet": {NetworkID: "wrong-id"}, + }, + }, + }, + }, + }, + Networks: map[string]ObservedNetwork{ + "mynet": { + ID: "correct-id", + Name: "tp_mynet", + ConfigHash: netHash, + }, + }, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + // No recreate despite network mismatch — container is not running + assert.Assert(t, plan.IsEmpty(), "expected empty plan but got:\n%s", plan.String()) +} + +// TestReconcileSwarmNetworkSkipped verifies that the "swarm" overlay network +// special case is handled correctly — containers using it should not trigger +// a false "network configuration changed" recreate. 
+func TestReconcileSwarmNetworkSkipped(t *testing.T) { + svc := types.ServiceConfig{ + Name: "web", + Image: "nginx", + Networks: map[string]*types.ServiceNetworkConfig{ + "overlay": nil, + }, + } + hash, err := ServiceHash(svc) + assert.NilError(t, err) + + netCfg := types.NetworkConfig{Name: "tp_overlay"} + netHash, err := NetworkHash(&netCfg) + assert.NilError(t, err) + + ctr := makeContainer("tp", "web", 1, hash) + ctr.NetworkSettings = &container.NetworkSettingsSummary{ + Networks: map[string]*network.EndpointSettings{ + "tp_overlay": {NetworkID: "net1"}, + }, + } + + project := &types.Project{ + Name: "tp", + Services: types.Services{ + "web": svc, + }, + Networks: types.Networks{ + "overlay": netCfg, + }, + } + observed := &ObservedState{ + ProjectName: "tp", + Containers: map[string]Containers{"web": {ctr}}, + Networks: map[string]ObservedNetwork{ + // Network ID is "swarm" — the special case + "overlay": {ID: "swarm", Name: "tp_overlay", ConfigHash: netHash}, + }, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Assert(t, plan.IsEmpty(), "expected empty plan but got:\n%s", plan.String()) +} + +// TestReconcileNilNetworkSettingsNoPanic verifies that a container with nil +// NetworkSettings does not cause a panic during network recreate. 
+func TestReconcileNilNetworkSettingsNoPanic(t *testing.T) { + svc := types.ServiceConfig{ + Name: "web", + Image: "nginx", + Networks: map[string]*types.ServiceNetworkConfig{ + "default": nil, + }, + } + hash, err := ServiceHash(svc) + assert.NilError(t, err) + + project := &types.Project{ + Name: "tp", + Services: types.Services{ + "web": svc, + }, + Networks: types.Networks{ + "default": types.NetworkConfig{Name: "tp_default"}, + }, + } + observed := &ObservedState{ + ProjectName: "tp", + Containers: map[string]Containers{ + "web": { + { + ID: "c1", + Names: []string{"/tp-web-1"}, + State: container.StateRunning, + Labels: map[string]string{ + api.ServiceLabel: "web", + api.ContainerNumberLabel: "1", + api.ConfigHashLabel: hash, + api.ProjectLabel: "tp", + }, + NetworkSettings: nil, // nil — must not panic + }, + }, + }, + Networks: map[string]ObservedNetwork{ + "default": {ID: "net1", Name: "tp_default", ConfigHash: "outdated"}, + }, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + // Container is skipped by findContainersOnNetwork (nil settings), + // network is still recreated + assert.Equal(t, plan.String(), ` +1. remove network tp_default reason: config hash changed +[1] -> 2. create network tp_default reason: config hash changed +`) +} + +// TestReconcileExternalNetworkResolvedFromContainer verifies that external +// network IDs are resolved from running containers' network endpoints, +// preventing a false "network configuration changed" recreate. 
+func TestReconcileExternalNetworkResolvedFromContainer(t *testing.T) { + svc := types.ServiceConfig{ + Name: "web", + Image: "nginx", + Networks: map[string]*types.ServiceNetworkConfig{ + "ext": nil, + }, + } + hash, err := ServiceHash(svc) + assert.NilError(t, err) + + ctr := makeContainer("tp", "web", 1, hash) + ctr.NetworkSettings = &container.NetworkSettingsSummary{ + Networks: map[string]*network.EndpointSettings{ + "shared_net": {NetworkID: "extnet123"}, + }, + } + + project := &types.Project{ + Name: "tp", + Services: types.Services{ + "web": svc, + }, + Networks: types.Networks{ + "ext": types.NetworkConfig{Name: "shared_net", External: true}, + }, + } + observed := &ObservedState{ + ProjectName: "tp", + Containers: map[string]Containers{"web": {ctr}}, + Networks: map[string]ObservedNetwork{}, // external net not in observed + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + // No recreate — external network ID resolved from container + assert.Assert(t, plan.IsEmpty(), "expected empty plan but got:\n%s", plan.String()) +} + +// TestReconcileAnonymousVolumeNoOps verifies that anonymous volumes +// (empty Source) do not interfere with volume reconciliation. 
+func TestReconcileAnonymousVolumeNoOps(t *testing.T) { + svc := types.ServiceConfig{ + Name: "web", + Image: "nginx", + Volumes: []types.ServiceVolumeConfig{ + {Type: "volume", Source: "", Target: "/data"}, + }, + } + hash, err := ServiceHash(svc) + assert.NilError(t, err) + + project := &types.Project{ + Name: "tp", + Services: types.Services{ + "web": svc, + }, + } + observed := &ObservedState{ + ProjectName: "tp", + Containers: map[string]Containers{ + "web": {makeContainer("tp", "web", 1, hash)}, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Assert(t, plan.IsEmpty(), "expected empty plan but got:\n%s", plan.String()) +} + +// TestReconcileVolumeRecreateUnrelatedServiceUnaffected verifies that when a +// volume is recreated, only services that mount it are affected. 
+func TestReconcileVolumeRecreateUnrelatedServiceUnaffected(t *testing.T) { + appSvc := types.ServiceConfig{ + Name: "app", + Image: "nginx", + Volumes: []types.ServiceVolumeConfig{ + {Type: "volume", Source: "data", Target: "/data"}, + }, + } + workerSvc := types.ServiceConfig{Name: "worker", Image: "worker:latest"} + appHash, err := ServiceHash(appSvc) + assert.NilError(t, err) + workerHash, err := ServiceHash(workerSvc) + assert.NilError(t, err) + + origVol := types.VolumeConfig{Name: "tp_data"} + origHash, err := VolumeHash(origVol) + assert.NilError(t, err) + + updatedVol := types.VolumeConfig{ + Name: "tp_data", + Labels: types.Labels{"v": "2"}, + } + + project := &types.Project{ + Name: "tp", + Services: types.Services{ + "app": appSvc, + "worker": workerSvc, + }, + Volumes: types.Volumes{ + "data": updatedVol, + }, + } + observed := &ObservedState{ + ProjectName: "tp", + Containers: map[string]Containers{ + "app": { + { + ID: "ctr-app", + Names: []string{"/tp-app-1"}, + State: container.StateRunning, + Labels: map[string]string{ + api.ServiceLabel: "app", + api.ContainerNumberLabel: "1", + api.ProjectLabel: "tp", + api.ConfigHashLabel: appHash, + }, + Mounts: []container.MountPoint{ + {Type: mmount.TypeVolume, Name: "tp_data"}, + }, + }, + }, + "worker": {makeContainer("tp", "worker", 1, workerHash)}, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{ + "data": {Name: "tp_data", Driver: "local", ConfigHash: origHash}, + }, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + // Only app is affected — worker has no volume ops + assert.Equal(t, plan.String(), ` +1. stop container tp-app-1 reason: volume "tp_data" is being recreated +[1] -> 2. remove container tp-app-1 reason: volume "tp_data" is being recreated +[2] -> 3. 
remove volume tp_data reason: config hash changed +[3] -> 4. create volume tp_data reason: config hash changed +[4] -> 5. create container tp-app-1 reason: volume "data" is being recreated +`) +} + +// TestReconcileCircularDependencyNoPanic verifies that circular service +// dependencies do not cause infinite recursion in expandServiceDependencies. +func TestReconcileCircularDependencyNoPanic(t *testing.T) { + project := &types.Project{ + Name: "tp", + Services: types.Services{ + "a": types.ServiceConfig{ + Name: "a", + Image: "nginx", + DependsOn: types.DependsOnConfig{ + "b": types.ServiceDependency{Condition: "service_started"}, + }, + }, + "b": types.ServiceConfig{ + Name: "b", + Image: "nginx", + DependsOn: types.DependsOnConfig{ + "a": types.ServiceDependency{Condition: "service_started"}, + }, + }, + }, + } + observed := &ObservedState{ + ProjectName: "tp", + Containers: map[string]Containers{}, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + // Should not panic — expandServiceDependencies uses a seen map + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Assert(t, !plan.IsEmpty(), "expected create ops for both services") +} + +// TestReconcileCascadingRestartMultipleDepsOneRecreated verifies cascading +// restart fires when only one of multiple dependencies is recreated. 
+func TestReconcileCascadingRestartMultipleDepsOneRecreated(t *testing.T) { + dbSvc := types.ServiceConfig{Name: "db", Image: "postgres"} + cacheSvc := types.ServiceConfig{Name: "cache", Image: "redis"} + cacheHash, err := ServiceHash(cacheSvc) + assert.NilError(t, err) + webSvc := types.ServiceConfig{ + Name: "web", + Image: "nginx", + DependsOn: types.DependsOnConfig{ + "db": types.ServiceDependency{Condition: "service_started", Restart: true}, + "cache": types.ServiceDependency{Condition: "service_started", Restart: true}, + }, + } + webHash, err := ServiceHash(webSvc) + assert.NilError(t, err) + + project := &types.Project{ + Name: "tp", + Services: types.Services{ + "db": dbSvc, + "cache": cacheSvc, + "web": webSvc, + }, + } + observed := &ObservedState{ + ProjectName: "tp", + Containers: map[string]Containers{ + "db": {makeContainer("tp", "db", 1, "stale")}, // stale → recreated + "cache": {makeContainer("tp", "cache", 1, cacheHash)}, // up-to-date + "web": {makeContainer("tp", "web", 1, webHash)}, // up-to-date + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. create container tp-db-1_tp-db-1 reason: config hash changed +2. stop container tp-web-1 reason: dependency "db" is being recreated (restart: true) +[1] -> 3. stop container tp-db-1 reason: config hash changed +[3] -> 4. remove container tp-db-1 reason: config hash changed +[4] -> 5. rename container tp-db-1 reason: config hash changed +[5] -> 6. start container tp-db-1 reason: config hash changed +[6,2] -> 7. 
start container tp-web-1 reason: restart after dependency "db" recreated +`) +} + +// TestReconcileCascadingRestartSkippedForExitedDependent verifies that +// cascading restart is skipped when the dependent container is not running. +func TestReconcileCascadingRestartSkippedForExitedDependent(t *testing.T) { + dbSvc := types.ServiceConfig{Name: "db", Image: "postgres"} + webSvc := types.ServiceConfig{ + Name: "web", + Image: "nginx", + DependsOn: types.DependsOnConfig{ + "db": types.ServiceDependency{ + Condition: "service_started", + Restart: true, + }, + }, + } + webHash, err := ServiceHash(webSvc) + assert.NilError(t, err) + + webCtr := makeContainer("tp", "web", 1, webHash) + webCtr.State = container.StateExited + + project := &types.Project{ + Name: "tp", + Services: types.Services{ + "db": dbSvc, + "web": webSvc, + }, + } + observed := &ObservedState{ + ProjectName: "tp", + Containers: map[string]Containers{ + "db": {makeContainer("tp", "db", 1, "stale")}, + "web": {webCtr}, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + // Only db is recreated — web is exited, no cascading restart + assert.Equal(t, plan.String(), ` +1. create container tp-db-1_tp-db-1 reason: config hash changed +[1] -> 2. stop container tp-db-1 reason: config hash changed +[2] -> 3. remove container tp-db-1 reason: config hash changed +[3] -> 4. rename container tp-db-1 reason: config hash changed +[4] -> 5. start container tp-db-1 reason: config hash changed +`) +} + +// TestReconcileCustomContainerNameScale1Allowed verifies that container_name +// with the default scale (1) works without error. 
+func TestReconcileCustomContainerNameScale1Allowed(t *testing.T) { + svc := types.ServiceConfig{ + Name: "web", + Image: "nginx", + ContainerName: "my-app", + } + + project := &types.Project{ + Name: "tp", + Services: types.Services{ + "web": svc, + }, + } + observed := &ObservedState{ + ProjectName: "tp", + Containers: map[string]Containers{}, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. create container my-app reason: scale up +`) +} + +// TestReconcileCustomContainerNameScale0Allowed verifies that container_name +// with scale=0 works without error and scales down existing containers. +func TestReconcileCustomContainerNameScale0Allowed(t *testing.T) { + svc := types.ServiceConfig{ + Name: "web", + Image: "nginx", + ContainerName: "my-app", + Scale: intPtr(0), + } + hash, err := ServiceHash(svc) + assert.NilError(t, err) + + project := &types.Project{ + Name: "tp", + Services: types.Services{ + "web": svc, + }, + } + observed := &ObservedState{ + ProjectName: "tp", + Containers: map[string]Containers{ + "web": { + { + ID: "c1", + Names: []string{"/my-app"}, + State: container.StateRunning, + Labels: map[string]string{ + api.ServiceLabel: "web", + api.ContainerNumberLabel: "1", + api.ConfigHashLabel: hash, + api.ProjectLabel: "tp", + }, + }, + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. stop container my-app reason: scale down +[1] -> 2. 
remove container my-app reason: scale down +`) +} + +// TestReconcileShortContainerIDInRecreate verifies that container IDs shorter +// than 12 characters don't cause a slice bounds panic in temp name generation. +func TestReconcileShortContainerIDInRecreate(t *testing.T) { + svc := types.ServiceConfig{Name: "web", Image: "nginx"} + + project := &types.Project{ + Name: "tp", + Services: types.Services{ + "web": svc, + }, + } + observed := &ObservedState{ + ProjectName: "tp", + Containers: map[string]Containers{ + "web": { + { + ID: "abc123", // 6 chars — shorter than 12 + Names: []string{"/tp-web-1"}, + State: container.StateRunning, + Labels: map[string]string{ + api.ServiceLabel: "web", + api.ContainerNumberLabel: "1", + api.ConfigHashLabel: "stale", + api.ProjectLabel: "tp", + }, + }, + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. create container abc123_tp-web-1 reason: config hash changed +[1] -> 2. stop container tp-web-1 reason: config hash changed +[2] -> 3. remove container tp-web-1 reason: config hash changed +[3] -> 4. rename container tp-web-1 reason: config hash changed +[4] -> 5. start container tp-web-1 reason: config hash changed +`) +} + +// TestReconcileTwoServicesShareRecreatedVolume verifies that when two services +// mount the same volume and it's recreated, both get stop+remove+create ops. 
+func TestReconcileTwoServicesShareRecreatedVolume(t *testing.T) { + appSvc := types.ServiceConfig{ + Name: "app", + Image: "nginx", + Volumes: []types.ServiceVolumeConfig{ + {Type: "volume", Source: "shared", Target: "/data"}, + }, + } + workerSvc := types.ServiceConfig{ + Name: "worker", + Image: "worker:latest", + Volumes: []types.ServiceVolumeConfig{ + {Type: "volume", Source: "shared", Target: "/data"}, + }, + } + appHash, err := ServiceHash(appSvc) + assert.NilError(t, err) + workerHash, err := ServiceHash(workerSvc) + assert.NilError(t, err) + + origVol := types.VolumeConfig{Name: "tp_shared"} + origHash, err := VolumeHash(origVol) + assert.NilError(t, err) + + updatedVol := types.VolumeConfig{ + Name: "tp_shared", + Labels: types.Labels{"v": "2"}, + } + + project := &types.Project{ + Name: "tp", + Services: types.Services{ + "app": appSvc, + "worker": workerSvc, + }, + Volumes: types.Volumes{ + "shared": updatedVol, + }, + } + observed := &ObservedState{ + ProjectName: "tp", + Containers: map[string]Containers{ + "app": { + { + ID: "ctr-app", + Names: []string{"/tp-app-1"}, + State: container.StateRunning, + Labels: map[string]string{ + api.ServiceLabel: "app", + api.ContainerNumberLabel: "1", + api.ProjectLabel: "tp", + api.ConfigHashLabel: appHash, + }, + Mounts: []container.MountPoint{ + {Type: mmount.TypeVolume, Name: "tp_shared"}, + }, + }, + }, + "worker": { + { + ID: "ctr-worker", + Names: []string{"/tp-worker-1"}, + State: container.StateRunning, + Labels: map[string]string{ + api.ServiceLabel: "worker", + api.ContainerNumberLabel: "1", + api.ProjectLabel: "tp", + api.ConfigHashLabel: workerHash, + }, + Mounts: []container.MountPoint{ + {Type: mmount.TypeVolume, Name: "tp_shared"}, + }, + }, + }, + }, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{ + "shared": {Name: "tp_shared", Driver: "local", ConfigHash: origHash}, + }, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + 
Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + assert.Equal(t, plan.String(), ` +1. stop container tp-app-1 reason: volume "tp_shared" is being recreated +2. stop container tp-worker-1 reason: volume "tp_shared" is being recreated +[1] -> 3. remove container tp-app-1 reason: volume "tp_shared" is being recreated +[2] -> 4. remove container tp-worker-1 reason: volume "tp_shared" is being recreated +[3,4] -> 5. remove volume tp_shared reason: config hash changed +[5] -> 6. create volume tp_shared reason: config hash changed +[6] -> 7. create container tp-app-1 reason: volume "shared" is being recreated +[6] -> 8. create container tp-worker-1 reason: volume "shared" is being recreated +`) +} + +// TestReconcileDependsOnMissingServiceNoPanic verifies that a depends_on +// reference to a non-existent service doesn't panic — it's silently ignored. +func TestReconcileDependsOnMissingServiceNoPanic(t *testing.T) { + svc := types.ServiceConfig{ + Name: "web", + Image: "nginx", + DependsOn: types.DependsOnConfig{ + "ghost": types.ServiceDependency{Condition: "service_started"}, + }, + } + + project := &types.Project{ + Name: "tp", + Services: types.Services{ + "web": svc, + }, + } + observed := &ObservedState{ + ProjectName: "tp", + Containers: map[string]Containers{}, + Networks: map[string]ObservedNetwork{}, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + // "ghost" is silently ignored — web is created without dependency edge + assert.Equal(t, plan.String(), ` +1. 
create container tp-web-1 reason: scale up +`) +} + +// TestReconcileStaleConfigAndNetworkRecreate verifies that when a container +// has both a stale config hash AND is connected to a network being recreated, +// the plan contains valid operations for both paths without panicking. +func TestReconcileStaleConfigAndNetworkRecreate(t *testing.T) { + svc := types.ServiceConfig{ + Name: "web", + Image: "nginx", + Networks: map[string]*types.ServiceNetworkConfig{ + "default": nil, + }, + } + + project := &types.Project{ + Name: "tp", + Services: types.Services{ + "web": svc, + }, + Networks: types.Networks{ + "default": types.NetworkConfig{Name: "tp_default"}, + }, + } + observed := &ObservedState{ + ProjectName: "tp", + Containers: map[string]Containers{ + "web": { + { + ID: "abc123def456", + Names: []string{"/tp-web-1"}, + State: container.StateRunning, + Labels: map[string]string{ + api.ServiceLabel: "web", + api.ContainerNumberLabel: "1", + api.ConfigHashLabel: "stale", + api.ProjectLabel: "tp", + }, + NetworkSettings: &container.NetworkSettingsSummary{ + Networks: map[string]*network.EndpointSettings{ + "tp_default": {NetworkID: "net1"}, + }, + }, + }, + }, + }, + Networks: map[string]ObservedNetwork{ + "default": {ID: "net1", Name: "tp_default", ConfigHash: "outdated"}, + }, + Volumes: map[string]ObservedVolume{}, + Orphans: Containers{}, + } + + plan, err := Reconcile(project, observed, ReconcileOptions{ + Recreate: api.RecreateDiverged, + RecreateDependencies: api.RecreateDiverged, + }) + assert.NilError(t, err) + // Both network recreate ops and container recreate ops coexist — plan is non-empty + assert.Assert(t, !plan.IsEmpty(), "expected non-empty plan") + // The plan has ops from both paths (network: stop/disconnect/remove/create/connect/start + // and container: create-temp/stop/remove/rename/start). Some ops share IDs, so the + // network stop overwrites the container stop. Verify key ops exist. 
+	var hasNetworkRemove, hasContainerCreate bool
+	for _, op := range plan.Operations {
+		if op.Type == OpRemoveNetwork {
+			hasNetworkRemove = true
+		}
+		if op.Type == OpCreateContainer {
+			hasContainerCreate = true
+		}
+	}
+	assert.Assert(t, hasNetworkRemove, "expected network remove op")
+	assert.Assert(t, hasContainerCreate, "expected container create op")
+}
+
+// ---------------------------------------------------------------------------
+// Volume mount mismatch tests
+// ---------------------------------------------------------------------------
+
+// TestReconcileVolumeMountMissingTriggersRecreate verifies that when a container's
+// config hash matches but it's missing a volume mount, checkExpectedVolumes triggers
+// a "volume configuration changed" recreate.
+func TestReconcileVolumeMountMissingTriggersRecreate(t *testing.T) {
+	svc := types.ServiceConfig{
+		Name:  "web",
+		Image: "nginx",
+		Volumes: []types.ServiceVolumeConfig{
+			{Type: "volume", Source: "data", Target: "/data"},
+		},
+	}
+	hash, err := ServiceHash(svc)
+	assert.NilError(t, err)
+
+	volCfg := types.VolumeConfig{Name: "tp_data"}
+	volHash, err := VolumeHash(volCfg)
+	assert.NilError(t, err)
+
+	project := &types.Project{
+		Name: "tp",
+		Services: types.Services{
+			"web": svc,
+		},
+		Volumes: types.Volumes{
+			"data": volCfg,
+		},
+	}
+	observed := &ObservedState{
+		ProjectName: "tp",
+		Containers: map[string]Containers{
+			"web": {
+				{
+					ID:    "abc123def456",
+					Names: []string{"/tp-web-1"},
+					State: container.StateRunning,
+					Labels: map[string]string{
+						api.ServiceLabel:         "web",
+						api.ContainerNumberLabel: "1",
+						api.ConfigHashLabel:      hash,
+						api.ProjectLabel:         "tp",
+					},
+					Mounts: []container.MountPoint{}, // no mounts
+				},
+			},
+		},
+		Networks: map[string]ObservedNetwork{},
+		Volumes: map[string]ObservedVolume{
+			"data": {Name: "tp_data", Driver: "local", ConfigHash: volHash},
+		},
+		Orphans: Containers{},
+	}
+
+	plan, err := Reconcile(project, observed, ReconcileOptions{
+		Recreate:             api.RecreateDiverged,
+		RecreateDependencies: api.RecreateDiverged,
+	})
+	assert.NilError(t, err)
+	assert.Equal(t, plan.String(), `
+1. create container abc123def456_tp-web-1 reason: volume configuration changed
+[1] -> 2. stop container tp-web-1 reason: volume configuration changed
+[2] -> 3. remove container tp-web-1 reason: volume configuration changed
+[3] -> 4. rename container tp-web-1 reason: volume configuration changed
+[4] -> 5. start container tp-web-1 reason: volume configuration changed
+`)
+}
+
+// TestReconcileMultipleVolumesOneMissing verifies that when a container has
+// two declared volumes but only one is mounted, it triggers recreate.
+func TestReconcileMultipleVolumesOneMissing(t *testing.T) {
+	svc := types.ServiceConfig{
+		Name:  "web",
+		Image: "nginx",
+		Volumes: []types.ServiceVolumeConfig{
+			{Type: "volume", Source: "data", Target: "/data"},
+			{Type: "volume", Source: "logs", Target: "/logs"},
+		},
+	}
+	hash, err := ServiceHash(svc)
+	assert.NilError(t, err)
+
+	volData := types.VolumeConfig{Name: "tp_data"}
+	volLogs := types.VolumeConfig{Name: "tp_logs"}
+	volDataHash, err := VolumeHash(volData)
+	assert.NilError(t, err)
+	volLogsHash, err := VolumeHash(volLogs)
+	assert.NilError(t, err)
+
+	project := &types.Project{
+		Name: "tp",
+		Services: types.Services{
+			"web": svc,
+		},
+		Volumes: types.Volumes{
+			"data": volData,
+			"logs": volLogs,
+		},
+	}
+	observed := &ObservedState{
+		ProjectName: "tp",
+		Containers: map[string]Containers{
+			"web": {
+				{
+					ID:    "abc123def456",
+					Names: []string{"/tp-web-1"},
+					State: container.StateRunning,
+					Labels: map[string]string{
+						api.ServiceLabel:         "web",
+						api.ContainerNumberLabel: "1",
+						api.ConfigHashLabel:      hash,
+						api.ProjectLabel:         "tp",
+					},
+					// only data mounted, logs missing
+					Mounts: []container.MountPoint{
+						{Type: mmount.TypeVolume, Name: "tp_data"},
+					},
+				},
+			},
+		},
+		Networks: map[string]ObservedNetwork{},
+		Volumes: map[string]ObservedVolume{
+			"data": {Name: "tp_data", Driver: "local", ConfigHash: volDataHash},
+			"logs": {Name: "tp_logs", Driver: "local", ConfigHash: volLogsHash},
+		},
+		Orphans: Containers{},
+	}
+
+	plan, err := Reconcile(project, observed, ReconcileOptions{
+		Recreate:             api.RecreateDiverged,
+		RecreateDependencies: api.RecreateDiverged,
+	})
+	assert.NilError(t, err)
+	assert.Equal(t, plan.String(), `
+1. create container abc123def456_tp-web-1 reason: volume configuration changed
+[1] -> 2. stop container tp-web-1 reason: volume configuration changed
+[2] -> 3. remove container tp-web-1 reason: volume configuration changed
+[3] -> 4. rename container tp-web-1 reason: volume configuration changed
+[4] -> 5. start container tp-web-1 reason: volume configuration changed
+`)
+}
+
+// ---------------------------------------------------------------------------
+// Timeout propagation
+// ---------------------------------------------------------------------------
+
+// TestReconcileTimeoutPropagated verifies that the Timeout option is carried
+// through to stop operations in scale-down.
+func TestReconcileTimeoutPropagated(t *testing.T) {
+	svc := types.ServiceConfig{Name: "web", Image: "nginx"}
+	hash, err := ServiceHash(svc)
+	assert.NilError(t, err)
+
+	timeout := 30 * time.Second
+
+	project := &types.Project{
+		Name: "tp",
+		Services: types.Services{
+			"web": svc,
+		},
+	}
+	observed := &ObservedState{
+		ProjectName: "tp",
+		Containers: map[string]Containers{
+			"web": {
+				makeContainer("tp", "web", 1, hash),
+				makeContainer("tp", "web", 2, hash),
+			},
+		},
+		Networks: map[string]ObservedNetwork{},
+		Volumes:  map[string]ObservedVolume{},
+		Orphans:  Containers{},
+	}
+
+	plan, err := Reconcile(project, observed, ReconcileOptions{
+		Recreate:             api.RecreateDiverged,
+		RecreateDependencies: api.RecreateDiverged,
+		Timeout:              &timeout,
+	})
+	assert.NilError(t, err)
+	assert.Equal(t, plan.String(), `
+1. stop container tp-web-2 reason: scale down
+[1] -> 2. remove container tp-web-2 reason: scale down
+`)
+	// Verify timeout is set on the stop op
+	stopOp := plan.Operations["stop-container:tp-web-2"]
+	assert.Assert(t, stopOp.ContainerOp.Timeout != nil)
+	assert.Equal(t, *stopOp.ContainerOp.Timeout, timeout)
+}
+
+// ---------------------------------------------------------------------------
+// Scale edge cases
+// ---------------------------------------------------------------------------
+
+// TestReconcileScaleUpFromZeroContainers verifies that scaling up when
+// there are zero existing containers (first deploy) works correctly.
+func TestReconcileScaleUpFromZeroContainers(t *testing.T) {
+	svc := types.ServiceConfig{Name: "web", Image: "nginx", Scale: intPtr(2)}
+
+	project := &types.Project{
+		Name: "tp",
+		Services: types.Services{
+			"web": svc,
+		},
+	}
+	observed := &ObservedState{
+		ProjectName: "tp",
+		Containers:  map[string]Containers{}, // nil entry for "web"
+		Networks:    map[string]ObservedNetwork{},
+		Volumes:     map[string]ObservedVolume{},
+		Orphans:     Containers{},
+	}
+
+	plan, err := Reconcile(project, observed, ReconcileOptions{
+		Recreate:             api.RecreateDiverged,
+		RecreateDependencies: api.RecreateDiverged,
+	})
+	assert.NilError(t, err)
+	assert.Equal(t, plan.String(), `
+1. create container tp-web-1 reason: scale up
+2. create container tp-web-2 reason: scale up
+`)
+}
+
+// TestReconcileAllContainersObsolete verifies that when all containers have
+// stale hashes and scale is unchanged, each gets a full recreate chain.
+func TestReconcileAllContainersObsolete(t *testing.T) {
+	svc := types.ServiceConfig{Name: "web", Image: "nginx", Scale: intPtr(3)}
+
+	project := &types.Project{
+		Name: "tp",
+		Services: types.Services{
+			"web": svc,
+		},
+	}
+	observed := &ObservedState{
+		ProjectName: "tp",
+		Containers: map[string]Containers{
+			"web": {
+				makeContainer("tp", "web", 1, "stale1"),
+				makeContainer("tp", "web", 2, "stale2"),
+				makeContainer("tp", "web", 3, "stale3"),
+			},
+		},
+		Networks: map[string]ObservedNetwork{},
+		Volumes:  map[string]ObservedVolume{},
+		Orphans:  Containers{},
+	}
+
+	plan, err := Reconcile(project, observed, ReconcileOptions{
+		Recreate:             api.RecreateDiverged,
+		RecreateDependencies: api.RecreateDiverged,
+	})
+	assert.NilError(t, err)
+	assert.Equal(t, plan.String(), `
+1. create container tp-web-1_tp-web-1 reason: config hash changed
+2. create container tp-web-2_tp-web-2 reason: config hash changed
+3. create container tp-web-3_tp-web-3 reason: config hash changed
+[1] -> 4. stop container tp-web-1 reason: config hash changed
+[2] -> 5. stop container tp-web-2 reason: config hash changed
+[3] -> 6. stop container tp-web-3 reason: config hash changed
+[4] -> 7. remove container tp-web-1 reason: config hash changed
+[5] -> 8. remove container tp-web-2 reason: config hash changed
+[6] -> 9. remove container tp-web-3 reason: config hash changed
+[7] -> 10. rename container tp-web-1 reason: config hash changed
+[8] -> 11. rename container tp-web-2 reason: config hash changed
+[9] -> 12. rename container tp-web-3 reason: config hash changed
+[10] -> 13. start container tp-web-1 reason: config hash changed
+[11] -> 14. start container tp-web-2 reason: config hash changed
+[12] -> 15. start container tp-web-3 reason: config hash changed
+`)
+}
+
+// TestReconcileScaleDownStaleRemovedCurrentKept verifies that when scaling
+// down with a mix of stale and current containers, the stale ones are removed
+// and the current one is kept.
+func TestReconcileScaleDownStaleRemovedCurrentKept(t *testing.T) {
+	svc := types.ServiceConfig{Name: "web", Image: "nginx"}
+	hash, err := ServiceHash(svc)
+	assert.NilError(t, err)
+
+	project := &types.Project{
+		Name: "tp",
+		Services: types.Services{
+			"web": svc,
+		},
+	}
+	observed := &ObservedState{
+		ProjectName: "tp",
+		Containers: map[string]Containers{
+			"web": {
+				makeContainer("tp", "web", 1, "stale1"),
+				makeContainer("tp", "web", 2, hash), // current
+				makeContainer("tp", "web", 3, "stale3"),
+			},
+		},
+		Networks: map[string]ObservedNetwork{},
+		Volumes:  map[string]ObservedVolume{},
+		Orphans:  Containers{},
+	}
+
+	plan, err := Reconcile(project, observed, ReconcileOptions{
+		Recreate:             api.RecreateDiverged,
+		RecreateDependencies: api.RecreateDiverged,
+	})
+	assert.NilError(t, err)
+	// Both stale containers removed, current container #2 survives
+	assert.Equal(t, plan.String(), `
+1. stop container tp-web-1 reason: scale down
+2. stop container tp-web-3 reason: scale down
+[1] -> 3. remove container tp-web-1 reason: scale down
+[2] -> 4. remove container tp-web-3 reason: scale down
+`)
+}
+
+// ---------------------------------------------------------------------------
+// Dependency edge wiring
+// ---------------------------------------------------------------------------
+
+// TestReconcileRecreateNoEdgeToRunningDependency verifies that when a service
+// is recreated but its dependency is already running and up-to-date, no
+// dependency edge is added to the recreate chain.
+func TestReconcileRecreateNoEdgeToRunningDependency(t *testing.T) {
+	dbSvc := types.ServiceConfig{Name: "db", Image: "postgres"}
+	dbHash, err := ServiceHash(dbSvc)
+	assert.NilError(t, err)
+	webSvc := types.ServiceConfig{
+		Name:  "web",
+		Image: "nginx",
+		DependsOn: types.DependsOnConfig{
+			"db": types.ServiceDependency{Condition: "service_started"},
+		},
+	}
+
+	project := &types.Project{
+		Name: "tp",
+		Services: types.Services{
+			"db":  dbSvc,
+			"web": webSvc,
+		},
+	}
+	observed := &ObservedState{
+		ProjectName: "tp",
+		Containers: map[string]Containers{
+			"db":  {makeContainer("tp", "db", 1, dbHash)}, // up-to-date
+			"web": {makeContainer("tp", "web", 1, "stale")},
+		},
+		Networks: map[string]ObservedNetwork{},
+		Volumes:  map[string]ObservedVolume{},
+		Orphans:  Containers{},
+	}
+
+	plan, err := Reconcile(project, observed, ReconcileOptions{
+		Recreate:             api.RecreateDiverged,
+		RecreateDependencies: api.RecreateDiverged,
+	})
+	assert.NilError(t, err)
+	// No dependency edge on db — it has no "ready" op in the plan
+	assert.Equal(t, plan.String(), `
+1. create container tp-web-1_tp-web-1 reason: config hash changed
+[1] -> 2. stop container tp-web-1 reason: config hash changed
+[2] -> 3. remove container tp-web-1 reason: config hash changed
+[3] -> 4. rename container tp-web-1 reason: config hash changed
+[4] -> 5. start container tp-web-1 reason: config hash changed
+`)
+}
+
+// TestReconcileTwoServicesDependOnSameService verifies that when two services
+// depend on the same missing service, both get dependency edges to it.
+func TestReconcileTwoServicesDependOnSameService(t *testing.T) {
+	project := &types.Project{
+		Name: "tp",
+		Services: types.Services{
+			"db": types.ServiceConfig{Name: "db", Image: "postgres"},
+			"web": types.ServiceConfig{
+				Name:  "web",
+				Image: "nginx",
+				DependsOn: types.DependsOnConfig{
+					"db": types.ServiceDependency{Condition: "service_started"},
+				},
+			},
+			"worker": types.ServiceConfig{
+				Name:  "worker",
+				Image: "worker",
+				DependsOn: types.DependsOnConfig{
+					"db": types.ServiceDependency{Condition: "service_started"},
+				},
+			},
+		},
+	}
+	observed := &ObservedState{
+		ProjectName: "tp",
+		Containers:  map[string]Containers{},
+		Networks:    map[string]ObservedNetwork{},
+		Volumes:     map[string]ObservedVolume{},
+		Orphans:     Containers{},
+	}
+
+	plan, err := Reconcile(project, observed, ReconcileOptions{
+		Recreate:             api.RecreateDiverged,
+		RecreateDependencies: api.RecreateDiverged,
+	})
+	assert.NilError(t, err)
+	assert.Equal(t, plan.String(), `
+1. create container tp-db-1 reason: scale up
+[1] -> 2. create container tp-web-1 reason: scale up
+[1] -> 3. create container tp-worker-1 reason: scale up
+`)
+}
+
+// TestReconcileContainerCreateDependsOnRecreatedNetwork verifies that when a
+// network is being recreated (not just created), a new container using that
+// network depends on the network create op.
+func TestReconcileContainerCreateDependsOnRecreatedNetwork(t *testing.T) {
+	svc := types.ServiceConfig{
+		Name:  "web",
+		Image: "nginx",
+		Networks: map[string]*types.ServiceNetworkConfig{
+			"default": nil,
+		},
+	}
+
+	project := &types.Project{
+		Name: "tp",
+		Services: types.Services{
+			"web": svc,
+		},
+		Networks: types.Networks{
+			"default": types.NetworkConfig{Name: "tp_default"},
+		},
+	}
+	observed := &ObservedState{
+		ProjectName: "tp",
+		Containers:  map[string]Containers{}, // no containers yet
+		Networks: map[string]ObservedNetwork{
+			"default": {ID: "net1", Name: "tp_default", ConfigHash: "outdated"},
+		},
+		Volumes: map[string]ObservedVolume{},
+		Orphans: Containers{},
+	}
+
+	plan, err := Reconcile(project, observed, ReconcileOptions{
+		Recreate:             api.RecreateDiverged,
+		RecreateDependencies: api.RecreateDiverged,
+	})
+	assert.NilError(t, err)
+	assert.Equal(t, plan.String(), `
+1. remove network tp_default reason: config hash changed
+[1] -> 2. create network tp_default reason: config hash changed
+[2] -> 3. create container tp-web-1 reason: scale up
+`)
+}
+
+// ---------------------------------------------------------------------------
+// Cascading restart edge case
+// ---------------------------------------------------------------------------
+
+// TestReconcileCascadingRestartSkippedWhenAlreadyRecreating verifies that
+// when a service is already being recreated (stale hash), cascading restart
+// does not add duplicate stop/start ops.
+func TestReconcileCascadingRestartSkippedWhenAlreadyRecreating(t *testing.T) {
+	dbSvc := types.ServiceConfig{Name: "db", Image: "postgres"}
+	webSvc := types.ServiceConfig{
+		Name:  "web",
+		Image: "nginx",
+		DependsOn: types.DependsOnConfig{
+			"db": types.ServiceDependency{
+				Condition: "service_started",
+				Restart:   true,
+			},
+		},
+	}
+
+	project := &types.Project{
+		Name: "tp",
+		Services: types.Services{
+			"db":  dbSvc,
+			"web": webSvc,
+		},
+	}
+	observed := &ObservedState{
+		ProjectName: "tp",
+		Containers: map[string]Containers{
+			"db":  {makeContainer("tp", "db", 1, "stale-db")},
+			"web": {makeContainer("tp", "web", 1, "stale-web")}, // also stale
+		},
+		Networks: map[string]ObservedNetwork{},
+		Volumes:  map[string]ObservedVolume{},
+		Orphans:  Containers{},
+	}
+
+	plan, err := Reconcile(project, observed, ReconcileOptions{
+		Recreate:             api.RecreateDiverged,
+		RecreateDependencies: api.RecreateDiverged,
+	})
+	assert.NilError(t, err)
+	// web is already being recreated — cascading restart does NOT add a duplicate stop
+	assert.Equal(t, plan.String(), `
+1. create container tp-db-1_tp-db-1 reason: config hash changed
+[1] -> 2. stop container tp-db-1 reason: config hash changed
+[2] -> 3. remove container tp-db-1 reason: config hash changed
+[3] -> 4. rename container tp-db-1 reason: config hash changed
+[4] -> 5. start container tp-db-1 reason: config hash changed
+[5] -> 6. create container tp-web-1_tp-web-1 reason: config hash changed
+[6] -> 7. stop container tp-web-1 reason: config hash changed
+[7] -> 8. remove container tp-web-1 reason: config hash changed
+[8] -> 9. rename container tp-web-1 reason: config hash changed
+[9,5] -> 10. start container tp-web-1 reason: config hash changed
+`)
+	// Verify exactly one stop op for web
+	var stopCount int
+	for _, op := range plan.Operations {
+		if op.Type == OpStopContainer && op.Resource == "tp-web-1" {
+			stopCount++
+		}
+	}
+	assert.Equal(t, stopCount, 1)
+}
+
+// ---------------------------------------------------------------------------
+// Multiple plugin services
+// ---------------------------------------------------------------------------
+
+// TestReconcileMultiplePluginServices verifies that multiple plugin services
+// each produce their own independent OpRunPlugin op.
+func TestReconcileMultiplePluginServices(t *testing.T) {
+	project := &types.Project{
+		Name: "tp",
+		Services: types.Services{
+			"p1": types.ServiceConfig{
+				Name:     "p1",
+				Provider: &types.ServiceProviderConfig{Type: "aws"},
+			},
+			"p2": types.ServiceConfig{
+				Name:     "p2",
+				Provider: &types.ServiceProviderConfig{Type: "gcp"},
+			},
+		},
+	}
+	observed := &ObservedState{
+		ProjectName: "tp",
+		Containers:  map[string]Containers{},
+		Networks:    map[string]ObservedNetwork{},
+		Volumes:     map[string]ObservedVolume{},
+		Orphans:     Containers{},
+	}
+
+	plan, err := Reconcile(project, observed, ReconcileOptions{
+		Recreate:             api.RecreateDiverged,
+		RecreateDependencies: api.RecreateDiverged,
+	})
+	assert.NilError(t, err)
+	assert.Equal(t, plan.String(), `
+1. plugin plugin p1 reason: plugin service
+2. plugin plugin p2 reason: plugin service
+`)
+}
+
+// ---------------------------------------------------------------------------
+// Orphan edge case
+// ---------------------------------------------------------------------------
+
+// TestReconcileOrphanAlreadyStopped verifies that orphan containers in exited
+// state still get stop+remove ops (stop is a no-op at execution time).
+func TestReconcileOrphanAlreadyStopped(t *testing.T) {
+	project := &types.Project{
+		Name:     "tp",
+		Services: types.Services{},
+	}
+	orphan := makeContainer("tp", "old", 1, "hash")
+	orphan.State = container.StateExited
+
+	observed := &ObservedState{
+		ProjectName: "tp",
+		Containers:  map[string]Containers{},
+		Networks:    map[string]ObservedNetwork{},
+		Volumes:     map[string]ObservedVolume{},
+		Orphans:     Containers{orphan},
+	}
+
+	plan, err := Reconcile(project, observed, ReconcileOptions{
+		Recreate:             api.RecreateDiverged,
+		RecreateDependencies: api.RecreateDiverged,
+		RemoveOrphans:        true,
+	})
+	assert.NilError(t, err)
+	assert.Equal(t, plan.String(), `
+1. stop container tp-old-1 reason: orphan container
+[1] -> 2. remove container tp-old-1 reason: orphan container
+`)
+}
+
+// ---------------------------------------------------------------------------
+// External volume resolution
+// ---------------------------------------------------------------------------
+
+// TestReconcileExternalVolumeResolvedFromContainer verifies that external
+// volume names are resolved from running containers' mounts, preventing a
+// false "volume configuration changed" recreate.
+func TestReconcileExternalVolumeResolvedFromContainer(t *testing.T) {
+	svc := types.ServiceConfig{
+		Name:  "web",
+		Image: "nginx",
+		Volumes: []types.ServiceVolumeConfig{
+			{Type: "volume", Source: "ext", Target: "/data"},
+		},
+	}
+	hash, err := ServiceHash(svc)
+	assert.NilError(t, err)
+
+	project := &types.Project{
+		Name: "tp",
+		Services: types.Services{
+			"web": svc,
+		},
+		Volumes: types.Volumes{
+			"ext": types.VolumeConfig{Name: "shared_vol", External: true},
+		},
+	}
+	observed := &ObservedState{
+		ProjectName: "tp",
+		Containers: map[string]Containers{
+			"web": {
+				{
+					ID:    "c1",
+					Names: []string{"/tp-web-1"},
+					State: container.StateRunning,
+					Labels: map[string]string{
+						api.ServiceLabel:         "web",
+						api.ContainerNumberLabel: "1",
+						api.ConfigHashLabel:      hash,
+						api.ProjectLabel:         "tp",
+					},
+					Mounts: []container.MountPoint{
+						{Type: mmount.TypeVolume, Name: "shared_vol"},
+					},
+				},
+			},
+		},
+		Networks: map[string]ObservedNetwork{},
+		Volumes:  map[string]ObservedVolume{}, // external vol not in observed
+		Orphans:  Containers{},
+	}
+
+	plan, err := Reconcile(project, observed, ReconcileOptions{
+		Recreate:             api.RecreateDiverged,
+		RecreateDependencies: api.RecreateDiverged,
+	})
+	assert.NilError(t, err)
+	// No recreate — external volume name resolved from container mount
+	assert.Assert(t, plan.IsEmpty(), "expected empty plan but got:\n%s", plan.String())
+}
+
+// ---------------------------------------------------------------------------
+// Mixed operations ordering
+// ---------------------------------------------------------------------------
+
+// TestReconcileServiceDependsOnMissingNetworkVolumeAndService verifies that
+// when a service needs a network, volume, and dependency service all created
+// from scratch, the container create depends on all three.
+func TestReconcileServiceDependsOnMissingNetworkVolumeAndService(t *testing.T) {
+	project := &types.Project{
+		Name: "tp",
+		Services: types.Services{
+			"db": types.ServiceConfig{Name: "db", Image: "postgres"},
+			"web": types.ServiceConfig{
+				Name:  "web",
+				Image: "nginx",
+				Networks: map[string]*types.ServiceNetworkConfig{
+					"mynet": nil,
+				},
+				Volumes: []types.ServiceVolumeConfig{
+					{Type: "volume", Source: "data", Target: "/data"},
+				},
+				DependsOn: types.DependsOnConfig{
+					"db": types.ServiceDependency{Condition: "service_started"},
+				},
+			},
+		},
+		Networks: types.Networks{
+			"mynet": types.NetworkConfig{Name: "tp_mynet"},
+		},
+		Volumes: types.Volumes{
+			"data": types.VolumeConfig{Name: "tp_data"},
+		},
+	}
+	observed := &ObservedState{
+		ProjectName: "tp",
+		Containers:  map[string]Containers{},
+		Networks:    map[string]ObservedNetwork{},
+		Volumes:     map[string]ObservedVolume{},
+		Orphans:     Containers{},
+	}
+
+	plan, err := Reconcile(project, observed, ReconcileOptions{
+		Recreate:             api.RecreateDiverged,
+		RecreateDependencies: api.RecreateDiverged,
+	})
+	assert.NilError(t, err)
+	assert.Equal(t, plan.String(), `
+1. create container tp-db-1 reason: scale up
+2. create network tp_mynet reason: network does not exist
+3. create volume tp_data reason: volume does not exist
+[1,2,3] -> 4. create container tp-web-1 reason: scale up
+`)
+}
+
+// ---------------------------------------------------------------------------
+// Inherit flag propagation
+// ---------------------------------------------------------------------------
+
+// TestReconcileInheritFlagPropagated verifies that the Inherit option is
+// carried through to ContainerOperation on both recreate and scale-up creates.
+func TestReconcileInheritFlagPropagated(t *testing.T) {
+	svc := types.ServiceConfig{Name: "web", Image: "nginx", Scale: intPtr(2)}
+
+	project := &types.Project{
+		Name: "tp",
+		Services: types.Services{
+			"web": svc,
+		},
+	}
+	observed := &ObservedState{
+		ProjectName: "tp",
+		Containers: map[string]Containers{
+			"web": {makeContainer("tp", "web", 1, "stale")},
+		},
+		Networks: map[string]ObservedNetwork{},
+		Volumes:  map[string]ObservedVolume{},
+		Orphans:  Containers{},
+	}
+
+	plan, err := Reconcile(project, observed, ReconcileOptions{
+		Recreate:             api.RecreateDiverged,
+		RecreateDependencies: api.RecreateDiverged,
+		Inherit:              true,
+	})
+	assert.NilError(t, err)
+	assert.Equal(t, plan.String(), `
+1. create container tp-web-1_tp-web-1 reason: config hash changed
+2. create container tp-web-2 reason: scale up
+[1] -> 3. stop container tp-web-1 reason: config hash changed
+[3] -> 4. remove container tp-web-1 reason: config hash changed
+[4] -> 5. rename container tp-web-1 reason: config hash changed
+[5] -> 6. start container tp-web-1 reason: config hash changed
+`)
+	// Verify Inherit is set on both create ops
+	for _, op := range plan.Operations {
+		if op.Type == OpCreateContainer && op.ContainerOp != nil {
+			assert.Assert(t, op.ContainerOp.Inherit,
+				"expected Inherit=true on create op %s", op.Resource)
+		}
+	}
+}
+
 // ---------------------------------------------------------------------------
 // Test helpers
 // ---------------------------------------------------------------------------