diff --git a/server/container_restore.go b/server/container_restore.go
index 2dc2594d9a8..c7dc6eff89e 100644
--- a/server/container_restore.go
+++ b/server/container_restore.go
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"fmt"
 	"os"
+	"strings"
 
 	metadata "github.com/checkpoint-restore/checkpointctl/lib"
 	"github.com/containers/storage/pkg/archive"
@@ -62,6 +63,10 @@ func (s *Server) CRImportCheckpoint(
 		return "", errors.New(`attribute "image" missing from container definition`)
 	}
 
+	if createConfig.Metadata == nil || createConfig.Metadata.Name == "" {
+		return "", errors.New(`attribute "metadata" missing from container definition`)
+	}
+
 	inputImage := createConfig.Image.Image
 	createMounts := createConfig.Mounts
 	createAnnotations := createConfig.Annotations
@@ -151,69 +156,25 @@ func (s *Server) CRImportCheckpoint(
 		ctrID = ""
 	}
 
-	ctrMetadata := types.ContainerMetadata{}
 	originalAnnotations := make(map[string]string)
-	originalLabels := make(map[string]string)
-
-	if dumpSpec.Annotations[annotations.ContainerManager] == "libpod" {
-		// This is an import from Podman
-		ctrMetadata.Name = config.Name
-		ctrMetadata.Attempt = 0
-	} else {
-		if err := json.Unmarshal([]byte(dumpSpec.Annotations[annotations.Metadata]), &ctrMetadata); err != nil {
-			return "", fmt.Errorf("failed to read %q: %w", annotations.Metadata, err)
-		}
-		if createConfig.Metadata != nil && createConfig.Metadata.Name != "" {
-			ctrMetadata.Name = createConfig.Metadata.Name
-		}
-		if err := json.Unmarshal([]byte(dumpSpec.Annotations[annotations.Annotations]), &originalAnnotations); err != nil {
-			return "", fmt.Errorf("failed to read %q: %w", annotations.Annotations, err)
-		}
-		if err := json.Unmarshal([]byte(dumpSpec.Annotations[annotations.Labels]), &originalLabels); err != nil {
-			return "", fmt.Errorf("failed to read %q: %w", annotations.Labels, err)
-		}
-		if sandboxUID != "" {
-			if _, ok := originalLabels[kubetypes.KubernetesPodUIDLabel]; ok {
-				originalLabels[kubetypes.KubernetesPodUIDLabel] = sandboxUID
-			}
-			if _, ok := originalAnnotations[kubetypes.KubernetesPodUIDLabel]; ok {
-				originalAnnotations[kubetypes.KubernetesPodUIDLabel] = sandboxUID
-			}
-		}
-
-		if createLabels != nil {
-			fixupLabels := []string{
-				// Update the container name. It has already been update in metadata.Name.
-				// It also needs to be updated in the container labels.
-				kubetypes.KubernetesContainerNameLabel,
-				// Update pod name in the labels.
-				kubetypes.KubernetesPodNameLabel,
-				// Also update namespace.
-				kubetypes.KubernetesPodNamespaceLabel,
-			}
-
-			for _, annotation := range fixupLabels {
-				_, ok1 := createLabels[annotation]
-				_, ok2 := originalLabels[annotation]
+	if err := json.Unmarshal([]byte(dumpSpec.Annotations[annotations.Annotations]), &originalAnnotations); err != nil {
+		return "", fmt.Errorf("failed to read %q: %w", annotations.Annotations, err)
+	}
 
-				// If the value is not set in the original container or
-				// if it is not set in the new container, just skip
-				// the step of updating metadata.
-				if ok1 && ok2 {
-					originalLabels[annotation] = createLabels[annotation]
-				}
-			}
+	if sandboxUID != "" {
+		if _, ok := originalAnnotations[kubetypes.KubernetesPodUIDLabel]; ok {
+			originalAnnotations[kubetypes.KubernetesPodUIDLabel] = sandboxUID
 		}
+	}
 
-		if createAnnotations != nil {
-			// The hash also needs to be update or Kubernetes thinks the container needs to be restarted
-			_, ok1 := createAnnotations["io.kubernetes.container.hash"]
-			_, ok2 := originalAnnotations["io.kubernetes.container.hash"]
+	if createAnnotations != nil {
+		// The hash also needs to be updated or Kubernetes thinks the container needs to be restarted
+		_, ok1 := createAnnotations["io.kubernetes.container.hash"]
+		_, ok2 := originalAnnotations["io.kubernetes.container.hash"]
 
-			if ok1 && ok2 {
-				originalAnnotations["io.kubernetes.container.hash"] = createAnnotations["io.kubernetes.container.hash"]
-			}
+		if ok1 && ok2 {
+			originalAnnotations["io.kubernetes.container.hash"] = createAnnotations["io.kubernetes.container.hash"]
 		}
 	}
@@ -261,8 +222,8 @@ func (s *Server) CRImportCheckpoint(
 	}
 	containerConfig := &types.ContainerConfig{
 		Metadata: &types.ContainerMetadata{
-			Name:    ctrMetadata.Name,
-			Attempt: ctrMetadata.Attempt,
+			Name:    createConfig.Metadata.Name,
+			Attempt: createConfig.Metadata.Attempt,
 		},
 		Image: &types.ImageSpec{
 			Image: rootFSImage,
@@ -272,7 +233,9 @@ func (s *Server) CRImportCheckpoint(
 			SecurityContext: &types.LinuxContainerSecurityContext{},
 		},
 		Annotations: originalAnnotations,
-		Labels:      originalLabels,
+		// The labels are not changed or adapted. They are just taken from the CRI
+		// request without any modification (in contrast to the annotations).
+		Labels: createLabels,
 	}
 
 	if createConfig.Linux != nil {
@@ -309,6 +272,13 @@ func (s *Server) CRImportCheckpoint(
 		"/run/.containerenv": true,
 	}
 
+	// It is necessary to ensure that all bind mounts from the checkpoint archive are
+	// defined in the container create request coming in via the CRI. Without this
+	// check it would be possible to create a checkpoint archive that bind mounts an
+	// arbitrary file or directory from the host without the user knowing, as it would
+	// not have to be specified in the container definition.
+	missingMount := []string{}
+
 	for _, m := range dumpSpec.Mounts {
 		// Following mounts are ignored as they might point to the
 		// wrong location and if ignored the mounts will correctly
@@ -318,40 +288,40 @@ func (s *Server) CRImportCheckpoint(
 		}
 		mount := &types.Mount{
 			ContainerPath: m.Destination,
-			HostPath:      m.Source,
 		}
+		bindMountFound := false
 		for _, createMount := range createMounts {
-			if createMount.ContainerPath == m.Destination {
-				mount.HostPath = createMount.HostPath
+			if createMount.ContainerPath != m.Destination {
+				continue
 			}
-		}
-		for _, opt := range m.Options {
-			switch opt {
-			case "ro":
-				mount.Readonly = true
-			case "rro":
-				mount.RecursiveReadOnly = true
-			case "rprivate":
-				mount.Propagation = types.MountPropagation_PROPAGATION_PRIVATE
-			case "rshared":
-				mount.Propagation = types.MountPropagation_PROPAGATION_BIDIRECTIONAL
-			case "rslaved":
-				mount.Propagation = types.MountPropagation_PROPAGATION_HOST_TO_CONTAINER
-			}
+			bindMountFound = true
+			mount.HostPath = createMount.HostPath
+			mount.Readonly = createMount.Readonly
+			mount.RecursiveReadOnly = createMount.RecursiveReadOnly
+			mount.Propagation = createMount.Propagation
+			break
 		}
-
-		// Recursive Read-only (RRO) support requires the mount to be
-		// read-only and the mount propagation set to private.
-		if mount.RecursiveReadOnly {
-			mount.Readonly = true
-			mount.Propagation = types.MountPropagation_PROPAGATION_PRIVATE
+		if !bindMountFound {
+			missingMount = append(missingMount, m.Destination)
+			// If a mount is missing we can skip any further processing as the restore
+			// has to be aborted anyway. Not using break here so that all missing mount
+			// points end up in a single error message.
+			continue
 		}
 		log.Debugf(ctx, "Adding mounts %#v", mount)
 		containerConfig.Mounts = append(containerConfig.Mounts, mount)
 	}
 
+	if len(missingMount) > 0 {
+		return "", fmt.Errorf(
+			"restoring %q expects the following bind mounts to be defined (%s)",
+			inputImage,
+			strings.Join(missingMount, ","),
+		)
+	}
+
 	sandboxConfig := &types.PodSandboxConfig{
 		Metadata: &types.PodSandboxMetadata{
 			Name: sb.Metadata().Name,
diff --git a/server/container_restore_test.go b/server/container_restore_test.go
index 02c8f32d83e..90ee821ff7a 100644
--- a/server/container_restore_test.go
+++ b/server/container_restore_test.go
@@ -66,6 +66,7 @@ var _ = t.Describe("ContainerRestore", func() {
 			)
 
 			containerConfig := &types.ContainerConfig{
+				Metadata: &types.ContainerMetadata{Name: "name"},
 				Image: &types.ImageSpec{
 					Image: "does-not-exist.tar",
 				},
@@ -91,6 +92,7 @@ var _ = t.Describe("ContainerRestore", func() {
 			archive.Close()
 			defer os.RemoveAll("empty.tar")
 			containerConfig := &types.ContainerConfig{
+				Metadata: &types.ContainerMetadata{Name: "name"},
 				Image: &types.ImageSpec{
 					Image: "empty.tar",
 				},
@@ -113,6 +115,7 @@ var _ = t.Describe("ContainerRestore", func() {
 			Expect(err).ToNot(HaveOccurred())
 			defer os.RemoveAll("no.tar")
 			containerConfig := &types.ContainerConfig{
+				Metadata: &types.ContainerMetadata{Name: "name"},
 				Image: &types.ImageSpec{
 					Image: "no.tar",
 				},
@@ -147,6 +150,7 @@ var _ = t.Describe("ContainerRestore", func() {
 			_, err = io.Copy(outFile, input)
 			Expect(err).ToNot(HaveOccurred())
 			containerConfig := &types.ContainerConfig{
+				Metadata: &types.ContainerMetadata{Name: "name"},
 				Image: &types.ImageSpec{
 					Image: "archive.tar",
 				},
@@ -184,6 +188,7 @@ var _ = t.Describe("ContainerRestore", func() {
 			_, err = io.Copy(outFile, input)
 			Expect(err).ToNot(HaveOccurred())
 			containerConfig := &types.ContainerConfig{
+				Metadata: &types.ContainerMetadata{Name: "name"},
 				Image: &types.ImageSpec{
 					Image: "archive.tar",
 				},
@@ -197,7 +202,7 @@ var _ = t.Describe("ContainerRestore", func() {
 			)
 
 			// Then
-			Expect(err.Error()).To(ContainSubstring(`failed to read "io.kubernetes.cri-o.Metadata": unexpected end of JSON input`))
+			Expect(err.Error()).To(ContainSubstring(`failed to read "io.kubernetes.cri-o.Annotations": unexpected end of JSON input`))
 		})
 	})
 	t.Describe("ContainerRestore from archive into new pod", func() {
@@ -222,6 +227,7 @@ var _ = t.Describe("ContainerRestore", func() {
 			_, err = io.Copy(outFile, input)
 			Expect(err).ToNot(HaveOccurred())
 			containerConfig := &types.ContainerConfig{
+				Metadata: &types.ContainerMetadata{Name: "name"},
 				Image: &types.ImageSpec{
 					Image: "archive.tar",
 				},
@@ -267,6 +273,7 @@ var _ = t.Describe("ContainerRestore", func() {
 			_, err = io.Copy(outFile, input)
 			Expect(err).ToNot(HaveOccurred())
 			containerConfig := &types.ContainerConfig{
+				Metadata: &types.ContainerMetadata{Name: "name"},
 				Image: &types.ImageSpec{
 					Image: "archive.tar",
 				},
@@ -315,6 +322,7 @@ var _ = t.Describe("ContainerRestore", func() {
 			_, err = io.Copy(outFile, input)
 			Expect(err).ToNot(HaveOccurred())
 			containerConfig := &types.ContainerConfig{
+				Metadata: &types.ContainerMetadata{Name: "name"},
 				Image: &types.ImageSpec{
 					Image: "archive.tar",
 				},
@@ -332,57 +340,6 @@ var _ = t.Describe("ContainerRestore", func() {
 			Expect(err.Error()).To(Equal(`failed to read "io.kubernetes.cri-o.Annotations": unexpected end of JSON input`))
 		})
 	})
-	t.Describe("ContainerRestore from archive into new pod", func() {
-		It("should fail because archive contains no io.kubernetes.cri-o.Labels", func() {
-			// Given
-			addContainerAndSandbox()
-			testContainer.SetStateAndSpoofPid(&oci.ContainerState{
-				State: specs.State{Status: oci.ContainerStateRunning},
-			})
-
-			err := os.WriteFile(
-				"spec.dump",
-				[]byte(
-					`{"annotations":{"io.kubernetes.cri-o.Metadata"`+
-						`:"{\"name\":\"container-to-restore\"}",`+
-						`"io.kubernetes.cri-o.Annotations": "{\"name\":\"NAME\"}"}}`),
-				0o644,
-			)
-			Expect(err).ToNot(HaveOccurred())
-			defer os.RemoveAll("spec.dump")
-			err = os.WriteFile("config.dump", []byte(`{"rootfsImageName": "image"}`), 0o644)
-			Expect(err).ToNot(HaveOccurred())
-			defer os.RemoveAll("config.dump")
-			outFile, err := os.Create("archive.tar")
-			Expect(err).ToNot(HaveOccurred())
-			defer outFile.Close()
-			input, err := archive.TarWithOptions(".", &archive.TarOptions{
-				Compression:      archive.Uncompressed,
-				IncludeSourceDir: true,
-				IncludeFiles:     []string{"spec.dump", "config.dump"},
-			})
-			Expect(err).ToNot(HaveOccurred())
-			defer os.RemoveAll("archive.tar")
-			_, err = io.Copy(outFile, input)
-			Expect(err).ToNot(HaveOccurred())
-			containerConfig := &types.ContainerConfig{
-				Image: &types.ImageSpec{
-					Image: "archive.tar",
-				},
-			}
-			// When
-
-			_, err = sut.CRImportCheckpoint(
-				context.Background(),
-				containerConfig,
-				"",
-				"",
-			)
-
-			// Then
-			Expect(err.Error()).To(Equal(`failed to read "io.kubernetes.cri-o.Labels": unexpected end of JSON input`))
-		})
-	})
 	t.Describe("ContainerRestore from archive into new pod", func() {
 		It("should fail with 'PodSandboxId should not be empty'", func() {
 			// Given
@@ -418,6 +375,7 @@ var _ = t.Describe("ContainerRestore", func() {
 			_, err = io.Copy(outFile, input)
 			Expect(err).ToNot(HaveOccurred())
 			containerConfig := &types.ContainerConfig{
+				Metadata: &types.ContainerMetadata{Name: "name"},
 				Image: &types.ImageSpec{
 					Image: "archive.tar",
 				},
@@ -502,6 +460,10 @@ var _ = t.Describe("ContainerRestore", func() {
 				Metadata: &types.ContainerMetadata{
 					Name: "new-container-name",
 				},
+				Mounts: []*types.Mount{{
+					ContainerPath: "/data",
+					HostPath:      "/data",
+				}},
 			}
 
 			size := uint64(100)
@@ -608,6 +570,7 @@ var _ = t.Describe("ContainerRestore", func() {
 					Return(false, nil),
 			)
 			containerConfig := &types.ContainerConfig{
+				Metadata: &types.ContainerMetadata{Name: "name"},
 				Image: &types.ImageSpec{
 					Image: "localhost/checkpoint-image:tag1",
 				},
diff --git a/test/checkpoint.bats b/test/checkpoint.bats
index 41f8ad35ac9..ea0b89c7617 100644
--- a/test/checkpoint.bats
+++ b/test/checkpoint.bats
@@ -31,9 +31,15 @@ function teardown() {
 	pod_id=$(crictl runp "$TESTDATA"/sandbox_config.json)
 	# Replace original container with checkpoint image
 	RESTORE_JSON=$(mktemp)
+	RESTORE2_JSON=$(mktemp)
 	jq ".image.image=\"$TESTDIR/cp.tar\"" "$TESTDATA"/container_sleep.json > "$RESTORE_JSON"
-	ctr_id=$(crictl create "$pod_id" "$RESTORE_JSON" "$TESTDATA"/sandbox_config.json)
+	# This should fail because the bind mounts are not part of the create request
+	run crictl create "$pod_id" "$RESTORE_JSON" "$TESTDATA"/sandbox_config.json
+	[ "$status" -eq 1 ]
+	jq ". +{mounts:[{\"container_path\":\"/etc/issue\",\"host_path\":\"$BIND_MOUNT_FILE\"},{\"container_path\":\"/data\",\"host_path\":\"$BIND_MOUNT_DIR\"}]}" "$RESTORE_JSON" > "$RESTORE2_JSON"
+	ctr_id=$(crictl create "$pod_id" "$RESTORE2_JSON" "$TESTDATA"/sandbox_config.json)
 	rm -f "$RESTORE_JSON"
+	rm -f "$RESTORE2_JSON"
 	rm -f "$TESTDATA"/checkpoint.json
 	crictl start "$ctr_id"
 	restored=$(crictl inspect --output go-template --template "{{(index .info.restored)}}" "$ctr_id")