From 278712a6e6fa922cea081692e1a4d945a2655624 Mon Sep 17 00:00:00 2001 From: Krisztian Litkey Date: Fri, 27 Jun 2025 23:21:46 +0300 Subject: [PATCH 01/15] dependencies.yaml: bump/sync internal version. Signed-off-by: Krisztian Litkey --- dependencies.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dependencies.yaml b/dependencies.yaml index 950e41e8750..25c478fea10 100644 --- a/dependencies.yaml +++ b/dependencies.yaml @@ -13,7 +13,7 @@ dependencies: # tag the .0 release if it does not already exist. If the .0 release is done, # increase the development version to the next minor (1.x.0). - name: development version - version: 1.33.0 + version: 1.33.2 refPaths: - path: internal/version/version.go match: Version From ea50bd3509ae66d2b7516062fe6614f55e97b6b6 Mon Sep 17 00:00:00 2001 From: Ayato Tokubi Date: Fri, 13 Jun 2025 04:24:32 +0000 Subject: [PATCH 02/15] fix deadlock when the container is in uninterruptible sleep Signed-off-by: Ayato Tokubi --- internal/oci/container.go | 57 +++++++++++++++++++++++++------------ internal/oci/runtime_oci.go | 3 ++ 2 files changed, 42 insertions(+), 18 deletions(-) diff --git a/internal/oci/container.go b/internal/oci/container.go index 6ba460f4543..e90a0096082 100644 --- a/internal/oci/container.go +++ b/internal/oci/container.go @@ -50,25 +50,29 @@ type Container struct { dir string stopSignal string // If set, _some_ name of the image imageID; it may have NO RELATIONSHIP to the users’ requested image name. - someNameOfTheImage *references.RegistryImageReference - imageID *storage.StorageImageID // nil for infra containers. - mountPoint string - seccompProfilePath string - conmonCgroupfsPath string - crioAnnotations fields.Set - state *ContainerState - opLock sync.RWMutex - spec *specs.Spec - idMappings *idtools.IDMappings - terminal bool - stdin bool - stdinOnce bool - created bool - spoofed bool - stopping bool - stopLock sync.Mutex + someNameOfTheImage *references.RegistryImageReference + imageID *storage.StorageImageID // nil for infra containers. + mountPoint string + seccompProfilePath string + conmonCgroupfsPath string + crioAnnotations fields.Set + state *ContainerState + opLock sync.RWMutex + spec *specs.Spec + idMappings *idtools.IDMappings + terminal bool + stdin bool + stdinOnce bool + created bool + spoofed bool + stopping bool + stopLock sync.Mutex + // stopTimeoutChan is used to update the stop timeout. + // After the container goes into the kill loop, the channel must not be used + // because it is not controlled by the timeout anymore. stopTimeoutChan chan int64 stopWatchers []chan struct{} + stopKillLoopBegun bool pidns nsmgr.Namespace restore bool restoreArchivePath string @@ -170,6 +174,7 @@ func NewContainer(id, name, bundlePath, logPath string, labels, crioAnnotations, stopSignal: stopSignal, stopTimeoutChan: make(chan int64, 10), stopWatchers: []chan struct{}{}, + stopKillLoopBegun: false, execPIDs: map[int]bool{}, } @@ -642,6 +647,13 @@ func (c *Container) SetAsStopping() (setToStopping bool) { return false } +// SetStopKillLoopBegun sets the stopKillLoopBegun flag to true. 
+func (c *Container) SetStopKillLoopBegun() { + c.stopLock.Lock() + defer c.stopLock.Unlock() + c.stopKillLoopBegun = true +} + func (c *Container) WaitOnStopTimeout(ctx context.Context, timeout int64) { c.stopLock.Lock() if !c.stopping { @@ -650,7 +662,16 @@ func (c *Container) WaitOnStopTimeout(ctx context.Context, timeout int64) { return } - c.stopTimeoutChan <- timeout + // Don't use the stopTimeoutChan when the container is in kill loop + // because values in the channel are no longer consumed. + if !c.stopKillLoopBegun { + // Use select and default not to block when the stopTimeoutChan is full. + // The channel is very unlikely to be full, but it could happen in theory. + select { + case c.stopTimeoutChan <- timeout: + default: + } + } watcher := make(chan struct{}, 1) c.stopWatchers = append(c.stopWatchers, watcher) diff --git a/internal/oci/runtime_oci.go b/internal/oci/runtime_oci.go index 9b6ab42d4b2..b38d1bfb1b1 100644 --- a/internal/oci/runtime_oci.go +++ b/internal/oci/runtime_oci.go @@ -1002,6 +1002,7 @@ func (r *runtimeOCI) StopLoopForContainer(c *Container, bm kwait.BackoffManager) case <-time.After(time.Until(targetTime)): log.Warnf(ctx, "Stopping container %s with stop signal(%s) timed out. Killing...", c.ID(), c.GetStopSignal()) + c.SetStopKillLoopBegun() goto killContainer @@ -1025,10 +1026,12 @@ killContainer: } if err := c.Living(); err != nil { + log.Debugf(ctx, "Container is no longer alive") stop() return } + log.Debugf(ctx, "Killing failed for some reasons, retrying...") // Reschedule the timer so that the periodic reminder can continue. blockedTimer.Reset(stopProcessBlockedInterval) }, bm, true, ctx.Done()) From 8daef88e4b04767f7334353f72162106afd96aa8 Mon Sep 17 00:00:00 2001 From: Sohan Kunkerkar Date: Mon, 30 Jun 2025 09:29:34 -0400 Subject: [PATCH 03/15] server: handle missing network namespace gracefully during networkStop After host reboot, network namespaces are destroyed but CRI-O attempts to clean them up during pod sandbox destruction, causing CNI plugin failures and preventing pods from restarting properly. The fix ensures pods can restart normally after host reboots. Signed-off-by: Sohan Kunkerkar --- server/sandbox_network.go | 39 ++++++++++++++++++++------- server/sandbox_network_freebsd.go | 24 +++++++++++++++++ server/sandbox_network_linux.go | 36 +++++++++++++++++++++++++ server/sandbox_network_unsupported.go | 23 ++++++++++++++++ test/network.bats | 2 +- 5 files changed, 113 insertions(+), 11 deletions(-) create mode 100644 server/sandbox_network_freebsd.go create mode 100644 server/sandbox_network_linux.go create mode 100644 server/sandbox_network_unsupported.go diff --git a/server/sandbox_network.go b/server/sandbox_network.go index dd4e431f343..4c86f501c94 100644 --- a/server/sandbox_network.go +++ b/server/sandbox_network.go @@ -181,23 +181,42 @@ func (s *Server) networkStop(ctx context.Context, sb *sandbox.Sandbox) error { podNetwork, err := s.newPodNetwork(ctx, sb) if err != nil { - return err + return fmt.Errorf("failed to create pod network for sandbox %s(%s): %w", sb.Name(), sb.ID(), err) } - if err := s.config.CNIPlugin().TearDownPodWithContext(stopCtx, podNetwork); err != nil { - retErr := fmt.Errorf("failed to destroy network for pod sandbox %s(%s): %w", sb.Name(), sb.ID(), err) - + // Check if the network namespace file exists and is valid before attempting CNI teardown. + // If the file doesn't exist or is invalid, skip CNI teardown and mark network as stopped. 
+ if podNetwork.NetNS != "" { if _, statErr := os.Stat(podNetwork.NetNS); statErr != nil { - return fmt.Errorf("%w: stat netns path %q: %w", retErr, podNetwork.NetNS, statErr) + // Network namespace file doesn't exist, mark network as stopped and return success + log.Debugf(ctx, "Network namespace file %s does not exist for pod sandbox %s(%s), skipping CNI teardown", + podNetwork.NetNS, sb.Name(), sb.ID()) + + return sb.SetNetworkStopped(ctx, true) + } + + if validateErr := s.validateNetworkNamespace(podNetwork.NetNS); validateErr != nil { + // Network namespace file exists but is invalid (e.g., corrupted or fake file) + log.Warnf(ctx, "Network namespace file %s is invalid for pod sandbox %s(%s): %v, removing and skipping CNI teardown", + podNetwork.NetNS, sb.Name(), sb.ID(), validateErr) + s.cleanupNetns(ctx, podNetwork.NetNS, sb) + + return sb.SetNetworkStopped(ctx, true) } + } - // The netns file may still exists, which means that it's likely - // corrupted. Remove it to allow cleanup of the network namespace: - if rmErr := os.RemoveAll(podNetwork.NetNS); rmErr != nil { - return fmt.Errorf("%w: failed to remove netns path: %w", retErr, rmErr) + if err := s.config.CNIPlugin().TearDownPodWithContext(stopCtx, podNetwork); err != nil { + log.Warnf(ctx, "Failed to destroy network for pod sandbox %s(%s): %v", sb.Name(), sb.ID(), err) + + // If the network namespace exists but CNI teardown failed, try to clean it up. + if podNetwork.NetNS != "" { + if _, statErr := os.Stat(podNetwork.NetNS); statErr == nil { + // Clean up the netns file since CNI teardown failed. + s.cleanupNetns(ctx, podNetwork.NetNS, sb) + } } - log.Warnf(ctx, "Removed invalid netns path %s from pod sandbox %s(%s)", podNetwork.NetNS, sb.Name(), sb.ID()) + return fmt.Errorf("network teardown failed for pod sandbox %s(%s): %w", sb.Name(), sb.ID(), err) } return sb.SetNetworkStopped(ctx, true) diff --git a/server/sandbox_network_freebsd.go b/server/sandbox_network_freebsd.go new file mode 100644 index 00000000000..511ee579dcb --- /dev/null +++ b/server/sandbox_network_freebsd.go @@ -0,0 +1,24 @@ +//go:build freebsd +// +build freebsd + +package server + +import ( + "context" + + "github.com/cri-o/cri-o/internal/lib/sandbox" + "github.com/cri-o/cri-o/internal/log" +) + +// validateNetworkNamespace checks if the given path is a valid network namespace +// On FreeBSD, this is a no-op since network namespaces are Linux-specific. +func (s *Server) validateNetworkNamespace(netnsPath string) error { + // Network namespaces are Linux-specific, so on FreeBSD we assume it's valid + return nil +} + +// cleanupNetns removes a network namespace file and logs the action +// On FreeBSD, this is a no-op since network namespaces are Linux-specific. +func (s *Server) cleanupNetns(ctx context.Context, netnsPath string, sb *sandbox.Sandbox) { + log.Debugf(ctx, "Network namespace cleanup not supported on this platform") +} diff --git a/server/sandbox_network_linux.go b/server/sandbox_network_linux.go new file mode 100644 index 00000000000..1d4ae931cd0 --- /dev/null +++ b/server/sandbox_network_linux.go @@ -0,0 +1,36 @@ +//go:build linux +// +build linux + +package server + +import ( + "context" + "fmt" + "os" + + "github.com/containernetworking/plugins/pkg/ns" + + "github.com/cri-o/cri-o/internal/lib/sandbox" + "github.com/cri-o/cri-o/internal/log" +) + +// validateNetworkNamespace checks if the given path is a valid network namespace. 
+func (s *Server) validateNetworkNamespace(netnsPath string) error { + netns, err := ns.GetNS(netnsPath) + if err != nil { + return fmt.Errorf("invalid network namespace: %w", err) + } + + defer netns.Close() + + return nil +} + +// cleanupNetns removes a network namespace file and logs the action. +func (s *Server) cleanupNetns(ctx context.Context, netnsPath string, sb *sandbox.Sandbox) { + if rmErr := os.RemoveAll(netnsPath); rmErr != nil { + log.Warnf(ctx, "Failed to remove netns path %s: %v", netnsPath, rmErr) + } else { + log.Infof(ctx, "Removed netns path %s from pod sandbox %s(%s)", netnsPath, sb.Name(), sb.ID()) + } +} diff --git a/server/sandbox_network_unsupported.go b/server/sandbox_network_unsupported.go new file mode 100644 index 00000000000..d9067ae46b6 --- /dev/null +++ b/server/sandbox_network_unsupported.go @@ -0,0 +1,23 @@ +//go:build !linux && !freebsd +// +build !linux,!freebsd + +package server + +import ( + "context" + + "github.com/cri-o/cri-o/internal/lib/sandbox" + "github.com/cri-o/cri-o/internal/log" +) + +// validateNetworkNamespace checks if the given path is a valid network namespace +// On unsupported platforms, this is a no-op since network namespaces are Linux-specific. +func (s *Server) validateNetworkNamespace(netnsPath string) error { + return nil +} + +// cleanupNetns removes a network namespace file and logs the action +// On unsupported platforms, this is a no-op since network namespaces are Linux-specific. +func (s *Server) cleanupNetns(ctx context.Context, netnsPath string, sb *sandbox.Sandbox) { + log.Debugf(ctx, "Network namespace cleanup not supported on this platform") +} diff --git a/test/network.bats b/test/network.bats index 2c89725c66a..72bcdb4ea60 100644 --- a/test/network.bats +++ b/test/network.bats @@ -185,5 +185,5 @@ function check_networking() { # be able to remove the sandbox crictl rmp -f "$POD" - grep -q "Removed invalid netns path $NETNS_PATH$NS from pod sandbox" "$CRIO_LOG" + grep -q "Removed netns path $NETNS_PATH$NS from pod sandbox" "$CRIO_LOG" } From ff0ca8fa059b316abbb3b276a4c5af678d63d14f Mon Sep 17 00:00:00 2001 From: Sohan Kunkerkar Date: Tue, 1 Jul 2025 13:24:38 -0400 Subject: [PATCH 04/15] test: add coverage for network recovery after reboot with destroyed netns Signed-off-by: Sohan Kunkerkar --- test/network.bats | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/test/network.bats b/test/network.bats index 72bcdb4ea60..09a63e1f818 100644 --- a/test/network.bats +++ b/test/network.bats @@ -187,3 +187,40 @@ function check_networking() { crictl rmp -f "$POD" grep -q "Removed netns path $NETNS_PATH$NS from pod sandbox" "$CRIO_LOG" } + +@test "Network recovery after reboot with destroyed netns" { + # This test simulates a reboot scenario where network namespaces are destroyed + # but CRI-O needs to clean up pod network resources gracefully. + + start_crio + + pod_id=$(crictl runp "$TESTDATA"/sandbox_config.json) + + # Get the network namespace path + NETNS_PATH=/var/run/netns/ + NS=$(crictl inspectp "$pod_id" | + jq -er '.info.runtimeSpec.linux.namespaces[] | select(.type == "network").path | sub("'$NETNS_PATH'"; "")') + + # Remove the network namespace. + ip netns del "$NS" + + # Create a fake netns file. + touch "$NETNS_PATH$NS" + + restart_crio + + # Try to remove the pod. + crictl rmp -f "$pod_id" 2> /dev/null || true + + grep -q "Successfully cleaned up network for pod" "$CRIO_LOG" + + new_pod_id=$(crictl runp "$TESTDATA"/sandbox_config.json) + + # Verify the new pod is running. 
+ output=$(crictl inspectp "$new_pod_id" | jq -r '.status.state') + [[ "$output" == "SANDBOX_READY" ]] + + # Clean up the new pod + crictl stopp "$new_pod_id" + crictl rmp "$new_pod_id" +} From 0ba126224ba4f50742488e6b9ec9edd9ddbd4252 Mon Sep 17 00:00:00 2001 From: Sohan Kunkerkar Date: Tue, 1 Jul 2025 16:26:28 -0400 Subject: [PATCH 05/15] test: use quay.io instead of registry.fedoraproject.org Signed-off-by: Sohan Kunkerkar --- internal/storage/image_test.go | 3 --- test/image.bats | 4 +++- test/registries.conf | 2 +- test/testdata/Dockerfile | 2 +- 4 files changed, 5 insertions(+), 6 deletions(-) diff --git a/internal/storage/image_test.go b/internal/storage/image_test.go index 65edd52a4fd..b71d141b6ed 100644 --- a/internal/storage/image_test.go +++ b/internal/storage/image_test.go @@ -29,7 +29,6 @@ var _ = t.Describe("Image", func() { testDockerRegistry = "docker.io" testQuayRegistry = "quay.io" testRedHatRegistry = "registry.access.redhat.com" - testFedoraRegistry = "registry.fedoraproject.org" testImageName = "image" testImageAlias = "image-for-testing" testImageAliasResolved = "registry.crio.test.com/repo" @@ -193,7 +192,6 @@ var _ = t.Describe("Image", func() { Expect(refsToNames(refs)).To(Equal([]string{ testQuayRegistry + "/" + testImageName + ":latest", testRedHatRegistry + "/" + testImageName + ":latest", - testFedoraRegistry + "/" + testImageName + ":latest", testDockerRegistry + "/library/" + testImageName + ":latest", })) }) @@ -247,7 +245,6 @@ var _ = t.Describe("Image", func() { Expect(refsToNames(refs)).To(Equal([]string{ testQuayRegistry + "/" + testImageName + "@sha256:" + testSHA256, testRedHatRegistry + "/" + testImageName + "@sha256:" + testSHA256, - testFedoraRegistry + "/" + testImageName + "@sha256:" + testSHA256, testDockerRegistry + "/library/" + testImageName + "@sha256:" + testSHA256, })) }) diff --git a/test/image.bats b/test/image.bats index 10ead5e7df5..0c5c5b3e161 100644 --- a/test/image.bats +++ b/test/image.bats @@ -104,7 +104,9 @@ function teardown() { mkdir -p "$TESTDIR/imagestore" CONTAINER_IMAGESTORE="$TESTDIR/imagestore" start_crio - FEDORA="registry.fedoraproject.org/fedora" + # registry.fedoraproject.org is pretty flaky + # Moving to the stable quay.io + FEDORA="quay.io/fedora/fedora" crictl pull $FEDORA imageid=$(crictl images --quiet "$FEDORA") [ "$imageid" != "" ] diff --git a/test/registries.conf b/test/registries.conf index 0fb3be9f31e..c0fc9f53b5e 100644 --- a/test/registries.conf +++ b/test/registries.conf @@ -1,4 +1,4 @@ -unqualified-search-registries = ['quay.io' ,'registry.access.redhat.com', 'registry.fedoraproject.org', 'docker.io'] +unqualified-search-registries = ['quay.io', 'registry.access.redhat.com', 'docker.io'] [aliases] "image-for-testing" = "registry.crio.test.com/repo" diff --git a/test/testdata/Dockerfile b/test/testdata/Dockerfile index 59c6ce56c69..8ee16e75d92 100644 --- a/test/testdata/Dockerfile +++ b/test/testdata/Dockerfile @@ -1,4 +1,4 @@ -FROM registry.fedoraproject.org/fedora-minimal:38 +FROM quay.io/fedora/fedora-minimal:38 RUN microdnf install -y coreutils \ gcc \ gzip \ From e3326cb8014b15aa13e99ca8aead22032aafc0b1 Mon Sep 17 00:00:00 2001 From: Sohan Kunkerkar Date: Wed, 9 Jul 2025 14:26:19 -0400 Subject: [PATCH 06/15] contrib/test/ci: skip network recovery after reboot test for kata Kata VMs use real infra containers that persist in storage, unlike normal containers that use spoofed infra containers. 
This fundamental architectural difference means the 'Network recovery
after reboot with destroyed netns' test scenario doesn't apply to Kata
VMs in the same way.

Signed-off-by: Sohan Kunkerkar
---
 contrib/test/ci/vars.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/contrib/test/ci/vars.yml b/contrib/test/ci/vars.yml
index 242e09e5286..17fcb89f9db 100644
--- a/contrib/test/ci/vars.yml
+++ b/contrib/test/ci/vars.yml
@@ -154,6 +154,7 @@ kata_skip_network_tests:
   - 'test "Connect to pod hostport from the host"'
   - 'test "Clean up network if pod sandbox fails"'
   - 'test "Clean up network if pod sandbox gets killed"'
+  - 'test "Network recovery after reboot with destroyed netns"'
 kata_skip_pod_tests:
   - 'test "pass pod sysctls to runtime"'
   - 'test "pass pod sysctls to runtime when in userns"'

From f72aebdf7e7a9ac3023ddb6f2d6140715c041263 Mon Sep 17 00:00:00 2001
From: Krisztian Litkey
Date: Fri, 27 Jun 2025 20:01:24 +0300
Subject: [PATCH 07/15] test: add env. vars with default values for CDI tests.

Use a few environment variables with default values to verify that
environment variables from CDI injection take precedence over ones in
the Pod Spec.

Signed-off-by: Krisztian Litkey
---
 test/cdi.bats | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/cdi.bats b/test/cdi.bats
index 768e23af748..23715803d6c 100644
--- a/test/cdi.bats
+++ b/test/cdi.bats
@@ -111,7 +111,7 @@ function annotate_ctr_with_unknown_cdidev {
 }
 
 function prepare_ctr_with_cdidev {
-    jq ".CDI_Devices |= . + [ { \"Name\": \"vendor0.com/device=loop8\" }, { \"Name\": \"vendor0.com/device=loop9\" } ]" \
+    jq ".CDI_Devices |= . + [ { \"Name\": \"vendor0.com/device=loop8\" }, { \"Name\": \"vendor0.com/device=loop9\" } ] | .envs |= . + [ { \"key\": \"VENDOR0\", \"value\": \"unset\" }, { \"key\": \"LOOP8\", \"value\": \"unset\" } ]" \
         "$TESTDATA/container_sleep.json" > "$ctr_config"
 }

From 69e84fba69d983b7230fa1748c618dbd7ffc43c5 Mon Sep 17 00:00:00 2001
From: Kir Kolyshkin
Date: Fri, 20 Jun 2025 10:55:38 -0700
Subject: [PATCH 08/15] test/ctr.bats: fix wrt new CPU units to weight conversion

It's a long story (see [1], [2], [3], [4]) but both runc and crun are
changing the formula to convert cgroup v1 CPU shares to cgroup v2 CPU
weight, and it causes a failure in the "ctr update resources" test,
because it relies on the old conversion formula.

Let's modify it so it works with either the old or the new conversion.

(Ultimately, with cgroup v2 we should switch to setting
unified.cpu.weight directly).

[1]: https://github.com/kubernetes/kubernetes/issues/131216
[2]: https://github.com/opencontainers/runc/issues/4772
[3]: https://github.com/opencontainers/cgroups/pull/20
[4]: https://github.com/containers/crun/pull/1767

Signed-off-by: Kir Kolyshkin
---
 test/ctr.bats | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/test/ctr.bats b/test/ctr.bats
index 4e9ddebdc65..3990464a90f 100644
--- a/test/ctr.bats
+++ b/test/ctr.bats
@@ -869,8 +869,10 @@ function assert_log_linking() {
         [[ "$output" == *"20000 10000"* ]]
 
         output=$(crictl exec --sync "$ctr_id" sh -c "cat /sys/fs/cgroup/cpu.weight")
-        # 512 shares are converted to cpu.weight 20
-        [[ "$output" == *"20"* ]]
+        # CPU shares of 512 is converted to cpu.weight of either 20 or 59,
+        # depending on crun/runc version (see https://github.com/kubernetes/kubernetes/issues/131216).
+        echo "got cpu.weight $output, want 20 or 59"
+        [ "$output" = "20" ] || [ "$output" = "59" ]
     else
         output=$(crictl exec --sync "$ctr_id" sh -c "cat /sys/fs/cgroup/cpu/cpu.shares")
         [[ "$output" == *"512"* ]]
@@ -897,8 +899,10 @@ function assert_log_linking() {
         [[ "$output" == *"10000 20000"* ]]
 
         output=$(crictl exec --sync "$ctr_id" sh -c "cat /sys/fs/cgroup/cpu.weight")
-        # 256 shares are converted to cpu.weight 10
-        [[ "$output" == *"10"* ]]
+        # CPU shares of 256 is converted to cpu.weight of either 10 or 35,
+        # depending on crun/runc version (see https://github.com/kubernetes/kubernetes/issues/131216).
+        echo "got cpu.weight $output, want 10 or 35"
+        [ "$output" = "10" ] || [ "$output" = "35" ]
     else
         output=$(crictl exec --sync "$ctr_id" sh -c "cat /sys/fs/cgroup/cpu/cpu.shares")
         [[ "$output" == *"256"* ]]

From db21d4684a8ad530b38add0b8853d19cb0025102 Mon Sep 17 00:00:00 2001
From: Krisztian Litkey
Date: Fri, 27 Jun 2025 16:49:41 +0300
Subject: [PATCH 09/15] server,factory/container: delay CDI device injection later.

Currently CDI device injection is performed right after injecting other
devices into the container. This is problematic because CDI device
injection might alter, among other things, the environment. However,
setting up the final environment happens only later during container
creation and it involves setting environment variables from the image
and the Pod Spec. If the same environment variable is injected both
from an image or a container and from a CDI Spec, the former takes
precedence over the latter. This is unintentional and wrong.

This patch moves CDI device injection much later during container
creation, between OCI Hook injection and *oci.Container creation.

Signed-off-by: Krisztian Litkey
---
 internal/factory/container/container.go          |  3 +++
 internal/factory/container/device_freebsd.go     | 10 ++++++++++
 internal/factory/container/device_linux.go       |  5 ++---
 internal/factory/container/device_test.go        |  4 ++--
 internal/factory/container/device_unsupported.go |  7 +++++++
 server/container_create.go                       |  4 ++++
 6 files changed, 28 insertions(+), 5 deletions(-)

diff --git a/internal/factory/container/container.go b/internal/factory/container/container.go
index 3cd68512021..eb6b80e5eb6 100644
--- a/internal/factory/container/container.go
+++ b/internal/factory/container/container.go
@@ -111,6 +111,9 @@ type Container interface {
 	// SpecAddDevices adds devices from the server config, and container CRI config
 	SpecAddDevices([]device.Device, []device.Device, bool, bool) error
 
+	// SpecInjectCDIDevices injects any requested CDI devices to the container's Spec.
+ SpecInjectCDIDevices() error + // AddUnifiedResourcesFromAnnotations adds the cgroup-v2 resources specified in the io.kubernetes.cri-o.UnifiedCgroup annotation AddUnifiedResourcesFromAnnotations(annotationsMap map[string]string) error diff --git a/internal/factory/container/device_freebsd.go b/internal/factory/container/device_freebsd.go index 22ac56b7938..b48fc27307d 100644 --- a/internal/factory/container/device_freebsd.go +++ b/internal/factory/container/device_freebsd.go @@ -1,9 +1,19 @@ package container import ( + "fmt" + "runtime" + devicecfg "github.com/cri-o/cri-o/internal/config/device" ) func (c *container) SpecAddDevices(configuredDevices, annotationDevices []devicecfg.Device, privilegedWithoutHostDevices, enableDeviceOwnershipFromSecurityContext bool) error { return nil } + +func (c *container) SpecInjectCDIDevices() error { + if len(c.Config().CDIDevices) > 0 { + return fmt.Errorf("(*container).SpecInjectCDIDevices not supported on %s", runtime.GOOS) + } + return nil +} diff --git a/internal/factory/container/device_linux.go b/internal/factory/container/device_linux.go index c5fa173ddd7..8fca4f9c0de 100644 --- a/internal/factory/container/device_linux.go +++ b/internal/factory/container/device_linux.go @@ -49,8 +49,7 @@ func (c *container) SpecAddDevices(configuredDevices, annotationDevices []device return err } - // Finally, inject CDI devices - return c.specInjectCDIDevices() + return nil } func (c *container) specAddHostDevicesIfPrivileged(privilegedWithoutHostDevices bool) error { @@ -185,7 +184,7 @@ func (c *container) specAddContainerConfigDevices(enableDeviceOwnershipFromSecur return nil } -func (c *container) specInjectCDIDevices() error { +func (c *container) SpecInjectCDIDevices() error { var ( cdiDevices = c.Config().CDIDevices fromCRI = map[string]struct{}{} diff --git a/internal/factory/container/device_test.go b/internal/factory/container/device_test.go index 2234ad86fbf..4e7d316eabc 100644 --- a/internal/factory/container/device_test.go +++ b/internal/factory/container/device_test.go @@ -186,7 +186,7 @@ var _ = t.Describe("Container", func() { } }) - t.Describe("SpecAdd(CDI)Devices", func() { + t.Describe("SpecInjectCDIDevices", func() { writeCDISpecFiles := func(content []string) error { if len(content) == 0 { return nil @@ -421,7 +421,7 @@ containerEdits: Expect(writeCDISpecFiles(test.cdiSpecFiles)).To(Succeed()) // When - err := sut.SpecAddDevices(nil, nil, false, false) + err := sut.SpecInjectCDIDevices() // Then Expect(err != nil).To(Equal(test.expectError)) diff --git a/internal/factory/container/device_unsupported.go b/internal/factory/container/device_unsupported.go index 8ee69e0be37..3d6fc637958 100644 --- a/internal/factory/container/device_unsupported.go +++ b/internal/factory/container/device_unsupported.go @@ -12,3 +12,10 @@ import ( func (c *container) SpecAddDevices(configuredDevices, annotationDevices []devicecfg.Device, privilegedWithoutHostDevices, enableDeviceOwnershipFromSecurityContext bool) error { return fmt.Errorf("(*container).SpecAddDevices not supported on %s", runtime.GOOS) } + +func (c *container) SpecInjectCDIDevices() error { + if len(c.Config().CDIDevices) > 0 { + return fmt.Errorf("(*container).SpecInjectCDIDevices not supported on %s", runtime.GOOS) + } + return nil +} diff --git a/server/container_create.go b/server/container_create.go index e62ec1012b7..149caa6f79a 100644 --- a/server/container_create.go +++ b/server/container_create.go @@ -1199,6 +1199,10 @@ func (s *Server) createSandboxContainer(ctx context.Context, ctr 
container.Conta } } + if err := ctr.SpecInjectCDIDevices(); err != nil { + return nil, err + } + // Set up pids limit if pids cgroup is mounted if node.CgroupHasPid() { specgen.SetLinuxResourcesPidsLimit(s.config.PidsLimit) From 31c01143a9aa28dbb982c4f475460a8508cc7f1e Mon Sep 17 00:00:00 2001 From: Krisztian Litkey Date: Wed, 16 Jul 2025 14:16:27 +0300 Subject: [PATCH 10/15] {install,roadmap}.md: give up and make prettier happy. Signed-off-by: Krisztian Litkey --- install.md | 4 ---- roadmap.md | 1 - 2 files changed, 5 deletions(-) diff --git a/install.md b/install.md index b2b75337a86..4e952b189cb 100644 --- a/install.md +++ b/install.md @@ -81,7 +81,6 @@ To install CRI-O on Flatcar Container Linux with sysexts, follow these steps: - Step 1: Download the installation script: Sample extension script for installing CRI-O using sysext is [here](https://github.com/flatcar/sysext-bakery/blob/main/create_crio_sysext.sh). - - Using curl: ```bash @@ -107,7 +106,6 @@ chmod +x create_crio_sysext.sh - Step 2: Run the installation script: Execute the script with the required arguments: - - The version of CRI-O you wish to install. [(Find a specific version of CRI-O here)](https://github.com/cri-o/cri-o/releases) - The name you wish to give to the sysext image. @@ -124,7 +122,6 @@ chmod +x create_crio_sysext.sh ``` - Step 3: Deploy the system extension: - - Once the script completes, you will have a `.raw` sysext image file named as per your `SYSEXTNAME` argument. - To deploy the system extension, move the `.raw` file to the @@ -138,7 +135,6 @@ chmod +x create_crio_sysext.sh ``` - Step 4: Verify the installation: - - Verify that the CRI-O service is running correctly. ```bash diff --git a/roadmap.md b/roadmap.md index f3a5f272c69..7804655c1ee 100644 --- a/roadmap.md +++ b/roadmap.md @@ -87,7 +87,6 @@ Some of these features can be seen below: ## Known Risks - Relying on different SIGs for CRI-O features: - - We have a need to discuss our enhancements with different SIGs to get all required information and drive the change. This can lead into helpful, but maybe not expected input and delay the deliverable. From 7cddfd40168de215f9d9337d8ced0fb9cd8d0745 Mon Sep 17 00:00:00 2001 From: "openshift-merge-bot[bot]" <148852131+openshift-merge-bot[bot]@users.noreply.github.com> Date: Thu, 12 Jun 2025 01:59:34 +0000 Subject: [PATCH 11/15] HighPerformanceHooks: Fix IRQ SMP affinity race conditions Cherry-pick of: - Merge pull request #9228 from andreaskaris/issue9227 The original 6 commits merged but were not squashed together, therefore doing this here on the downstream cherry-pick. 
(cherry picked from commit 3dce7d856a218e5b674eeab3cbd96422507bb69e) Signed-off-by: Andreas Karis Reported-at: https://issues.redhat.com/browse/OCPBUGS-59321 --- .../high_performance_hooks_linux.go | 75 ++-- .../high_performance_hooks_test.go | 336 +++++++++++++++++- .../runtime_handler_hooks.go | 7 + .../runtime_handler_hooks_linux.go | 51 ++- .../runtime_handler_hooks_unsupported.go | 17 +- internal/runtimehandlerhooks/utils_linux.go | 4 +- internal/runtimehandlerhooks/utils_test.go | 2 +- server/container_create.go | 6 +- server/container_start.go | 6 +- server/container_stop.go | 7 +- server/sandbox_run_linux.go | 8 +- server/server.go | 9 +- 12 files changed, 449 insertions(+), 79 deletions(-) diff --git a/internal/runtimehandlerhooks/high_performance_hooks_linux.go b/internal/runtimehandlerhooks/high_performance_hooks_linux.go index 8be09f7d822..14a7148ed05 100644 --- a/internal/runtimehandlerhooks/high_performance_hooks_linux.go +++ b/internal/runtimehandlerhooks/high_performance_hooks_linux.go @@ -63,9 +63,12 @@ const ( // HighPerformanceHooks used to run additional hooks that will configure a system for the latency sensitive workloads. type HighPerformanceHooks struct { - irqBalanceConfigFile string - cpusetLock sync.Mutex - sharedCPUs string + irqBalanceConfigFile string + cpusetLock sync.Mutex + irqSMPAffinityFileLock sync.Mutex + irqBalanceConfigFileLock sync.Mutex + sharedCPUs string + irqSMPAffinityFile string } func (h *HighPerformanceHooks) PreCreate(ctx context.Context, specgen *generate.Generator, s *sandbox.Sandbox, c *oci.Container) error { @@ -142,7 +145,7 @@ func (h *HighPerformanceHooks) PreStart(ctx context.Context, c *oci.Container, s if shouldIRQLoadBalancingBeDisabled(ctx, s.Annotations()) { log.Infof(ctx, "Disable irq smp balancing for container %q", c.ID()) - if err := setIRQLoadBalancing(ctx, c, false, IrqSmpAffinityProcFile, h.irqBalanceConfigFile); err != nil { + if err := h.setIRQLoadBalancing(ctx, c, false); err != nil { return fmt.Errorf("set IRQ load balancing: %w", err) } } @@ -196,7 +199,7 @@ func (h *HighPerformanceHooks) PreStop(ctx context.Context, c *oci.Container, s // enable the IRQ smp balancing for the container CPUs if shouldIRQLoadBalancingBeDisabled(ctx, s.Annotations()) { - if err := setIRQLoadBalancing(ctx, c, true, IrqSmpAffinityProcFile, h.irqBalanceConfigFile); err != nil { + if err := h.setIRQLoadBalancing(ctx, c, true); err != nil { return fmt.Errorf("set IRQ load balancing: %w", err) } } @@ -569,7 +572,7 @@ func disableCPULoadBalancingV1(containerManagers []cgroups.Manager) error { return nil } -func setIRQLoadBalancing(ctx context.Context, c *oci.Container, enable bool, irqSmpAffinityFile, irqBalanceConfigFile string) error { +func (h *HighPerformanceHooks) setIRQLoadBalancing(ctx context.Context, c *oci.Container, enable bool) error { lspec := c.Spec().Linux if lspec == nil || lspec.Resources == nil || @@ -578,26 +581,15 @@ func setIRQLoadBalancing(ctx context.Context, c *oci.Container, enable bool, irq return fmt.Errorf("find container %s CPUs", c.ID()) } - content, err := os.ReadFile(irqSmpAffinityFile) + newIRQBalanceSetting, err := h.updateNewIRQSMPAffinityMask(lspec.Resources.CPU.Cpus, enable) if err != nil { return err } - currentIRQSMPSetting := strings.TrimSpace(string(content)) - - newIRQSMPSetting, newIRQBalanceSetting, err := UpdateIRQSmpAffinityMask(lspec.Resources.CPU.Cpus, currentIRQSMPSetting, enable) - if err != nil { - return err - } - - if err := os.WriteFile(irqSmpAffinityFile, []byte(newIRQSMPSetting), 0o644); 
err != nil { - return err - } - - isIrqConfigExists := fileExists(irqBalanceConfigFile) + isIrqConfigExists := fileExists(h.irqBalanceConfigFile) if isIrqConfigExists { - if err := updateIrqBalanceConfigFile(irqBalanceConfigFile, newIRQBalanceSetting); err != nil { + if err := h.updateIrqBalanceConfigFile(newIRQBalanceSetting); err != nil { return err } } @@ -624,6 +616,36 @@ func setIRQLoadBalancing(ctx context.Context, c *oci.Container, enable bool, irq return nil } +func (h *HighPerformanceHooks) updateNewIRQSMPAffinityMask(cpus string, enable bool) (string, error) { + h.irqSMPAffinityFileLock.Lock() + defer h.irqSMPAffinityFileLock.Unlock() + + content, err := os.ReadFile(h.irqSMPAffinityFile) + if err != nil { + return "", err + } + + currentIRQSMPSetting := strings.TrimSpace(string(content)) + + newIRQSMPSetting, newIRQBalanceSetting, err := calcIRQSMPAffinityMask(cpus, currentIRQSMPSetting, enable) + if err != nil { + return "", err + } + + if err := os.WriteFile(h.irqSMPAffinityFile, []byte(newIRQSMPSetting), 0o644); err != nil { + return "", err + } + + return newIRQBalanceSetting, nil +} + +func (h *HighPerformanceHooks) updateIrqBalanceConfigFile(newIRQBalanceSetting string) error { + h.irqBalanceConfigFileLock.Lock() + defer h.irqBalanceConfigFileLock.Unlock() + + return updateIrqBalanceConfigFile(h.irqBalanceConfigFile, newIRQBalanceSetting) +} + func setCPUQuota(podManager cgroups.Manager, containerManagers []cgroups.Manager) error { if err := disableCPUQuotaForCgroup(podManager); err != nil { return err @@ -1034,19 +1056,6 @@ func RestoreIrqBalanceConfig(ctx context.Context, irqBalanceConfigFile, irqBanne return nil } -func ShouldCPUQuotaBeDisabled(ctx context.Context, cid string, cSpec *specs.Spec, s *sandbox.Sandbox, annotations fields.Set) bool { - if !shouldRunHooks(ctx, cid, cSpec, s) { - return false - } - - if annotations[crioannotations.CPUQuotaAnnotation] == annotationTrue { - log.Warnf(ctx, "%s", annotationValueDeprecationWarning(crioannotations.CPUQuotaAnnotation)) - } - - return annotations[crioannotations.CPUQuotaAnnotation] == annotationTrue || - annotations[crioannotations.CPUQuotaAnnotation] == annotationDisable -} - func shouldRunHooks(ctx context.Context, id string, cSpec *specs.Spec, s *sandbox.Sandbox) bool { if isCgroupParentBurstable(s) { log.Infof(ctx, "Container %q is a burstable pod. Skip PreStart.", id) diff --git a/internal/runtimehandlerhooks/high_performance_hooks_test.go b/internal/runtimehandlerhooks/high_performance_hooks_test.go index 7ddd2830f9f..9fb089f2ef1 100644 --- a/internal/runtimehandlerhooks/high_performance_hooks_test.go +++ b/internal/runtimehandlerhooks/high_performance_hooks_test.go @@ -4,7 +4,9 @@ import ( "context" "os" "path/filepath" + "strconv" "strings" + "sync" "time" . 
"github.com/onsi/ginkgo/v2" @@ -18,6 +20,7 @@ import ( "github.com/cri-o/cri-o/internal/log" "github.com/cri-o/cri-o/internal/oci" crioannotations "github.com/cri-o/cri-o/pkg/annotations" + "github.com/cri-o/cri-o/pkg/config" ) const ( @@ -59,7 +62,11 @@ var _ = Describe("high_performance_hooks", func() { irqSmpAffinityFile := filepath.Join(fixturesDir, "irq_smp_affinity") irqBalanceConfigFile := filepath.Join(fixturesDir, "irqbalance") verifySetIRQLoadBalancing := func(enabled bool, expected string) { - err := setIRQLoadBalancing(context.TODO(), container, enabled, irqSmpAffinityFile, irqBalanceConfigFile) + h := &HighPerformanceHooks{ + irqBalanceConfigFile: irqBalanceConfigFile, + irqSMPAffinityFile: irqSmpAffinityFile, + } + err := h.setIRQLoadBalancing(context.TODO(), container, enabled) Expect(err).ToNot(HaveOccurred()) content, err := os.ReadFile(irqSmpAffinityFile) @@ -112,7 +119,11 @@ var _ = Describe("high_performance_hooks", func() { irqSmpAffinityFile := filepath.Join(fixturesDir, "irq_smp_affinity") irqBalanceConfigFile := filepath.Join(fixturesDir, "irqbalance") verifySetIRQLoadBalancing := func(enabled bool, expectedSmp, expectedBan string) { - err = setIRQLoadBalancing(context.TODO(), container, enabled, irqSmpAffinityFile, irqBalanceConfigFile) + h := &HighPerformanceHooks{ + irqBalanceConfigFile: irqBalanceConfigFile, + irqSMPAffinityFile: irqSmpAffinityFile, + } + err = h.setIRQLoadBalancing(context.TODO(), container, enabled) Expect(err).ToNot(HaveOccurred()) content, err := os.ReadFile(irqSmpAffinityFile) @@ -756,4 +767,325 @@ var _ = Describe("high_performance_hooks", func() { Expect(env).To(ContainElements("OPENSHIFT_ISOLATED_CPUS=1-2", "OPENSHIFT_SHARED_CPUS=3-4")) }) }) + Describe("Make sure that correct runtime handler hooks are set", func() { + var runtimeName string + var sandboxAnnotations map[string]string + var sb *sandbox.Sandbox + var cfg *config.Config + var hooksRetriever *HooksRetriever + + irqSmpAffinityFile := filepath.Join(fixturesDir, "irq_smp_affinity") + irqBalanceConfigFile := filepath.Join(fixturesDir, "irqbalance") + flags = "0000,0000ffff" + + ctx := context.Background() + + verifySetIRQLoadBalancing := func(expected string) { + content, err := os.ReadFile(irqSmpAffinityFile) + Expect(err).ToNot(HaveOccurred()) + Expect(strings.Trim(string(content), "\n")).To(Equal(expected)) + } + + createContainer := func(cpus string) (*oci.Container, error) { + container, err := oci.NewContainer("containerID", "", "", "", + make(map[string]string), make(map[string]string), + make(map[string]string), "pauseImage", nil, nil, "", + &types.ContainerMetadata{}, "sandboxID", false, false, + false, "", "", time.Now(), "") + if err != nil { + return nil, err + } + var cpuShares uint64 = 1024 + container.SetSpec( + &specs.Spec{ + Linux: &specs.Linux{ + Resources: &specs.LinuxResources{ + CPU: &specs.LinuxCPU{ + Cpus: cpus, + Shares: &cpuShares, + }, + }, + }, + }, + ) + + return container, nil + } + + JustBeforeEach(func() { + // Simulate a restart of crio each time as we're modifying the config between runs. 
+ cpuLoadBalancingAllowedAnywhereOnce = sync.Once{} + + hooksRetriever = NewHooksRetriever(ctx, cfg) + + // create tests affinity file + err = os.WriteFile(irqSmpAffinityFile, []byte(flags), 0o644) + Expect(err).ToNot(HaveOccurred()) + + sbox := sandbox.NewBuilder() + createdAt := time.Now() + sbox.SetCreatedAt(createdAt) + sbox.SetID("sandboxID") + sbox.SetName("sandboxName") + sbox.SetLogDir("test") + sbox.SetShmPath("test") + sbox.SetNamespace("") + sbox.SetKubeName("") + sbox.SetMountLabel("test") + sbox.SetProcessLabel("test") + sbox.SetCgroupParent("") + sbox.SetRuntimeHandler(runtimeName) + sbox.SetResolvPath("") + sbox.SetHostname("") + sbox.SetPortMappings([]*hostport.PortMapping{}) + sbox.SetHostNetwork(false) + sbox.SetUsernsMode("") + sbox.SetPodLinuxOverhead(nil) + sbox.SetPodLinuxResources(nil) + err = sbox.SetCRISandbox( + sbox.ID(), + map[string]string{}, + sandboxAnnotations, + &types.PodSandboxMetadata{}, + ) + Expect(err).ToNot(HaveOccurred()) + sbox.SetPrivileged(false) + sbox.SetHostNetwork(false) + sbox.SetCreatedAt(createdAt) + sb, err = sbox.GetSandbox() + Expect(err).ToNot(HaveOccurred()) + }) + + Context("with runtime name high-performance and sandbox disable annotation", func() { + BeforeEach(func() { + runtimeName = "high-performance" + sandboxAnnotations = map[string]string{crioannotations.IRQLoadBalancingAnnotation: "disable"} + cfg = &config.Config{ + RuntimeConfig: config.RuntimeConfig{ + IrqBalanceConfigFile: irqBalanceConfigFile, + Runtimes: config.Runtimes{ + "high-performance": { + AllowedAnnotations: []string{}, + }, + "default": {}, + }, + }, + } + }) + + It("should set the correct irq bit mask with concurrency", func() { + hooks := hooksRetriever.Get(sb.RuntimeHandler(), sb.Annotations()) + Expect(hooks).NotTo(BeNil()) + if hph, ok := hooks.(*HighPerformanceHooks); ok { + hph.irqSMPAffinityFile = irqSmpAffinityFile + } + var wg sync.WaitGroup + for cpu := range 16 { + wg.Add(1) + go func() { + defer wg.Done() + container, err := createContainer(strconv.Itoa(cpu)) + Expect(err).ToNot(HaveOccurred()) + err = hooks.PreStart(ctx, container, sb) + Expect(err).ToNot(HaveOccurred()) + }() + } + wg.Wait() + verifySetIRQLoadBalancing("00000000,00000000") + }) + }) + + Context("with runtime name high-performance and sandbox without any annotation", func() { + BeforeEach(func() { + runtimeName = "high-performance" + sandboxAnnotations = map[string]string{} + cfg = &config.Config{ + RuntimeConfig: config.RuntimeConfig{ + IrqBalanceConfigFile: irqBalanceConfigFile, + Runtimes: config.Runtimes{ + "high-performance": { + AllowedAnnotations: []string{}, + }, + "default": {}, + }, + }, + } + }) + + It("should keep the current irq bit mask but return a high performance hooks", func() { + hooks := hooksRetriever.Get(sb.RuntimeHandler(), sb.Annotations()) + Expect(hooks).NotTo(BeNil()) + hph, ok := hooks.(*HighPerformanceHooks) + Expect(ok).To(BeTrue()) + hph.irqSMPAffinityFile = irqSmpAffinityFile + + var wg sync.WaitGroup + for cpu := range 16 { + wg.Add(1) + go func() { + defer wg.Done() + container, err := createContainer(strconv.Itoa(cpu)) + Expect(err).ToNot(HaveOccurred()) + err = hooks.PreStart(ctx, container, sb) + Expect(err).ToNot(HaveOccurred()) + }() + } + wg.Wait() + verifySetIRQLoadBalancing(flags) + }) + }) + + Context("with runtime name hp and sandbox disable annotation", func() { + BeforeEach(func() { + runtimeName = "hp" + sandboxAnnotations = map[string]string{crioannotations.IRQLoadBalancingAnnotation: "disable"} + cfg = &config.Config{ + 
RuntimeConfig: config.RuntimeConfig{ + IrqBalanceConfigFile: irqBalanceConfigFile, + Runtimes: config.Runtimes{ + "hp": { + AllowedAnnotations: []string{ + crioannotations.IRQLoadBalancingAnnotation, + }, + }, + "default": {}, + }, + }, + } + }) + + It("should set the correct irq bit mask with concurrency", func() { + hooks := hooksRetriever.Get(sb.RuntimeHandler(), sb.Annotations()) + Expect(hooks).NotTo(BeNil()) + if hph, ok := hooks.(*HighPerformanceHooks); ok { + hph.irqSMPAffinityFile = irqSmpAffinityFile + } + var wg sync.WaitGroup + for cpu := range 16 { + wg.Add(1) + go func() { + defer wg.Done() + container, err := createContainer(strconv.Itoa(cpu)) + Expect(err).ToNot(HaveOccurred()) + err = hooks.PreStart(ctx, container, sb) + Expect(err).ToNot(HaveOccurred()) + }() + } + wg.Wait() + verifySetIRQLoadBalancing("00000000,00000000") + }) + }) + + Context("with runtime name hp and sandbox without any annotation", func() { + BeforeEach(func() { + runtimeName = "hp" + sandboxAnnotations = map[string]string{} + cfg = &config.Config{ + RuntimeConfig: config.RuntimeConfig{ + IrqBalanceConfigFile: irqBalanceConfigFile, + Runtimes: config.Runtimes{ + "hp": { + AllowedAnnotations: []string{ + crioannotations.IRQLoadBalancingAnnotation, + }, + }, + "default": {}, + }, + }, + } + }) + + It("should return a nil hook", func() { + hooks := hooksRetriever.Get(sb.RuntimeHandler(), sb.Annotations()) + Expect(hooks).To(BeNil()) + }) + }) + + // The following test case should never happen in the real world. However, it makes sure that the checks + // actually look at the runtime name and at the sandbox annotation and if _either_ signals that high performance + // hooks should be enabled then enable them. + Context("with runtime name default and sandbox disable annotation", func() { + BeforeEach(func() { + runtimeName = "default" + sandboxAnnotations = map[string]string{crioannotations.IRQLoadBalancingAnnotation: "disable"} + cfg = &config.Config{ + RuntimeConfig: config.RuntimeConfig{ + IrqBalanceConfigFile: irqBalanceConfigFile, + Runtimes: config.Runtimes{ + "default": {}, + }, + }, + } + }) + + It("should set the correct irq bit mask with concurrency", func() { + hooks := hooksRetriever.Get(sb.RuntimeHandler(), sb.Annotations()) + Expect(hooks).NotTo(BeNil()) + if hph, ok := hooks.(*HighPerformanceHooks); ok { + hph.irqSMPAffinityFile = irqSmpAffinityFile + } + var wg sync.WaitGroup + for cpu := range 16 { + wg.Add(1) + go func() { + defer wg.Done() + container, err := createContainer(strconv.Itoa(cpu)) + Expect(err).ToNot(HaveOccurred()) + err = hooks.PreStart(ctx, container, sb) + Expect(err).ToNot(HaveOccurred()) + }() + } + wg.Wait() + verifySetIRQLoadBalancing("00000000,00000000") + }) + }) + + Context("with runtime name default, CPU balancing annotation present and sandbox without any annotation", func() { + BeforeEach(func() { + runtimeName = "default" + sandboxAnnotations = map[string]string{} + cfg = &config.Config{ + RuntimeConfig: config.RuntimeConfig{ + IrqBalanceConfigFile: irqBalanceConfigFile, + Runtimes: config.Runtimes{ + "high-performance": { + AllowedAnnotations: []string{}, + }, + "hp": { + AllowedAnnotations: []string{ + crioannotations.IRQLoadBalancingAnnotation, + }, + }, + "cpu-balancing-anywhere": { + AllowedAnnotations: []string{ + crioannotations.CPULoadBalancingAnnotation, + }, + }, + "default": {}, + }, + }, + } + }) + + It("should yield a DefaultCPULoadBalanceHooks which keeps the old mask", func() { + hooks := hooksRetriever.Get(sb.RuntimeHandler(), sb.Annotations()) 
+ Expect(hooks).NotTo(BeNil()) + _, ok := (hooks).(*DefaultCPULoadBalanceHooks) + Expect(ok).To(BeTrue()) + var wg sync.WaitGroup + for cpu := range 16 { + wg.Add(1) + go func() { + defer wg.Done() + container, err := createContainer(strconv.Itoa(cpu)) + Expect(err).ToNot(HaveOccurred()) + err = hooks.PreStart(ctx, container, sb) + Expect(err).ToNot(HaveOccurred()) + }() + } + wg.Wait() + verifySetIRQLoadBalancing(flags) + }) + }) + }) }) diff --git a/internal/runtimehandlerhooks/runtime_handler_hooks.go b/internal/runtimehandlerhooks/runtime_handler_hooks.go index 3c286c4bbee..5a503a7ef18 100644 --- a/internal/runtimehandlerhooks/runtime_handler_hooks.go +++ b/internal/runtimehandlerhooks/runtime_handler_hooks.go @@ -8,6 +8,7 @@ import ( "github.com/cri-o/cri-o/internal/lib/sandbox" "github.com/cri-o/cri-o/internal/oci" + libconfig "github.com/cri-o/cri-o/pkg/config" ) var ( @@ -27,3 +28,9 @@ type RuntimeHandlerHooks interface { type HighPerformanceHook interface { RuntimeHandlerHooks } + +// HooksRetriever allows retrieving the runtime hooks for a given sandbox. +type HooksRetriever struct { + config *libconfig.Config + highPerformanceHooks RuntimeHandlerHooks +} diff --git a/internal/runtimehandlerhooks/runtime_handler_hooks_linux.go b/internal/runtimehandlerhooks/runtime_handler_hooks_linux.go index 5bd7f7ad1b4..facd0347858 100644 --- a/internal/runtimehandlerhooks/runtime_handler_hooks_linux.go +++ b/internal/runtimehandlerhooks/runtime_handler_hooks_linux.go @@ -10,28 +10,57 @@ import ( libconfig "github.com/cri-o/cri-o/pkg/config" ) -// GetRuntimeHandlerHooks returns RuntimeHandlerHooks implementation by the runtime handler name. -func GetRuntimeHandlerHooks(ctx context.Context, config *libconfig.Config, handler string, annotations map[string]string) (RuntimeHandlerHooks, error) { +// NewHooksRetriever returns a pointer to a new retriever. +// Log a warning if deprecated configuration is detected. +func NewHooksRetriever(ctx context.Context, config *libconfig.Config) *HooksRetriever { ctx, span := log.StartSpan(ctx) defer span.End() - if strings.Contains(handler, HighPerformance) { - log.Warnf(ctx, "The usage of the handler %q without adding high-performance feature annotations under allowed_annotations will be deprecated under 1.21", HighPerformance) + rhh := &HooksRetriever{ + config: config, + highPerformanceHooks: nil, + } + + for name, runtime := range config.Runtimes { + annotationMap := map[string]string{} + for _, v := range runtime.AllowedAnnotations { + annotationMap[v] = "" + } - return &HighPerformanceHooks{irqBalanceConfigFile: config.IrqBalanceConfigFile, cpusetLock: sync.Mutex{}, sharedCPUs: config.SharedCPUSet}, nil + if strings.Contains(name, HighPerformance) && !highPerformanceAnnotationsSpecified(annotationMap) { + log.Warnf(ctx, "The usage of the handler %q without adding high-performance feature annotations under "+ + "allowed_annotations is deprecated since 1.21", HighPerformance) + } } - if highPerformanceAnnotationsSpecified(annotations) { - log.Warnf(ctx, "The usage of the handler %q without adding high-performance feature annotations under allowed_annotations will be deprecated under 1.21", HighPerformance) + return rhh +} + +// Get checks runtime name or the sandbox's annotations for allowed high performance annotations. If present, it returns +// the single instance of highPerformanceHooks. +// Otherwise, if crio's config allows CPU load balancing anywhere, return a DefaultCPULoadBalanceHooks. +// Otherwise, return nil. 
+func (hr *HooksRetriever) Get(runtimeName string, sandboxAnnotations map[string]string) RuntimeHandlerHooks { + if strings.Contains(runtimeName, HighPerformance) || highPerformanceAnnotationsSpecified(sandboxAnnotations) { + if hr.highPerformanceHooks == nil { + hr.highPerformanceHooks = &HighPerformanceHooks{ + irqBalanceConfigFile: hr.config.IrqBalanceConfigFile, + cpusetLock: sync.Mutex{}, + irqSMPAffinityFileLock: sync.Mutex{}, + irqBalanceConfigFileLock: sync.Mutex{}, + sharedCPUs: hr.config.SharedCPUSet, + irqSMPAffinityFile: IrqSmpAffinityProcFile, + } + } - return &HighPerformanceHooks{irqBalanceConfigFile: config.IrqBalanceConfigFile, cpusetLock: sync.Mutex{}, sharedCPUs: config.SharedCPUSet}, nil + return hr.highPerformanceHooks } - if cpuLoadBalancingAllowed(config) { - return &DefaultCPULoadBalanceHooks{}, nil + if cpuLoadBalancingAllowed(hr.config) { + return &DefaultCPULoadBalanceHooks{} } - return nil, nil + return nil } func highPerformanceAnnotationsSpecified(annotations map[string]string) bool { diff --git a/internal/runtimehandlerhooks/runtime_handler_hooks_unsupported.go b/internal/runtimehandlerhooks/runtime_handler_hooks_unsupported.go index a35b8cee4bd..4893408b15a 100644 --- a/internal/runtimehandlerhooks/runtime_handler_hooks_unsupported.go +++ b/internal/runtimehandlerhooks/runtime_handler_hooks_unsupported.go @@ -13,11 +13,22 @@ const ( IrqSmpAffinityProcFile = "" ) -// GetRuntimeHandlerHooks returns RuntimeHandlerHooks implementation by the runtime handler name -func GetRuntimeHandlerHooks(ctx context.Context, config *libconfig.Config, handler string, annotations map[string]string) (RuntimeHandlerHooks, error) { +// NewHooksRetriever returns a pointer to a new retriever. +func NewHooksRetriever(ctx context.Context, config *libconfig.Config) *HooksRetriever { ctx, span := log.StartSpan(ctx) defer span.End() - return &DefaultCPULoadBalanceHooks{}, nil + + rhh := &HooksRetriever{ + config: config, + highPerformanceHooks: nil, + } + + return rhh +} + +// Get always returns DefaultCPULoadBalanceHooks for non-linux architectures. +func (hr *HooksRetriever) Get(runtimeName string, sandboxAnnotations map[string]string) RuntimeHandlerHooks { + return &DefaultCPULoadBalanceHooks{} } // RestoreIrqBalanceConfig restores irqbalance service with original banned cpu mask settings diff --git a/internal/runtimehandlerhooks/utils_linux.go b/internal/runtimehandlerhooks/utils_linux.go index 04adab60df9..e93931a15c1 100644 --- a/internal/runtimehandlerhooks/utils_linux.go +++ b/internal/runtimehandlerhooks/utils_linux.go @@ -103,10 +103,10 @@ func isAllBitSet(in []byte) bool { return true } -// UpdateIRQSmpAffinityMask take input cpus that need to change irq affinity mask and +// calcIRQSMPAffinityMask take input cpus that need to change irq affinity mask and // the current mask string, return an update mask string and inverted mask, with those cpus // enabled or disable in the mask. 
-func UpdateIRQSmpAffinityMask(cpus, current string, set bool) (cpuMask, bannedCPUMask string, err error) { +func calcIRQSMPAffinityMask(cpus, current string, set bool) (cpuMask, bannedCPUMask string, err error) { podcpuset, err := cpuset.Parse(cpus) if err != nil { return cpus, "", err diff --git a/internal/runtimehandlerhooks/utils_test.go b/internal/runtimehandlerhooks/utils_test.go index 18a814192ec..0f35c5e9fb4 100644 --- a/internal/runtimehandlerhooks/utils_test.go +++ b/internal/runtimehandlerhooks/utils_test.go @@ -27,7 +27,7 @@ var _ = Describe("Utils", func() { DescribeTable("testing cpu mask", func(c TestData) { - mask, invMask, err := UpdateIRQSmpAffinityMask(c.input.cpus, c.input.mask, c.input.set) + mask, invMask, err := calcIRQSMPAffinityMask(c.input.cpus, c.input.mask, c.input.set) Expect(err).ToNot(HaveOccurred()) Expect(mask).To(Equal(c.expected.mask)) Expect(invMask).To(Equal(c.expected.invMask)) diff --git a/server/container_create.go b/server/container_create.go index e62ec1012b7..82c56a9ebad 100644 --- a/server/container_create.go +++ b/server/container_create.go @@ -36,7 +36,6 @@ import ( "github.com/cri-o/cri-o/internal/log" oci "github.com/cri-o/cri-o/internal/oci" "github.com/cri-o/cri-o/internal/resourcestore" - "github.com/cri-o/cri-o/internal/runtimehandlerhooks" "github.com/cri-o/cri-o/internal/storage" "github.com/cri-o/cri-o/internal/storage/references" crioann "github.com/cri-o/cri-o/pkg/annotations" @@ -1315,10 +1314,7 @@ func (s *Server) createSandboxContainer(ctx context.Context, ctr container.Conta makeOCIConfigurationRootless(specgen) } - hooks, err := runtimehandlerhooks.GetRuntimeHandlerHooks(ctx, &s.config, sb.RuntimeHandler(), sb.Annotations()) - if err != nil { - return nil, fmt.Errorf("failed to get runtime handler %q hooks", sb.RuntimeHandler()) - } + hooks := s.hooksRetriever.Get(sb.RuntimeHandler(), sb.Annotations()) if err := s.nri.createContainer(ctx, specgen, sb, ociContainer); err != nil { return nil, err diff --git a/server/container_start.go b/server/container_start.go index 6c6a570e253..79964319e76 100644 --- a/server/container_start.go +++ b/server/container_start.go @@ -12,7 +12,6 @@ import ( "github.com/cri-o/cri-o/internal/lib" "github.com/cri-o/cri-o/internal/log" oci "github.com/cri-o/cri-o/internal/oci" - "github.com/cri-o/cri-o/internal/runtimehandlerhooks" ) // StartContainer starts the container. @@ -70,10 +69,7 @@ func (s *Server) StartContainer(ctx context.Context, req *types.StartContainerRe sandbox := s.getSandbox(ctx, c.Sandbox()) - hooks, err := runtimehandlerhooks.GetRuntimeHandlerHooks(ctx, &s.config, sandbox.RuntimeHandler(), sandbox.Annotations()) - if err != nil { - return nil, fmt.Errorf("failed to get runtime handler %q hooks", sandbox.RuntimeHandler()) - } + hooks := s.hooksRetriever.Get(sandbox.RuntimeHandler(), sandbox.Annotations()) if err := s.nri.startContainer(ctx, sandbox, c); err != nil { log.Warnf(ctx, "NRI start failed for container %q: %v", c.ID(), err) diff --git a/server/container_stop.go b/server/container_stop.go index bec6bda9c33..35a74d13571 100644 --- a/server/container_stop.go +++ b/server/container_stop.go @@ -12,7 +12,6 @@ import ( "github.com/cri-o/cri-o/internal/log" "github.com/cri-o/cri-o/internal/oci" - "github.com/cri-o/cri-o/internal/runtimehandlerhooks" ) // StopContainer stops a running container with a grace period (i.e., timeout). 
@@ -49,11 +48,7 @@ func (s *Server) stopContainer(ctx context.Context, ctr *oci.Container, timeout sb := s.getSandbox(ctx, ctr.Sandbox()) - hooks, err := runtimehandlerhooks.GetRuntimeHandlerHooks(ctx, &s.config, sb.RuntimeHandler(), sb.Annotations()) - if err != nil { - return fmt.Errorf("failed to get runtime handler %q hooks", sb.RuntimeHandler()) - } - + hooks := s.hooksRetriever.Get(sb.RuntimeHandler(), sb.Annotations()) if hooks != nil { if err := hooks.PreStop(ctx, ctr, sb); err != nil { return fmt.Errorf("failed to run pre-stop hook for container %q: %w", ctr.ID(), err) diff --git a/server/sandbox_run_linux.go b/server/sandbox_run_linux.go index 725bf2a9497..6f9a170a5c8 100644 --- a/server/sandbox_run_linux.go +++ b/server/sandbox_run_linux.go @@ -33,7 +33,6 @@ import ( "github.com/cri-o/cri-o/internal/memorystore" oci "github.com/cri-o/cri-o/internal/oci" "github.com/cri-o/cri-o/internal/resourcestore" - "github.com/cri-o/cri-o/internal/runtimehandlerhooks" "github.com/cri-o/cri-o/pkg/annotations" libconfig "github.com/cri-o/cri-o/pkg/config" "github.com/cri-o/cri-o/utils" @@ -1174,12 +1173,7 @@ func (s *Server) runPodSandbox(ctx context.Context, req *types.RunPodSandboxRequ return nil, err } - hooks, err := runtimehandlerhooks.GetRuntimeHandlerHooks(ctx, &s.config, sb.RuntimeHandler(), sb.Annotations()) - if err != nil { - return nil, fmt.Errorf("failed to get runtime handler %q hooks", sb.RuntimeHandler()) - } - - if hooks != nil { + if hooks := s.hooksRetriever.Get(sb.RuntimeHandler(), sb.Annotations()); hooks != nil { if err := hooks.PreStart(ctx, container, sb); err != nil { return nil, fmt.Errorf("failed to run pre-stop hook for container %q: %w", sb.ID(), err) } diff --git a/server/server.go b/server/server.go index e3771aad671..5ef25a292bf 100644 --- a/server/server.go +++ b/server/server.go @@ -92,6 +92,8 @@ type Server struct { // NRI runtime interface nri *nriAPI + // hooksRetriever allows getting the runtime hooks for the sandboxes. 
+	hooksRetriever *runtimehandlerhooks.HooksRetriever
 }
 
 // pullArguments are used to identify a pullOperation via an input image name and
@@ -461,7 +463,9 @@ func New(
 		minimumMappableGID:       config.MinimumMappableGID,
 		pullOperationsInProgress: make(map[pullArguments]*pullOperation),
 		resourceStore:            resourcestore.New(),
+		hooksRetriever:           runtimehandlerhooks.NewHooksRetriever(ctx, config),
 	}
+
 	if s.config.EnablePodEvents {
 		// creating a container events channel only if the evented pleg is enabled
 		s.ContainerEventsChan = make(chan types.ContainerEventResponse, 1000)
@@ -884,10 +888,7 @@ func (s *Server) handleExit(ctx context.Context, event fsnotify.Event) {
 		}
 	}
 
-	hooks, err := runtimehandlerhooks.GetRuntimeHandlerHooks(ctx, &s.config, sb.RuntimeHandler(), sb.Annotations())
-	if err != nil {
-		log.Warnf(ctx, "Failed to get runtime handler %q hooks", sb.RuntimeHandler())
-	} else if hooks != nil {
+	if hooks := s.hooksRetriever.Get(sb.RuntimeHandler(), sb.Annotations()); hooks != nil {
 		if err := hooks.PostStop(ctx, c, sb); err != nil {
 			log.Errorf(ctx, "Failed to run post-stop hook for container %s: %v", c.ID(), err)
 		}

From 23ea29008a92c76e136a261a4112772275acaa41 Mon Sep 17 00:00:00 2001
From: Lennart Jern
Date: Mon, 28 Jul 2025 10:01:42 +0300
Subject: [PATCH 12/15] Bump go-chi to v5.2.2

This fixes GHSA-vrw8-fxc6-2r93
See https://github.com/advisories/GHSA-vrw8-fxc6-2r93

Signed-off-by: Lennart Jern
---
 go.mod                                                 |  2 +-
 go.sum                                                 |  4 ++--
 vendor/github.com/go-chi/chi/v5/README.md              |  6 ++++--
 vendor/github.com/go-chi/chi/v5/chi.go                 |  3 +--
 vendor/github.com/go-chi/chi/v5/mux.go                 | 10 ++++------
 vendor/github.com/go-chi/chi/v5/path_value.go          |  5 +++--
 vendor/github.com/go-chi/chi/v5/path_value_fallback.go |  4 ++--
 vendor/github.com/go-chi/chi/v5/tree.go                |  6 ++----
 vendor/modules.txt                                     |  2 +-
 9 files changed, 20 insertions(+), 22 deletions(-)

diff --git a/go.mod b/go.mod
index 146e97afa79..04fecd29d69 100644
--- a/go.mod
+++ b/go.mod
@@ -33,7 +33,7 @@ require (
 	github.com/docker/distribution v2.8.3+incompatible
 	github.com/docker/go-units v0.5.0
 	github.com/fsnotify/fsnotify v1.9.0
-	github.com/go-chi/chi/v5 v5.2.1
+	github.com/go-chi/chi/v5 v5.2.2
 	github.com/go-logr/logr v1.4.2
 	github.com/godbus/dbus/v5 v5.1.1-0.20230522191255-76236955d466
 	github.com/google/go-cmp v0.7.0
diff --git a/go.sum b/go.sum
index 82573b3be7f..15273714cda 100644
--- a/go.sum
+++ b/go.sum
@@ -170,8 +170,8 @@ github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv
 github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
 github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c=
 github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU=
-github.com/go-chi/chi/v5 v5.2.1 h1:KOIHODQj58PmL80G2Eak4WdvUzjSJSm0vG72crDCqb8=
-github.com/go-chi/chi/v5 v5.2.1/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
+github.com/go-chi/chi/v5 v5.2.2 h1:CMwsvRVTbXVytCk1Wd72Zy1LAsAh9GxMmSNWLHCG618=
+github.com/go-chi/chi/v5 v5.2.2/go.mod h1:L2yAIGWB3H+phAw1NxKwWM+7eUH/lU8pOMm5hHcoops=
 github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
 github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=
 github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UNbRM=
diff --git a/vendor/github.com/go-chi/chi/v5/README.md b/vendor/github.com/go-chi/chi/v5/README.md
index 7f662ab45f1..c58a0e20cea 100644
--- a/vendor/github.com/go-chi/chi/v5/README.md
+++ b/vendor/github.com/go-chi/chi/v5/README.md
@@ -20,7 +20,9 @@ and [docgen](https://github.com/go-chi/docgen). We hope you enjoy it too!
 
 ## Install
 
-`go get -u github.com/go-chi/chi/v5`
+```sh
+go get -u github.com/go-chi/chi/v5
+```
 
 ## Features
 
@@ -194,7 +196,7 @@ type Router interface {
 	// path, with a fresh middleware stack for the inline-Router.
 	Group(fn func(r Router)) Router
 
-	// Route mounts a sub-Router along a `pattern`` string.
+	// Route mounts a sub-Router along a `pattern` string.
 	Route(pattern string, fn func(r Router)) Router
 
 	// Mount attaches another http.Handler along ./pattern/*
diff --git a/vendor/github.com/go-chi/chi/v5/chi.go b/vendor/github.com/go-chi/chi/v5/chi.go
index fc32c4efe98..2b6ebd337c0 100644
--- a/vendor/github.com/go-chi/chi/v5/chi.go
+++ b/vendor/github.com/go-chi/chi/v5/chi.go
@@ -37,8 +37,7 @@
 //
 // A placeholder with a name followed by a colon allows a regular
 // expression match, for example {number:\\d+}. The regular expression
-// syntax is Go's normal regexp RE2 syntax, except that regular expressions
-// including { or } are not supported, and / will never be
+// syntax is Go's normal regexp RE2 syntax, except that / will never be
 // matched. An anonymous regexp pattern is allowed, using an empty string
 // before the colon in the placeholder, such as {:\\d+}
 //
diff --git a/vendor/github.com/go-chi/chi/v5/mux.go b/vendor/github.com/go-chi/chi/v5/mux.go
index 91daf691e95..f1266971b4c 100644
--- a/vendor/github.com/go-chi/chi/v5/mux.go
+++ b/vendor/github.com/go-chi/chi/v5/mux.go
@@ -107,9 +107,8 @@ func (mx *Mux) Use(middlewares ...func(http.Handler) http.Handler) {
 // Handle adds the route `pattern` that matches any http method to
 // execute the `handler` http.Handler.
 func (mx *Mux) Handle(pattern string, handler http.Handler) {
-	parts := strings.SplitN(pattern, " ", 2)
-	if len(parts) == 2 {
-		mx.Method(parts[0], parts[1], handler)
+	if method, rest, found := strings.Cut(pattern, " "); found {
+		mx.Method(method, rest, handler)
 		return
 	}
 
@@ -119,9 +118,8 @@ func (mx *Mux) Handle(pattern string, handler http.Handler) {
 // HandleFunc adds the route `pattern` that matches any http method to
 // execute the `handlerFn` http.HandlerFunc.
 func (mx *Mux) HandleFunc(pattern string, handlerFn http.HandlerFunc) {
-	parts := strings.SplitN(pattern, " ", 2)
-	if len(parts) == 2 {
-		mx.Method(parts[0], parts[1], handlerFn)
+	if method, rest, found := strings.Cut(pattern, " "); found {
+		mx.Method(method, rest, handlerFn)
 		return
 	}
 
diff --git a/vendor/github.com/go-chi/chi/v5/path_value.go b/vendor/github.com/go-chi/chi/v5/path_value.go
index 7e78171e5c5..77c840f0191 100644
--- a/vendor/github.com/go-chi/chi/v5/path_value.go
+++ b/vendor/github.com/go-chi/chi/v5/path_value.go
@@ -1,5 +1,6 @@
-//go:build go1.22
-// +build go1.22
+//go:build go1.22 && !tinygo
+// +build go1.22,!tinygo
+
 package chi
diff --git a/vendor/github.com/go-chi/chi/v5/path_value_fallback.go b/vendor/github.com/go-chi/chi/v5/path_value_fallback.go
index f551781a43c..749a8520a75 100644
--- a/vendor/github.com/go-chi/chi/v5/path_value_fallback.go
+++ b/vendor/github.com/go-chi/chi/v5/path_value_fallback.go
@@ -1,5 +1,5 @@
-//go:build !go1.22
-// +build !go1.22
+//go:build !go1.22 || tinygo
+// +build !go1.22 tinygo
 
 package chi
diff --git a/vendor/github.com/go-chi/chi/v5/tree.go b/vendor/github.com/go-chi/chi/v5/tree.go
index c7d3bc57040..85fcfdbb8d4 100644
--- a/vendor/github.com/go-chi/chi/v5/tree.go
+++ b/vendor/github.com/go-chi/chi/v5/tree.go
@@ -730,11 +730,9 @@ func patNextSegment(pattern string) (nodeTyp, string, string, byte, int, int) {
 		tail = pattern[pe]
 	}
 
-	var rexpat string
-	if idx := strings.Index(key, ":"); idx >= 0 {
+	key, rexpat, isRegexp := strings.Cut(key, ":")
+	if isRegexp {
 		nt = ntRegexp
-		rexpat = key[idx+1:]
-		key = key[:idx]
 	}
 
 	if len(rexpat) > 0 {
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 08434f47716..9ed02c2ed7e 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -526,7 +526,7 @@ github.com/fsnotify/fsnotify/internal
 # github.com/fxamacker/cbor/v2 v2.7.0
 ## explicit; go 1.17
 github.com/fxamacker/cbor/v2
-# github.com/go-chi/chi/v5 v5.2.1
+# github.com/go-chi/chi/v5 v5.2.2
 ## explicit; go 1.20
 github.com/go-chi/chi/v5
 # github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376

From 44bf764bd72288d531a7eda8d1c50df7952253da Mon Sep 17 00:00:00 2001
From: Ayato Tokubi
Date: Sat, 21 Jun 2025 18:02:56 +0000
Subject: [PATCH 13/15] Bump go version to 1.24.3

Signed-off-by: Ayato Tokubi
---
 go.mod | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/go.mod b/go.mod
index 04fecd29d69..95863f85f98 100644
--- a/go.mod
+++ b/go.mod
@@ -1,4 +1,4 @@
-go 1.24.1
+go 1.24.3
 
 module github.com/cri-o/cri-o

From 5226db6b5debec815ca29fb76fbdff7d7aa6d14f Mon Sep 17 00:00:00 2001
From: Ayato Tokubi
Date: Mon, 23 Jun 2025 05:16:11 +0000
Subject: [PATCH 14/15] update nixpkgs

Signed-off-by: Ayato Tokubi
---
 nix/nixpkgs.json | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/nix/nixpkgs.json b/nix/nixpkgs.json
index 11ec5867d1c..316daf651d6 100644
--- a/nix/nixpkgs.json
+++ b/nix/nixpkgs.json
@@ -1,12 +1,13 @@
 {
   "url": "https://github.com/nixos/nixpkgs",
-  "rev": "268ec3b4f1976d689f80b5b18da885420d258b0a",
-  "date": "2025-05-01T02:12:42+02:00",
-  "path": "/nix/store/6hrr1hb4n1ni6w9qwy842rgzmj6pm8h9-nixpkgs",
-  "sha256": "1a2sbfm97waaq5w682j8ji1b632g0r5px2lgm3jybh0ndzgi36f5",
-  "hash": "sha256-xZkR328WwOXlqI+KfksGTwyzQpRICmR4wUrxk6pbWqg=",
+  "rev": "11c738bae485c60a87e952a589cd769783ce3748",
+  "date": "2025-06-22T20:02:23-07:00",
+  "path": "/nix/store/wcjn2craafchk3kigd8nm9vjpic1ld1k-nixpkgs",
+  "sha256": "135m5zl2ivc46x8ya936r5i174ki69j8nrnqnzvswbadnpi4g93k",
+  "hash": "sha256-c6RH4rVNLa73t9hmi2QycZITYslmJOVRN4TtKOgvtYw=",
   "fetchLFS": false,
   "fetchSubmodules": false,
   "deepClone": false,
+  "fetchTags": false,
   "leaveDotGit": false
 }

From 16c19198deba8b901a7c605f212f267c789e7692 Mon Sep 17 00:00:00 2001
From: Kubernetes Release Robot
Date: Fri, 1 Aug 2025 00:34:10 +0000
Subject: [PATCH 15/15] version: bump to 1.33.3

Signed-off-by: Kubernetes Release Robot
---
 internal/version/version.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/internal/version/version.go b/internal/version/version.go
index 20cc6751494..48918590bba 100644
--- a/internal/version/version.go
+++ b/internal/version/version.go
@@ -21,7 +21,7 @@ import (
 )
 
 // Version is the version of the build.
-const Version = "1.33.2"
+const Version = "1.33.3"
 
 // ReleaseMinorVersions are the currently supported minor versions.
 var ReleaseMinorVersions = []string{"1.33", "1.32", "1.31", "1.30"}