From 6e9aec7cba538473f4efe412a84b1201ca9ac932 Mon Sep 17 00:00:00 2001 From: Stefan Bueringer Date: Tue, 11 Jun 2024 20:21:33 +0200 Subject: [PATCH] Cleanup janitor MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Stefan Büringer buringerst@vmware.com --- Makefile | 12 +- go.mod | 8 +- go.sum | 16 +-- hack/clean-ci.sh | 15 +- hack/tools/boskosctl/main.go | 3 +- hack/tools/janitor/README.md | 7 +- hack/tools/janitor/main.go | 185 +++++++++++-------------- hack/tools/pkg/janitor/janitor.go | 125 ++--------------- hack/tools/pkg/janitor/janitor_test.go | 137 ++++++------------ hack/tools/pkg/janitor/vsphere.go | 30 ---- test/e2e/e2e_suite_test.go | 3 - test/go.mod | 14 +- test/go.sum | 24 ++-- 13 files changed, 171 insertions(+), 408 deletions(-) diff --git a/Makefile b/Makefile index 2eb00f4911..42b558d27e 100644 --- a/Makefile +++ b/Makefile @@ -930,15 +930,9 @@ clean-ci: ## Cleanup orphaned objects in CI @if [ -z "${GOVC_USERNAME}" ]; then echo "GOVC_USERNAME is not set"; exit 1; fi @if [ -z "${GOVC_PASSWORD}" ]; then echo "GOVC_PASSWORD is not set"; exit 1; fi @if [ -z "${GOVC_URL}" ]; then echo "GOVC_URL is not set"; exit 1; fi - go run $(JANITOR_DIR) \ - --dry-run=false \ - --max-age=12h \ - --ipam-namespace=default \ - --folder=/SDDC-Datacenter/vm/Workloads/cluster-api-provider-vsphere \ - --resource-pool=/SDDC-Datacenter/host/Cluster-1/Resources/Compute-ResourcePool/cluster-api-provider-vsphere \ - --vm-folder=/SDDC-Datacenter/vm/Workloads/cluster-api-provider-vsphere \ - --vm-folder=/SDDC-Datacenter/vm/Workloads/cloud-provider-vsphere \ - --vm-folder=/SDDC-Datacenter/vm/Workloads/image-builder + @if [ -z "${VSPHERE_TLS_THUMBPRINT}" ]; then echo "VSPHERE_TLS_THUMBPRINT is not set"; exit 1; fi + @if [ -z "${BOSKOS_HOST}" ]; then echo "BOSKOS_HOST is not set"; exit 1; fi + go run $(JANITOR_DIR) --dry-run=false .PHONY: clean-temporary clean-temporary: ## Remove all temporary files and folders diff --git a/go.mod b/go.mod index 91140fd1fb..c8e0fff90f 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module sigs.k8s.io/cluster-api-provider-vsphere go 1.22.0 -replace sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v1.7.0-rc.0.0.20240610140608-2e3860ac7408 +replace sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v1.7.0-rc.0.0.20240617064349-5b6043e1b6ec replace github.com/vmware-tanzu/vm-operator/pkg/constants/testlabels => github.com/vmware-tanzu/vm-operator/pkg/constants/testlabels v0.0.0-20240404200847-de75746a9505 @@ -37,7 +37,7 @@ require ( k8s.io/klog/v2 v2.120.1 k8s.io/utils v0.0.0-20231127182322-b307cd553661 sigs.k8s.io/cluster-api v1.7.0-rc.0.0.20240610140608-2e3860ac7408 - sigs.k8s.io/controller-runtime v0.18.3 + sigs.k8s.io/controller-runtime v0.18.4 sigs.k8s.io/kustomize/api v0.17.2 sigs.k8s.io/kustomize/kyaml v0.17.1 sigs.k8s.io/yaml v1.4.0 @@ -75,14 +75,14 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect golang.org/x/net v0.26.0 // indirect - golang.org/x/oauth2 v0.20.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect golang.org/x/sync v0.7.0 // indirect golang.org/x/sys v0.21.0 // indirect golang.org/x/term v0.21.0 // indirect golang.org/x/text v0.16.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c // indirect - google.golang.org/grpc v1.62.1 // indirect + google.golang.org/grpc v1.62.2 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // 
indirect gopkg.in/warnings.v0 v0.1.2 // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.0 // indirect diff --git a/go.sum b/go.sum index e5b6abe930..73247d1d0a 100644 --- a/go.sum +++ b/go.sum @@ -714,8 +714,8 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= -golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -900,8 +900,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= -google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/grpc v1.62.2 h1:iEIj1U5qjyBjzkM5nk3Fq+S1IbjbXSyqeULZ1Nfo4AA= +google.golang.org/grpc v1.62.2/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1011,11 +1011,11 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.0 h1:Tc9rS7JJoZ9sl3OpL4842oIk6lH7gWBb0JOmJ0ute7M= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.0/go.mod h1:1ewhL9l1gkPcU/IU/6rFYfikf+7Y5imWv7ARVbBOzNs= -sigs.k8s.io/cluster-api v1.7.0-rc.0.0.20240610140608-2e3860ac7408 h1:2YkK+3O1A7HsQV9Cw8ncH+DGa1GuSFwv6lbl1bOyZgI= -sigs.k8s.io/cluster-api v1.7.0-rc.0.0.20240610140608-2e3860ac7408/go.mod h1:qN/cGR3Ww2GlMTcM47Abeob4SvpkN/8II439eNbPz6w= +sigs.k8s.io/cluster-api v1.7.0-rc.0.0.20240617064349-5b6043e1b6ec h1:ikgHzieJg7LTJjvL/o4gcs8pcGBLwXyvuRvrC4Uj7kk= +sigs.k8s.io/cluster-api v1.7.0-rc.0.0.20240617064349-5b6043e1b6ec/go.mod h1:tDxEz5a0levoOzLKny7JMW5S7g2P4fKYHNOMsS9IH/c= sigs.k8s.io/controller-runtime v0.9.0/go.mod h1:TgkfvrhhEw3PlI0BRL/5xM+89y3/yc0ZDfdbTl84si8= -sigs.k8s.io/controller-runtime v0.18.3 h1:B5Wmmo8WMWK7izei+2LlXLVDGzMwAHBNLX68lwtlSR4= -sigs.k8s.io/controller-runtime v0.18.3/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= 
+sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= +sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize/api v0.17.2 h1:E7/Fjk7V5fboiuijoZHgs4aHuexi5Y2loXlVOAVAG5g= diff --git a/hack/clean-ci.sh b/hack/clean-ci.sh index ad2cc62e6d..874522ac75 100755 --- a/hack/clean-ci.sh +++ b/hack/clean-ci.sh @@ -31,10 +31,6 @@ on_exit() { trap on_exit EXIT -# Set the kubeconfig to the IPAM cluster so the wait function is able to reach the kube-apiserver -# to ensure the vpn connection works. -export E2E_IPAM_KUBECONFIG="/root/ipam-conf/capv-services.conf" - # Run the vpn client in container docker run --rm -d --name vpn -v "${HOME}/.openvpn/:${HOME}/.openvpn/" \ -w "${HOME}/.openvpn/" --cap-add=NET_ADMIN --net=host --device=/dev/net/tun \ @@ -43,11 +39,11 @@ docker run --rm -d --name vpn -v "${HOME}/.openvpn/:${HOME}/.openvpn/" \ # Tail the vpn logs docker logs vpn -# Wait until the VPN connection is active and we are able to reach the ipam cluster -function wait_for_ipam_reachable() { + # Wait until the VPN connection is active. +function wait_for_vpn_up() { local n=0 until [ $n -ge 30 ]; do - kubectl --kubeconfig="${E2E_IPAM_KUBECONFIG}" --request-timeout=2s get inclusterippools.ipam.cluster.x-k8s.io && RET=$? || RET=$? + curl "https://${GOVC_URL}" --connect-timeout 2 -k && RET=$? || RET=$? if [[ "$RET" -eq 0 ]]; then break fi @@ -56,10 +52,7 @@ function wait_for_ipam_reachable() { done return "$RET" } -wait_for_ipam_reachable - -# Set kubeconfig for IPAM cleanup -export KUBECONFIG="${E2E_IPAM_KUBECONFIG}" +wait_for_vpn_up # Run e2e tests make clean-ci diff --git a/hack/tools/boskosctl/main.go b/hack/tools/boskosctl/main.go index e7de7c03e9..ac6f3a43f9 100644 --- a/hack/tools/boskosctl/main.go +++ b/hack/tools/boskosctl/main.go @@ -359,8 +359,7 @@ func release(ctx context.Context, client *boskos.Client, resourceName, vSphereUs defer vSphereClients.Logout(ctx) // Delete all VMs created up until now. - maxCreationDate := time.Now() - j := janitor.NewJanitor(vSphereClients, nil, maxCreationDate, "", false) + j := janitor.NewJanitor(vSphereClients, false) log.Info("Cleaning up vSphere") // Note: We intentionally want to skip clusterModule cleanup. If we run this too often we might hit race conditions diff --git a/hack/tools/janitor/README.md b/hack/tools/janitor/README.md index f5de74aba9..cea355eff9 100644 --- a/hack/tools/janitor/README.md +++ b/hack/tools/janitor/README.md @@ -3,8 +3,5 @@ The janitor is a tool for CI to cleanup objects leftover from failed or killed prowjobs. It can be run regularly as prowjob. -It tries to delete: - -* vSphere: virtual machines in the configured folders which exist longer than the configured `--max-age` flag. -* vSphere: cluster modules which do not refer any virtual machine -* IPAM: IPAddressClaims which exist longer than the configured `--max-age` flag +It retrieves vSphere projects from Boskos and then deletes VMs and resource pools accordingly. +Additionally it will delete cluster modules which do not refer any virtual machine. 
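
For reviewers, a minimal sketch (not part of the patch) of the Boskos-driven flow that the updated README and hack/tools/janitor/main.go describe, using only APIs visible in this diff (janitor.NewJanitor, Janitor.CleanupVSphere, boskos.Client Acquire/Release, and the "folder"/"resourcePool" user-data keys). The package and function names are illustrative, and the error aggregation and before/after metrics logging from main.go are omitted:

package janitorsketch // illustrative package name, not part of the repository

import (
	"context"

	"github.com/pkg/errors"

	"sigs.k8s.io/cluster-api-provider-vsphere/hack/tools/pkg/boskos"
	"sigs.k8s.io/cluster-api-provider-vsphere/hack/tools/pkg/janitor"
)

// cleanupResourceType mirrors the acquire -> cleanup -> release loop from
// hack/tools/janitor/main.go for a single Boskos resource type.
func cleanupResourceType(ctx context.Context, client *boskos.Client, vSphereClients *janitor.VSphereClients, resourceType string) error {
	j := janitor.NewJanitor(vSphereClients, false /* dryRun */)
	for {
		// Acquire a dirty, currently unowned resource and move it to state "cleaning".
		res, err := client.Acquire(resourceType, boskos.Dirty, boskos.Cleaning)
		if err != nil {
			if errors.Is(err, boskos.ErrNotFound) {
				// No more dirty resources that are not owned.
				return nil
			}
			return err
		}

		// The vSphere folder and resource pool of the project are stored in the resource's user data.
		if res.UserData == nil {
			continue
		}
		folder, hasFolder := res.UserData.Load("folder")
		resourcePool, hasResourcePool := res.UserData.Load("resourcePool")
		if !hasFolder || !hasResourcePool {
			continue
		}

		// Delete VMs in the project's folder, then empty child resource pools and folders.
		// The arguments mirror the CleanupVSphere call in main.go.
		if err := j.CleanupVSphere(ctx, []string{folder.(string)}, []string{resourcePool.(string)}, []string{folder.(string)}, false); err != nil {
			// Keep the resource in "cleaning"; the Boskos reaper moves it back to dirty so cleanup is retried later.
			continue
		}

		// Cleanup succeeded: hand the project back as free.
		if err := client.Release(res.Name, boskos.Free); err != nil {
			return err
		}
	}
}

Note the design choice carried over from main.go: a resource whose cleanup fails intentionally stays in the "cleaning" state so the Boskos reaper, not this loop, is responsible for retrying it; releasing it back to dirty here would make the loop re-acquire it immediately.
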
diff --git a/hack/tools/janitor/main.go b/hack/tools/janitor/main.go index f4e8d640dc..946f18888b 100644 --- a/hack/tools/janitor/main.go +++ b/hack/tools/janitor/main.go @@ -20,8 +20,8 @@ package main import ( "context" "flag" + "fmt" "os" - "time" "github.com/pkg/errors" "github.com/spf13/pflag" @@ -30,7 +30,6 @@ import ( "k8s.io/klog/v2" ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/cluster-api-provider-vsphere/hack/tools/pkg/boskos" "sigs.k8s.io/cluster-api-provider-vsphere/hack/tools/pkg/janitor" @@ -45,28 +44,16 @@ func init() { var ( dryRun bool - ipamNamespace string - maxAge time.Duration - // Flags to get folders and resource pools from Boskos. boskosHost string resourceOwner string resourceTypes []string - // Flags to directly specify folders and resource pools. - vsphereVMFolders []string - vsphereFolders []string - vsphereResourcePools []string ) func initFlags(fs *pflag.FlagSet) { // Note: Intentionally not adding a fallback value, so it is still possible to not use Boskos. fs.StringVar(&boskosHost, "boskos-host", os.Getenv("BOSKOS_HOST"), "Boskos server URL. Boskos is only used to retrieve resources if this flag is set.") - fs.StringVar(&resourceOwner, "resource-owner", "vsphere-janitor", "Owner for the resource.") + fs.StringVar(&resourceOwner, "resource-owner", "vsphere-janitor", "Owner for the resource during cleanup.") fs.StringArrayVar(&resourceTypes, "resource-type", []string{"vsphere-project-cluster-api-provider", "vsphere-project-cloud-provider", "vsphere-project-image-builder"}, "Types of the resources") - fs.StringArrayVar(&vsphereVMFolders, "vm-folder", []string{}, "Path to folders in vCenter to cleanup virtual machines.") - fs.StringArrayVar(&vsphereFolders, "folder", []string{}, "Path to a folder in vCenter to recursively cleanup empty subfolders.") - fs.StringArrayVar(&vsphereResourcePools, "resource-pool", []string{}, "Path to a resource pool in vCenter to recursively cleanup empty child resource pools.") - fs.StringVar(&ipamNamespace, "ipam-namespace", "", "Namespace for IPAddressClaim cleanup.") - fs.DurationVar(&maxAge, "max-age", time.Hour*12, "Maximum age of an object before it is getting deleted.") fs.BoolVar(&dryRun, "dry-run", false, "dry-run results in not deleting anything but printing the actions.") } @@ -90,6 +77,17 @@ func main() { func run(ctx context.Context) error { log := ctrl.LoggerFrom(ctx) + log.Info("Configured settings", "dry-run", dryRun) + + if boskosHost == "" { + return fmt.Errorf("--boskos-host must be set") + } + if resourceOwner == "" { + return fmt.Errorf("--resource-owner must be set") + } + if len(resourceTypes) == 0 { + return fmt.Errorf("--resource-type must be set") + } // Create clients for vSphere. vSphereClients, err := janitor.NewVSphereClients(ctx, janitor.NewVSphereClientsInput{ @@ -104,111 +102,94 @@ func run(ctx context.Context) error { } defer vSphereClients.Logout(ctx) - // Create controller-runtime client for IPAM. 
- restConfig, err := ctrl.GetConfig() - if err != nil { - return errors.Wrap(err, "unable to get kubeconfig") - } - ipamClient, err := client.New(restConfig, client.Options{Scheme: ipamScheme}) + log = log.WithValues("boskosHost", boskosHost, "resourceOwner", resourceOwner) + ctx = ctrl.LoggerInto(ctx, log) + log.Info("Getting resources to cleanup from Boskos") + client, err := boskos.NewClient(resourceOwner, boskosHost) if err != nil { - return errors.Wrap(err, "creating IPAM client") + return err } - if boskosHost != "" { - log = log.WithValues("boskosHost", boskosHost, "resourceOwner", resourceOwner) - log.Info("Getting resources to cleanup from Boskos") - client, err := boskos.NewClient(resourceOwner, boskosHost) + var allErrs []error + for _, resourceType := range resourceTypes { + log := log.WithValues("resourceType", resourceType) + ctx := ctrl.LoggerInto(ctx, log) + + metrics, err := client.Metric(resourceType) if err != nil { - return err + allErrs = append(allErrs, errors.Errorf("failed to get metrics before cleanup for resource type %q", resourceType)) + } else { + log.Info("State before cleanup", "resourceStates", metrics.Current, "resourceOwners", metrics.Owners) } - var allErrs []error - for _, resourceType := range resourceTypes { - // For all resource in state dirty that are currently not owned: - // * acquire the resource (and set it to state "cleaning") - // * try to clean up vSphere - // * if cleanup succeeds, release the resource as free - // * if cleanup fails, resource will stay in cleaning and become stale (reaper will move it to dirty) - for { - log.Info("Acquiring resource") - res, err := client.Acquire(resourceType, boskos.Dirty, boskos.Cleaning) - if err != nil { - // If we get an error on acquire we're done looping through all dirty resources - if errors.Is(err, boskos.ErrNotFound) { - // Note: ErrNotFound means there are no more dirty resources that are not owned. - log.Info("No more resources to cleanup") - break - } - allErrs = append(allErrs, errors.Wrapf(err, "failed to acquire resource")) + // For all resource in state dirty that are currently not owned: + // * acquire the resource (and set it to state "cleaning") + // * try to clean up vSphere + // * if cleanup succeeds, release the resource as free + // * if cleanup fails, resource will stay in cleaning and become stale (reaper will move it to dirty) + for { + log.Info("Acquiring resource") + res, err := client.Acquire(resourceType, boskos.Dirty, boskos.Cleaning) + if err != nil { + // If we get an error on acquire we're done looping through all dirty resources + if errors.Is(err, boskos.ErrNotFound) { + // Note: ErrNotFound means there are no more dirty resources that are not owned. 
+ log.Info("No more resources to cleanup") break } - log := log.WithValues("resourceName", res.Name) + allErrs = append(allErrs, errors.Wrapf(err, "failed to acquire resource")) + break + } + log := log.WithValues("resourceName", res.Name) + ctx := ctrl.LoggerInto(ctx, log) - if res.UserData == nil { - allErrs = append(allErrs, errors.Errorf("failed to get user data, resource %q is missing user data", res.Name)) - continue - } + if res.UserData == nil { + allErrs = append(allErrs, errors.Errorf("failed to get user data, resource %q is missing user data", res.Name)) + continue + } - folder, hasFolder := res.UserData.Load("folder") - if !hasFolder { - allErrs = append(allErrs, errors.Errorf("failed to get user data, resource %q is missing \"folder\" key", res.Name)) - continue - } - resourcePool, hasResourcePool := res.UserData.Load("resourcePool") - if !hasResourcePool { - allErrs = append(allErrs, errors.Errorf("failed to get user data, resource %q is missing \"resourcePool\" key", res.Name)) - continue - } + folder, hasFolder := res.UserData.Load("folder") + if !hasFolder { + allErrs = append(allErrs, errors.Errorf("failed to get user data, resource %q is missing \"folder\" key", res.Name)) + continue + } + resourcePool, hasResourcePool := res.UserData.Load("resourcePool") + if !hasResourcePool { + allErrs = append(allErrs, errors.Errorf("failed to get user data, resource %q is missing \"resourcePool\" key", res.Name)) + continue + } - // Delete all VMs created up until now. - maxCreationDate := time.Now() - j := janitor.NewJanitor(vSphereClients, nil, maxCreationDate, "", false) + j := janitor.NewJanitor(vSphereClients, false) - log.Info("Cleaning up vSphere") - if err := j.CleanupVSphere(ctx, []string{folder.(string)}, []string{resourcePool.(string)}, []string{folder.(string)}, false); err != nil { - log.Info("Cleaning up vSphere failed") + log.Info("Cleaning up vSphere") + if err := j.CleanupVSphere(ctx, []string{folder.(string)}, []string{resourcePool.(string)}, []string{folder.(string)}, false); err != nil { + log.Info("Cleaning up vSphere failed") - // Intentionally keep this resource in cleaning state. The reaper will move it from cleaning to dirty - // and we'll retry the cleanup. - // If we move it to dirty here, the for loop will pick it up again, and we get stuck in an infinite loop. - allErrs = append(allErrs, errors.Wrapf(err, "cleaning up vSphere failed, resource %q will now become stale", res.Name)) - continue - } - log.Info("Cleaning up vSphere succeeded") + // Intentionally keep this resource in cleaning state. The reaper will move it from cleaning to dirty + // and we'll retry the cleanup. + // If we move it to dirty here, the for loop will pick it up again, and we get stuck in an infinite loop. + allErrs = append(allErrs, errors.Wrapf(err, "cleaning up vSphere failed, resource %q will now become stale", res.Name)) + continue + } + log.Info("Cleaning up vSphere succeeded") - // Try to release resource as free. - log.Info("Releasing resource as free") - if releaseErr := client.Release(res.Name, boskos.Free); releaseErr != nil { - allErrs = append(allErrs, errors.Wrapf(releaseErr, "cleaning up vSphere succeeded and releasing resource as free failed, resource %q will now become stale", res.Name)) - } - log.Info("Releasing resource as free succeeded") + // Try to release resource as free. 
+ log.Info("Releasing resource as free") + if releaseErr := client.Release(res.Name, boskos.Free); releaseErr != nil { + allErrs = append(allErrs, errors.Wrapf(releaseErr, "cleaning up vSphere succeeded and releasing resource as free failed, resource %q will now become stale", res.Name)) } + log.Info("Releasing resource as free succeeded") } - if len(allErrs) > 0 { - return errors.Wrap(kerrors.NewAggregate(allErrs), "cleaning up Boskos resources") - } - } - - // Note: The following will be deleted once we migrated all repos to Boskos. - maxCreationDate := time.Now().Add(-maxAge) - janitor := janitor.NewJanitor(vSphereClients, ipamClient, maxCreationDate, ipamNamespace, dryRun) - log.Info("Configured settings", "dry-run", dryRun) - log.Info("Configured settings", "folders", vsphereFolders) - log.Info("Configured settings", "vm-folders", vsphereVMFolders) - log.Info("Configured settings", "resource-pools", vsphereResourcePools) - log.Info("Configured settings", "ipam-namespace", ipamNamespace) - log.Info("Configured settings", "max-age", maxAge) - log.Info("Configured settings", "janitor.maxCreationDate", maxCreationDate) - - // First cleanup old vms and other vSphere resources to free up IPAddressClaims or cluster modules which are still in-use. - if err := janitor.CleanupVSphere(ctx, vsphereFolders, vsphereResourcePools, vsphereVMFolders, false); err != nil { - return errors.Wrap(err, "cleaning up vSphere") + metrics, err = client.Metric(resourceType) + if err != nil { + allErrs = append(allErrs, errors.Errorf("failed to get metrics after cleanup for resource type %q", resourceType)) + } else { + log.Info("State after cleanup", "resourceOwners", metrics.Owners, "resourceStates", metrics.Current) + } } - - // Second cleanup IPAddressClaims. - if err := janitor.DeleteIPAddressClaims(ctx); err != nil { - return errors.Wrap(err, "cleaning up IPAddressClaims") + if len(allErrs) > 0 { + return errors.Wrap(kerrors.NewAggregate(allErrs), "cleaning up Boskos resources") } return nil diff --git a/hack/tools/pkg/janitor/janitor.go b/hack/tools/pkg/janitor/janitor.go index e089e93be7..1da63842f7 100644 --- a/hack/tools/pkg/janitor/janitor.go +++ b/hack/tools/pkg/janitor/janitor.go @@ -23,7 +23,6 @@ import ( "slices" "sort" "strings" - "time" "github.com/pkg/errors" "github.com/vmware/govmomi/object" @@ -31,30 +30,21 @@ import ( "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" kerrors "k8s.io/apimachinery/pkg/util/errors" - "k8s.io/klog/v2" - ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" ctrl "sigs.k8s.io/controller-runtime" - "sigs.k8s.io/controller-runtime/pkg/client" ) // NewJanitor creates a new Janitor. -func NewJanitor(vSphereClients *VSphereClients, ipamClient client.Client, maxCreationDate time.Time, ipamNamespace string, dryRun bool) *Janitor { +func NewJanitor(vSphereClients *VSphereClients, dryRun bool) *Janitor { return &Janitor{ - dryRun: dryRun, - ipamClient: ipamClient, - ipamNamespace: ipamNamespace, - maxCreationDate: maxCreationDate, - vSphereClients: vSphereClients, + dryRun: dryRun, + vSphereClients: vSphereClients, } } // Janitor implements a janitor for vSphere. 
type Janitor struct { - dryRun bool - ipamClient client.Client - ipamNamespace string - maxCreationDate time.Time - vSphereClients *VSphereClients + dryRun bool + vSphereClients *VSphereClients } type virtualMachine struct { @@ -108,8 +98,7 @@ func (s *Janitor) CleanupVSphere(ctx context.Context, folders, resourcePools, vm return nil } -// deleteVSphereVMs deletes all VSphereVMs in a given folder in vSphere if their creation -// timestamp is before the janitor's configured maxCreationDate. +// deleteVSphereVMs deletes all VSphereVMs in a given folder in vSphere. func (s *Janitor) deleteVSphereVMs(ctx context.Context, folder string) error { log := ctrl.LoggerFrom(ctx).WithName("vSphereVMs").WithValues("folder", folder) ctx = ctrl.LoggerInto(ctx, log) @@ -143,16 +132,12 @@ func (s *Janitor) deleteVSphereVMs(ctx context.Context, folder string) error { vmsToDeleteAndPoweroff := []*virtualMachine{} vmsToDelete := []*virtualMachine{} - // Filter out vms we don't have to cleanup depending on s.maxCreationDate. + // Figure out which VMs to delete and which to power off and delete. for _, managedObjectVM := range managedObjectVMs { if managedObjectVM.Summary.Config.Template { // Skip templates for deletion. continue } - if managedObjectVM.Config.CreateDate.After(s.maxCreationDate) { - // Ignore vms created after maxCreationDate - continue - } vm := &virtualMachine{ managedObject: managedObjectVM, @@ -216,7 +201,6 @@ func (s *Janitor) deleteVSphereVMs(ctx context.Context, folder string) error { // contain any virtual machine. // An object only gets deleted if: // * it does not have any children of a different type -// * the timestamp field's value is before s.maxCreationDate // If an object does not yet have a field, the janitor will add the field to it with the current timestamp as value. func (s *Janitor) deleteObjectChildren(ctx context.Context, inventoryPath string, objectType string) error { if !slices.Contains([]string{"ResourcePool", "Folder"}, objectType) { @@ -257,53 +241,12 @@ func (s *Janitor) deleteObjectChildren(ctx context.Context, inventoryPath string } } - // Get key for the deletion marker. - deletionMarkerKey, err := s.vSphereClients.FieldsManager.FindKey(ctx, vSphereDeletionMarkerName) - if err != nil { - if !errors.Is(err, object.ErrKeyNameNotFound) { - return errors.Wrapf(err, "finding custom field %q", vSphereDeletionMarkerName) - } - - // In case of ErrKeyNameNotFound we will create the deletionMarker but only if - // we are not on dryRun. - log.Info("Creating the deletion field") - - if !s.dryRun { - field, err := s.vSphereClients.FieldsManager.Add(ctx, vSphereDeletionMarkerName, "ManagedEntity", nil, nil) - if err != nil { - return errors.Wrapf(err, "creating custom field %q", vSphereDeletionMarkerName) - } - deletionMarkerKey = field.Key - } - } - - objectsToMark := []*managedElement{} objectsToDelete := []*managedElement{} - // Filter elements and collect two groups: - // * objects to add the timestamp field - // * objects to destroy + // Filter elements and collect objects to destroy. for i := range managedEntities { managedEntity := managedEntities[i] - // We mark any object we find with a timestamp to determine the first time we did see this item. - // This is used as replacement for the non-existing CreationTimestamp on objects. - timestamp, err := getDeletionMarkerTimestamp(deletionMarkerKey, managedEntity.entity.Value) - if err != nil { - return err - } - // If no timestamp was found: queue it to get marked. 
- if timestamp == nil { - objectsToMark = append(objectsToMark, managedEntity) - continue - } - - // Filter out objects we don't have to cleanup depending on s.maxCreationDate. - if timestamp.After(s.maxCreationDate) { - log.Info("Skipping deletion of object: marked timestamp does not exceed maxCreationDate", "timestamp", timestamp, "inventoryPath", managedEntity.element.Path) - continue - } - // Filter out objects which have children. if hasChildren[managedEntity.element.Path] { log.Info("Skipping deletion of object: object has child objects of a different type", "inventoryPath", managedEntity.element.Path) @@ -313,20 +256,6 @@ func (s *Janitor) deleteObjectChildren(ctx context.Context, inventoryPath string objectsToDelete = append(objectsToDelete, managedEntity) } - for i := range objectsToMark { - managedElement := objectsToMark[i] - log.Info("Marking resource object for deletion in vSphere", objectType, managedElement.element.Path) - - if s.dryRun { - // Skipping actual mark on dryRun. - continue - } - - if err := s.vSphereClients.FieldsManager.Set(ctx, managedElement.entity.Reference(), deletionMarkerKey, time.Now().Format(time.RFC3339)); err != nil { - return errors.Wrapf(err, "setting field %s on object %s", vSphereDeletionMarkerName, managedElement.element.Path) - } - } - // sort objects to delete so children are deleted before parents sort.Slice(objectsToDelete, func(i, j int) bool { a := objectsToDelete[i] @@ -358,44 +287,6 @@ func (s *Janitor) deleteObjectChildren(ctx context.Context, inventoryPath string return nil } -// DeleteIPAddressClaims deletes IPAddressClaims. -func (s *Janitor) DeleteIPAddressClaims(ctx context.Context) error { - log := ctrl.LoggerFrom(ctx).WithName("IPAddressClaims") - ctrl.LoggerInto(ctx, log) - log.Info("Deleting IPAddressClaims") - - // List all existing IPAddressClaims - ipAddressClaims := &ipamv1.IPAddressClaimList{} - if err := s.ipamClient.List(ctx, ipAddressClaims, - client.InNamespace(s.ipamNamespace), - ); err != nil { - return err - } - - errList := []error{} - - for _, ipAddressClaim := range ipAddressClaims.Items { - ipAddressClaim := ipAddressClaim - // Skip IPAddressClaims which got created after maxCreationDate. - if ipAddressClaim.CreationTimestamp.After(s.maxCreationDate) { - continue - } - - log.Info("Deleting IPAddressClaim", "IPAddressClaim", klog.KObj(&ipAddressClaim)) - - if s.dryRun { - // Skipping actual deletion on dryRun. 
- continue - } - - if err := s.ipamClient.Delete(ctx, &ipAddressClaim); err != nil { - errList = append(errList, err) - } - } - - return kerrors.NewAggregate(errList) -} - func (s *Janitor) deleteVSphereClusterModules(ctx context.Context) error { log := ctrl.LoggerFrom(ctx).WithName("vSphere cluster modules") ctrl.LoggerInto(ctx, log) diff --git a/hack/tools/pkg/janitor/janitor_test.go b/hack/tools/pkg/janitor/janitor_test.go index a604127142..b6d51611a1 100644 --- a/hack/tools/pkg/janitor/janitor_test.go +++ b/hack/tools/pkg/janitor/janitor_test.go @@ -23,7 +23,6 @@ import ( "path" "strings" "testing" - "time" "github.com/onsi/gomega" "github.com/onsi/gomega/gbytes" @@ -78,7 +77,7 @@ func setup(ctx context.Context, t *testing.T) (*VSphereClients, *vcsim.Simulator return clients, vcsim } -func setupTestCase(g *gomega.WithT, sim *vcsim.Simulator, objects []*vcsimObject) (string, map[string]bool) { +func setupTestCase(g *gomega.WithT, sim *vcsim.Simulator, objects []*vcsimObject) string { g.THelper() relativePath := rand.String(10) @@ -91,15 +90,12 @@ func setupTestCase(g *gomega.WithT, sim *vcsim.Simulator, objects []*vcsimObject g.Expect(baseFolder.Create(sim, relativePath)).To(gomega.Succeed()) g.Expect(baseDatastore.Create(sim, relativePath)).To(gomega.Succeed()) - createdObjects := map[string]bool{} - // Create objects for _, object := range objects { - createdObjects[path.Join(object.objectType, object.pathSuffix)] = true g.Expect(object.Create(sim, relativePath)).To(gomega.Succeed()) } - return relativePath, createdObjects + return relativePath } const ( @@ -114,35 +110,19 @@ func Test_janitor_deleteVSphereVMs(t *testing.T) { // Initialize and start vcsim clients, sim := setup(ctx, t) - deleteAll := time.Now().Add(time.Hour * 1) - deleteNone := time.Now() - tests := []struct { - name string - objects []*vcsimObject - maxCreationDate time.Time - wantErr bool - want map[string]bool + name string + objects []*vcsimObject + wantErr bool + want map[string]bool }{ { name: "delete all VMs", objects: []*vcsimObject{ vcsimVirtualMachine("foo"), }, - maxCreationDate: deleteAll, - wantErr: false, - want: nil, - }, - { - name: "delete no VMs", - objects: []*vcsimObject{ - vcsimVirtualMachine("foo"), - }, - maxCreationDate: deleteNone, - wantErr: false, - want: map[string]bool{ - "VirtualMachine/foo": true, - }, + wantErr: false, + want: nil, }, { name: "recursive vm deletion", @@ -157,8 +137,7 @@ func Test_janitor_deleteVSphereVMs(t *testing.T) { vcsimVirtualMachine("a/bar"), vcsimVirtualMachine("a/b/c/foobar"), }, - maxCreationDate: deleteAll, - wantErr: false, + wantErr: false, want: map[string]bool{ "ResourcePool/a": true, "ResourcePool/a/b": true, @@ -173,12 +152,11 @@ func Test_janitor_deleteVSphereVMs(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := gomega.NewWithT(t) - relativePath, _ := setupTestCase(g, sim, tt.objects) + relativePath := setupTestCase(g, sim, tt.objects) s := &Janitor{ - dryRun: false, - maxCreationDate: tt.maxCreationDate, - vSphereClients: clients, + dryRun: false, + vSphereClients: clients, } // use folder created for this test case as inventoryPath @@ -224,7 +202,7 @@ func Test_janitor_deleteObjectChildren(t *testing.T) { objectType: "ResourcePool", objects: []*vcsimObject{ vcsimResourcePool("a"), - vcsimResourcePool("b"), + vcsimResourcePool("b"), // this one will be deleted vcsimFolder("a"), vcsimVirtualMachine("a/foo"), }, @@ -241,7 +219,7 @@ func Test_janitor_deleteObjectChildren(t *testing.T) { objects: []*vcsimObject{ vcsimResourcePool("a"), 
vcsimFolder("a"), - vcsimFolder("b"), + vcsimFolder("b"), // this one will be deleted vcsimVirtualMachine("a/foo"), }, want: map[string]bool{ @@ -309,26 +287,18 @@ func Test_janitor_deleteObjectChildren(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := gomega.NewWithT(t) - relativePath, wantMarkedObjects := setupTestCase(g, sim, tt.objects) + relativePath := setupTestCase(g, sim, tt.objects) inventoryPath := path.Join(tt.basePath, relativePath) s := &Janitor{ - dryRun: false, - maxCreationDate: time.Now().Add(time.Hour * 1), - vSphereClients: clients, + dryRun: false, + vSphereClients: clients, } - // Run first iteration which should only tag the resource pools with a timestamp. g.Expect(s.deleteObjectChildren(ctx, inventoryPath, tt.objectType)).To(gomega.Succeed()) existingObjects, err := recursiveListFoldersAndResourcePools(ctx, relativePath, clients.Govmomi, clients.Finder, clients.ViewManager) g.Expect(err).ToNot(gomega.HaveOccurred()) - g.Expect(existingObjects).To(gomega.BeEquivalentTo(wantMarkedObjects)) - - // Run second iteration which should destroy the resource pools with a timestamp. - g.Expect(s.deleteObjectChildren(ctx, inventoryPath, tt.objectType)).To(gomega.Succeed()) - existingObjects, err = recursiveListFoldersAndResourcePools(ctx, relativePath, clients.Govmomi, clients.Finder, clients.ViewManager) - g.Expect(err).ToNot(gomega.HaveOccurred()) if tt.want != nil { g.Expect(existingObjects).To(gomega.BeEquivalentTo(tt.want)) } else { @@ -348,36 +318,27 @@ func Test_janitor_CleanupVSphere(t *testing.T) { // Initialize and start vcsim clients, sim := setup(ctx, t) - deleteAll := time.Now().Add(time.Hour * 1) - tests := []struct { - name string - dryRun bool - maxCreationDate time.Time - objects []*vcsimObject - wantAfterFirstRun map[string]bool - wantAfterSecondRun map[string]bool + name string + dryRun bool + objects []*vcsimObject + want map[string]bool }{ { - name: "no-op", - dryRun: false, - maxCreationDate: deleteAll, - objects: nil, - wantAfterFirstRun: map[string]bool{}, - wantAfterSecondRun: map[string]bool{}, + name: "no-op", + dryRun: false, + objects: nil, + want: map[string]bool{}, }, { - name: "dryRun: no-op", - dryRun: true, - maxCreationDate: deleteAll, - objects: nil, - wantAfterFirstRun: map[string]bool{}, - wantAfterSecondRun: map[string]bool{}, + name: "dryRun: no-op", + dryRun: true, + objects: nil, + want: map[string]bool{}, }, { - name: "delete everything", - dryRun: false, - maxCreationDate: deleteAll, + name: "delete everything", + dryRun: false, objects: []*vcsimObject{ vcsimFolder("a"), vcsimResourcePool("a"), @@ -385,18 +346,11 @@ func Test_janitor_CleanupVSphere(t *testing.T) { vcsimFolder("c"), vcsimResourcePool("c"), }, - wantAfterFirstRun: map[string]bool{ - "Folder/a": true, - "Folder/c": true, - "ResourcePool/a": true, - "ResourcePool/c": true, - }, - wantAfterSecondRun: map[string]bool{}, + want: map[string]bool{}, }, { - name: "dryRun: would delete everything", - dryRun: true, - maxCreationDate: deleteAll, + name: "dryRun: would delete everything", + dryRun: true, objects: []*vcsimObject{ vcsimFolder("a"), vcsimResourcePool("a"), @@ -404,14 +358,7 @@ func Test_janitor_CleanupVSphere(t *testing.T) { vcsimFolder("c"), vcsimResourcePool("c"), }, - wantAfterFirstRun: map[string]bool{ - "Folder/a": true, - "Folder/c": true, - "ResourcePool/a": true, - "ResourcePool/c": true, - "VirtualMachine/a/b": true, - }, - wantAfterSecondRun: map[string]bool{ + want: map[string]bool{ "Folder/a": true, "Folder/c": true, "ResourcePool/a": true, @@ 
-424,12 +371,11 @@ func Test_janitor_CleanupVSphere(t *testing.T) { t.Run(tt.name, func(t *testing.T) { g := gomega.NewWithT(t) - relativePath, _ := setupTestCase(g, sim, tt.objects) + relativePath := setupTestCase(g, sim, tt.objects) s := &Janitor{ - dryRun: tt.dryRun, - maxCreationDate: tt.maxCreationDate, - vSphereClients: clients, + dryRun: tt.dryRun, + vSphereClients: clients, } folder := vcsimFolder("").Path(relativePath) @@ -441,12 +387,7 @@ func Test_janitor_CleanupVSphere(t *testing.T) { g.Expect(s.CleanupVSphere(ctx, folders, resourcePools, folders, false)).To(gomega.Succeed()) existingObjects, err := recursiveListFoldersAndResourcePools(ctx, relativePath, clients.Govmomi, clients.Finder, clients.ViewManager) g.Expect(err).ToNot(gomega.HaveOccurred()) - g.Expect(existingObjects).To(gomega.BeEquivalentTo(tt.wantAfterFirstRun)) - - g.Expect(s.CleanupVSphere(ctx, folders, resourcePools, folders, false)).To(gomega.Succeed()) - existingObjects, err = recursiveListFoldersAndResourcePools(ctx, relativePath, clients.Govmomi, clients.Finder, clients.ViewManager) - g.Expect(err).ToNot(gomega.HaveOccurred()) - g.Expect(existingObjects).To(gomega.BeEquivalentTo(tt.wantAfterSecondRun)) + g.Expect(existingObjects).To(gomega.BeEquivalentTo(tt.want)) // Ensure the parent object still exists assertObjectExists(ctx, g, clients.Finder, folder) diff --git a/hack/tools/pkg/janitor/vsphere.go b/hack/tools/pkg/janitor/vsphere.go index 3e64e720d7..46c51a5c09 100644 --- a/hack/tools/pkg/janitor/vsphere.go +++ b/hack/tools/pkg/janitor/vsphere.go @@ -18,9 +18,7 @@ package janitor import ( "context" - "fmt" "net/url" - "time" "github.com/pkg/errors" "github.com/vmware/govmomi" @@ -34,7 +32,6 @@ import ( "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/soap" - "github.com/vmware/govmomi/vim25/types" ctrl "sigs.k8s.io/controller-runtime" ) @@ -124,8 +121,6 @@ func NewVSphereClients(ctx context.Context, input NewVSphereClientsInput) (*VSph }, nil } -const vSphereDeletionMarkerName = "capv-janitor-deletion-marker" - func waitForTasksFinished(ctx context.Context, tasks []*object.Task, ignoreErrors bool) error { for _, t := range tasks { if err := t.Wait(ctx); !ignoreErrors && err != nil { @@ -135,31 +130,6 @@ func waitForTasksFinished(ctx context.Context, tasks []*object.Task, ignoreError return nil } -func getDeletionMarkerTimestamp(key int32, values []types.BaseCustomFieldValue) (*time.Time, error) { - // Find the value for the key - var b *types.BaseCustomFieldValue - for i := range values { - if values[i].GetCustomFieldValue().Key != key { - continue - } - b = &values[i] - break - } - - // Key does not exist - if b == nil { - return nil, nil - } - - value, ok := (*b).(*types.CustomFieldStringValue) - if !ok { - return nil, fmt.Errorf("cannot typecast %t to *types.CustomFieldStringValue", *b) - } - - t, err := time.Parse(time.RFC3339, value.Value) - return &t, err -} - type managedElement struct { entity mo.ManagedEntity element *list.Element diff --git a/test/e2e/e2e_suite_test.go b/test/e2e/e2e_suite_test.go index 1e8f623682..37b078f2ba 100644 --- a/test/e2e/e2e_suite_test.go +++ b/test/e2e/e2e_suite_test.go @@ -33,7 +33,6 @@ import ( storagev1 "k8s.io/api/storage/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/klog/v2" - ipamv1 "sigs.k8s.io/cluster-api/exp/ipam/api/v1beta1" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/test/framework/bootstrap" "sigs.k8s.io/cluster-api/test/framework/clusterctl" @@ -335,8 +334,6 @@ var _ = 
SynchronizedAfterSuite(func() { func initScheme() *runtime.Scheme { sc := runtime.NewScheme() framework.TryAddDefaultSchemes(sc) - // TODO: should probably be added to TryAddDefaultSchemes in core CAPI. - _ = ipamv1.AddToScheme(sc) if testTarget == VCSimTestTarget { _ = vcsimv1.AddToScheme(sc) diff --git a/test/go.mod b/test/go.mod index d265f571b4..1b51945c20 100644 --- a/test/go.mod +++ b/test/go.mod @@ -2,9 +2,9 @@ module sigs.k8s.io/cluster-api-provider-vsphere/test go 1.22.0 -replace sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v1.7.0-rc.0.0.20240610140608-2e3860ac7408 +replace sigs.k8s.io/cluster-api => sigs.k8s.io/cluster-api v1.7.0-rc.0.0.20240617064349-5b6043e1b6ec -replace sigs.k8s.io/cluster-api/test => sigs.k8s.io/cluster-api/test v1.7.0-rc.0.0.20240610140608-2e3860ac7408 +replace sigs.k8s.io/cluster-api/test => sigs.k8s.io/cluster-api/test v1.7.0-rc.0.0.20240617064349-5b6043e1b6ec replace sigs.k8s.io/cluster-api-provider-vsphere => ../ @@ -28,10 +28,10 @@ require ( k8s.io/component-base v0.30.1 k8s.io/klog/v2 v2.120.1 k8s.io/utils v0.0.0-20231127182322-b307cd553661 - sigs.k8s.io/cluster-api v1.7.0-rc.0.0.20240610140608-2e3860ac7408 + sigs.k8s.io/cluster-api v1.7.0-rc.0.0.20240617064349-5b6043e1b6ec sigs.k8s.io/cluster-api-provider-vsphere v0.0.0-00010101000000-000000000000 sigs.k8s.io/cluster-api/test v0.0.0-00010101000000-000000000000 - sigs.k8s.io/controller-runtime v0.18.3 + sigs.k8s.io/controller-runtime v0.18.4 sigs.k8s.io/yaml v1.4.0 ) @@ -58,7 +58,7 @@ require ( github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/docker v26.1.3+incompatible // indirect + github.com/docker/docker v26.1.4+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.4.0 // indirect github.com/drone/envsubst/v2 v2.0.0-20210730161058-179042472c46 // indirect @@ -145,7 +145,7 @@ require ( go.uber.org/zap v1.27.0 // indirect golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect golang.org/x/net v0.26.0 // indirect - golang.org/x/oauth2 v0.20.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect golang.org/x/sync v0.7.0 // indirect golang.org/x/sys v0.21.0 // indirect golang.org/x/term v0.21.0 // indirect @@ -155,7 +155,7 @@ require ( gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c // indirect - google.golang.org/grpc v1.62.1 // indirect + google.golang.org/grpc v1.62.2 // indirect google.golang.org/protobuf v1.33.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect diff --git a/test/go.sum b/test/go.sum index 683774ec6c..0ce79142a5 100644 --- a/test/go.sum +++ b/test/go.sum @@ -134,8 +134,8 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo= -github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker 
v26.1.4+incompatible h1:vuTpXDuoga+Z38m1OZHzl7NKisKWaWlhjQk7IDPSLsU= +github.com/docker/docker v26.1.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= @@ -784,8 +784,8 @@ golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= -golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -980,8 +980,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= -google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/grpc v1.62.2 h1:iEIj1U5qjyBjzkM5nk3Fq+S1IbjbXSyqeULZ1Nfo4AA= +google.golang.org/grpc v1.62.2/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1093,13 +1093,13 @@ rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.0 h1:Tc9rS7JJoZ9sl3OpL4842oIk6lH7gWBb0JOmJ0ute7M= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.30.0/go.mod h1:1ewhL9l1gkPcU/IU/6rFYfikf+7Y5imWv7ARVbBOzNs= -sigs.k8s.io/cluster-api v1.7.0-rc.0.0.20240610140608-2e3860ac7408 h1:2YkK+3O1A7HsQV9Cw8ncH+DGa1GuSFwv6lbl1bOyZgI= -sigs.k8s.io/cluster-api v1.7.0-rc.0.0.20240610140608-2e3860ac7408/go.mod h1:qN/cGR3Ww2GlMTcM47Abeob4SvpkN/8II439eNbPz6w= -sigs.k8s.io/cluster-api/test v1.7.0-rc.0.0.20240610140608-2e3860ac7408 h1:4nueeeqlF2wivPMygTdutuwuNEv91strso9RSF4QdIw= -sigs.k8s.io/cluster-api/test v1.7.0-rc.0.0.20240610140608-2e3860ac7408/go.mod h1:2X3fnNufQugbIGd0t3A38B3FdFYm2a7T8F9HbbGXRkY= +sigs.k8s.io/cluster-api 
v1.7.0-rc.0.0.20240617064349-5b6043e1b6ec h1:ikgHzieJg7LTJjvL/o4gcs8pcGBLwXyvuRvrC4Uj7kk= +sigs.k8s.io/cluster-api v1.7.0-rc.0.0.20240617064349-5b6043e1b6ec/go.mod h1:tDxEz5a0levoOzLKny7JMW5S7g2P4fKYHNOMsS9IH/c= +sigs.k8s.io/cluster-api/test v1.7.0-rc.0.0.20240617064349-5b6043e1b6ec h1:D4aMcg7ujLXt+2FatZBRdF8cwXrxmcb4+8PBNyD/fks= +sigs.k8s.io/cluster-api/test v1.7.0-rc.0.0.20240617064349-5b6043e1b6ec/go.mod h1:auf4+RhO9P5jR+6xetig4ClXfp0UmALrhV34/WVbja0= sigs.k8s.io/controller-runtime v0.9.0/go.mod h1:TgkfvrhhEw3PlI0BRL/5xM+89y3/yc0ZDfdbTl84si8= -sigs.k8s.io/controller-runtime v0.18.3 h1:B5Wmmo8WMWK7izei+2LlXLVDGzMwAHBNLX68lwtlSR4= -sigs.k8s.io/controller-runtime v0.18.3/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= +sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= +sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kind v0.23.0 h1:8fyDGWbWTeCcCTwA04v4Nfr45KKxbSPH1WO9K+jVrBg=