From 8dbaabeaae41934cb7316b2618d1a965004957af Mon Sep 17 00:00:00 2001 From: kos-team Date: Mon, 23 Sep 2024 22:36:48 -0500 Subject: [PATCH 1/4] Fix advanced TidbCluster example (#5743) --- examples/advanced/tidb-cluster.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/advanced/tidb-cluster.yaml b/examples/advanced/tidb-cluster.yaml index 618ccd9cff..cf8d36c83c 100644 --- a/examples/advanced/tidb-cluster.yaml +++ b/examples/advanced/tidb-cluster.yaml @@ -824,7 +824,7 @@ spec: # app.kubernetes.io/component: pump # annotations: # node.kubernetes.io/instance-type: some-vm-type - # tolerations: {} + # tolerations: [] # configUpdateStrategy: RollingUpdate # statefulSetUpdateStrategy: RollingUpdate # podSecurityContext: {} @@ -868,7 +868,7 @@ spec: # app.kubernetes.io/component: ticdc # annotations: # node.kubernetes.io/instance-type: some-vm-type - # tolerations: {} + # tolerations: [] # configUpdateStrategy: RollingUpdate # statefulSetUpdateStrategy: RollingUpdate # podSecurityContext: {} @@ -918,7 +918,7 @@ spec: # app.kubernetes.io/component: tiflash # annotations: # node.kubernetes.io/instance-type: some-vm-type - # tolerations: {} + # tolerations: [] # configUpdateStrategy: RollingUpdate # statefulSetUpdateStrategy: RollingUpdate # podSecurityContext: {} @@ -970,7 +970,7 @@ spec: # # configure the configuration file for TiFlash Proxy process # proxy: | # [security] - # cert-allowed-cn = "CNNAME" + # cert-allowed-cn = ["CNNAME"] # # TopologySpreadConstraints for pod scheduling, will overwrite the cluster level spread constraints setting # # Ref: pkg/apis/pingcap/v1alpha1/types.go#TopologySpreadConstraint # topologySpreadConstraints: From 317718971076d38d7d88a4a35c2eeb219061c3f2 Mon Sep 17 00:00:00 2001 From: Xuecheng Zhang Date: Tue, 24 Sep 2024 15:23:02 +0800 Subject: [PATCH 2/4] e2e: bump TiDB and MySQL version (#5697) --- Makefile | 4 +- .../manifests/local-volume-provisioner.yaml | 4 +- .../local-pv/local-volume-provisioner.yaml | 4 +- manifests/eks/local-volume-provisioner.yaml | 4 +- .../local-ssd-provision.yaml | 4 +- .../local-dind/local-volume-provisioner.yaml | 4 +- tests/dm.go | 2 +- tests/e2e/br/br.go | 16 +++++-- tests/e2e/br/framework/br/data.go | 4 ++ tests/e2e/br/utils/s3/minio.go | 2 +- tests/e2e/tidbcluster/across-kubernetes.go | 13 +++++- tests/e2e/tidbcluster/serial.go | 4 +- tests/e2e/tidbcluster/tidbcluster.go | 46 ++++++++----------- tests/e2e/util/image/image.go | 15 +++--- tests/e2e/util/portforward/portforward.go | 1 + tests/images/e2e/Dockerfile | 16 ++++--- tests/pkg/fixture/fixture.go | 24 ++++++---- 17 files changed, 97 insertions(+), 70 deletions(-) diff --git a/Makefile b/Makefile index 4bfc282ac8..d7f7befcf7 100644 --- a/Makefile +++ b/Makefile @@ -33,7 +33,7 @@ TEST_COVER_PACKAGES := go list ./cmd/... ./pkg/... $(foreach mod, $(GO_SUBMODULE # NOTE: coverage report generated for E2E tests (with `-c`) may not stable, see # https://github.com/golang/go/issues/23883#issuecomment-381766556 -GO_TEST := $(GO) test -cover -covermode=atomic -coverpkg=$$($(TEST_COVER_PACKAGES)) +GO_TEST := CGO_ENABLED=0 $(GO) test -cover -covermode=atomic -coverpkg=$$($(TEST_COVER_PACKAGES)) default: build @@ -160,7 +160,7 @@ endif cp -r charts/tidb-operator tests/images/e2e cp -r charts/tidb-drainer tests/images/e2e cp -r manifests tests/images/e2e - docker build -t "${DOCKER_REPO}/tidb-operator-e2e:${IMAGE_TAG}" tests/images/e2e + docker build -t "${DOCKER_REPO}/tidb-operator-e2e:${IMAGE_TAG}" --build-arg=TARGETARCH=$(GOARCH) tests/images/e2e e2e-build: ## Build binaries for test $(GO_BUILD) -ldflags '$(LDFLAGS)' -o tests/images/e2e/bin/ginkgo github.com/onsi/ginkgo/ginkgo diff --git a/deploy/modules/aws/tidb-operator/manifests/local-volume-provisioner.yaml b/deploy/modules/aws/tidb-operator/manifests/local-volume-provisioner.yaml index 0508fec16a..08fc2d7abb 100644 --- a/deploy/modules/aws/tidb-operator/manifests/local-volume-provisioner.yaml +++ b/deploy/modules/aws/tidb-operator/manifests/local-volume-provisioner.yaml @@ -44,7 +44,7 @@ spec: pingcap.com/aws-local-ssd: "true" serviceAccountName: local-storage-admin containers: - - image: "quay.io/external_storage/local-volume-provisioner:v2.3.2" + - image: "registry.k8s.io/sig-storage/local-volume-provisioner:v2.6.0" name: provisioner securityContext: privileged: true @@ -58,7 +58,7 @@ spec: fieldRef: fieldPath: metadata.namespace - name: JOB_CONTAINER_IMAGE - value: "quay.io/external_storage/local-volume-provisioner:v2.3.2" + value: "registry.k8s.io/sig-storage/local-volume-provisioner:v2.6.0" resources: requests: cpu: 100m diff --git a/examples/local-pv/local-volume-provisioner.yaml b/examples/local-pv/local-volume-provisioner.yaml index 0fb050a3d9..edb1ba5a7b 100644 --- a/examples/local-pv/local-volume-provisioner.yaml +++ b/examples/local-pv/local-volume-provisioner.yaml @@ -67,7 +67,7 @@ spec: spec: serviceAccountName: local-storage-admin containers: - - image: "quay.io/external_storage/local-volume-provisioner:v2.3.4" + - image: "registry.k8s.io/sig-storage/local-volume-provisioner:v2.6.0" name: provisioner securityContext: privileged: true @@ -81,7 +81,7 @@ spec: fieldRef: fieldPath: metadata.namespace - name: JOB_CONTAINER_IMAGE - value: "quay.io/external_storage/local-volume-provisioner:v2.3.4" + value: "registry.k8s.io/sig-storage/local-volume-provisioner:v2.6.0" resources: requests: cpu: 100m diff --git a/manifests/eks/local-volume-provisioner.yaml b/manifests/eks/local-volume-provisioner.yaml index 1bae235624..aba41effa9 100644 --- a/manifests/eks/local-volume-provisioner.yaml +++ b/manifests/eks/local-volume-provisioner.yaml @@ -92,7 +92,7 @@ spec: name: disks mountPropagation: Bidirectional containers: - - image: "quay.io/external_storage/local-volume-provisioner:v2.3.4" + - image: "registry.k8s.io/sig-storage/local-volume-provisioner:v2.6.0" name: provisioner securityContext: privileged: true @@ -106,7 +106,7 @@ spec: fieldRef: fieldPath: metadata.namespace - name: JOB_CONTAINER_IMAGE - value: "quay.io/external_storage/local-volume-provisioner:v2.3.4" + value: "registry.k8s.io/sig-storage/local-volume-provisioner:v2.6.0" resources: requests: cpu: 100m diff --git a/manifests/gke/local-ssd-provision/local-ssd-provision.yaml b/manifests/gke/local-ssd-provision/local-ssd-provision.yaml index 0798ba0413..7e92747f4a 100644 --- a/manifests/gke/local-ssd-provision/local-ssd-provision.yaml +++ b/manifests/gke/local-ssd-provision/local-ssd-provision.yaml @@ -184,7 +184,7 @@ spec: mount -U "$uuid" -t ext4 --target "$mnt_dir" --options "$mnt_opts" chmod a+w "$mnt_dir" containers: - - image: "quay.io/external_storage/local-volume-provisioner:v2.3.2" + - image: "registry.k8s.io/sig-storage/local-volume-provisioner:v2.6.0" name: provisioner securityContext: privileged: true @@ -205,7 +205,7 @@ spec: fieldRef: fieldPath: metadata.namespace - name: JOB_CONTAINER_IMAGE - value: "quay.io/external_storage/local-volume-provisioner:v2.3.2" + value: "registry.k8s.io/sig-storage/local-volume-provisioner:v2.6.0" volumeMounts: - mountPath: /etc/provisioner/config name: provisioner-config diff --git a/manifests/local-dind/local-volume-provisioner.yaml b/manifests/local-dind/local-volume-provisioner.yaml index ffc3c28342..e58077c1ed 100644 --- a/manifests/local-dind/local-volume-provisioner.yaml +++ b/manifests/local-dind/local-volume-provisioner.yaml @@ -39,7 +39,7 @@ spec: spec: serviceAccountName: local-storage-admin containers: - - image: "quay.io/external_storage/local-volume-provisioner:v2.3.4" + - image: "registry.k8s.io/sig-storage/local-volume-provisioner:v2.6.0" name: provisioner securityContext: privileged: true @@ -53,7 +53,7 @@ spec: fieldRef: fieldPath: metadata.namespace - name: JOB_CONTAINER_IMAGE - value: "quay.io/external_storage/local-volume-provisioner:v2.3.4" + value: "registry.k8s.io/sig-storage/local-volume-provisioner:v2.6.0" resources: requests: cpu: 100m diff --git a/tests/dm.go b/tests/dm.go index 30a4395e64..3fe4a6823b 100644 --- a/tests/dm.go +++ b/tests/dm.go @@ -60,7 +60,7 @@ const ( // DMMySQLSvcStsName is the upstream MySQL svc/sts name for DM E2E tests. DMMySQLSvcStsName = "dm-mysql" // DMMySQLImage is the upstream MySQL container image for DM E2E tests. - DMMySQLImage = "mysql:5.7" + DMMySQLImage = "mysql:8.0" // DMMySQLReplicas is the upstream MySQL instance number for DM E2E tests. // We use replicas as different MySQL instances. DMMySQLReplicas int32 = 2 diff --git a/tests/e2e/br/br.go b/tests/e2e/br/br.go index ac9a9eb6f2..58e5c1c81f 100644 --- a/tests/e2e/br/br.go +++ b/tests/e2e/br/br.go @@ -291,8 +291,8 @@ var _ = ginkgo.Describe("Backup and Restore", func() { utilginkgo.ContextWhenFocus("Specific Version", func() { cases := []*testcase{ - newTestCase(utilimage.TiDBV5x0x0, utilimage.TiDBLatest, typeBR), - newTestCase(utilimage.TiDBV5x0x2, utilimage.TiDBLatest, typeBR), + newTestCase(utilimage.TiDBV7x5x0, utilimage.TiDBLatest, typeBR), + newTestCase(utilimage.TiDBV7x5x3, utilimage.TiDBLatest, typeBR), } for i := range cases { tcase := cases[i] @@ -504,8 +504,12 @@ var _ = ginkgo.Describe("Backup and Restore", func() { // }) }) + // the following cases may encounter errors after restarting the backup pod: + // "there may be some backup files in the path already, please specify a correct backup directory" ginkgo.Context("Restart Backup by k8s Test", func() { ginkgo.It("delete backup pod and restart by k8s test", func() { + ginkgo.Skip("unstable case, after restart: there may be some backup files in the path already, please specify a correct backup directory") + backupClusterName := "delete-backup-pod-test" backupVersion := utilimage.TiDBLatest enableTLS := false @@ -566,6 +570,8 @@ var _ = ginkgo.Describe("Backup and Restore", func() { ginkgo.Context("Restart Backup by backoff retry policy Test", func() { ginkgo.It("kill backup pod and restart by backoff retry policy", func() { + ginkgo.Skip("unstable case, after restart: there may be some backup files in the path already, please specify a correct backup directory") + backupClusterName := "kill-backup-pod-test" backupVersion := utilimage.TiDBLatest enableTLS := false @@ -629,6 +635,8 @@ var _ = ginkgo.Describe("Backup and Restore", func() { }) ginkgo.It("kill backup pod and exceed maxRetryTimes", func() { + ginkgo.Skip("unstable case, after restart: there may be some backup files in the path already, please specify a correct backup directory") + backupClusterName := "kill-backup-pod-exceed-times-test" backupVersion := utilimage.TiDBLatest enableTLS := false @@ -708,6 +716,8 @@ var _ = ginkgo.Describe("Backup and Restore", func() { }) ginkgo.It("kill backup pod and exceed retryTimeout", func() { + ginkgo.Skip("unstable case, after restart: there may be some backup files in the path already, please specify a correct backup directory") + backupClusterName := "kill-backup-pod-exceed-timeout-test" backupVersion := utilimage.TiDBLatest enableTLS := false @@ -901,7 +911,7 @@ func getPDServiceResourceName(tcName string) string { func createTidbCluster(f *e2eframework.Framework, name string, version string, enableTLS bool, skipCA bool) error { ns := f.Namespace.Name // TODO: change to use tidbclusterutil like brutil - tc := fixture.GetTidbCluster(ns, name, version) + tc := fixture.GetTidbClusterWithoutPDMS(ns, name, version) tc.Spec.PD.Replicas = 1 tc.Spec.TiKV.Replicas = 1 tc.Spec.TiDB.Replicas = 1 diff --git a/tests/e2e/br/framework/br/data.go b/tests/e2e/br/framework/br/data.go index 2e9a9c1cb6..92315fb4f0 100644 --- a/tests/e2e/br/framework/br/data.go +++ b/tests/e2e/br/framework/br/data.go @@ -165,6 +165,10 @@ func GetRestore(ns, name, tcName, typ string, s3Config *v1alpha1.S3StorageProvid ClusterNamespace: ns, SendCredToTikv: &sendCredToTikv, CheckRequirements: pointer.BoolPtr(false), // workaround for https://docs.pingcap.com/tidb/stable/backup-and-restore-faq#why-does-br-report-new_collations_enabled_on_first_bootstrap-mismatch + Options: []string{ + // ref: https://docs.pingcap.com/tidb/stable/backup-and-restore-overview#version-compatibility + "--with-sys-table=false", + }, }, }, } diff --git a/tests/e2e/br/utils/s3/minio.go b/tests/e2e/br/utils/s3/minio.go index 5bccf192c8..26629999da 100644 --- a/tests/e2e/br/utils/s3/minio.go +++ b/tests/e2e/br/utils/s3/minio.go @@ -33,7 +33,7 @@ import ( const ( minioName = "minio" - minioImage = "minio/minio:RELEASE.2020-05-08T02-40-49Z" + minioImage = "minio/minio:RELEASE.2024-09-13T20-26-02Z" minioBucket = "local" // the bucket for e2e test minioSecret = "minio-secret" diff --git a/tests/e2e/tidbcluster/across-kubernetes.go b/tests/e2e/tidbcluster/across-kubernetes.go index f82be98b87..280dbac81a 100644 --- a/tests/e2e/tidbcluster/across-kubernetes.go +++ b/tests/e2e/tidbcluster/across-kubernetes.go @@ -279,9 +279,18 @@ var _ = ginkgo.Describe("[Across Kubernetes]", func() { ginkgo.By("Update pd's peerURL of cluster-1") pdAddr := fmt.Sprintf("%s:%d", localHost, localPort) var resp *pdutil.GetMembersResponse - err = retry.OnError(retry.DefaultRetry, func(e error) bool { return e != nil }, func() error { + err = wait.PollImmediate(5*time.Second, 5*time.Minute, func() (bool, error) { + // it seems the above `WaitForTidbClusterReady` may return before the pd server is ready + // so we need to retry here resp, err = pdutil.GetMembersV2(pdAddr) - return err + if err != nil { + log.Logf("failed to get pd members of cluster-1 %s/%s, %v", tc1.Namespace, tc1.Name, err) + return false, nil + } + if len(resp.Members) == 0 { + return false, nil + } + return true, nil }) framework.ExpectNoError(err, " failed to get pd members of cluster-1 %s/%s", tc1.Namespace, tc1.Name) for _, member := range resp.Members { diff --git a/tests/e2e/tidbcluster/serial.go b/tests/e2e/tidbcluster/serial.go index b268b27773..b091c73bc5 100644 --- a/tests/e2e/tidbcluster/serial.go +++ b/tests/e2e/tidbcluster/serial.go @@ -59,8 +59,8 @@ import ( ) const ( - OperatorLatestVersion string = "v1.5.0-beta.1" - OperatorPrevMajorVersion string = "v1.4.6" + OperatorLatestVersion string = "v1.6.0" + OperatorPrevMajorVersion string = "v1.5.4" ) // Serial specs describe tests which cannot run in parallel. diff --git a/tests/e2e/tidbcluster/tidbcluster.go b/tests/e2e/tidbcluster/tidbcluster.go index bfadfe2933..7402637aca 100644 --- a/tests/e2e/tidbcluster/tidbcluster.go +++ b/tests/e2e/tidbcluster/tidbcluster.go @@ -2021,6 +2021,16 @@ var _ = ginkgo.Describe("TiDBCluster", func() { utiltc.MustWaitForComponentPhase(cli, tc, v1alpha1.PDMemberType, v1alpha1.UpgradePhase, 3*time.Minute, time.Second*10) log.Logf("PD is in UpgradePhase") + ginkgo.By("Wait for TiKV to be in UpgradePhase") + utiltc.MustWaitForComponentPhase(cli, tc, v1alpha1.TiKVMemberType, v1alpha1.UpgradePhase, 3*time.Minute, time.Second*10) + log.Logf("TiKV is in UpgradePhase") + + ginkgo.By("Wait for TiDB to be in UpgradePhase") + utiltc.MustWaitForComponentPhase(cli, tc, v1alpha1.TiDBMemberType, v1alpha1.UpgradePhase, 3*time.Minute, time.Second*10) + log.Logf("TiDB is in UpgradePhase") + + // the tc ready condition between components upgrade phase may not be observed + // and it may only observed the last ready after all components upgraded ginkgo.By("Wait for tc ready") err = oa.WaitForTidbClusterReady(tc, 10*time.Minute, 10*time.Second) framework.ExpectNoError(err, "failed to wait for TidbCluster %s/%s components ready", ns, tc.Name) @@ -2037,14 +2047,6 @@ var _ = ginkgo.Describe("TiDBCluster", func() { log.Logf("PD config:\n%s", pdCm.Data["config-file"]) gomega.Expect(pdCm.Data["config-file"]).To(gomega.ContainSubstring("lease = 3")) - ginkgo.By("Wait for TiKV to be in UpgradePhase") - utiltc.MustWaitForComponentPhase(cli, tc, v1alpha1.TiKVMemberType, v1alpha1.UpgradePhase, 3*time.Minute, time.Second*10) - log.Logf("TiKV is in UpgradePhase") - - ginkgo.By("Wait for tc ready") - err = oa.WaitForTidbClusterReady(tc, 10*time.Minute, 10*time.Second) - framework.ExpectNoError(err, "failed to wait for TidbCluster %s/%s components ready", ns, tc.Name) - ginkgo.By("Check TiKV configuration") tikvMemberName := controller.TiKVMemberName(tc.Name) tikvSts, err := stsGetter.StatefulSets(ns).Get(context.TODO(), tikvMemberName, metav1.GetOptions{}) @@ -2057,14 +2059,6 @@ var _ = ginkgo.Describe("TiDBCluster", func() { log.Logf("TiKV config:\n%s", tikvCm.Data["config-file"]) gomega.Expect(tikvCm.Data["config-file"]).To(gomega.ContainSubstring("status-thread-pool-size = 1")) - ginkgo.By("Wait for TiDB to be in UpgradePhase") - utiltc.MustWaitForComponentPhase(cli, tc, v1alpha1.TiDBMemberType, v1alpha1.UpgradePhase, 3*time.Minute, time.Second*10) - log.Logf("TiDB is in UpgradePhase") - - ginkgo.By("Wait for tc ready") - err = oa.WaitForTidbClusterReady(tc, 10*time.Minute, 10*time.Second) - framework.ExpectNoError(err, "failed to wait for TidbCluster %s/%s components ready", ns, tc.Name) - ginkgo.By("Check TiDB configuration") tidbMemberName := controller.TiDBMemberName(tc.Name) tidbSts, err := stsGetter.StatefulSets(ns).Get(context.TODO(), tidbMemberName, metav1.GetOptions{}) @@ -2160,7 +2154,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() { // upgrdae testing for specific versions utilginkgo.ContextWhenFocus("Specific Version", func() { - configureV5x0x0 := func(tc *v1alpha1.TidbCluster) { + configureV7x5x0 := func(tc *v1alpha1.TidbCluster) { pdCfg := v1alpha1.NewPDConfig() tikvCfg := v1alpha1.NewTiKVConfig() tidbCfg := v1alpha1.NewTiDBConfig() @@ -2190,7 +2184,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() { tc.Spec.TiDB.Config = tidbCfg tc.Spec.TiFlash.Config = tiflashCfg } - configureV5x0x2 := func(tc *v1alpha1.TidbCluster) { + configureV7x5x2 := func(tc *v1alpha1.TidbCluster) { pdCfg := v1alpha1.NewPDConfig() tikvCfg := v1alpha1.NewTiKVConfig() tidbCfg := v1alpha1.NewTiDBConfig() @@ -2220,7 +2214,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() { tc.Spec.TiDB.Config = tidbCfg tc.Spec.TiFlash.Config = tiflashCfg } - configureV5x1x0 := func(tc *v1alpha1.TidbCluster) { + configureV8x1x0 := func(tc *v1alpha1.TidbCluster) { pdCfg := v1alpha1.NewPDConfig() tikvCfg := v1alpha1.NewTiKVConfig() tidbCfg := v1alpha1.NewTiDBConfig() @@ -2264,16 +2258,16 @@ var _ = ginkgo.Describe("TiDBCluster", func() { cases := []upgradeCase{ { - oldVersion: utilimage.TiDBV5x0x0, + oldVersion: utilimage.TiDBV7x5x0, newVersion: utilimage.TiDBLatest, - configureOldTiDBCluster: configureV5x0x0, - configureNewTiDBCluster: configureV5x1x0, + configureOldTiDBCluster: configureV7x5x0, + configureNewTiDBCluster: configureV8x1x0, }, { - oldVersion: utilimage.TiDBV5x0x2, + oldVersion: utilimage.TiDBV7x5x3, newVersion: utilimage.TiDBLatest, - configureOldTiDBCluster: configureV5x0x2, - configureNewTiDBCluster: configureV5x1x0, + configureOldTiDBCluster: configureV7x5x2, + configureNewTiDBCluster: configureV8x1x0, }, } for i := range cases { @@ -3136,7 +3130,7 @@ var _ = ginkgo.Describe("TiDBCluster", func() { ginkgo.It("migrate start script from v1 to v2 "+testcase.nameSuffix, func() { tcName := "migrate-start-script-v2" - tc := fixture.GetTidbCluster(ns, tcName, utilimage.TiDBLatest) + tc := fixture.GetTidbClusterWithoutPDMS(ns, tcName, utilimage.TiDBLatest) tc = fixture.AddTiFlashForTidbCluster(tc) tc = fixture.AddTiCDCForTidbCluster(tc) tc = fixture.AddPumpForTidbCluster(tc) diff --git a/tests/e2e/util/image/image.go b/tests/e2e/util/image/image.go index 27b426b8e1..177ef4e0b3 100644 --- a/tests/e2e/util/image/image.go +++ b/tests/e2e/util/image/image.go @@ -28,18 +28,17 @@ import ( ) var ( - TiDBPreviousVersions []string = []string{"v5.0.6", "v5.1.4", "v5.2.4", "v5.3.2", "v5.4.2"} + TiDBPreviousVersions []string = []string{"v6.5.10", "v7.1.5", "v7.5.2", "v8.1.0"} ) const ( // TiDB Version - TiDBLatestPrev = "v6.0.0" - TiDBLatest = "v6.1.0" + TiDBLatestPrev = "v7.5.3" + TiDBLatest = "v8.1.0" // different version with PDMSImage TiDBNightlyVersion = "nightly" // specific version - TiDBV5x0x0 = "v5.0.0" - TiDBV5x0x2 = "v5.0.2" - TiDBV5x3 = "v5.3.0" + TiDBV7x5x0 = "v7.5.0" + TiDBV7x5x3 = "v7.5.3" PrometheusImage = "prom/prometheus" PrometheusVersion = "v2.27.1" @@ -48,14 +47,14 @@ const ( TiDBMonitorInitializerImage = "pingcap/tidb-monitor-initializer" TiDBMonitorInitializerVersion = TiDBLatest GrafanaImage = "grafana/grafana" - GrafanaVersion = "6.1.6" + GrafanaVersion = "7.5.11" ThanosImage = "thanosio/thanos" ThanosVersion = "v0.17.2" DMV2Prev = TiDBLatestPrev DMV2 = TiDBLatest TiDBNGMonitoringLatest = TiDBLatest HelperImage = "alpine:3.16.0" - PDMSImage = "v8.1.0" + PDMSImage = "v8.1.1" ) func ListImages() []string { diff --git a/tests/e2e/util/portforward/portforward.go b/tests/e2e/util/portforward/portforward.go index ccf2d4935d..a265c97eb5 100644 --- a/tests/e2e/util/portforward/portforward.go +++ b/tests/e2e/util/portforward/portforward.go @@ -67,6 +67,7 @@ func (f *portForwarder) forwardPorts(podKey, method string, url *url.URL, addres readyChan := make(chan struct{}) fw, err := portforward.NewOnAddresses(dialer, addresses, ports, ctx.Done(), readyChan, w, w) if err != nil { + cancel() return nil, nil, err } diff --git a/tests/images/e2e/Dockerfile b/tests/images/e2e/Dockerfile index 5d596bfccf..6f1d1d6a42 100644 --- a/tests/images/e2e/Dockerfile +++ b/tests/images/e2e/Dockerfile @@ -3,6 +3,8 @@ FROM debian:buster-slim ENV KUBECTL_VERSION=v1.28.5 ENV HELM_VERSION=v3.11.0 +ARG TARGETARCH + # python is required by gcloud RUN apt-get update && \ apt-get install -y ca-certificates curl git openssl default-mysql-client unzip && \ @@ -10,15 +12,15 @@ RUN apt-get update && \ apt-get autoremove -y && \ apt-get clean -y -RUN curl https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION}/bin/linux/amd64/kubectl \ +RUN curl https://storage.googleapis.com/kubernetes-release/release/${KUBECTL_VERSION}/bin/linux/${TARGETARCH}/kubectl \ -o /usr/local/bin/kubectl && \ chmod +x /usr/local/bin/kubectl && \ - curl https://get.helm.sh/helm-${HELM_VERSION}-linux-amd64.tar.gz \ - -o helm-${HELM_VERSION}-linux-amd64.tar.gz && \ - tar -zxvf helm-${HELM_VERSION}-linux-amd64.tar.gz && \ - mv linux-amd64/helm /usr/local/bin/helm && \ - rm -rf linux-amd64 && \ - rm helm-${HELM_VERSION}-linux-amd64.tar.gz + curl https://get.helm.sh/helm-${HELM_VERSION}-linux-${TARGETARCH}.tar.gz \ + -o helm-${HELM_VERSION}-linux-${TARGETARCH}.tar.gz && \ + tar -zxvf helm-${HELM_VERSION}-linux-${TARGETARCH}.tar.gz && \ + mv linux-${TARGETARCH}/helm /usr/local/bin/helm && \ + rm -rf linux-${TARGETARCH} && \ + rm helm-${HELM_VERSION}-linux-${TARGETARCH}.tar.gz RUN curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" && \ unzip awscliv2.zip && \ ./aws/install && \ diff --git a/tests/pkg/fixture/fixture.go b/tests/pkg/fixture/fixture.go index e039dec66e..be018981b8 100644 --- a/tests/pkg/fixture/fixture.go +++ b/tests/pkg/fixture/fixture.go @@ -84,6 +84,22 @@ const ComponentCustomKey = "component-test-key" // GetTidbCluster returns a TidbCluster resource configured for testing func GetTidbCluster(ns, name, version string) *v1alpha1.TidbCluster { + tc := GetTidbClusterWithoutPDMS(ns, name, version) + + random := rand.Intn(2) + if random != 0 && version == utilimage.PDMSImage { + log.Logf("[GetTidbCluster] tidbcluster's pd mode is micro-service in this situation, "+ + "version: %s, tc name: %s, namespace: %s", version, name, ns) + // 50% random in pdms mode + tc = AddPDMSForTidbCluster(tc) + } + + return tc +} + +// GetTidbClusterWithoutPDMS returns a TidbCluster resource configured for testing. +// in some cases, it won't support pdms mode, so we can't use pdms mode in this situation. +func GetTidbClusterWithoutPDMS(ns, name, version string) *v1alpha1.TidbCluster { // We assume all unparsable versions are greater or equal to v4.0.0-beta, // e.g. nightly. tikvConfig := v1alpha1.NewTiKVConfig() @@ -193,14 +209,6 @@ func GetTidbCluster(ns, name, version string) *v1alpha1.TidbCluster { }, } - random := rand.Intn(2) - if random != 0 && version == utilimage.PDMSImage { - log.Logf("[GetTidbCluster] tidbcluster's pd mode is micro-service in this situation, "+ - "version: %s, tc name: %s, namespace: %s", version, name, ns) - // 50% random in pdms mode - tc = AddPDMSForTidbCluster(tc) - } - return tc } From dca333cc3f6e4ffe7814a4adac704ebc943a5853 Mon Sep 17 00:00:00 2001 From: ris <79858083+RidRisR@users.noreply.github.com> Date: Tue, 24 Sep 2024 15:23:48 +0800 Subject: [PATCH 3/4] hack: support k3s as provider for local-up (#5727) --- hack/local-up-by-k3s.sh | 179 +++++++++++++++++++++++++++++++++++ hack/local-up-by-kind.sh | 192 ++++++++++++++++++++++++++++++++++++++ hack/local-up-operator.sh | 182 ++++++------------------------------ 3 files changed, 397 insertions(+), 156 deletions(-) create mode 100755 hack/local-up-by-k3s.sh create mode 100644 hack/local-up-by-kind.sh diff --git a/hack/local-up-by-k3s.sh b/hack/local-up-by-k3s.sh new file mode 100755 index 0000000000..873bb66bd8 --- /dev/null +++ b/hack/local-up-by-k3s.sh @@ -0,0 +1,179 @@ +#!/usr/bin/env bash + +# Copyright 2020 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# This command runs tidb-operator in Kubernetes. +# + +set -o errexit +set -o nounset +set -o pipefail + +ROOT=$(unset CDPATH && cd $(dirname "${BASH_SOURCE[0]}")/.. && pwd) +cd $ROOT + +source "${ROOT}/hack/lib.sh" + +function usage() { + cat <<'EOF' +This commands run tidb-operator in Kubernetes. + +Usage: hack/local-up-operator.sh [-hd] + + -h show this message and exit + -i install dependencies only + +Environments: + + PROVIDER Kubernetes provider. Defaults: k3s. + CLUSTER the name of e2e cluster. + KUBECONFIG path to the kubeconfig file, defaults: ~/.kube/config + KUBECONTEXT context in kubeconfig file, defaults to current context + NAMESPACE Kubernetes namespace in which we run our tidb-operator. + DOCKER_REGISTRY image docker registry + IMAGE_TAG image tag + SKIP_IMAGE_BUILD skip build and push images + +EOF +} + +installOnly=false +while getopts "h?i" opt; do + case "$opt" in + h|\?) + usage + exit 0 + ;; + i) + installOnly=true + ;; + esac +done + +PROVIDER=${PROVIDER:-k3s} +KUBECONFIG=${KUBECONFIG:-~/.kube/config} +KUBECONTEXT=${KUBECONTEXT:-} +NAMESPACE=${NAMESPACE:-pingcap} +DOCKER_REGISTRY=${DOCKER_REGISTRY:-localhost:5000} +IMAGE_TAG=${IMAGE_TAG:-latest} +SKIP_IMAGE_BUILD=${SKIP_IMAGE_BUILD:-} + +hack::ensure_kubectl +hack::ensure_helm + +function hack::create_namespace() { + local ns="$1" # The namespace to create + # Create the namespace + $KUBECTL_BIN create namespace $ns + # Wait for the namespace to become active + for ((i=0; i < 30; i++)); do + local phase=$(kubectl get ns $ns -ojsonpath='{.status.phase}') + if [ "$phase" == "Active" ]; then + echo "info: namespace $ns is active" + return 0 + fi + sleep 1 + done + echo "error: timed out waiting for namespace $ns to become active" + return 1 +} + +function hack::wait_for_deploy() { + local ns="$1" + local name="$2" + local retries="${3:-300}" + echo "info: waiting for pods of deployment $ns/$name are ready (retries: $retries, interval: 1s)" + for ((i = 0; i < retries; i++)) { + read a b <<<$($KUBECTL_BIN --context $KUBECONTEXT -n $ns get deploy/$name -ojsonpath='{.spec.replicas} {.status.readyReplicas}{"\n"}') + if [[ "$a" -gt 0 && "$a" -eq "$b" ]]; then + echo "info: all pods of deployment $ns/$name are ready (desired: $a, ready: $b)" + return 0 + fi + echo "info: pods of deployment $ns/$name (desired: $a, ready: $b)" + sleep 1 + } + echo "info: timed out waiting for pods of deployment $ns/$name are ready" + return 1 +} + +if [[ "$installOnly" == "true" ]]; then + exit 0 +fi + +echo "info: checking clusters" + +if [ "$PROVIDER" == "k3s" ]; then + echo "info: using k3s provider" + if ! kubectl cluster-info &>/dev/null; then + echo "error: k3s cluster not found, please ensure it is running" + exit 1 + fi +else + echo "error: only k3s PROVIDER is supported" + exit 1 +fi + +if [ -z "$KUBECONTEXT" ]; then + KUBECONTEXT=$(kubectl config current-context) + echo "info: KUBECONTEXT is not set, current context $KUBECONTEXT is used" +fi + +if [ -z "$SKIP_IMAGE_BUILD" ]; then + echo "info: building docker images" + DOCKER_REGISTRY=$DOCKER_REGISTRY IMAGE_TAG=$IMAGE_TAG make docker + + # Push images to the local registry + echo "info: pushing images to the local registry" + + docker push ${DOCKER_REGISTRY}/pingcap/tidb-operator:${IMAGE_TAG} + docker push ${DOCKER_REGISTRY}/pingcap/tidb-backup-manager:${IMAGE_TAG} +else + echo "info: skip building docker images" +fi + +echo "info: uninstall tidb-operator" +$KUBECTL_BIN -n "$NAMESPACE" delete deploy -l app.kubernetes.io/name=tidb-operator +$KUBECTL_BIN -n "$NAMESPACE" delete pods -l app.kubernetes.io/name=tidb-operator + +echo "info: create namespace '$NAMESPACE' if absent" +if ! $KUBECTL_BIN get ns "$NAMESPACE" &>/dev/null; then + hack::create_namespace "$NAMESPACE" +fi + +echo "info: installing crds" +if ! $KUBECTL_BIN create -f manifests/crd.yaml &>/dev/null; then + $KUBECTL_BIN replace -f manifests/crd.yaml +fi + +echo "info: deploying tidb-operator" +helm_template_args=( + --namespace "$NAMESPACE" + --set-string operatorImage=$DOCKER_REGISTRY/pingcap/tidb-operator:${IMAGE_TAG} + --set-string tidbBackupManagerImage=$DOCKER_REGISTRY/pingcap/tidb-backup-manager:${IMAGE_TAG} + --set-string controllerManager.logLevel=4 + --set-string scheduler.logLevel=4 + --set imagePullPolicy=Always +) + +$HELM_BIN template tidb-operator-dev ./charts/tidb-operator/ ${helm_template_args[@]} | kubectl -n "$NAMESPACE" apply -f - + +deploys=( + tidb-controller-manager + # tidb-scheduler +) +for deploy in ${deploys[@]}; do + echo "info: waiting for $NAMESPACE/$deploy to be ready" + hack::wait_for_deploy "$NAMESPACE" "$deploy" +done diff --git a/hack/local-up-by-kind.sh b/hack/local-up-by-kind.sh new file mode 100644 index 0000000000..cc87fcac46 --- /dev/null +++ b/hack/local-up-by-kind.sh @@ -0,0 +1,192 @@ +#!/usr/bin/env bash + +# Copyright 2020 PingCAP, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# This command runs tidb-operator in Kubernetes. +# + +set -o errexit +set -o nounset +set -o pipefail + +ROOT=$(unset CDPATH && cd $(dirname "${BASH_SOURCE[0]}")/.. && pwd) +cd $ROOT + +source "${ROOT}/hack/lib.sh" + +function usage() { + cat <<'EOF' +This commands run tidb-operator in Kubernetes. + +Usage: hack/local-up-operator.sh [-hd] + + -h show this message and exit + -i install dependencies only + +Environments: + + PROVIDER Kubernetes provider. Defaults: kind. + CLUSTER the name of e2e cluster. Defaults to kind for kind provider. + KUBECONFIG path to the kubeconfig file, defaults: ~/.kube/config + KUBECONTEXT context in kubeconfig file, defaults to current context + NAMESPACE Kubernetes namespace in which we run our tidb-operator. + DOCKER_REGISTRY image docker registry + IMAGE_TAG image tag + SKIP_IMAGE_BUILD skip build and push images + +EOF +} + +installOnly=false +while getopts "h?i" opt; do + case "$opt" in + h|\?) + usage + exit 0 + ;; + i) + installOnly=true + ;; + esac +done + +PROVIDER=${PROVIDER:-kind} +CLUSTER=${CLUSTER:-} +KUBECONFIG=${KUBECONFIG:-~/.kube/config} +KUBECONTEXT=${KUBECONTEXT:-} +NAMESPACE=${NAMESPACE:-pingcap} +DOCKER_REGISTRY=${DOCKER_REGISTRY:-localhost:5000} +IMAGE_TAG=${IMAGE_TAG:-latest} +SKIP_IMAGE_BUILD=${SKIP_IMAGE_BUILD:-} + +hack::ensure_kubectl +hack::ensure_kind +hack::ensure_helm + +if [[ "$installOnly" == "true" ]]; then + exit 0 +fi + +function hack::create_namespace() { + local ns="$1" + $KUBECTL_BIN create namespace $ns + for ((i=0; i < 30; i++)); do + local phase=$(kubectl get ns $ns -ojsonpath='{.status.phase}') + if [ "$phase" == "Active" ]; then + return 0 + fi + sleep 1 + done + return 1 +} + +function hack::wait_for_deploy() { + local ns="$1" + local name="$2" + local retries="${3:-300}" + echo "info: waiting for pods of deployment $ns/$name are ready (retries: $retries, interval: 1s)" + for ((i = 0; i < retries; i++)) { + read a b <<<$($KUBECTL_BIN --context $KUBECONTEXT -n $ns get deploy/$name -ojsonpath='{.spec.replicas} {.status.readyReplicas}{"\n"}') + if [[ "$a" -gt 0 && "$a" -eq "$b" ]]; then + echo "info: all pods of deployment $ns/$name are ready (desired: $a, ready: $b)" + return 0 + fi + echo "info: pods of deployment $ns/$name (desired: $a, ready: $b)" + sleep 1 + } + echo "info: timed out waiting for pods of deployment $ns/$name are ready" + return 1 +} + +function hack::cluster_exists() { + local c="$1" + for n in $($KIND_BIN get clusters); do + if [ "$n" == "$c" ]; then + return 0 + fi + done + return 1 +} + +echo "info: checking clusters" + +if [ "$PROVIDER" == "kind" ]; then + if [ -z "$CLUSTER" ]; then + CLUSTER=kind + fi + if ! hack::cluster_exists "$CLUSTER"; then + echo "error: kind cluster '$CLUSTER' not found, please create it or specify the right cluster name with CLUSTER environment" + exit 1 + fi +else + echo "erorr: only kind PROVIDER is supported" + exit 1 +fi + +if [ -z "$KUBECONTEXT" ]; then + KUBECONTEXT=$(kubectl config current-context) + echo "info: KUBECONTEXT is not set, current context $KUBECONTEXT is used" +fi + +if [ -z "$SKIP_IMAGE_BUILD" ]; then + echo "info: building docker images" + DOCKER_REGISTRY=$DOCKER_REGISTRY IMAGE_TAG=$IMAGE_TAG make docker +else + echo "info: skip building docker images" +fi + +echo "info: loading images into cluster" +images=( + $DOCKER_REGISTRY/pingcap/tidb-operator:${IMAGE_TAG} + $DOCKER_REGISTRY/pingcap/tidb-backup-manager:${IMAGE_TAG} +) +for n in ${images[@]}; do + echo "info: loading image $n" + $KIND_BIN load docker-image --name $CLUSTER $n +done + +echo "info: uninstall tidb-operator" +$KUBECTL_BIN -n "$NAMESPACE" delete deploy -l app.kubernetes.io/name=tidb-operator +$KUBECTL_BIN -n "$NAMESPACE" delete pods -l app.kubernetes.io/name=tidb-operator + +echo "info: create namespace '$NAMESPACE' if absent" +if ! $KUBECTL_BIN get ns "$NAMESPACE" &>/dev/null; then + hack::create_namespace "$NAMESPACE" +fi + +echo "info: installing crds" +if ! $KUBECTL_BIN create -f manifests/crd.yaml &>/dev/null; then + $KUBECTL_BIN replace -f manifests/crd.yaml +fi + +echo "info: deploying tidb-operator" +helm_template_args=( + --namespace "$NAMESPACE" + --set-string operatorImage=$DOCKER_REGISTRY/pingcap/tidb-operator:${IMAGE_TAG} + --set-string tidbBackupManagerImage=$DOCKER_REGISTRY/pingcap/tidb-backup-manager:${IMAGE_TAG} + --set-string controllerManager.logLevel=4 + --set-string scheduler.logLevel=4 +) + +$HELM_BIN template tidb-operator-dev ./charts/tidb-operator/ ${helm_template_args[@]} | kubectl -n "$NAMESPACE" apply -f - + +deploys=( + tidb-controller-manager + # tidb-scheduler +) +for deploy in ${deploys[@]}; do + echo "info: waiting for $NAMESPACE/$deploy to be ready" + hack::wait_for_deploy "$NAMESPACE" "$deploy" +done diff --git a/hack/local-up-operator.sh b/hack/local-up-operator.sh index cc87fcac46..d89806b563 100755 --- a/hack/local-up-operator.sh +++ b/hack/local-up-operator.sh @@ -17,176 +17,46 @@ # This command runs tidb-operator in Kubernetes. # -set -o errexit -set -o nounset -set -o pipefail - -ROOT=$(unset CDPATH && cd $(dirname "${BASH_SOURCE[0]}")/.. && pwd) -cd $ROOT - -source "${ROOT}/hack/lib.sh" +# Default provider is kind +PROVIDER=${PROVIDER:-kind} +# Function to display usage information function usage() { cat <<'EOF' -This commands run tidb-operator in Kubernetes. +This script runs tidb-operator in Kubernetes using the appropriate provider. -Usage: hack/local-up-operator.sh [-hd] +Usage: local-up-operator.sh [-hd] [-p PROVIDER] - -h show this message and exit - -i install dependencies only + -h Show this message and exit + -i Install dependencies only Environments: PROVIDER Kubernetes provider. Defaults: kind. - CLUSTER the name of e2e cluster. Defaults to kind for kind provider. - KUBECONFIG path to the kubeconfig file, defaults: ~/.kube/config - KUBECONTEXT context in kubeconfig file, defaults to current context + CLUSTER The name of the e2e cluster. + KUBECONFIG Path to the kubeconfig file, defaults: ~/.kube/config + KUBECONTEXT Context in kubeconfig file, defaults to the current context NAMESPACE Kubernetes namespace in which we run our tidb-operator. - DOCKER_REGISTRY image docker registry - IMAGE_TAG image tag - SKIP_IMAGE_BUILD skip build and push images + DOCKER_REGISTRY Image docker registry + IMAGE_TAG Image tag + SKIP_IMAGE_BUILD Skip build and push images EOF } -installOnly=false -while getopts "h?i" opt; do - case "$opt" in - h|\?) - usage - exit 0 +# Determine the appropriate script based on the provider +case "$PROVIDER" in + kind) + echo "Running with kind provider..." + bash hack/local-up-by-kind.sh "$@" ;; - i) - installOnly=true + k3s) + echo "Running with k3s provider..." + bash hack/local-up-by-k3s.sh "$@" ;; - esac -done - -PROVIDER=${PROVIDER:-kind} -CLUSTER=${CLUSTER:-} -KUBECONFIG=${KUBECONFIG:-~/.kube/config} -KUBECONTEXT=${KUBECONTEXT:-} -NAMESPACE=${NAMESPACE:-pingcap} -DOCKER_REGISTRY=${DOCKER_REGISTRY:-localhost:5000} -IMAGE_TAG=${IMAGE_TAG:-latest} -SKIP_IMAGE_BUILD=${SKIP_IMAGE_BUILD:-} - -hack::ensure_kubectl -hack::ensure_kind -hack::ensure_helm - -if [[ "$installOnly" == "true" ]]; then - exit 0 -fi - -function hack::create_namespace() { - local ns="$1" - $KUBECTL_BIN create namespace $ns - for ((i=0; i < 30; i++)); do - local phase=$(kubectl get ns $ns -ojsonpath='{.status.phase}') - if [ "$phase" == "Active" ]; then - return 0 - fi - sleep 1 - done - return 1 -} - -function hack::wait_for_deploy() { - local ns="$1" - local name="$2" - local retries="${3:-300}" - echo "info: waiting for pods of deployment $ns/$name are ready (retries: $retries, interval: 1s)" - for ((i = 0; i < retries; i++)) { - read a b <<<$($KUBECTL_BIN --context $KUBECONTEXT -n $ns get deploy/$name -ojsonpath='{.spec.replicas} {.status.readyReplicas}{"\n"}') - if [[ "$a" -gt 0 && "$a" -eq "$b" ]]; then - echo "info: all pods of deployment $ns/$name are ready (desired: $a, ready: $b)" - return 0 - fi - echo "info: pods of deployment $ns/$name (desired: $a, ready: $b)" - sleep 1 - } - echo "info: timed out waiting for pods of deployment $ns/$name are ready" - return 1 -} - -function hack::cluster_exists() { - local c="$1" - for n in $($KIND_BIN get clusters); do - if [ "$n" == "$c" ]; then - return 0 - fi - done - return 1 -} - -echo "info: checking clusters" - -if [ "$PROVIDER" == "kind" ]; then - if [ -z "$CLUSTER" ]; then - CLUSTER=kind - fi - if ! hack::cluster_exists "$CLUSTER"; then - echo "error: kind cluster '$CLUSTER' not found, please create it or specify the right cluster name with CLUSTER environment" + *) + echo "Unsupported provider: $PROVIDER" + usage exit 1 - fi -else - echo "erorr: only kind PROVIDER is supported" - exit 1 -fi - -if [ -z "$KUBECONTEXT" ]; then - KUBECONTEXT=$(kubectl config current-context) - echo "info: KUBECONTEXT is not set, current context $KUBECONTEXT is used" -fi - -if [ -z "$SKIP_IMAGE_BUILD" ]; then - echo "info: building docker images" - DOCKER_REGISTRY=$DOCKER_REGISTRY IMAGE_TAG=$IMAGE_TAG make docker -else - echo "info: skip building docker images" -fi - -echo "info: loading images into cluster" -images=( - $DOCKER_REGISTRY/pingcap/tidb-operator:${IMAGE_TAG} - $DOCKER_REGISTRY/pingcap/tidb-backup-manager:${IMAGE_TAG} -) -for n in ${images[@]}; do - echo "info: loading image $n" - $KIND_BIN load docker-image --name $CLUSTER $n -done - -echo "info: uninstall tidb-operator" -$KUBECTL_BIN -n "$NAMESPACE" delete deploy -l app.kubernetes.io/name=tidb-operator -$KUBECTL_BIN -n "$NAMESPACE" delete pods -l app.kubernetes.io/name=tidb-operator - -echo "info: create namespace '$NAMESPACE' if absent" -if ! $KUBECTL_BIN get ns "$NAMESPACE" &>/dev/null; then - hack::create_namespace "$NAMESPACE" -fi - -echo "info: installing crds" -if ! $KUBECTL_BIN create -f manifests/crd.yaml &>/dev/null; then - $KUBECTL_BIN replace -f manifests/crd.yaml -fi - -echo "info: deploying tidb-operator" -helm_template_args=( - --namespace "$NAMESPACE" - --set-string operatorImage=$DOCKER_REGISTRY/pingcap/tidb-operator:${IMAGE_TAG} - --set-string tidbBackupManagerImage=$DOCKER_REGISTRY/pingcap/tidb-backup-manager:${IMAGE_TAG} - --set-string controllerManager.logLevel=4 - --set-string scheduler.logLevel=4 -) - -$HELM_BIN template tidb-operator-dev ./charts/tidb-operator/ ${helm_template_args[@]} | kubectl -n "$NAMESPACE" apply -f - - -deploys=( - tidb-controller-manager - # tidb-scheduler -) -for deploy in ${deploys[@]}; do - echo "info: waiting for $NAMESPACE/$deploy to be ready" - hack::wait_for_deploy "$NAMESPACE" "$deploy" -done + ;; +esac From 6acb1101efb1af386250522750f8716c3e8a5482 Mon Sep 17 00:00:00 2001 From: Xuecheng Zhang Date: Wed, 25 Sep 2024 09:41:59 +0800 Subject: [PATCH 4/4] CI: bump runner Golang to v1.23.1 (#5748) --- ci/e2e_eks.groovy | 2 +- ci/e2e_gke.groovy | 2 +- ci/e2e_kind.groovy | 2 +- ci/pull_e2e_kind.groovy | 2 +- ci/pull_e2e_release.groovy | 2 +- ci/vm.groovy | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/ci/e2e_eks.groovy b/ci/e2e_eks.groovy index 6210ef40c4..fc27883f25 100644 --- a/ci/e2e_eks.groovy +++ b/ci/e2e_eks.groovy @@ -16,7 +16,7 @@ kind: Pod spec: containers: - name: main - image: hub.pingcap.net/tidb-operator/kubekins-e2e:v7-go1.22.5 + image: hub.pingcap.net/tidb-operator/kubekins-e2e:v8-go1.23.1 command: - runner.sh - sleep diff --git a/ci/e2e_gke.groovy b/ci/e2e_gke.groovy index b1fef00520..f77cc3294b 100644 --- a/ci/e2e_gke.groovy +++ b/ci/e2e_gke.groovy @@ -16,7 +16,7 @@ kind: Pod spec: containers: - name: main - image: hub.pingcap.net/tidb-operator/kubekins-e2e:v7-go1.22.5 + image: hub.pingcap.net/tidb-operator/kubekins-e2e:v8-go1.23.1 command: - runner.sh - sleep diff --git a/ci/e2e_kind.groovy b/ci/e2e_kind.groovy index 6a2d585ec8..e6c75fe653 100644 --- a/ci/e2e_kind.groovy +++ b/ci/e2e_kind.groovy @@ -16,7 +16,7 @@ metadata: spec: containers: - name: main - image: hub.pingcap.net/tidb-operator/kubekins-e2e:v7-go1.22.5 + image: hub.pingcap.net/tidb-operator/kubekins-e2e:v8-go1.23.1 command: - runner.sh - exec diff --git a/ci/pull_e2e_kind.groovy b/ci/pull_e2e_kind.groovy index 0367d37c15..323ed60df3 100644 --- a/ci/pull_e2e_kind.groovy +++ b/ci/pull_e2e_kind.groovy @@ -54,7 +54,7 @@ metadata: spec: containers: - name: main - image: hub.pingcap.net/tidb-operator/kubekins-e2e:v7-go1.22.5 + image: hub.pingcap.net/tidb-operator/kubekins-e2e:v8-go1.23.1 command: - runner.sh - exec diff --git a/ci/pull_e2e_release.groovy b/ci/pull_e2e_release.groovy index de4fa6e403..68dc4be25f 100644 --- a/ci/pull_e2e_release.groovy +++ b/ci/pull_e2e_release.groovy @@ -37,7 +37,7 @@ metadata: spec: containers: - name: main - image: hub.pingcap.net/tidb-operator/kubekins-e2e:v7-go1.22.5 + image: hub.pingcap.net/tidb-operator/kubekins-e2e:v8-go1.23.1 command: - runner.sh - exec diff --git a/ci/vm.groovy b/ci/vm.groovy index 0c7e031a7d..9d1ba39cdf 100644 --- a/ci/vm.groovy +++ b/ci/vm.groovy @@ -16,7 +16,7 @@ kind: Pod spec: containers: - name: main - image: hub.pingcap.net/tidb-operator/kubekins-e2e:v7-go1.22.5 + image: hub.pingcap.net/tidb-operator/kubekins-e2e:v8-go1.23.1 command: - runner.sh - sleep