diff --git a/.gimps.yaml b/.gimps.yaml index 7856ca9c5..fd0b820ba 100644 --- a/.gimps.yaml +++ b/.gimps.yaml @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# This is the configuration for https://github.com/xrstf/gimps. +# This is the configuration for https://codeberg.org/xrstf/gimps. importOrder: [std, external, kubermatic, kubernetes] sets: diff --git a/.gitignore b/.gitignore index 8cc8d65e8..784e3f33a 100644 --- a/.gitignore +++ b/.gitignore @@ -15,4 +15,5 @@ examples/*.srl /vendor .vscode .gitpod.yml -cmd/machine-controller/__debug_bin +cmd/machine-controller/__debug_bin* +!pkg diff --git a/.golangci.yml b/.golangci.yml index cc14d4190..717f562f1 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,28 +1,21 @@ +version: "2" run: - deadline: 20m build-tags: - e2e - skip-dirs: - - pkg/client - - pkg/machines - linters: + default: none enable: - asciicheck - bidichk - bodyclose - - deadcode - depguard - durationcheck - errcheck - errname - errorlint - - exportloopref - goconst - gocyclo - godot - - gofmt - - gosimple - govet - importas - ineffassign @@ -33,29 +26,92 @@ linters: - nosprintfhostport - predeclared - promlinter - - revive - staticcheck - - structcheck - - tenv - unconvert - unused - - varcheck - wastedassign - whitespace - disable-all: true - + settings: + depguard: + rules: + main: + deny: + - pkg: io/ioutil + desc: https://go.dev/doc/go1.16#ioutil + - pkg: github.com/ghodss/yaml + desc: use sigs.k8s.io/yaml instead + govet: + enable: + - nilness # find tautologies / impossible conditions + importas: + alias: + # Machine Controller + - pkg: k8c.io/machine-controller/sdk/apis/(\w+)/(v[\w\d]+) + alias: $1$2 + # Kubernetes + - pkg: k8s.io/api/(\w+)/(v[\w\d]+) + alias: $1$2 + - pkg: k8s.io/apimachinery/pkg/apis/meta/v1 + alias: metav1 + - pkg: k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1 + alias: apiextensionsv1 + - pkg: k8s.io/apimachinery/pkg/api/errors + alias: 
apierrors + - pkg: k8s.io/apimachinery/pkg/util/errors + alias: kerrors + # Controller Runtime + - pkg: sigs.k8s.io/controller-runtime/pkg/client + alias: ctrlruntimeclient + # Other Kube APIs + - pkg: go.anx.io/go-anxcloud/pkg/apis/(\w+)/(v[\w\d]+) + alias: anx$1$2 + - pkg: github.com/tinkerbell/tink/api/(v[\w\d]+) + alias: tink$1 + - pkg: kubevirt.io/api/(\w+)/(v[\w\d]+) + alias: kubevirt$1$2 + - pkg: kubevirt.io/containerized-data-importer-api/pkg/apis/(\w+)/(v[\w\d]+) + alias: cdi$1$2 + no-unaliased: true + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + rules: + - path: (.+)\.go$ + text: func Convert_MachinesV1alpha1Machine_To_ClusterV1alpha1Machine should be ConvertMachinesV1alpha1MachineToClusterV1alpha1Machine + - path: (.+)\.go$ + text: func Convert_MachineDeployment_ProviderConfig_To_ProviderSpec should be ConvertMachineDeploymentProviderConfigToProviderSpec + - path: (.+)\.go$ + text: func Convert_MachineSet_ProviderConfig_To_ProviderSpec should be ConvertMachineSetProviderConfigToProviderSpec + - path: (.+)\.go$ + text: func Convert_Machine_ProviderConfig_To_ProviderSpec should be ConvertMachineProviderConfigToProviderSpec + - path: (.+)\.go$ + text: cyclomatic complexity [0-9]+ of func `\(\*provider\)\.Create` is high + - path: (.+)\.go$ + text: cyclomatic complexity [0-9]+ of func `\(\*provider\)\.Validate` is high + - path: (.+)\.go$ + text: cyclomatic complexity [0-9]+ of func `\(\*provider\)\.getConfig` is high + - path: (.+)\.go$ + text: 'SA1019: s.server.IPv6 is deprecated' + - path: (.+)\.go$ + text: 'SA1019: mgr.GetEventRecorderFor is deprecated' + paths: + - apis/machines + - third_party$ + - builtin$ + - examples$ issues: - exclude: - - should have comment or be unexported - - should have comment \\(or a comment on this block\\) or be unexported - - func Convert_MachinesV1alpha1Machine_To_ClusterV1alpha1Machine should be ConvertMachinesV1alpha1MachineToClusterV1alpha1Machine 
- - func Convert_MachineDeployment_ProviderConfig_To_ProviderSpec should be ConvertMachineDeploymentProviderConfigToProviderSpec - - func Convert_MachineSet_ProviderConfig_To_ProviderSpec should be ConvertMachineSetProviderConfigToProviderSpec - - func Convert_Machine_ProviderConfig_To_ProviderSpec should be ConvertMachineProviderConfigToProviderSpec - - 'counter\.Set is deprecated: Use NewConstMetric' - - 'eviction\.go:221:4: the surrounding loop is unconditionally terminated' - - "cyclomatic complexity 31 of func `verifyMigrateUID` is high" - - "cyclomatic complexity 31 of func `main` is high" - - 'cyclomatic complexity 34 of func `\(\*provider\)\.getConfig` is high' - - 'cyclomatic complexity 31 of func `\(\*provider\)\.Validate` is high' - - 'cyclomatic complexity 33 of func `\(\*provider\)\.Create` is high' + max-same-issues: 0 +formatters: + enable: + - gofmt + exclusions: + generated: lax + paths: + - apis/machines + - third_party$ + - builtin$ + - examples$ diff --git a/.prow/e2e-features.yaml b/.prow/e2e-features.yaml index 4515e5b39..f18ceac5d 100644 --- a/.prow/e2e-features.yaml +++ b/.prow/e2e-features.yaml @@ -17,6 +17,7 @@ presubmits: always_run: true decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-aws: "true" preset-azure: "true" @@ -34,7 +35,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -52,6 +53,7 @@ presubmits: always_run: true decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-openstack: "true" preset-hetzner: "true" @@ -63,7 +65,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: 
quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -78,9 +80,11 @@ presubmits: memory: 7Gi - name: pull-machine-controller-e2e-ubuntu-upgrade - always_run: true + # In-tree CCM is not supported for openstack starting from k8s 1.26. Please see https://github.com/kubermatic/machine-controller/issues/1626 for updates. + # run_if_changed: "(pkg/cloudprovider/provider/openstack/|pkg/userdata)" decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-openstack: "true" preset-hetzner: "true" @@ -91,7 +95,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -109,6 +113,7 @@ presubmits: always_run: false decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-hetzner: "true" preset-e2e-ssh: "true" @@ -118,7 +123,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: diff --git a/.prow/postsubmits.yaml b/.prow/postsubmits.yaml index 4145a8407..a40fcba11 100644 --- a/.prow/postsubmits.yaml +++ b/.prow/postsubmits.yaml @@ -17,6 +17,7 @@ postsubmits: always_run: true decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller branches: - ^main$ # Match on tags @@ -26,7 +27,7 @@ postsubmits: preset-goproxy: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - /bin/bash - -c @@ -48,13 +49,14 @@ postsubmits: always_run: true decorate: 
true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller branches: - ^main$ labels: preset-goproxy: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/upload-gocache.sh" resources: diff --git a/.prow/provider-alibaba.yaml b/.prow/provider-alibaba.yaml index e579225af..38b6f7963 100644 --- a/.prow/provider-alibaba.yaml +++ b/.prow/provider-alibaba.yaml @@ -18,6 +18,7 @@ presubmits: always_run: false decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller max_concurrency: 1 labels: preset-alibaba: "true" @@ -29,7 +30,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: diff --git a/.prow/provider-anexia.yaml b/.prow/provider-anexia.yaml index d4f443cc7..6bf2d11d6 100644 --- a/.prow/provider-anexia.yaml +++ b/.prow/provider-anexia.yaml @@ -14,9 +14,14 @@ presubmits: - name: pull-machine-controller-e2e-anexia + # We've made the E2E tests for Anexia optional since it doesn't support k8s v1.26 at the moment. + # the tests on k8s v1.26+ will fail. + # TODO: These tests shouldn't be marked as optional. 
+ optional: true always_run: false decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-hetzner: "true" preset-e2e-ssh: "true" @@ -27,15 +32,12 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: - "TestAnexiaProvisioningE2E" env: - # OperatingSystemManager does not yet support Anexia - - name: OPERATING_SYSTEM_MANAGER - value: "false" - name: CLOUD_PROVIDER value: anexia securityContext: diff --git a/.prow/provider-aws.yaml b/.prow/provider-aws.yaml index 67468708c..d7945a3bb 100644 --- a/.prow/provider-aws.yaml +++ b/.prow/provider-aws.yaml @@ -14,9 +14,11 @@ presubmits: - name: pull-machine-controller-e2e-aws - run_if_changed: "(pkg/cloudprovider/provider/aws/|pkg/userdata)" + # In-tree CCM is not supported for AWS starting from k8s 1.27. Please see https://github.com/kubermatic/machine-controller/issues/1626 for updates. 
+ # run_if_changed: "(pkg/cloudprovider/provider/aws/|pkg/userdata)" decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-aws: "true" preset-hetzner: "true" @@ -28,7 +30,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -45,44 +47,12 @@ presubmits: limits: memory: 7Gi - - name: pull-machine-controller-e2e-aws-spot-instance-legacy-userdata - always_run: true - decorate: true - clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" - labels: - preset-aws: "true" - preset-hetzner: "true" - preset-e2e-ssh: "true" - preset-rhel: "true" - preset-goproxy: "true" - preset-kind-volume-mounts: "true" - preset-docker-mirror: "true" - preset-kubeconfig-ci: "true" - spec: - containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 - env: - - name: OPERATING_SYSTEM_MANAGER - value: "false" - - name: CLOUD_PROVIDER - value: aws - command: - - "./hack/ci/run-e2e-tests.sh" - args: - - "TestAWSSpotInstanceProvisioningE2E" - securityContext: - privileged: true - resources: - requests: - memory: 7Gi - cpu: 2 - limits: - memory: 7Gi - - name: pull-machine-controller-e2e-aws-arm - run_if_changed: "(pkg/cloudprovider/provider/aws/|pkg/userdata)" + # In-tree CCM is not supported for AWS starting from k8s 1.27. Please see https://github.com/kubermatic/machine-controller/issues/1626 for updates. 
+ # run_if_changed: "(pkg/cloudprovider/provider/aws/|pkg/userdata)" decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-aws: "true" preset-hetzner: "true" @@ -93,7 +63,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -111,9 +81,11 @@ presubmits: memory: 7Gi - name: pull-machine-controller-e2e-aws-ebs-encryption-enabled - run_if_changed: "(pkg/cloudprovider/provider/aws/|pkg/userdata)" + # In-tree CCM is not supported for AWS starting from k8s 1.27. Please see https://github.com/kubermatic/machine-controller/issues/1626 for updates. + # run_if_changed: "(pkg/cloudprovider/provider/aws/|pkg/userdata)" decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-aws: "true" preset-hetzner: "true" @@ -124,7 +96,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -142,9 +114,11 @@ presubmits: memory: 7Gi - name: pull-machine-controller-e2e-aws-spot-instance - always_run: true + # In-tree CCM is not supported for AWS starting from k8s 1.27. Please see https://github.com/kubermatic/machine-controller/issues/1626 for updates. 
+ # run_if_changed: "(pkg/cloudprovider/provider/aws/|pkg/userdata)" decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-aws: "true" preset-hetzner: "true" @@ -156,7 +130,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -177,6 +151,7 @@ presubmits: always_run: false decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-aws: "true" preset-hetzner: "true" @@ -187,7 +162,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -204,41 +179,11 @@ presubmits: limits: memory: 7Gi - - name: pull-machine-controller-e2e-aws-centos8 - always_run: false - decorate: true - clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" - labels: - preset-aws: "true" - preset-hetzner: "true" - preset-e2e-ssh: "true" - preset-goproxy: "true" - preset-kind-volume-mounts: "true" - preset-docker-mirror: "true" - preset-kubeconfig-ci: "true" - spec: - containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 - command: - - "./hack/ci/run-e2e-tests.sh" - args: - - "TestAWSCentOS8ProvisioningE2E" - env: - - name: CLOUD_PROVIDER - value: aws - securityContext: - privileged: true - resources: - requests: - memory: 7Gi - cpu: 2 - limits: - memory: 7Gi - - name: pull-machine-controller-e2e-aws-assume-role always_run: false decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-aws-assume-role: "true" preset-hetzner: "true" @@ -249,7 +194,7 @@ 
presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: diff --git a/.prow/provider-azure.yaml b/.prow/provider-azure.yaml index 4fb60bb79..a17ee309f 100644 --- a/.prow/provider-azure.yaml +++ b/.prow/provider-azure.yaml @@ -17,6 +17,7 @@ presubmits: run_if_changed: "(pkg/cloudprovider/provider/azure/|pkg/userdata)" decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-azure: "true" preset-hetzner: "true" @@ -28,7 +29,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -49,6 +50,7 @@ presubmits: run_if_changed: "(pkg/cloudprovider/provider/azure/|pkg/userdata)" decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-azure: "true" preset-hetzner: "true" @@ -60,7 +62,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -82,6 +84,7 @@ presubmits: always_run: false decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-azure: "true" preset-hetzner: "true" @@ -93,7 +96,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: diff --git a/.prow/provider-digitalocean.yaml b/.prow/provider-digitalocean.yaml 
index 3b7dce3d2..cceb79b37 100644 --- a/.prow/provider-digitalocean.yaml +++ b/.prow/provider-digitalocean.yaml @@ -17,6 +17,7 @@ presubmits: run_if_changed: "(pkg/cloudprovider/provider/digitalocean/|pkg/userdata)" decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-digitalocean: "true" preset-hetzner: "true" @@ -27,7 +28,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: diff --git a/.prow/provider-equinix-metal.yaml b/.prow/provider-equinix-metal.yaml index c7d80af7d..71e8e893e 100644 --- a/.prow/provider-equinix-metal.yaml +++ b/.prow/provider-equinix-metal.yaml @@ -18,6 +18,7 @@ presubmits: run_if_changed: "(pkg/cloudprovider/provider/equinixmetal/|pkg/userdata)" decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-hetzner: "true" preset-e2e-ssh: "true" @@ -28,7 +29,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: diff --git a/.prow/provider-gcp.yaml b/.prow/provider-gcp.yaml index d879ad984..96abd5107 100644 --- a/.prow/provider-gcp.yaml +++ b/.prow/provider-gcp.yaml @@ -14,9 +14,10 @@ presubmits: - name: pull-machine-controller-e2e-gce - run_if_changed: "(pkg/cloudprovider/provider/gce/|pkg/userdata)" + always_run: true decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-gce: "true" preset-hetzner: "true" @@ -28,7 +29,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: 
quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: diff --git a/.prow/provider-hetzner.yaml b/.prow/provider-hetzner.yaml index de08ca29a..9a8d2e898 100644 --- a/.prow/provider-hetzner.yaml +++ b/.prow/provider-hetzner.yaml @@ -17,6 +17,7 @@ presubmits: run_if_changed: "(pkg/cloudprovider/provider/hetzner/|pkg/userdata)" decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-hetzner: "true" preset-e2e-ssh: "true" @@ -26,7 +27,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: diff --git a/.prow/provider-kubevirt.yaml b/.prow/provider-kubevirt.yaml index 61d12af71..f8041513e 100644 --- a/.prow/provider-kubevirt.yaml +++ b/.prow/provider-kubevirt.yaml @@ -14,9 +14,11 @@ presubmits: - name: pull-machine-controller-e2e-kubevirt - run_if_changed: "(pkg/cloudprovider/provider/kubevirt/|pkg/userdata)" + # run_if_changed: "(pkg/cloudprovider/provider/kubevirt/|pkg/userdata)" + always_run: false decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller max_concurrency: 1 labels: preset-kubevirt: "true" @@ -29,7 +31,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: diff --git a/.prow/provider-linode.yaml b/.prow/provider-linode.yaml index ac9d1a895..0515d9a9e 100644 --- a/.prow/provider-linode.yaml +++ b/.prow/provider-linode.yaml @@ -18,6 +18,7 @@ presubmits: optional: true decorate: true clone_uri: 
"ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-hetzner: "true" preset-e2e-ssh: "true" @@ -28,7 +29,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: diff --git a/.prow/provider-nutanix.yaml b/.prow/provider-nutanix.yaml index 1b303e5c5..0ecb39a0f 100644 --- a/.prow/provider-nutanix.yaml +++ b/.prow/provider-nutanix.yaml @@ -17,6 +17,7 @@ presubmits: run_if_changed: "(pkg/cloudprovider/provider/nutanix/)" decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-hetzner: "true" preset-e2e-ssh: "true" @@ -27,7 +28,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: diff --git a/.prow/provider-openstack.yaml b/.prow/provider-openstack.yaml index b787f092a..b7bc454c1 100644 --- a/.prow/provider-openstack.yaml +++ b/.prow/provider-openstack.yaml @@ -14,13 +14,11 @@ presubmits: - name: pull-machine-controller-e2e-openstack - run_if_changed: "(pkg/cloudprovider/provider/openstack/|pkg/userdata)" - # We've made the E2E tests for OpenStack optional since in-tree cloud provider for OpenStack was removed with k8s v1.26. Since MC depends on the in-tree cloud provider - # the tests on k8s v1.26+ will fail. - # TODO: These tests shouldn't be marked as optional. - optional: true + # In-tree CCM is not supported for openstack starting from k8s 1.26. Please see https://github.com/kubermatic/machine-controller/issues/1626 for updates. 
+ # run_if_changed: "(pkg/cloudprovider/provider/openstack/|pkg/userdata)" decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-openstack: "true" preset-hetzner: "true" @@ -32,7 +30,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -50,9 +48,11 @@ presubmits: memory: 7Gi - name: pull-machine-controller-e2e-openstack-project-auth - run_if_changed: "(pkg/cloudprovider/provider/openstack/|pkg/userdata)" + # In-tree CCM is not supported for openstack starting from k8s 1.26. Please see https://github.com/kubermatic/machine-controller/issues/1626 for updates. + # run_if_changed: "(pkg/cloudprovider/provider/openstack/|pkg/userdata)" decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-openstack: "true" preset-hetzner: "true" @@ -64,7 +64,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: diff --git a/.prow/provider-scaleway.yaml b/.prow/provider-scaleway.yaml index 9f0aab11a..e65843a9c 100644 --- a/.prow/provider-scaleway.yaml +++ b/.prow/provider-scaleway.yaml @@ -17,6 +17,7 @@ presubmits: always_run: false decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-scaleway: "true" preset-hetzner: "true" @@ -27,7 +28,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: diff 
--git a/.prow/provider-vmware-cloud-director.yaml b/.prow/provider-vmware-cloud-director.yaml index 481fb8d64..d693a0957 100644 --- a/.prow/provider-vmware-cloud-director.yaml +++ b/.prow/provider-vmware-cloud-director.yaml @@ -16,8 +16,11 @@ presubmits: - name: pull-machine-controller-e2e-vmware-cloud-director always_run: false decorate: true + # Please check: https://github.com/kubermatic/machine-controller/issues/1619 + optional: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" - run_if_changed: "(pkg/cloudprovider/provider/vmwareclouddirector/|pkg/userdata)" + path_alias: k8c.io/machine-controller + run_if_changed: "(pkg/cloudprovider/provider/vmwareclouddirector/)" labels: preset-vcloud-director: "true" preset-hetzner: "true" @@ -29,7 +32,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: diff --git a/.prow/provider-vsphere.yaml b/.prow/provider-vsphere.yaml index 2343f0080..d1680ae08 100644 --- a/.prow/provider-vsphere.yaml +++ b/.prow/provider-vsphere.yaml @@ -17,6 +17,7 @@ presubmits: run_if_changed: "(pkg/cloudprovider/provider/vsphere/|pkg/userdata)" decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-hetzner: "true" preset-e2e-ssh: "true" @@ -28,7 +29,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -49,6 +50,7 @@ presubmits: always_run: false decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-vsphere: "true" preset-rhel: "true" @@ -60,7 +62,7 @@ presubmits: preset-kubeconfig-ci: "true" 
spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -81,6 +83,7 @@ presubmits: always_run: false decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-vsphere: "true" preset-rhel: "true" @@ -92,7 +95,7 @@ presubmits: preset-kubeconfig-ci: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 command: - "./hack/ci/run-e2e-tests.sh" args: @@ -108,3 +111,69 @@ presubmits: cpu: 2 limits: memory: 7Gi + + - name: pull-machine-controller-e2e-vsphere-multiple-networks + always_run: false + decorate: true + clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller + labels: + preset-hetzner: "true" + preset-e2e-ssh: "true" + preset-vsphere: "true" + preset-rhel: "true" + preset-goproxy: "true" + preset-kind-volume-mounts: "true" + preset-docker-mirror: "true" + preset-kubeconfig-ci: "true" + spec: + containers: + - image: quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 + command: + - "./hack/ci/run-e2e-tests.sh" + args: + - "TestVsphereMultipleNICProvisioningE2E" + env: + - name: CLOUD_PROVIDER + value: vsphere + securityContext: + privileged: true + resources: + requests: + memory: 7Gi + cpu: 2 + limits: + memory: 7Gi + + - name: pull-machine-controller-e2e-vsphere-anti-affinity + always_run: false + decorate: true + clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller + labels: + preset-hetzner: "true" + preset-e2e-ssh: "true" + preset-vsphere: "true" + preset-rhel: "true" + preset-goproxy: "true" + preset-kind-volume-mounts: "true" + preset-docker-mirror: "true" + preset-kubeconfig-ci: "true" + spec: + containers: + - image: 
quay.io/kubermatic/build:go-1.25-node-22-kind-0.30-8 + command: + - "./hack/ci/run-e2e-tests.sh" + args: + - "TestVsphereAntiAffinityProvisioningE2E" + env: + - name: CLOUD_PROVIDER + value: vsphere + securityContext: + privileged: true + resources: + requests: + memory: 7Gi + cpu: 2 + limits: + memory: 7Gi diff --git a/.prow/verify.yaml b/.prow/verify.yaml index 6aad15648..a7703aab5 100644 --- a/.prow/verify.yaml +++ b/.prow/verify.yaml @@ -17,11 +17,12 @@ presubmits: always_run: true decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-goproxy: "true" spec: containers: - - image: golang:1.19.4 + - image: quay.io/kubermatic/build:go-1.25-node-22-8 command: - make args: @@ -38,11 +39,12 @@ presubmits: always_run: true decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-goproxy: "true" spec: containers: - - image: golang:1.19.4 + - image: quay.io/kubermatic/build:go-1.25-node-22-8 command: - make args: @@ -59,11 +61,12 @@ presubmits: always_run: true decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-goproxy: "true" spec: containers: - - image: golangci/golangci-lint:v1.50.1 + - image: quay.io/kubermatic/build:go-1.25-node-22-8 command: - make args: @@ -79,15 +82,16 @@ presubmits: always_run: true decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-goproxy: "true" spec: containers: - - image: quay.io/kubermatic/yamllint:0.1 + - image: quay.io/kubermatic/build:go-1.25-node-22-8 command: - - "sh" - - "-c" - - "yamllint -c .yamllint.conf ." 
+ - make + args: + - yamllint resources: requests: memory: 32Mi @@ -100,11 +104,12 @@ presubmits: run_if_changed: "^hack/" decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller spec: containers: - - image: docker.io/mvdan/shfmt:v3.3.1 + - image: quay.io/kubermatic/build:go-1.25-node-22-8 command: - - "/bin/shfmt" + - "/usr/local/bin/shfmt" args: # -l list files whose formatting differs from shfmt's # -d error with a diff when the formatting differs @@ -128,9 +133,10 @@ presubmits: always_run: true decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller spec: containers: - - image: quay.io/kubermatic-labs/boilerplate:v0.2.0 + - image: quay.io/kubermatic/build:go-1.25-node-22-8 command: - "./hack/verify-boilerplate.sh" resources: @@ -145,27 +151,29 @@ presubmits: run_if_changed: "^go.(mod|sum)$" decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-goproxy: "true" spec: containers: - - image: quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 + - image: quay.io/kubermatic/build:go-1.25-node-22-8 command: - ./hack/verify-licenses.sh resources: requests: - memory: 2Gi - cpu: 2 + memory: 1Gi + cpu: 1 - name: pull-machine-controller-test always_run: true decorate: true clone_uri: "ssh://git@github.com/kubermatic/machine-controller.git" + path_alias: k8c.io/machine-controller labels: preset-goproxy: "true" spec: containers: - - image: golang:1.19.4 + - image: quay.io/kubermatic/build:go-1.25-node-22-8 command: - make args: diff --git a/.wwhrd.yml b/.wwhrd.yml index 677ba2b2f..43eb38e7a 100644 --- a/.wwhrd.yml +++ b/.wwhrd.yml @@ -27,10 +27,6 @@ allowlist: exceptions: - github.com/hashicorp/golang-lru # MPL-2.0 - github.com/hashicorp/golang-lru/simplelru # MPL-2.0 - - github.com/embik/nutanix-client-go/pkg/client # MPL-2.0 - - 
github.com/embik/nutanix-client-go/pkg/client/v3 # MPL-2.0 - - github.com/embik/nutanix-client-go/internal/utils # MPL-2.0 - - github.com/ajeddeloh/go-json # Since it's a fork, https://github.com/golang/go/blob/master/LICENSE - github.com/hashicorp/go-version # MPL-2.0 - github.com/hashicorp/go-cleanhttp # MPL-2.0 - github.com/hashicorp/go-retryablehttp # MPL-2.0 diff --git a/.yamllint.conf b/.yamllint.conf index dda206acc..3c79f7897 100644 --- a/.yamllint.conf +++ b/.yamllint.conf @@ -5,3 +5,6 @@ rules: document-start: disable comments: disable line-length: disable + +ignore: | + .golangci.yml \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 380cbde28..857dc2bd8 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,19 +12,18 @@ # See the License for the specific language governing permissions and # limitations under the License. -ARG GO_VERSION=1.19.4 +ARG GO_VERSION=1.25.7 FROM docker.io/golang:${GO_VERSION} AS builder -WORKDIR /go/src/github.com/kubermatic/machine-controller +WORKDIR /go/src/k8c.io/machine-controller COPY . . 
RUN make all -FROM alpine:3.16 +FROM alpine:3.23 RUN apk add --no-cache ca-certificates cdrkit COPY --from=builder \ - /go/src/github.com/kubermatic/machine-controller/machine-controller \ - /go/src/github.com/kubermatic/machine-controller/machine-controller-userdata-* \ - /go/src/github.com/kubermatic/machine-controller/webhook \ + /go/src/k8c.io/machine-controller/machine-controller \ + /go/src/k8c.io/machine-controller/webhook \ /usr/local/bin/ USER nobody diff --git a/Makefile b/Makefile index ae3ddf1df..53c3a8cdb 100644 --- a/Makefile +++ b/Makefile @@ -14,13 +14,13 @@ SHELL = /bin/bash -eu -o pipefail -GO_VERSION ?= 1.19.4 +GO_VERSION ?= 1.25.7 GOOS ?= $(shell go env GOOS) export CGO_ENABLED := 0 -export GIT_TAG ?= $(shell git tag --points-at HEAD) +export GIT_TAG ?= $(shell git tag --points-at HEAD 'v*') export GOFLAGS?=-mod=readonly -trimpath @@ -33,8 +33,7 @@ IMAGE_TAG = \ $(shell echo $$(git rev-parse HEAD && if [[ -n $$(git status --porcelain) ]]; then echo '-dirty'; fi)|tr -d ' ') IMAGE_NAME ?= $(REGISTRY)/$(REGISTRY_NAMESPACE)/machine-controller:$(IMAGE_TAG) -OS = amzn2 centos ubuntu rhel flatcar rockylinux -USERDATA_BIN = $(patsubst %, machine-controller-userdata-%, $(OS)) +OS = amzn2 ubuntu rhel flatcar rockylinux BASE64_ENC = \ $(shell if base64 -w0 <(echo "") &> /dev/null; then echo "base64 -w0"; else echo "base64 -b0"; fi) @@ -43,29 +42,25 @@ BASE64_ENC = \ all: build-machine-controller webhook .PHONY: build-machine-controller -build-machine-controller: machine-controller $(USERDATA_BIN) - -machine-controller-userdata-%: cmd/userdata/% $(shell find cmd/userdata/$* pkg -name '*.go') - GOOS=$(GOOS) go build -v \ - $(LDFLAGS) \ - -o $@ \ - github.com/kubermatic/machine-controller/cmd/userdata/$* +build-machine-controller: machine-controller %: cmd/% $(shell find cmd/$* pkg -name '*.go') GOOS=$(GOOS) go build -v \ $(LDFLAGS) \ -o $@ \ - github.com/kubermatic/machine-controller/cmd/$* + k8c.io/machine-controller/cmd/$* .PHONY: clean -clean: clean-certs 
- rm -f machine-controller \ - webhook \ - $(USERDATA_BIN) +clean: + rm -f machine-controller webhook .PHONY: lint lint: golangci-lint run -v + make -C sdk lint + +yamllint: + yamllint -c .yamllint.conf . .PHONY: docker-image docker-image: @@ -86,64 +81,28 @@ docker-image-publish: docker-image .PHONY: test-unit-docker test-unit-docker: @docker run --rm \ - -v $$PWD:/go/src/github.com/kubermatic/machine-controller \ + -v $$PWD:/go/src/k8c.io/machine-controller \ -v $$PWD/.buildcache:/cache \ -e GOCACHE=/cache \ - -w /go/src/github.com/kubermatic/machine-controller \ + -w /go/src/k8c.io/machine-controller \ golang:$(GO_VERSION) \ make test-unit "GOFLAGS=$(GOFLAGS)" .PHONY: test-unit test-unit: go test -v ./... + cd sdk && go test -v ./... .PHONY: build-tests build-tests: go test -run nope ./... + cd sdk && go test -run nope ./... go test -tags e2e -run nope ./... -examples/ca-key.pem: - openssl genrsa -out examples/ca-key.pem 4096 - -examples/ca-cert.pem: examples/ca-key.pem - openssl req -x509 -new -nodes -key examples/ca-key.pem \ - -subj "/C=US/ST=CA/O=Acme/CN=k8s-machine-controller-ca" \ - -sha256 -days 10000 -out examples/ca-cert.pem - -examples/admission-key.pem: examples/ca-cert.pem - openssl genrsa -out examples/admission-key.pem 2048 - chmod 0600 examples/admission-key.pem - -examples/admission-cert.pem: examples/admission-key.pem - openssl req -new -sha256 \ - -key examples/admission-key.pem \ - -config examples/webhook-certificate.cnf -extensions v3_req \ - -out examples/admission.csr - openssl x509 -req \ - -sha256 \ - -days 10000 \ - -extensions v3_req \ - -extfile examples/webhook-certificate.cnf \ - -in examples/admission.csr \ - -CA examples/ca-cert.pem \ - -CAkey examples/ca-key.pem \ - -CAcreateserial \ - -out examples/admission-cert.pem - -clean-certs: - cd examples/ && rm -f admission.csr admission-cert.pem admission-key.pem ca-cert.pem ca-key.pem - -.PHONY: deploy -deploy: examples/admission-cert.pem - @cat examples/machine-controller.yaml \ - 
|sed "s/__admission_ca_cert__/$(shell cat examples/ca-cert.pem|$(BASE64_ENC))/g" \ - |sed "s/__admission_cert__/$(shell cat examples/admission-cert.pem|$(BASE64_ENC))/g" \ - |sed "s/__admission_key__/$(shell cat examples/admission-key.pem|$(BASE64_ENC))/g" \ - |kubectl apply -f - - .PHONY: check-dependencies check-dependencies: go mod verify + cd sdk && go mod verify .PHONY: download-gocache download-gocache: diff --git a/OWNERS b/OWNERS index 46fe4a023..74ccec94c 100644 --- a/OWNERS +++ b/OWNERS @@ -1,10 +1,10 @@ # See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md approvers: - - machine-controller-maintainers + - sig-cluster-management reviewers: - - machine-controller-maintainers + - sig-cluster-management labels: - sig/cluster-management diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 879077cbf..0f82fc715 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -2,18 +2,18 @@ # To change team associations, update the GitHub teams via https://github.com/kubermatic/access. aliases: - machine-controller-maintainers: + sig-cluster-management: + - adoi - ahmedwaleedmalik - - embik - - hdurand0710 + - buraksekili + - cnvergence + - julioc-p - kron4eg - - mfranczy + - mgoltzsche - moadqassem - moelsayed - - sankalp-r - - themue + - mohamed-rafraf + - rajasahil + - soer3n - xmudrii - xrstf - sig-virtualization: - - hdurand0710 - - mfranczy diff --git a/README.md b/README.md index cd9f9b6fb..3cf49f29c 100644 --- a/README.md +++ b/README.md @@ -1,69 +1,92 @@ # Kubermatic machine-controller +**Important Note: User data plugins for machine-controller have been removed. [Operating System Manager](https://github.com/kubermatic/operating-system-manager) is the successor of user data plugins. It's responsible for creating and managing the required configurations for worker nodes in a Kubernetes cluster with better modularity and extensibility. 
Please refer to [Operating System Manager][8] for more details.** + ## Table of Contents - [Kubermatic machine-controller](#kubermatic-machine-controller) - [Table of Contents](#table-of-contents) - [Features](#features) - - [What works](#what-works) - - [Supported Kubernetes versions](#supported-kubernetes-versions) - - [What does not work](#what-does-not-work) + - [What Works](#what-works) + - [Supported Kubernetes Versions](#supported-kubernetes-versions) + - [Community Providers](#community-providers) + - [What Doesn't Work](#what-doesnt-work) - [Quickstart](#quickstart) - - [Deploy the machine-controller](#deploy-the-machine-controller) - - [Creating a machineDeployment](#creating-a-machinedeployment) - - [Advanced usage](#advanced-usage) - - [Specifying the apiserver endpoint](#specifying-the-apiserver-endpoint) - - [CA-data](#ca-data) - - [Apiserver endpoint](#apiserver-endpoint) + - [Deploy machine-controller](#deploy-machine-controller) + - [Creating a `MachineDeployment`](#creating-a-machinedeployment) + - [Advanced Usage](#advanced-usage) + - [Specifying the Apiserver Endpoint](#specifying-the-apiserver-endpoint) + - [CA Data](#ca-data) + - [Apiserver Endpoint](#apiserver-endpoint) - [Example cluster-info ConfigMap](#example-cluster-info-configmap) - [Development](#development) - [Testing](#testing) - - [Unittests](#unittests) - - [End-to-End locally](#end-to-end-locally) + - [Unit Tests](#unit-tests) + - [End-to-End Locally](#end-to-end-locally) - [Troubleshooting](#troubleshooting) - [Contributing](#contributing) - - [Before you start](#before-you-start) - - [Pull requests](#pull-requests) + - [Before You Start](#before-you-start) + - [Pull Requests](#pull-requests) - [Changelog](#changelog) ## Features -### What works +### What Works -- Creation of worker nodes on AWS, Digitalocean, Openstack, Azure, Google Cloud Platform, Nutanix, VMWare Cloud Director, VMWare Vsphere, Linode, Hetzner cloud and Kubevirt (experimental) -- Using Ubuntu, Flatcar or 
CentOS 7 distributions ([not all distributions work on all providers](/docs/operating-system.md)) +- Creation of worker nodes on AWS, Digitalocean, Openstack, Azure, Google Cloud Platform, Nutanix, VMWare Cloud Director, VMWare vSphere, Hetzner Cloud and Kubevirt +- Using Ubuntu, Flatcar, or Rocky Linux 8 distributions ([not all distributions work on all providers](/docs/operating-system.md)) -### Supported Kubernetes versions +### Supported Kubernetes Versions machine-controller tries to follow the Kubernetes version [support policy](https://kubernetes.io/docs/setup/release/version-skew-policy/) as close as possible. Currently supported K8S versions are: -- 1.26 -- 1.25 -- 1.24 +- 1.35 +- 1.34 +- 1.33 +- 1.32 + +### Community Providers + +Some cloud providers implemented in machine-controller have been graciously contributed by community members. Those cloud providers are not part of the automated end-to-end +tests run by the machine-controller developers and thus, their status cannot be guaranteed. The machine-controller developers assume that they are functional, but can only +offer limited support for new features or bugfixes in those providers. + +The current list of community providers is: -## What does not work +- Linode +- Vultr +- OpenNebula + +## What Doesn't Work - Master creation (Not planned at the moment) ## Quickstart -### Deploy the machine-controller +### Deploy machine-controller + +- Install [cert-manager](https://cert-manager.io/) for generating certificates used by webhooks since they serve using HTTPS + +```terminal +kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.2/cert-manager.yaml +``` -`make deploy` +- Run `kubectl apply -f examples/operating-system-manager.yaml` to deploy the operating-system-manager which is responsible for managing user data for worker machines. +- Run `kubectl apply -f examples/machine-controller.yaml` to deploy the machine-controller. 
-### Creating a machineDeployment +### Creating a `MachineDeployment` ```bash # edit examples/$cloudprovider-machinedeployment.yaml & create the machineDeployment kubectl create -f examples/$cloudprovider-machinedeployment.yaml ``` -## Advanced usage +## Advanced Usage -### Specifying the apiserver endpoint +### Specifying the Apiserver Endpoint By default the controller looks for a `cluster-info` ConfigMap within the `kube-public` Namespace. If one is found which contains a minimal kubeconfig (kubeadm cluster have them by default), this kubeconfig will be used for the node bootstrapping. @@ -74,11 +97,11 @@ The kubeconfig only needs to contain two things: If no ConfigMap can be found: -### CA-data +### CA Data -The CA will be loaded from the passed kubeconfig when running outside the cluster or from `/var/run/secrets/kubernetes.io/serviceaccount/ca.crt` when running inside the cluster. +The Certificate Authority (CA) will be loaded from the passed kubeconfig when running outside the cluster or from `/var/run/secrets/kubernetes.io/serviceaccount/ca.crt` when running inside the cluster. -### Apiserver endpoint +### Apiserver Endpoint The first endpoint from the kubernetes endpoints will be taken. `kubectl get endpoints kubernetes -o yaml` @@ -109,11 +132,11 @@ data: ### Testing -#### Unittests +#### Unit Tests Simply run `make test-unit` -#### End-to-End locally +#### End-to-End Locally **_[WIP]_** @@ -125,13 +148,12 @@ If you encounter issues [file an issue][1] or talk to us on the [#kubermatic cha Thanks for taking the time to join our community and start contributing! -### Before you start +### Before You Start - Please familiarize yourself with the [Code of Conduct][4] before contributing. - See [CONTRIBUTING.md][5] for instructions on the developer certificate of origin that we require. -- Read how [we're using ZenHub][6] for project and roadmap planning -### Pull requests +### Pull Requests - We welcome pull requests. 
Feel free to dig through the [issues][1] and jump in. @@ -144,5 +166,5 @@ See [the list of releases][7] to find out about feature changes. [3]: http://slack.kubermatic.io/ [4]: code-of-conduct.md [5]: CONTRIBUTING.md -[6]: Zenhub.md [7]: https://github.com/kubermatic/machine-controller/releases +[8]: https://docs.kubermatic.com/operatingsystemmanager diff --git a/Zenhub.md b/Zenhub.md deleted file mode 100644 index a041cb39e..000000000 --- a/Zenhub.md +++ /dev/null @@ -1,15 +0,0 @@ -# ZenHub - -As an Open Source community, it is necessary for our work, communication, and collaboration to be done in the open. -GitHub provides a central repository for code, pull requests, issues, and documentation. When applicable, we will use Google Docs for design reviews, proposals, and other working documents. - -While GitHub issues, milestones, and labels generally work pretty well, the Kubermatic team has found that product planning requires some additional tooling that GitHub projects do not offer. - -In our effort to minimize tooling while enabling product management insights, we have decided to use [ZenHub Open-Source](https://www.zenhub.com/blog/open-source/) to overlay product and project tracking on top of GitHub. -ZenHub is a GitHub application that provides Kanban visualization, Epic tracking, fine-grained prioritization, and more. It's primary backing storage system is existing GitHub issues along with additional metadata stored in ZenHub's database. - -If you are an user or Developer, you do not _need_ to use ZenHub for your regular workflow (e.g to see open bug reports or feature requests, work on pull requests). However, if you'd like to be able to visualize the high-level project goals and roadmap, you will need to use the free version of ZenHub. - -## Using ZenHub - -ZenHub can be integrated within the GitHub interface using their [Chrome or FireFox extensions](https://www.zenhub.com/extension). In addition, you can use their dedicated web application. 
diff --git a/cmd/machine-controller/main.go b/cmd/machine-controller/main.go index 2a1b0a1a3..8197231ae 100644 --- a/cmd/machine-controller/main.go +++ b/cmd/machine-controller/main.go @@ -20,27 +20,29 @@ import ( "context" "flag" "fmt" - "net" + "log" "net/http" "net/http/pprof" "strings" "time" + "github.com/go-logr/zapr" "github.com/prometheus/client_golang/prometheus" - - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1/migrations" - cloudprovidertypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/types" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/util" - clusterinfo "github.com/kubermatic/machine-controller/pkg/clusterinfo" - "github.com/kubermatic/machine-controller/pkg/containerruntime" - machinecontroller "github.com/kubermatic/machine-controller/pkg/controller/machine" - machinedeploymentcontroller "github.com/kubermatic/machine-controller/pkg/controller/machinedeployment" - machinesetcontroller "github.com/kubermatic/machine-controller/pkg/controller/machineset" - "github.com/kubermatic/machine-controller/pkg/controller/nodecsrapprover" - "github.com/kubermatic/machine-controller/pkg/health" - machinesv1alpha1 "github.com/kubermatic/machine-controller/pkg/machines/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/node" + "go.uber.org/zap" + + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + "k8c.io/machine-controller/pkg/cloudprovider/util" + clusterinfo "k8c.io/machine-controller/pkg/clusterinfo" + machinecontroller "k8c.io/machine-controller/pkg/controller/machine" + machinedeploymentcontroller "k8c.io/machine-controller/pkg/controller/machinedeployment" + machinesetcontroller "k8c.io/machine-controller/pkg/controller/machineset" + "k8c.io/machine-controller/pkg/controller/nodecsrapprover" + "k8c.io/machine-controller/pkg/health" + machinecontrollerlog 
"k8c.io/machine-controller/pkg/log" + "k8c.io/machine-controller/pkg/migrations" + "k8c.io/machine-controller/pkg/node" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + machinesv1alpha1 "k8c.io/machine-controller/sdk/apis/machines/v1alpha1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/types" @@ -48,11 +50,13 @@ import ( "k8s.io/client-go/kubernetes/scheme" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" - "k8s.io/klog" + "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/healthz" + ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/manager/signals" "sigs.k8s.io/controller-runtime/pkg/metrics" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" ) var ( @@ -71,24 +75,33 @@ var ( enableLeaderElection bool leaderElectionNamespace string - useOSM bool - useExternalBootstrap bool - + useExternalBootstrap bool + overrideBootstrapKubeletAPIServer string nodeCSRApprover bool - nodeHTTPProxy string - nodeNoProxy string - nodeInsecureRegistries string - nodeRegistryMirrors string - nodePauseImage string - nodeContainerRuntime string - podCIDR string nodePortRange string - nodeRegistryCredentialsSecret string - nodeContainerdVersion string - nodeContainerdRegistryMirrors = containerruntime.RegistryMirrorsFlags{} - overrideBootstrapKubeletAPIServer string + + nodeHTTPProxy string + nodeNoProxy string + nodeInsecureRegistries string + nodeRegistryMirrors string + nodePauseImage string + nodeContainerRuntime string + nodeRegistryCredentialsSecret string + nodeContainerdVersion string + nodeContainerdRegistryMirrors sliceVar ) +type sliceVar []string + +func (s *sliceVar) String() string { + return strings.Join(*s, ",") +} + +func (s *sliceVar) Set(value string) error { + *s = append(*s, value) + return nil +} + const ( defaultLeaderElectionNamespace = 
"kube-system" defaultLeaderElectionID = "machine-controller" @@ -130,19 +143,19 @@ type controllerRunOptions struct { node machinecontroller.NodeSettings - // Enable external bootstrap management by consuming secrets that are used to configure an instance's user-data. - useExternalBootstrap bool - // A port range to reserve for services with NodePort visibility. nodePortRange string overrideBootstrapKubeletAPIServer string + + log *zap.SugaredLogger } func main() { nodeFlags := node.NewFlags(flag.CommandLine) + logFlags := machinecontrollerlog.NewDefaultOptions() + logFlags.AddFlags(flag.CommandLine) - klog.InitFlags(nil) // This is also being registered in kubevirt.io/kubevirt/pkg/kubecli/kubecli.go so // we have to guard it. // TODO: Evaluate alternatives to importing the CLI. Generate our own client? Use a dynamic client? @@ -152,7 +165,7 @@ func main() { if flag.Lookup("master") == nil { flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.") } - flag.StringVar(&clusterDNSIPs, "cluster-dns", "10.10.10.10", "Comma-separated list of DNS server IP address.") + flag.StringVar(&clusterDNSIPs, "cluster-dns", "", "DEPRECATED: This flag is no-op and will have no effect. This value should be configured in the user-data provider, such as operating-system-manager.") flag.IntVar(&workerCount, "worker-count", 1, "Number of workers to process machines. 
Using a high number with a lot of machines might cause getting rate-limited from your cloud provider.") flag.StringVar(&healthProbeAddress, "health-probe-address", "127.0.0.1:8085", "The address on which the liveness check on /healthz and readiness check on /readyz will be available") flag.StringVar(&metricsAddress, "metrics-address", "127.0.0.1:8080", "The address on which Prometheus metrics will be available under /metrics") @@ -164,61 +177,65 @@ func main() { flag.StringVar(&bootstrapTokenServiceAccountName, "bootstrap-token-service-account-name", "", "When set use the service account token from this SA as bootstrap token instead of creating a temporary one. Passed in namespace/name format") flag.BoolVar(&profiling, "enable-profiling", false, "when set, enables the endpoints on the http server under /debug/pprof/") flag.DurationVar(&skipEvictionAfter, "skip-eviction-after", 2*time.Hour, "Skips the eviction if a machine is not gone after the specified duration.") - flag.StringVar(&nodeHTTPProxy, "node-http-proxy", "", "If set, it configures the 'HTTP_PROXY' & 'HTTPS_PROXY' environment variable on the nodes.") - flag.StringVar(&nodeNoProxy, "node-no-proxy", ".svc,.cluster.local,localhost,127.0.0.1", "If set, it configures the 'NO_PROXY' environment variable on the nodes.") - flag.StringVar(&nodeInsecureRegistries, "node-insecure-registries", "", "Comma separated list of registries which should be configured as insecure on the container runtime") - flag.StringVar(&nodeRegistryMirrors, "node-registry-mirrors", "", "Comma separated list of Docker image mirrors") - flag.StringVar(&nodePauseImage, "node-pause-image", "", "Image for the pause container including tag. If not set, the kubelet default will be used: https://kubernetes.io/docs/reference/command-line-tools-reference/kubelet/") - flag.String("node-kubelet-repository", "quay.io/kubermatic/kubelet", "[NO-OP] Repository for the kubelet container. 
Has no effects.") - flag.StringVar(&nodeContainerRuntime, "node-container-runtime", "docker", "container-runtime to deploy") - flag.StringVar(&nodeContainerdVersion, "node-containerd-version", "", "version of containerd to deploy") - flag.Var(&nodeContainerdRegistryMirrors, "node-containerd-registry-mirrors", "Configure registry mirrors endpoints. Can be used multiple times to specify multiple mirrors") + flag.BoolVar(&useExternalBootstrap, "use-external-bootstrap", true, "DEPRECATED: This flag is no-op and will have no effect since machine-controller only supports external bootstrap mechanism. This flag is only kept for backwards compatibility and will be removed in the future") + flag.StringVar(&overrideBootstrapKubeletAPIServer, "override-bootstrap-kubelet-apiserver", "", "Override for the API server address used in worker nodes bootstrap-kubelet.conf") flag.StringVar(&caBundleFile, "ca-bundle", "", "path to a file containing all PEM-encoded CA certificates (will be used instead of the host's certificates if set)") flag.BoolVar(&nodeCSRApprover, "node-csr-approver", true, "Enable NodeCSRApprover controller to automatically approve node serving certificate requests") - flag.StringVar(&podCIDR, "pod-cidr", "172.25.0.0/16", "WARNING: flag is unused, kept only for backwards compatibility") flag.StringVar(&nodePortRange, "node-port-range", "30000-32767", "A port range to reserve for services with NodePort visibility") - flag.StringVar(&nodeRegistryCredentialsSecret, "node-registry-credentials-secret", "", "A Secret object reference, that contains auth info for image registry in namespace/secret-name form, example: kube-system/registry-credentials. 
See doc at https://github.com/kubermaric/machine-controller/blob/main/docs/registry-authentication.md") - flag.BoolVar(&useOSM, "use-osm", false, "DEPRECATED: use osm controller for node bootstrap [use use-external-bootstrap instead]") - flag.BoolVar(&useExternalBootstrap, "use-external-bootstrap", false, "use an external bootstrap provider for instance user-data (e.g. operating-system-manager, also known as OSM)") - flag.StringVar(&overrideBootstrapKubeletAPIServer, "override-bootstrap-kubelet-apiserver", "", "Override for the API server address used in worker nodes bootstrap-kubelet.conf") + + flag.StringVar(&nodeHTTPProxy, "node-http-proxy", "", "DEPRECATED: This flag is no-op and will have no effect. This value should be configured in the user-data provider, such as operating-system-manager.") + flag.StringVar(&nodeNoProxy, "node-no-proxy", "", "DEPRECATED: This flag is no-op and will have no effect. This value should be configured in the user-data provider, such as operating-system-manager.") + flag.StringVar(&nodeInsecureRegistries, "node-insecure-registries", "", "DEPRECATED: This flag is no-op and will have no effect. This value should be configured in the user-data provider, such as operating-system-manager.") + flag.StringVar(&nodeRegistryMirrors, "node-registry-mirrors", "", "DEPRECATED: This flag is no-op and will have no effect. This value should be configured in the user-data provider, such as operating-system-manager.") + flag.StringVar(&nodePauseImage, "node-pause-image", "", "DEPRECATED: This flag is no-op and will have no effect. This value should be configured in the user-data provider, such as operating-system-manager.") + flag.StringVar(&nodeContainerRuntime, "node-container-runtime", "", "DEPRECATED: This flag is no-op and will have no effect. 
This value should be configured in the user-data provider, such as operating-system-manager.") + flag.StringVar(&nodeContainerdVersion, "node-containerd-version", "", "DEPRECATED: This flag is no-op and will have no effect. This value should be configured in the user-data provider, such as operating-system-manager.") + flag.Var(&nodeContainerdRegistryMirrors, "node-containerd-registry-mirrors", "DEPRECATED: This flag is no-op and will have no effect. This value should be configured in the user-data provider, such as operating-system-manager.") + flag.StringVar(&nodeRegistryCredentialsSecret, "node-registry-credentials-secret", "", "DEPRECATED: This flag is no-op and will have no effect. This value should be configured in the user-data provider, such as operating-system-manager.") flag.Parse() - kubeconfig = flag.Lookup("kubeconfig").Value.(flag.Getter).Get().(string) - masterURL = flag.Lookup("master").Value.(flag.Getter).Get().(string) - clusterDNSIPs, err := parseClusterDNSIPs(clusterDNSIPs) - if err != nil { - klog.Fatalf("invalid cluster dns specified: %v", err) + if err := logFlags.Validate(); err != nil { + log.Fatalf("Invalid options: %v", err) } + rawLog := machinecontrollerlog.New(logFlags.Debug, logFlags.Format) + log := rawLog.Sugar() + + // set the logger used by controller-runtime + ctrlruntimelog.SetLogger(zapr.NewLogger(rawLog.WithOptions(zap.AddCallerSkip(1)))) + + kubeconfig = flag.Lookup("kubeconfig").Value.(flag.Getter).Get().(string) + masterURL = flag.Lookup("master").Value.(flag.Getter).Get().(string) + var parsedJoinClusterTimeout *time.Duration if joinClusterTimeout != "" { parsedJoinClusterTimeoutLiteral, err := time.ParseDuration(joinClusterTimeout) parsedJoinClusterTimeout = &parsedJoinClusterTimeoutLiteral if err != nil { - klog.Fatalf("failed to parse join-cluster-timeout as duration: %v", err) + log.Fatalw("Failed to parse join-cluster-timeout as duration", zap.Error(err)) } } // Needed for migrations if err := 
machinesv1alpha1.AddToScheme(scheme.Scheme); err != nil { - klog.Fatalf("failed to add machinesv1alpha1 api to scheme: %v", err) + log.Fatalw("Failed to add api to scheme", "api", machinesv1alpha1.SchemeGroupVersion, zap.Error(err)) } if err := apiextensionsv1.AddToScheme(scheme.Scheme); err != nil { - klog.Fatalf("failed to add apiextensionsv1 api to scheme: %v", err) + log.Fatalw("Failed to add api to scheme", "api", apiextensionsv1.SchemeGroupVersion, zap.Error(err)) } if err := clusterv1alpha1.AddToScheme(scheme.Scheme); err != nil { - klog.Fatalf("failed to add clusterv1alpha1 api to scheme: %v", err) + log.Fatalw("Failed to add api to scheme", "api", clusterv1alpha1.SchemeGroupVersion, zap.Error(err)) } cfg, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfig) if err != nil { - klog.Fatalf("error building kubeconfig: %v", err) + log.Fatalw("Failed to build kubeconfig", zap.Error(err)) } if caBundleFile != "" { if err := util.SetCABundleFile(caBundleFile); err != nil { - klog.Fatalf("-ca-bundle is invalid: %v", err) + log.Fatalw("-ca-bundle is invalid", zap.Error(err)) } } @@ -228,56 +245,34 @@ func main() { // QPS and Burst config there machineCfg, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfig) if err != nil { - klog.Fatalf("error building kubeconfig for machines: %v", err) + log.Fatalw("Failed to build kubeconfig for machines", zap.Error(err)) } kubeClient, err := kubernetes.NewForConfig(cfg) if err != nil { - klog.Fatalf("error building kubernetes clientset for kubeClient: %v", err) + log.Fatalw("Failed to build kubernetes clientset for kubeClient", zap.Error(err)) } kubeconfigProvider := clusterinfo.New(cfg, kubeClient) ctrlMetrics := machinecontroller.NewMachineControllerMetrics() ctrlMetrics.MustRegister(metrics.Registry) - containerRuntimeOpts := containerruntime.Opts{ - ContainerRuntime: nodeContainerRuntime, - ContainerdVersion: nodeContainerdVersion, - ContainerdRegistryMirrors: nodeContainerdRegistryMirrors, - 
InsecureRegistries: nodeInsecureRegistries, - PauseImage: nodePauseImage, - RegistryMirrors: nodeRegistryMirrors, - RegistryCredentialsSecret: nodeRegistryCredentialsSecret, - } - containerRuntimeConfig, err := containerruntime.BuildConfig(containerRuntimeOpts) - if err != nil { - klog.Fatalf("failed to generate container runtime config: %v", err) - } - runOptions := controllerRunOptions{ - kubeClient: kubeClient, - kubeconfigProvider: kubeconfigProvider, - name: name, - cfg: machineCfg, - metrics: ctrlMetrics, - prometheusRegisterer: metrics.Registry, - skipEvictionAfter: skipEvictionAfter, - nodeCSRApprover: nodeCSRApprover, - node: machinecontroller.NodeSettings{ - ClusterDNSIPs: clusterDNSIPs, - HTTPProxy: nodeHTTPProxy, - NoProxy: nodeNoProxy, - PauseImage: nodePauseImage, - RegistryCredentialsSecretRef: nodeRegistryCredentialsSecret, - ContainerRuntime: containerRuntimeConfig, - }, - useExternalBootstrap: useExternalBootstrap || useOSM, + log: log, + kubeClient: kubeClient, + kubeconfigProvider: kubeconfigProvider, + name: name, + cfg: machineCfg, + metrics: ctrlMetrics, + prometheusRegisterer: metrics.Registry, + skipEvictionAfter: skipEvictionAfter, + nodeCSRApprover: nodeCSRApprover, nodePortRange: nodePortRange, overrideBootstrapKubeletAPIServer: overrideBootstrapKubeletAPIServer, } if err := nodeFlags.UpdateNodeSettings(&runOptions.node); err != nil { - klog.Fatalf("failed to update nodesettings: %v", err) + log.Fatalw("Failed to update nodesettings", zap.Error(err)) } if parsedJoinClusterTimeout != nil { @@ -287,7 +282,7 @@ func main() { if bootstrapTokenServiceAccountName != "" { flagParts := strings.Split(bootstrapTokenServiceAccountName, "/") if flagPartsLen := len(flagParts); flagPartsLen != 2 { - klog.Fatalf("Splitting the bootstrap-token-service-account-name flag value in '/' returned %d parts, expected exactly two", flagPartsLen) + log.Fatalf("Splitting the bootstrap-token-service-account-name flag value in '/' returned %d parts, expected exactly 
two", flagPartsLen) } runOptions.bootstrapTokenServiceAccountName = &types.NamespacedName{Namespace: flagParts[0], Name: flagParts[1]} } @@ -295,16 +290,16 @@ func main() { ctx := signals.SetupSignalHandler() go func() { <-ctx.Done() - klog.Info("caught signal, shutting down...") + log.Info("Caught signal, shutting down...") }() mgr, err := createManager(5*time.Minute, runOptions) if err != nil { - klog.Fatalf("failed to create runtime manager: %v", err) + log.Fatalw("Failed to create runtime manager", zap.Error(err)) } if err := mgr.Start(ctx); err != nil { - klog.Errorf("failed to start kubebuilder manager: %v", err) + log.Errorw("Failed to start manager", zap.Error(err)) } } @@ -314,43 +309,45 @@ func createManager(syncPeriod time.Duration, options controllerRunOptions) (mana namespace = defaultLeaderElectionNamespace } + metricsOptions := metricsserver.Options{BindAddress: metricsAddress} + if profiling { + m := http.NewServeMux() + m.HandleFunc("/", pprof.Index) + m.HandleFunc("/cmdline", pprof.Cmdline) + m.HandleFunc("/profile", pprof.Profile) + m.HandleFunc("/symbol", pprof.Symbol) + m.HandleFunc("/trace", pprof.Trace) + metricsOptions.ExtraHandlers = map[string]http.Handler{ + "/debug/pprof/": m, + } + } + mgr, err := manager.New(options.cfg, manager.Options{ - SyncPeriod: &syncPeriod, + Cache: cache.Options{ + DefaultNamespaces: map[string]cache.Config{}, + SyncPeriod: &syncPeriod, + }, LeaderElection: enableLeaderElection, LeaderElectionID: defaultLeaderElectionID, LeaderElectionNamespace: namespace, HealthProbeBindAddress: healthProbeAddress, - MetricsBindAddress: metricsAddress, + Metrics: metricsOptions, }) if err != nil { - return nil, fmt.Errorf("error building ctrlruntime manager: %w", err) + return nil, fmt.Errorf("failed to build ctrlruntime manager: %w", err) } if err := mgr.AddReadyzCheck("alive", healthz.Ping); err != nil { return nil, fmt.Errorf("failed to add readiness check: %w", err) } - if err := mgr.AddHealthzCheck("kubeconfig", 
health.KubeconfigAvailable(options.kubeconfigProvider)); err != nil { + if err := mgr.AddHealthzCheck("kubeconfig", health.KubeconfigAvailable(options.kubeconfigProvider, options.log)); err != nil { return nil, fmt.Errorf("failed to add health check: %w", err) } if err := mgr.AddHealthzCheck("apiserver-connection", health.ApiserverReachable(options.kubeClient)); err != nil { return nil, fmt.Errorf("failed to add health check: %w", err) } - - if profiling { - m := http.NewServeMux() - m.HandleFunc("/", pprof.Index) - m.HandleFunc("/cmdline", pprof.Cmdline) - m.HandleFunc("/profile", pprof.Profile) - m.HandleFunc("/symbol", pprof.Symbol) - m.HandleFunc("/trace", pprof.Trace) - - if err := mgr.AddMetricsExtraHandler("/debug/pprof/", m); err != nil { - return nil, fmt.Errorf("failed to add pprof http handlers: %w", err) - } - } - if err := mgr.Add(&controllerBootstrap{ mgr: mgr, opt: options, @@ -384,20 +381,24 @@ func (bs *controllerBootstrap) Start(ctx context.Context) error { } // Migrate MachinesV1Alpha1Machine to ClusterV1Alpha1Machine. - if err := migrations.MigrateMachinesv1Alpha1MachineToClusterv1Alpha1MachineIfNecessary(ctx, client, bs.opt.kubeClient, providerData); err != nil { + if err := migrations.MigrateMachinesv1Alpha1MachineToClusterv1Alpha1MachineIfNecessary(ctx, bs.opt.log, client, providerData); err != nil { return fmt.Errorf("migration to clusterv1alpha1 failed: %w", err) } // Migrate providerConfig field to providerSpec field. 
- if err := migrations.MigrateProviderConfigToProviderSpecIfNecessary(ctx, bs.opt.cfg, client); err != nil { + if err := migrations.MigrateProviderConfigToProviderSpecIfNecessary(ctx, bs.opt.log, bs.opt.cfg, client); err != nil { return fmt.Errorf("migration of providerConfig field to providerSpec field failed: %w", err) } machineCollector := machinecontroller.NewMachineCollector(ctx, bs.mgr.GetClient()) metrics.Registry.MustRegister(machineCollector) + machineDeploymentCollector := machinedeploymentcontroller.NewCollector(ctx, bs.mgr.GetClient()) + metrics.Registry.MustRegister(machineDeploymentCollector) + if err := machinecontroller.Add( ctx, + bs.opt.log, bs.mgr, bs.opt.kubeClient, workerCount, @@ -409,41 +410,27 @@ func (bs *controllerBootstrap) Start(ctx context.Context) error { bs.opt.bootstrapTokenServiceAccountName, bs.opt.skipEvictionAfter, bs.opt.node, - bs.opt.useExternalBootstrap, bs.opt.nodePortRange, bs.opt.overrideBootstrapKubeletAPIServer, ); err != nil { return fmt.Errorf("failed to add Machine controller to manager: %w", err) } - if err := machinesetcontroller.Add(bs.mgr); err != nil { + if err := machinesetcontroller.Add(bs.mgr, bs.opt.log); err != nil { return fmt.Errorf("failed to add MachineSet controller to manager: %w", err) } - if err := machinedeploymentcontroller.Add(bs.mgr); err != nil { + if err := machinedeploymentcontroller.Add(bs.mgr, bs.opt.log); err != nil { return fmt.Errorf("failed to add MachineDeployment controller to manager: %w", err) } if bs.opt.nodeCSRApprover { - if err := nodecsrapprover.Add(bs.mgr); err != nil { + if err := nodecsrapprover.Add(bs.mgr, bs.opt.log); err != nil { return fmt.Errorf("failed to add NodeCSRApprover controller to manager: %w", err) } } - klog.Info("machine controller startup complete") + bs.opt.log.Info("Machine-controller startup complete") return nil } - -func parseClusterDNSIPs(s string) ([]net.IP, error) { - var ips []net.IP - sips := strings.Split(s, ",") - for _, sip := range sips { - ip 
:= net.ParseIP(strings.TrimSpace(sip)) - if ip == nil { - return nil, fmt.Errorf("unable to parse ip %s", sip) - } - ips = append(ips, ip) - } - return ips, nil -} diff --git a/cmd/provision/README.md b/cmd/provision/README.md deleted file mode 100644 index 4811de7dc..000000000 --- a/cmd/provision/README.md +++ /dev/null @@ -1,84 +0,0 @@ -# Provisioning - -This command offers all required functionality to provision an host to join a Kubernetes cluster. - -The following operating systems are supported - -- Ubuntu 18.04 -- CentOS 7 -- Flatcar - -## Requirements - -- The cluster needs to use the bootstrap token authentication - -## CLI - -```bash -./provision \ - --kubelet-version="v1.13.1" \ - --cloud-provider="openstack" \ - --cloud-config="/etc/kubernetes/cloud-config" \ - --token="AAAAAAAAAAAAAAAA" \ - --ca-cert="/etc/kubernetes/ca.crt" -``` - -## Process - -Nodes will boot with a cloud-init (Or Ignition) which writes required files & a shell script (called `setup.sh` here). - -### cloud-init (Or ignition) - -Parts which will be covered by cloud-init (or Ignition) - -- Install SSH keys -- Configure hostname -- `ca.crt` - The CA certificate which got used to issue the certificates of the API server serving certificates -- `cloud-config` - A optional cloud-config used by the kubelet to interact with the cloud provider. -- `setup.sh` - Is responsible for downloading the `provision` binary and to execute it. - The download of the binary might also be done using built-in `cloud-init` (or Ignition) features - -### Provision - -The `provision` binary will identify the operating system and execute a set of provisioning steps. 
- -The provisioning process gets separated into 2 phases: - -- Base provisioning - Install and configure all required dependencies -- Join - Write & start the kubelet systemd unit - -#### Base provisioning - -The following steps belong into the base provisioning: - -- Install required packages (apt & yum action) -- Configure required kernel parameter (Like ip forwarding, etc.) -- Configure required kernel modules -- Disable swap -- Download & install the CNI plugins -- Download & Install docker -- Download Kubelet -- Install health checks (Kubelet & Docker) - -#### Join - -This part will: - -- Write & start the kubelet systemd unit - -## Offline usage - -The `provision` binary should also be usable for "prebaking" images, which then can be used for offline usage. - -## Development process - -To make sure the local development version of the `provision` command gets used for new machines created by the local running machine controller, -a new flag `--provision-source` must be introduced. -This flag will instruct the machine controller to download the `provision` binary from the specified location. - -For simplicity the `/hack/run-machine-controller.sh` will be updated to include a step which will compile the `provoision` command & upload it to a gcs bucket. diff --git a/cmd/userdata/amzn2/main.go b/cmd/userdata/amzn2/main.go deleted file mode 100644 index 60f5afb24..000000000 --- a/cmd/userdata/amzn2/main.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2021 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// -// UserData plugin for Amazon Linux 2. -// - -package main - -import ( - "flag" - - "github.com/kubermatic/machine-controller/pkg/userdata/amzn2" - userdataplugin "github.com/kubermatic/machine-controller/pkg/userdata/plugin" - - "k8s.io/klog" -) - -func main() { - // Parse flags. - var debug bool - - flag.BoolVar(&debug, "debug", false, "Switch for enabling the plugin debugging") - flag.Parse() - - // Instantiate provider and start plugin. - var provider = &amzn2.Provider{} - var p = userdataplugin.New(provider, debug) - - if err := p.Run(); err != nil { - klog.Fatalf("error running Amazon Linux 2 plugin: %v", err) - } -} diff --git a/cmd/userdata/centos/main.go b/cmd/userdata/centos/main.go deleted file mode 100644 index 5ddd80f73..000000000 --- a/cmd/userdata/centos/main.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// -// UserData plugin for CentOS. -// - -package main - -import ( - "flag" - - "github.com/kubermatic/machine-controller/pkg/userdata/centos" - userdataplugin "github.com/kubermatic/machine-controller/pkg/userdata/plugin" - - "k8s.io/klog" -) - -func main() { - // Parse flags. - var debug bool - - flag.BoolVar(&debug, "debug", false, "Switch for enabling the plugin debugging") - flag.Parse() - - // Instantiate provider and start plugin. 
- var provider = ¢os.Provider{} - var p = userdataplugin.New(provider, debug) - - if err := p.Run(); err != nil { - klog.Fatalf("error running CentOS plugin: %v", err) - } -} diff --git a/cmd/userdata/flatcar/main.go b/cmd/userdata/flatcar/main.go deleted file mode 100644 index 21b397fd2..000000000 --- a/cmd/userdata/flatcar/main.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// -// UserData plugin for flatcar. -// - -package main - -import ( - "flag" - - "github.com/kubermatic/machine-controller/pkg/userdata/flatcar" - userdataplugin "github.com/kubermatic/machine-controller/pkg/userdata/plugin" - - "k8s.io/klog" -) - -func main() { - // Parse flags. - var debug bool - flag.BoolVar(&debug, "debug", false, "Switch for enabling the plugin debugging") - flag.Parse() - - // Instantiate provider and start plugin. - var provider = &flatcar.Provider{} - var p = userdataplugin.New(provider, debug) - - if err := p.Run(); err != nil { - klog.Fatalf("error running flatcar plugin: %v", err) - } -} diff --git a/cmd/userdata/rhel/main.go b/cmd/userdata/rhel/main.go deleted file mode 100644 index ef14f008b..000000000 --- a/cmd/userdata/rhel/main.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// -// UserData plugin for RHEL. -// - -package main - -import ( - "flag" - - userdataplugin "github.com/kubermatic/machine-controller/pkg/userdata/plugin" - "github.com/kubermatic/machine-controller/pkg/userdata/rhel" - - "k8s.io/klog" -) - -func main() { - // Parse flags. - var debug bool - - flag.BoolVar(&debug, "debug", false, "Switch for enabling the plugin debugging") - flag.Parse() - - // Instantiate provider and start plugin. - var provider = &rhel.Provider{} - var p = userdataplugin.New(provider, debug) - - if err := p.Run(); err != nil { - klog.Fatalf("error running RHEL plugin: %v", err) - } -} diff --git a/cmd/userdata/rockylinux/main.go b/cmd/userdata/rockylinux/main.go deleted file mode 100644 index 204d38eb2..000000000 --- a/cmd/userdata/rockylinux/main.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2022 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// -// UserData plugin for RockyLinux. 
-// - -package main - -import ( - "flag" - - userdataplugin "github.com/kubermatic/machine-controller/pkg/userdata/plugin" - "github.com/kubermatic/machine-controller/pkg/userdata/rockylinux" - - "k8s.io/klog" -) - -func main() { - // Parse flags. - var debug bool - - flag.BoolVar(&debug, "debug", false, "Switch for enabling the plugin debugging") - flag.Parse() - - // Instantiate provider and start plugin. - var provider = &rockylinux.Provider{} - var p = userdataplugin.New(provider, debug) - - if err := p.Run(); err != nil { - klog.Fatalf("error running RockyLinux plugin: %v", err) - } -} diff --git a/cmd/userdata/ubuntu/main.go b/cmd/userdata/ubuntu/main.go deleted file mode 100644 index df8eb2b3a..000000000 --- a/cmd/userdata/ubuntu/main.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// -// UserData plugin for Ubuntu. -// - -package main - -import ( - "flag" - - userdataplugin "github.com/kubermatic/machine-controller/pkg/userdata/plugin" - "github.com/kubermatic/machine-controller/pkg/userdata/ubuntu" - - "k8s.io/klog" -) - -func main() { - // Parse flags. - var debug bool - - flag.BoolVar(&debug, "debug", false, "Switch for enabling the plugin debugging") - flag.Parse() - - // Instantiate provider and start plugin. 
- var provider = &ubuntu.Provider{} - var p = userdataplugin.New(provider, debug) - - if err := p.Run(); err != nil { - klog.Fatalf("error running Ubuntu plugin: %v", err) - } -} diff --git a/cmd/webhook/main.go b/cmd/webhook/main.go index c3d4af796..a041e93e2 100644 --- a/cmd/webhook/main.go +++ b/cmd/webhook/main.go @@ -18,17 +18,21 @@ package main import ( "flag" + "log" "github.com/Masterminds/semver/v3" + "github.com/go-logr/zapr" + "go.uber.org/zap" - "github.com/kubermatic/machine-controller/pkg/admission" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/util" - "github.com/kubermatic/machine-controller/pkg/node" - userdatamanager "github.com/kubermatic/machine-controller/pkg/userdata/manager" + "k8c.io/machine-controller/pkg/admission" + "k8c.io/machine-controller/pkg/cloudprovider/util" + machinecontrollerlog "k8c.io/machine-controller/pkg/log" + "k8c.io/machine-controller/pkg/node" "k8s.io/client-go/tools/clientcmd" - "k8s.io/klog" ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" + ctrlruntimelog "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" ) type options struct { @@ -38,7 +42,6 @@ type options struct { admissionTLSCertPath string admissionTLSKeyPath string caBundleFile string - useOSM bool useExternalBootstrap bool namespace string workerClusterKubeconfig string @@ -47,9 +50,11 @@ type options struct { func main() { nodeFlags := node.NewFlags(flag.CommandLine) + logFlags := machinecontrollerlog.NewDefaultOptions() + logFlags.AddFlags(flag.CommandLine) + opt := &options{} - klog.InitFlags(nil) if flag.Lookup("kubeconfig") == nil { flag.StringVar(&opt.kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.") } @@ -57,40 +62,49 @@ func main() { flag.StringVar(&opt.masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. 
Only required if out-of-cluster.") } flag.StringVar(&opt.admissionListenAddress, "listen-address", ":9876", "The address on which the MutatingWebhook will listen on") - flag.StringVar(&opt.admissionTLSCertPath, "tls-cert-path", "/tmp/cert/cert.pem", "The path of the TLS cert for the MutatingWebhook") - flag.StringVar(&opt.admissionTLSKeyPath, "tls-key-path", "/tmp/cert/key.pem", "The path of the TLS key for the MutatingWebhook") + flag.StringVar(&opt.admissionTLSCertPath, "tls-cert-path", "/tmp/cert/tls.crt", "The path of the TLS cert for the MutatingWebhook") + flag.StringVar(&opt.admissionTLSKeyPath, "tls-key-path", "/tmp/cert/tls.key", "The path of the TLS key for the MutatingWebhook") flag.StringVar(&opt.caBundleFile, "ca-bundle", "", "path to a file containing all PEM-encoded CA certificates (will be used instead of the host's certificates if set)") flag.StringVar(&opt.namespace, "namespace", "kubermatic", "The namespace where the webhooks will run") flag.StringVar(&opt.workerClusterKubeconfig, "worker-cluster-kubeconfig", "", "Path to kubeconfig of worker/user cluster where machines and machinedeployments exist. If not specified, value from --kubeconfig or in-cluster config will be used") flag.StringVar(&opt.versionConstraint, "kubernetes-version-constraints", ">=0.0.0", "") - // OSM specific flags - flag.BoolVar(&opt.useOSM, "use-osm", false, "DEPRECATED: osm controller is enabled for node bootstrap [use use-external-bootstrap instead]") - flag.BoolVar(&opt.useExternalBootstrap, "use-external-bootstrap", false, "user-data is provided by external bootstrap mechanism (e.g. operating-system-manager, also known as OSM)") + flag.BoolVar(&opt.useExternalBootstrap, "use-external-bootstrap", true, "DEPRECATED: This flag is no-op and will have no effect since machine-controller only supports external bootstrap mechanism. 
This flag is only kept for backwards compatibility and will be removed in the future") flag.Parse() + + if err := logFlags.Validate(); err != nil { + log.Fatalf("Invalid options: %v", err) + } + + rawLog := machinecontrollerlog.New(logFlags.Debug, logFlags.Format) + log := rawLog.Sugar() + + // set the logger used by controller-runtime + ctrlruntimelog.SetLogger(zapr.NewLogger(rawLog.WithOptions(zap.AddCallerSkip(1)))) + opt.kubeconfig = flag.Lookup("kubeconfig").Value.(flag.Getter).Get().(string) opt.masterURL = flag.Lookup("master").Value.(flag.Getter).Get().(string) if opt.caBundleFile != "" { if err := util.SetCABundleFile(opt.caBundleFile); err != nil { - klog.Fatalf("-ca-bundle is invalid: %v", err) + log.Fatalw("-ca-bundle is invalid", zap.Error(err)) } } cfg, err := clientcmd.BuildConfigFromFlags(opt.masterURL, opt.kubeconfig) if err != nil { - klog.Fatalf("error building kubeconfig: %v", err) + log.Fatalw("Failed to build kubeconfig", zap.Error(err)) } client, err := ctrlruntimeclient.New(cfg, ctrlruntimeclient.Options{}) if err != nil { - klog.Fatalf("failed to build client: %v", err) + log.Fatalw("Failed to build client", zap.Error(err)) } constraint, err := semver.NewConstraint(opt.versionConstraint) if err != nil { - klog.Fatalf("failed to validate kubernetes-version-constraints: %v", err) + log.Fatalw("Failed to validate kubernetes-version-constraints", zap.Error(err)) } // Start with assuming that current cluster will be used as worker cluster @@ -101,43 +115,40 @@ func main() { &clientcmd.ClientConfigLoadingRules{ExplicitPath: opt.workerClusterKubeconfig}, &clientcmd.ConfigOverrides{}).ClientConfig() if err != nil { - klog.Fatal(err) + log.Fatalw("Failed to create worker cluster config", zap.Error(err)) } // Build dedicated client for worker cluster workerClient, err = ctrlruntimeclient.New(workerClusterConfig, ctrlruntimeclient.Options{}) if err != nil { - klog.Fatalf("failed to build worker client: %v", err) + log.Fatalw("Failed to build worker 
client", zap.Error(err)) } } - um, err := userdatamanager.New() - if err != nil { - klog.Fatalf("error initialising userdata plugins: %v", err) - } - srv, err := admission.Builder{ - ListenAddress: opt.admissionListenAddress, - Client: client, - WorkerClient: workerClient, - UserdataManager: um, - UseExternalBootstrap: opt.useExternalBootstrap || opt.useOSM, - NodeFlags: nodeFlags, - Namespace: opt.namespace, - VersionConstraints: constraint, + ListenAddress: opt.admissionListenAddress, + Log: log, + Client: client, + WorkerClient: workerClient, + NodeFlags: nodeFlags, + Namespace: opt.namespace, + VersionConstraints: constraint, + + // we could change this to get the CertDir from the configured CertName + // and KeyName, but doing so does not bring us any benefits but would + // technically break compatibility. + CertDir: "/", + CertName: opt.admissionTLSCertPath, + KeyName: opt.admissionTLSKeyPath, }.Build() if err != nil { - klog.Fatalf("failed to create admission hook: %v", err) + log.Fatalw("Failed to create admission hook", zap.Error(err)) } - if err := srv.ListenAndServeTLS(opt.admissionTLSCertPath, opt.admissionTLSKeyPath); err != nil { - klog.Fatalf("Failed to start server: %v", err) + log.Infow("Listening", "address", opt.admissionListenAddress) + + serverContext := signals.SetupSignalHandler() + if err := srv.Start(serverContext); err != nil { + log.Fatalw("Failed to start server", zap.Error(err)) } - defer func() { - if err := srv.Close(); err != nil { - klog.Fatalf("Failed to shutdown server: %v", err) - } - }() - klog.Infof("Listening on %s", opt.admissionListenAddress) - select {} } diff --git a/docs/cloud-provider.md b/docs/cloud-provider.md index 39c701b6c..375b30a75 100644 --- a/docs/cloud-provider.md +++ b/docs/cloud-provider.md @@ -54,10 +54,10 @@ accessKeyId: "<< YOUR_ACCESS_KEY_ID >>" secretAccessKey: "<< YOUR_SECRET_ACCESS_KEY_ID >>" # region for the instance region: "eu-central-1" -# avaiability zone for the instance +# availability zone for 
the instance availabilityZone: "eu-central-1a" # vpc id for the instance -vpcId: "vpc-819f62e9" +vpcId: "vpc-079f7648481a11e77" # subnet id for the instance subnetId: "subnet-2bff4f43" # enable public IP assignment, default is true @@ -80,8 +80,7 @@ ami: "" # When not set a 'kubernetes-v1' security group will get created securityGroupIDs: - "" -# name of the instance profile to use. -# When not set a 'kubernetes-v1' instance profile will get created +# name of the instance profile to use, required. instanceProfile : "" # instance tags ("KubernetesCluster": "my-cluster" is a required tag. @@ -136,6 +135,8 @@ network: "" computeAPIVersion: "" # set trust-device-path flag for kubelet trustDevicePath: false +# set to true to store metadata on a configuration drive instead of the metadata service +configDrive: false # set root disk size rootDiskSizeGB: 50 # set root disk volume type @@ -147,6 +148,46 @@ tags: tagKey: tagValue ``` +## OpenNebula + +**Note:** This is a [community provider](../README.md#community-providers). + +### machine.spec.providerConfig.cloudProviderSpec + +```yaml +# XML-RPC endpoint of your OpenNebula installation +endpoint: "" +# your OpenNebula username +username: "" +# your OpenNebula password +password: "" + +# cpu (float64) +cpu: 1 +# vcpu +vcpu: 2 +# memory in MB +memory: 1024 + +# the name of the image to use, needs to be owned by the current user +image: "Amazon Linux 2" +# which datastore to use for the image +datastore: "" +# size of the disk in MB +diskSize: 51200 + +# network name, needs to be owned by the current user +network: "" + +# whether to enable the VNC console +enableVNC: true + +# optional key/value pairs to add to the VM template +vmTemplateExtra: + # useful for e.g. 
setting the placement attributes as defined in https://docs.opennebula.io/6.4/management_and_operations/references/template.html#template-placement-section + SCHED_REQUIREMENTS: 'RACK="G4"' +``` + ## Google Cloud Platform ### machine.spec.providerConfig.cloudProviderSpec @@ -183,7 +224,7 @@ labels: ### machine.spec.providerConfig.cloudProviderSpec ```yaml token: "<< HETZNER_API_TOKEN >>" -serverType: "cx11" +serverType: "cx23" datacenter: "" location: "fsn1" # Optional: network IDs or names @@ -196,6 +237,8 @@ labels: ## Linode +**Note:** This is a [community provider](../README.md#community-providers). + ### machine.spec.providerConfig.cloudProviderSpec ```yaml # your linode token @@ -324,9 +367,11 @@ Refer to the [VSphere](./vsphere.md#provider-configuration) specific documentati ## Vultr +**Note:** This is a [community provider](../README.md#community-providers). + ### machine.spec.providerConfig.cloudProviderSpec ```yaml apiKey: "<< VULTR_API_KEY >>" plan: "vhf-8c-32gb" region: "" -osId: 127 \ No newline at end of file +osId: 127 diff --git a/docs/howto-provider.md b/docs/howto-provider.md index f797c4260..3f72a5a84 100644 --- a/docs/howto-provider.md +++ b/docs/howto-provider.md @@ -6,7 +6,7 @@ ### Interface description -The interface a cloud provider has to implement is located in the package `github.com/kubermatic/machine-controller/pkg/cloudprovider/cloud`. It is named `Provider` and defines a small set of functions: +The interface a cloud provider has to implement is located in the package `k8c.io/machine-controller/pkg/cloudprovider/cloud`. It is named `Provider` and defines a small set of functions: ```go AddDefaults(spec v1alpha1.MachineSpec) (v1alpha1.MachineSpec, error) @@ -26,13 +26,7 @@ Get(machine *v1alpha1.Machine) (instance.Instance, error) `Get` gets a node that is associated with the given machine. Note that this method can return a so called _terminal error_, which indicates that a manual interaction is required to recover from this state. 
See `v1alpha1.MachineStatus` for more info and `errors.TerminalError` type. -In case the instance cannot be found, the returned error has to be `github.com/kubermatic/machine-controller/pkg/cloudprovider/errors.ErrInstanceNotFound` for proper evaluation by the machine controller. - -```go -GetCloudConfig(spec v1alpha1.MachineSpec) (config string, name string, err error) -``` - -`GetCloudConfig` will return the cloud provider specific cloud-config, which gets consumed by the kubelet. +In case the instance cannot be found, the returned error has to be `k8c.io/machine-controller/pkg/cloudprovider/errors.ErrInstanceNotFound` for proper evaluation by the machine controller. ```go Create(machine *v1alpha1.Machine, data *cloud.MachineCreateDeleteData, userdata string) (instance.Instance, error) @@ -66,24 +60,24 @@ SetMetricsForMachines(machines v1alpha1.MachineList) error ### Implementation hints -Provider implementations are located in individual packages in `github.com/kubermatic/machine-controller/pkg/cloudprovider/provider`. Here see e.g. `hetzner` as a straight and good understandable implementation. Other implementations are there too, helping to understand the needed tasks inside and around the `Provider` interface implementation. +Provider implementations are located in individual packages in `k8c.io/machine-controller/pkg/cloudprovider/provider`. Here see e.g. `hetzner` as a straight and good understandable implementation. Other implementations are there too, helping to understand the needed tasks inside and around the `Provider` interface implementation. When retrieving the individual configuration from the provider specification a type for unmarshalling is needed. Here first the provider configuration is read and based on it the individual values of the configuration are retrieved. Typically the access data (token, ID/key combination, document with all information) alternatively can be passed via an environment variable. 
According methods of the used `providerconfig.ConfigVarResolver` do support this. -For creation of new machines the support of the possible information has to be checked. The machine controller supports _CentOS_, _Flatcar_ and _Ubuntu_. In case one or more aren't supported by the cloud infrastructure the error `providerconfig.ErrOSNotSupported` has to be returned. +For creation of new machines the support of the possible information has to be checked. The machine controller supports _Flatcar_ and _Ubuntu_. In case one or more aren't supported by the cloud infrastructure the error `providerconfig.ErrOSNotSupported` has to be returned. ## Integrate provider into the Machine Controller -For each cloud provider a unique string constant has to be defined in file `types.go` in package `github.com/kubermatic/machine-controller/pkg/providerconfig`. Registration based on this constant is done in file `provider.go` in package `github.com/kubermatic/machine-controller/pkg/cloudprovider`. +For each cloud provider a unique string constant has to be defined in file `types.go` in package `k8c.io/machine-controller/pkg/providerconfig`. Registration based on this constant is done in file `provider.go` in package `k8c.io/machine-controller/pkg/cloudprovider`. ## Add example manifest -For documentation of the different configuration options an according example manifest with helpful comments has to be added to `github.com/kubermatic/machine-controller/examples`. Naming scheme is `-machinedeployment.yaml`. +For documentation of the different configuration options an according example manifest with helpful comments has to be added to `k8c.io/machine-controller/examples`. Naming scheme is `-machinedeployment.yaml`. ## Integrate provider into CI -Like the example manifest a more concrete one named `machinedeployment-.yaml` has to be added to `github.com/kubermatic/machine-controller/test/e2e/provisioning/testdata`. 
Additionally file `all_e2e_test.go` in package `github.com/kubermatic/machine-controller/test/e2e/provisioning` contains all provider tests. Like the existing ones the test for the new provider has to be placed here. Mainly it's the retrieval of test data, especially the access data, from the environment and the starting of the test scenarios. +Like the example manifest a more concrete one named `machinedeployment-.yaml` has to be added to `k8c.io/machine-controller/test/e2e/provisioning/testdata`. Additionally file `all_e2e_test.go` in package `k8c.io/machine-controller/test/e2e/provisioning` contains all provider tests. Like the existing ones the test for the new provider has to be placed here. Mainly it's the retrieval of test data, especially the access data, from the environment and the starting of the test scenarios. Now the provider is ready to be added into the project for CI tests. diff --git a/docs/openstack-images.md b/docs/openstack-images.md index a1979885c..8a89250e9 100644 --- a/docs/openstack-images.md +++ b/docs/openstack-images.md @@ -10,5 +10,5 @@ There is a script to upload all supported image to OpenStack. By default all images will be named `machine-controller-${OS_NAME}`. 
The image names can be overwritten using environment variables: ```bash -UBUNTU_IMAGE_NAME="ubuntu" CENTOS_IMAGE_NAME="centos" ./hack/setup-openstack-images.sh +UBUNTU_IMAGE_NAME="ubuntu" ./hack/setup-openstack-images.sh ``` diff --git a/docs/operating-system.md b/docs/operating-system.md index 8d096a529..c4b7692f6 100644 --- a/docs/operating-system.md +++ b/docs/operating-system.md @@ -4,19 +4,19 @@ ### Cloud provider -| | Ubuntu | CentOS | Flatcar | RHEL | Amazon Linux 2 | Rocky Linux | -|---|---|---|---|---|---|---| -| AWS | ✓ | ✓ | ✓ | ✓ | ✓ | ✓ | -| Azure | ✓ | ✓ | ✓ | ✓ | x | ✓ | -| Digitalocean | ✓ | ✓ | x | x | x | ✓ | -| Equinix Metal | ✓ | ✓ | ✓ | x | x | ✓ | -| Google Cloud Platform | ✓ | x | x | x | x | x | -| Hetzner | ✓ | ✓ | x | x | x | ✓ | -| KubeVirt | ✓ | ✓ | ✓ | ✓ | x | ✓ | -| Nutanix | ✓ | ✓ | x | x | x | x | -| Openstack | ✓ | ✓ | ✓ | ✓ | x | ✓ | -| VMware Cloud Director | ✓ | x | x | x | x | x | -| VSphere | ✓ | ✓ | ✓ | ✓ | x | ✓ | +| | Ubuntu | Flatcar | RHEL | Amazon Linux 2 | Rocky Linux | +|---|---|---|---|---|---| +| AWS | ✓ | ✓ | ✓ | ✓ | ✓ | +| Azure | ✓ | ✓ | ✓ | x | ✓ | +| Digitalocean | ✓ | x | x | x | ✓ | +| Equinix Metal | ✓ | ✓ | x | x | ✓ | +| Google Cloud Platform | ✓ | ✓ | x | x | x | +| Hetzner | ✓ | x | x | x | ✓ | +| KubeVirt | ✓ | ✓ | ✓ | x | ✓ | +| Nutanix | ✓ | x | x | x | x | +| Openstack | ✓ | ✓ | ✓ | x | ✓ | +| VMware Cloud Director | ✓ | x | x | x | x | +| VSphere | ✓ | ✓ | ✓ | x | ✓ | ## Configuring a operating system @@ -24,7 +24,6 @@ The operating system to use can be set via `machine.spec.providerConfig.operatin Allowed values: - `amzn2` -- `centos` - `flatcar` - `rhel` - `rockylinux` @@ -40,7 +39,6 @@ Machine controller may work with other OS versions that are not listed in the ta | | Versions | |---|---| | AmazonLinux2 | 2.x | -| CentOS | 7.4.x, 7.6.x, 7.7.x | | RHEL | 8.x | | Rocky Linux | 8.5 | | Ubuntu | 20.04 LTS, 22.04 LTS | diff --git a/docs/vsphere.md b/docs/vsphere.md index 6a743ccd1..81cf0d98e 100644 --- 
a/docs/vsphere.md +++ b/docs/vsphere.md @@ -27,7 +27,7 @@ To see where to locate the OVAs go to the OS specific section. 3. Click through the dialog until "Select storage" 4. Select the same storage you want to use for your machines 5. Select the same network you want to use for your machines -6. Leave everyhting in the "Customize Template" and "Ready to complete" dialog as it is +6. Leave everything in the "Customize Template" and "Ready to complete" dialog as it is 7. Wait until the VM got fully imported and the "Snapshots" => "Create Snapshot" button is not grayed out anymore #### Command-line procedure @@ -154,7 +154,7 @@ Procedure: ``` # The URL below is just an example - image_url="https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2" + image_url="https://cloud-images.ubuntu.com/releases/20.04/release/ubuntu-20.04-server-cloudimg-amd64.img" image_name="$(basename -- "${image_url}" | sed 's/.qcow2$//g')" curl -sL "${image_url}" -O . ``` @@ -168,7 +168,7 @@ Procedure: 3. Upload to vSphere using WebUI or GOVC: Make sure to replace the parameters on the command below with the correct - values specific to yout vSphere environment. + values specific to your vSphere environment. ``` govc import.vmdk -dc=dc-1 -pool=/dc-1/host/cl-1/Resources -ds=ds-1 "./${image_name}.vmdk" @@ -203,12 +203,6 @@ Red Hat Enterprise Linux 8.x KVM Guest Image can be found at [Red Hat Customer P Follow [qcow2](#create-template-vm-from-qcow2) template VM creation guide. -#### CentOS - -CentOS 7 image can be found at the following link: . - -Follow [qcow2](#create-template-vm-from-qcow2) template VM creation guide. - ## Provider configuration VSphere provider accepts the following configuration parameters: @@ -226,7 +220,8 @@ datacenter: datacenter1 # VM template name templateVMName: ubuntu-template # Optional. Sets the networks on the VM. If no network is specified, the template default will be used. 
-vmNetName: network1 +networks: +- network1 # Optional folder: folder1 # Optional: Force VMs to be provisoned to the specified resourcePool diff --git a/examples/alibaba-machinedeployment.yaml b/examples/alibaba-machinedeployment.yaml index b36c7571b..506b61a03 100644 --- a/examples/alibaba-machinedeployment.yaml +++ b/examples/alibaba-machinedeployment.yaml @@ -62,4 +62,4 @@ spec: distUpgradeOnBoot: false disableAutoUpdate: true versions: - kubelet: 1.24.9 + kubelet: 1.33.4 diff --git a/examples/anexia-machinedeployment.yaml b/examples/anexia-machinedeployment.yaml index efb1564b7..64722b6ef 100644 --- a/examples/anexia-machinedeployment.yaml +++ b/examples/anexia-machinedeployment.yaml @@ -30,7 +30,7 @@ spec: namespace: kube-system name: machine-controller-anexia key: token - vlanID: "<< ANEXIA_VLAN_ID >>" + # Currently only the "Flatcar Linux Stable" template is supported. # Use templateBuild to specify a build. If empty => latest # Alternatively use templateID for a specific template. @@ -39,11 +39,39 @@ spec: cpus: 2 memory: 2048 - # only a single disk is currently supported, but support for multiple disks is planned already + # this defaults to "performance", but you can set anything + # supported by the Anexia Engine here - or not set this attribute + # at all + cpuPerformanceType: standard + disks: - size: 60 performanceType: ENT6 + # Each entry in this array will create a network interface in each + # Machine, connected to the given VLAN. + networks: + - vlan: "<< ANEXIA_VLAN_ID >>" + + # If prefixes are given, we reserve an IP address for each of + # them - if you give one IPv4 and one IPv6 prefix, your + # Machines will have dual-stack connectivity + # + # As a compatibility aid for the old cloudProviderSpec.vlanID, + # which reserved an IP for the configured VLAN, you can also + # have an entry "" (empty string) to get the same behavior - + # but this is not recommended.
+ # + # Not configuring any prefix might be useful if you want to + # configure IP addresses on this interface via other means, + # e.g. a Layer2 load balancer. + # + # Each MachineDeployment needs at least one Network with at + # least one Prefix, because we have to know (and thus, reserve) + # at least one IP address for each Machine. + prefixes: + - "<< ANEXIA_PREFIX_ID >>" + # You may have this old disk config attribute in your config - please migrate to the disks attribute. # For now it is still recognized though. #diskSize: 60 @@ -55,4 +83,4 @@ spec: distUpgradeOnBoot: false disableAutoUpdate: true versions: - kubelet: 1.24.9 + kubelet: 1.33.4 diff --git a/examples/aws-machinedeployment.yaml b/examples/aws-machinedeployment.yaml index 633e1ed3c..127012e35 100644 --- a/examples/aws-machinedeployment.yaml +++ b/examples/aws-machinedeployment.yaml @@ -52,7 +52,7 @@ spec: key: secretAccessKey region: "eu-central-1" availabilityZone: "eu-central-1a" - vpcId: "vpc-819f62e9" + vpcId: "vpc-079f7648481a11e77" subnetId: "subnet-2bff4f43" instanceType: "t2.micro" instanceProfile: "kubernetes-v1" @@ -79,4 +79,4 @@ spec: # provided the rhsm will be disabled and any created subscription won't be removed automatically rhsmOfflineToken: "<< REDHAT_SUBSCRIPTIONS_OFFLINE_TOKEN >>" versions: - kubelet: 1.24.9 + kubelet: 1.33.4 diff --git a/examples/azure-machinedeployment.yaml b/examples/azure-machinedeployment.yaml index 63166665e..abfe62ba8 100644 --- a/examples/azure-machinedeployment.yaml +++ b/examples/azure-machinedeployment.yaml @@ -65,7 +65,7 @@ spec: location: "westeurope" resourceGroup: "<< YOUR_RESOURCE_GROUP >>" vnetResourceGroup: "<< YOUR_VNET_RESOURCE_GROUP >>" - vmSize: "Standard_F2" + vmSize: "Standard_F2s_v2" # optional disk size values in GB. If not set, the defaults for the vmSize will be used. 
osDiskSize: 30 dataDiskSize: 30 @@ -91,4 +91,4 @@ spec: # provided the rhsm will be disabled and any created subscription won't be removed automatically rhsmOfflineToken: "<< REDHAT_SUBSCRIPTIONS_OFFLINE_TOKEN >>" versions: - kubelet: 1.24.9 + kubelet: 1.33.4 diff --git a/examples/baremetal-tinkerbell-machinedeployment.yaml b/examples/baremetal-tinkerbell-machinedeployment.yaml index ff3ed3c6e..d7154e590 100644 --- a/examples/baremetal-tinkerbell-machinedeployment.yaml +++ b/examples/baremetal-tinkerbell-machinedeployment.yaml @@ -25,88 +25,15 @@ spec: cloudProvider: "baremetal" cloudProviderSpec: driver: "tinkerbell" - metadataClientConfig: - endpoint: - secretKeyRef: - namespace: kube-system - name: machine-controller-baremetal-tb - key: endpoint - authMethod: - secretKeyRef: - namespace: kube-system - name: machine-controller-baremetal-tb - key: authMethod - username: - secretKeyRef: - namespace: kube-system - name: machine-controller-baremetal-tb - key: username - password: - secretKeyRef: - namespace: kube-system - name: machine-controller-baremetal-tb - key: password - token: - secretKeyRef: - namespace: kube-system - name: machine-controller-baremetal-tb - key: token driverSpec: - provisionerIPAddress: << PROVISIONER_IP_ADDRESS >> - mirrorHost: << MIRROR_HOST >> - hardware: - id: << MACHINE_NAME >> - metadata: - facility: - facilitycode: << FACILITY_CODE >> - planslug: << PLAN_SLUG >> - state: "" - instance: - operatingsystemversion: - distro: << OS_NAME >> - imagetag: << IMAGE_TAG >> - osslug: << OS_NAME >> - slug: << OS_NAME >> - version: << OS_VERSION >> - storage: - disks: - - device: /dev/sda - wipetable: true - partitions: - - size: 4096 - label: BIOS - number: 1 - - size: 3993600 - label: SWAP - number: 2 - - size: 0 - label: ROOT - number: 3 - filesystems: - - mount: - point: / - create: - options: - - -L - - ROOT - device: /dev/sda3 - format: ext4 - - mount: - point: none - create: - options: - - -L - - SWAP - device: /dev/sda2 - format: swap - 
network: - interfaces: - - dhcp: - arch: x86_64 - uefi: false - netboot: - allowpxe: false - allowworkflow: false + clusterName: "<< CLUSTER_NAME >>" + osImageUrl: "<< OS_IMAGE_URL >>" + auth: + kubeconfig: + value: "<< KUBECONFIG_BASE64 >>" + hardwareRef: + name: hardware-1 + namespace: "default" operatingSystem: "<< OS_NAME >>" operatingSystemSpec: distUpgradeOnBoot: false diff --git a/examples/digitalocean-machinedeployment.yaml b/examples/digitalocean-machinedeployment.yaml index f914a89ef..e57123176 100644 --- a/examples/digitalocean-machinedeployment.yaml +++ b/examples/digitalocean-machinedeployment.yaml @@ -52,9 +52,8 @@ spec: monitoring: false tags: - "machine-controller" - # Can be 'ubuntu' or 'centos' operatingSystem: "ubuntu" operatingSystemSpec: disableAutoUpdate: true versions: - kubelet: 1.24.9 + kubelet: 1.33.4 diff --git a/examples/equinixmetal-machinedeployment.yaml b/examples/equinixmetal-machinedeployment.yaml index c46b17f5d..9540d65e5 100644 --- a/examples/equinixmetal-machinedeployment.yaml +++ b/examples/equinixmetal-machinedeployment.yaml @@ -51,4 +51,4 @@ spec: operatingSystemSpec: distUpgradeOnBoot: false versions: - kubelet: 1.24.9 + kubelet: 1.33.4 diff --git a/examples/gce-machinedeployment.yaml b/examples/gce-machinedeployment.yaml index bb4392e28..e7b2bf239 100644 --- a/examples/gce-machinedeployment.yaml +++ b/examples/gce-machinedeployment.yaml @@ -50,6 +50,10 @@ spec: key: serviceAccount # See https://cloud.google.com/compute/docs/regions-zones/ zone: "europe-west3-a" + # Is the id of the GCP project that can be used to create machines in. 
Usually this id is taken from the + # service account however, it should be possible to create a machine in another project, as long as the + # machine controller has the right permissions + projectID: "" # See https://cloud.google.com/compute/docs/machine-types machineType: "n1-standard-2" # In GB @@ -86,4 +90,4 @@ spec: # provided the rhsm will be disabled and any created subscription won't be removed automatically rhsmOfflineToken: "<< REDHAT_SUBSCRIPTIONS_OFFLINE_TOKEN >>" versions: - kubelet: 1.24.9 + kubelet: 1.33.4 diff --git a/examples/hetzner-machinedeployment.yaml b/examples/hetzner-machinedeployment.yaml index 9dafc90bf..922d364a2 100644 --- a/examples/hetzner-machinedeployment.yaml +++ b/examples/hetzner-machinedeployment.yaml @@ -53,6 +53,10 @@ spec: # Optional: network IDs or names networks: - "<< YOUR_NETWORK >>" + # Optional: assignPublicIPv4 whether a public ipv4 should be assigned or not + assignPublicIPv4: true + # Optional: assignPublicIPv6 whether a public ipv6 should be assigned or not + assignPublicIPv6: true # Optional: firewall IDs or names firewalls: - "<< YOUR_FIREWALL >>" @@ -63,4 +67,4 @@ spec: operatingSystemSpec: distUpgradeOnBoot: false versions: - kubelet: 1.24.9 + kubelet: 1.33.4 diff --git a/examples/kubevirt-local-mounter.yaml b/examples/kubevirt-local-mounter.yaml index 302d99e81..55aff3497 100644 --- a/examples/kubevirt-local-mounter.yaml +++ b/examples/kubevirt-local-mounter.yaml @@ -15,7 +15,7 @@ spec: hostPID: true containers: - name: startup-script - image: quay.io/kubermatic/startup-script:v0.1.0 + image: quay.io/kubermatic/startup-script:v0.3.0 securityContext: privileged: true env: diff --git a/examples/kubevirt-machinedeployment.yaml b/examples/kubevirt-machinedeployment.yaml index 81b71ed4a..0313d4b8d 100644 --- a/examples/kubevirt-machinedeployment.yaml +++ b/examples/kubevirt-machinedeployment.yaml @@ -56,7 +56,7 @@ spec: - maxSkew: "1" topologyKey: "kubernetes.io/hostname" whenUnsatisfiable: "" # Allowed values:
"DoNotSchedule", "ScheduleAnyway" - # Can also be `centos`, must align with he configured registryImage above + # Must align with the configured registryImage above operatingSystem: "ubuntu" operatingSystemSpec: distUpgradeOnBoot: false @@ -69,4 +69,4 @@ spec: # provided the rhsm will be disabled and any created subscription won't be removed automatically rhsmOfflineToken: "<< REDHAT_SUBSCRIPTIONS_OFFLINE_TOKEN >>" versions: - kubelet: 1.24.9 + kubelet: 1.33.4 diff --git a/examples/linode-machinedeployment.yaml b/examples/linode-machinedeployment.yaml index cf7beb50d..43ab305ee 100644 --- a/examples/linode-machinedeployment.yaml +++ b/examples/linode-machinedeployment.yaml @@ -53,4 +53,4 @@ spec: operatingSystemSpec: disableAutoUpdate: true versions: - kubelet: 1.24.9 + kubelet: 1.33.4 diff --git a/examples/machine-controller.yaml b/examples/machine-controller.yaml index b724ba5e8..995dd1ac1 100644 --- a/examples/machine-controller.yaml +++ b/examples/machine-controller.yaml @@ -155,6 +155,28 @@ spec: jsonPath: .metadata.deletionTimestamp priority: 1 --- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: machine-controller-selfsigned-issuer + namespace: kube-system +spec: + selfSigned: {} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: machine-controller-serving-cert + namespace: kube-system +spec: + dnsNames: + - "machine-controller-webhook.kube-system.svc" + - "machine-controller-webhook.kube-system.svc.cluster.local" + issuerRef: + kind: Issuer + name: machine-controller-selfsigned-issuer + secretName: machine-controller-webhook-server-cert +--- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: @@ -211,14 +233,13 @@ spec: name: machine-controller command: - /usr/local/bin/machine-controller - - -logtostderr - - -v=3 + - -log-debug=false + - -log-format=json # json or console - -worker-count=5 - -node-csr-approver=true - -cluster-dns=10.10.10.10 - -metrics-address=0.0.0.0:8080 - 
-health-probe-address=0.0.0.0:8085 - - -use-osm=true # Machines that fail to join the cluster within this timeout and # are owned by a MachineSet will get deleted so the MachineSet # controller re-creates them @@ -259,14 +280,15 @@ spec: name: webhook command: - /usr/local/bin/webhook - - -logtostderr - - -v=6 - - -use-osm=true + # on debug level, full Machine objects with inline credentials might be logged, beware! + - -log-debug=false + - -log-format=json # json or console - -namespace=kube-system - -listen-address=0.0.0.0:9876 volumeMounts: - name: machine-controller-admission-cert - mountPath: /tmp/cert + mountPath: /tmp/cert/ + readOnly: true livenessProbe: httpGet: path: /healthz @@ -283,16 +305,8 @@ spec: volumes: - name: machine-controller-admission-cert secret: - secretName: machine-controller-admission-cert ---- -apiVersion: v1 -kind: Secret -metadata: - name: machine-controller-admission-cert - namespace: kube-system -data: - "cert.pem": __admission_cert__ - "key.pem": __admission_key__ + defaultMode: 420 + secretName: machine-controller-webhook-server-cert --- apiVersion: v1 kind: Service @@ -601,6 +615,8 @@ apiVersion: admissionregistration.k8s.io/v1 kind: MutatingWebhookConfiguration metadata: name: machinedeployments.machine-controller.kubermatic.io + annotations: + cert-manager.io/inject-ca-from: kube-system/machine-controller-serving-cert webhooks: - name: machinedeployments.machine-controller.kubermatic.io failurePolicy: Fail @@ -621,7 +637,6 @@ webhooks: namespace: kube-system name: machine-controller-webhook path: /machinedeployments - caBundle: __admission_ca_cert__ - name: machines.machine-controller.kubermatic.io failurePolicy: Fail sideEffects: None @@ -641,4 +656,3 @@ webhooks: namespace: kube-system name: machine-controller-webhook path: /machines - caBundle: __admission_ca_cert__ diff --git a/examples/nutanix-machinedeployment.yaml b/examples/nutanix-machinedeployment.yaml index e6978b0a6..cc8675c84 100644 --- 
a/examples/nutanix-machinedeployment.yaml +++ b/examples/nutanix-machinedeployment.yaml @@ -82,4 +82,4 @@ spec: distUpgradeOnBoot: false disableAutoUpdate: true versions: - kubelet: 1.24.9 + kubelet: 1.33.4 diff --git a/examples/opennebula-machinedeployment.yaml b/examples/opennebula-machinedeployment.yaml new file mode 100644 index 000000000..87b4f87cf --- /dev/null +++ b/examples/opennebula-machinedeployment.yaml @@ -0,0 +1,70 @@ +apiVersion: v1 +kind: Secret +metadata: + # If you change the namespace/name, you must also + # adjust the rbac rules + name: machine-controller-opennebula + namespace: kube-system +type: Opaque +stringData: + password: << ONE_PASSWORD >> +--- +apiVersion: "cluster.k8s.io/v1alpha1" +kind: MachineDeployment +metadata: + name: opennebula-machinedeployment + namespace: kube-system +spec: + paused: false + replicas: 1 + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + minReadySeconds: 0 + selector: + matchLabels: + foo: bar + template: + metadata: + labels: + foo: bar + spec: + providerSpec: + value: + sshPublicKeys: + - "<< YOUR_PUBLIC_KEY >>" + cloudProvider: "opennebula" + cloudProviderSpec: + endpoint: "<< ONE_ENDPOINT including '/RPC2' >>" + username: "<< ONE_USERNAME >>" + # If empty, can be set via ONE_PASSWORD env var + password: + secretKeyRef: + namespace: kube-system + name: machine-controller-opennebula + key: password + cpu: 1 + vcpu: 2 + memory: 1024 + + image: "flatcar-stable" + datastore: "<< YOUR_DATASTORE_NAME >>" + diskSize: 51200 # MB + + network: "<< YOUR_NETWORK_NAME >>" + + enableVNC: true + + # if you want to have more control over e.g. 
placement of the VM you can do this: + #vmTemplateExtra: + # SCHED_REQUIREMENTS: 'RACK="G4"' + operatingSystem: "flatcar" + operatingSystemSpec: + distUpgradeOnBoot: false + + # use cloud-init for flatcar as ignition doesn't know anything about OpenNebula yet + provisioningUtility: "cloud-init" + versions: + kubelet: 1.33.4 diff --git a/examples/openstack-machinedeployment.yaml b/examples/openstack-machinedeployment.yaml index f3401d3cd..c99f0f6cf 100644 --- a/examples/openstack-machinedeployment.yaml +++ b/examples/openstack-machinedeployment.yaml @@ -153,7 +153,6 @@ spec: # the list of metadata you would like to attach to the instance tags: tagKey: tagValue - # Can be 'ubuntu' or 'centos' operatingSystem: "ubuntu" operatingSystemSpec: distUpgradeOnBoot: true @@ -166,4 +165,4 @@ spec: # provided the rhsm will be disabled and any created subscription won't be removed automatically rhsmOfflineToken: "<< REDHAT_SUBSCRIPTIONS_OFFLINE_TOKEN >>" versions: - kubelet: 1.24.9 + kubelet: 1.33.4 diff --git a/examples/operating-system-manager.yaml b/examples/operating-system-manager.yaml index 403f14e2e..da5a8b5e2 100644 --- a/examples/operating-system-manager.yaml +++ b/examples/operating-system-manager.yaml @@ -4,8 +4,7 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.13.0 name: operatingsystemconfigs.operatingsystemmanager.k8c.io spec: group: operatingsystemmanager.k8c.io @@ -14,359 +13,408 @@ spec: listKind: OperatingSystemConfigList plural: operatingsystemconfigs shortNames: - - osc + - osc singular: operatingsystemconfig scope: Namespaced versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: OperatingSystemConfig is the object that represents the OperatingSystemConfig - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. 
Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: OperatingSystemConfigSpec represents the operating system - configuration spec. - properties: - bootstrapConfig: - description: BootstrapConfig is used for initial configuration of - machine and to fetch the kubernetes secret that contains the provisioning - config. - properties: - files: - description: Files is a list of files that should exist in the - instance - items: - description: File is a file that should get written to the host's - file system. The content can either be inlined or referenced - from a secret in the same namespace. - properties: - content: - description: Content describe the file's content. - properties: - inline: - description: Inline is a struct that contains information - about the inlined data. - properties: - data: - description: Data is the file's data. - type: string - encoding: - description: Encoding is the file's encoding (e.g. - base64). - type: string - required: - - data - type: object - type: object - path: - description: Path is the path of the file system where the - file should get written to. - type: string - permissions: - default: 644 - description: Permissions describes with which permissions - the file should get written to the file system. Should - be in decimal base and without any leading zeroes. 
- format: int32 - type: integer - required: - - content - - path - type: object - type: array - modules: - description: CloudInitModules contains the supported cloud-init - modules - properties: - bootcmd: - description: BootCMD module runs arbitrary commands very early - in the boot process, only slightly after a boothook would - run. - items: - type: string - type: array - rh_subscription: - additionalProperties: - type: string - description: RHSubscription registers a Red Hat system either - by username and password or activation and org + - name: v1alpha1 + schema: + openAPIV3Schema: + description: OperatingSystemConfig is the object that represents the OperatingSystemConfig + properties: + apiVersion: + description: + "APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: + "Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: + OperatingSystemConfigSpec represents the operating system + configuration spec. + properties: + bootstrapConfig: + description: + BootstrapConfig is used for initial configuration of + machine and to fetch the kubernetes secret that contains the provisioning + config. + properties: + files: + description: + Files is a list of files that should exist in the + instance + items: + description: + File is a file that should get written to the host's + file system. The content can either be inlined or referenced + from a secret in the same namespace. 
+ properties: + content: + description: Content describe the file's content. + properties: + inline: + description: + Inline is a struct that contains information + about the inlined data. + properties: + data: + description: Data is the file's data. + type: string + encoding: + description: + Encoding is the file's encoding (e.g. + base64). + type: string + required: + - data + type: object + type: object + path: + description: + Path is the path of the file system where the + file should get written to. + type: string + permissions: + default: 644 + description: + Permissions describes with which permissions + the file should get written to the file system. Should + be in decimal base and without any leading zeroes. + format: int32 + type: integer + required: + - content + - path type: object - runcmd: - description: RunCMD Run arbitrary commands at a rc.local like - level with output to the console. - items: - type: string - type: array - yum_repo_dir: - description: 'YumRepoDir the repo parts directory where individual - yum repo config files will be written. Default: /etc/yum.repos.d' - type: string - yum_repos: - additionalProperties: + type: array + modules: + description: + CloudInitModules contains the supported cloud-init + modules + properties: + bootcmd: + description: + BootCMD module runs arbitrary commands very early + in the boot process, only slightly after a boothook would + run. + items: + type: string + type: array + rh_subscription: additionalProperties: type: string + description: + RHSubscription registers a Red Hat system either + by username and password or activation and org type: object - description: YumRepos adds yum repository configuration to - the system. - type: object - type: object - units: - description: Units a list of the systemd unit files which will - run on the instance - items: - description: Unit is a systemd unit used for the operating system - config. - properties: - content: - description: Content is the unit's content. 
- type: string - dropIns: - description: DropIns is a list of drop-ins for this unit. + runcmd: + description: + RunCMD Run arbitrary commands at a rc.local like + level with output to the console. items: - description: DropIn is a drop-in configuration for a systemd - unit. - properties: - content: - description: Content is the content of the drop-in. - type: string - name: - description: Name is the name of the drop-in. - type: string - required: - - content - - name - type: object + type: string type: array - enable: - description: Enable describes whether the unit is enabled - or not. - type: boolean - mask: - description: Mask describes whether the unit is masked or - not. - type: boolean - name: - description: Name is the name of a unit. + yum_repo_dir: + description: + "YumRepoDir the repo parts directory where individual + yum repo config files will be written. Default: /etc/yum.repos.d" type: string - required: - - name + yum_repos: + additionalProperties: + additionalProperties: + type: string + type: object + description: + YumRepos adds yum repository configuration to + the system. 
+ type: object type: object - type: array - userSSHKeys: - description: UserSSHKeys is a list of attached user ssh keys - items: - type: string - type: array - type: object - cloudProvider: - description: CloudProvider represent the cloud provider that support - the given operating system version - properties: - name: - description: Name represents the name of the supported cloud provider - enum: - - aws - - azure - - digitalocean - - gce - - hetzner - - kubevirt - - linode - - nutanix - - openstack - - equinixmetal - - vsphere - - fake - - alibaba - - anexia - - scaleway - - baremetal - - external - - vmware-cloud-director - type: string - spec: - description: Spec represents the os/image reference in the supported - cloud provider - type: object - x-kubernetes-preserve-unknown-fields: true - required: - - name - type: object - osName: - description: 'OSType represent the operating system name e.g: ubuntu' - enum: - - flatcar - - rhel - - centos - - ubuntu - - sles - - amzn2 - - rockylinux - type: string - osVersion: - description: OSVersion the version of the operating system - type: string - provisioningConfig: - description: ProvisioningConfig is used for provisioning the worker - node. - properties: - files: - description: Files is a list of files that should exist in the - instance - items: - description: File is a file that should get written to the host's - file system. The content can either be inlined or referenced - from a secret in the same namespace. - properties: - content: - description: Content describe the file's content. - properties: - inline: - description: Inline is a struct that contains information - about the inlined data. + units: + description: + Units a list of the systemd unit files which will + run on the instance + items: + description: + Unit is a systemd unit used for the operating system + config. + properties: + content: + description: Content is the unit's content. 
+ type: string + dropIns: + description: DropIns is a list of drop-ins for this unit. + items: + description: + DropIn is a drop-in configuration for a systemd + unit. properties: - data: - description: Data is the file's data. + content: + description: Content is the content of the drop-in. type: string - encoding: - description: Encoding is the file's encoding (e.g. - base64). + name: + description: Name is the name of the drop-in. type: string required: - - data + - content + - name type: object - type: object - path: - description: Path is the path of the file system where the - file should get written to. - type: string - permissions: - default: 644 - description: Permissions describes with which permissions - the file should get written to the file system. Should - be in decimal base and without any leading zeroes. - format: int32 - type: integer - required: - - content - - path - type: object - type: array - modules: - description: CloudInitModules contains the supported cloud-init - modules - properties: - bootcmd: - description: BootCMD module runs arbitrary commands very early - in the boot process, only slightly after a boothook would - run. - items: - type: string - type: array - rh_subscription: - additionalProperties: - type: string - description: RHSubscription registers a Red Hat system either - by username and password or activation and org + type: array + enable: + description: + Enable describes whether the unit is enabled + or not. + type: boolean + mask: + description: + Mask describes whether the unit is masked or + not. + type: boolean + name: + description: Name is the name of a unit. + type: string + required: + - name type: object - runcmd: - description: RunCMD Run arbitrary commands at a rc.local like - level with output to the console. - items: - type: string - type: array - yum_repo_dir: - description: 'YumRepoDir the repo parts directory where individual - yum repo config files will be written. 
Default: /etc/yum.repos.d' + type: array + userSSHKeys: + description: UserSSHKeys is a list of attached user ssh keys + items: type: string - yum_repos: - additionalProperties: - additionalProperties: + type: array + type: object + cloudProvider: + description: + CloudProvider represent the cloud provider that support + the given operating system version + properties: + name: + description: Name represents the name of the supported cloud provider + enum: + - aws + - azure + - digitalocean + - edge + - gce + - hetzner + - kubevirt + - linode + - nutanix + - openstack + - equinixmetal + - vsphere + - fake + - alibaba + - anexia + - scaleway + - baremetal + - external + - vmware-cloud-director + - opennebula + type: string + spec: + description: + Spec represents the os/image reference in the supported + cloud provider + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - name + type: object + osName: + description: "OSType represent the operating system name e.g: ubuntu" + enum: + - flatcar + - rhel + - ubuntu + - amzn2 + - rockylinux + type: string + osVersion: + description: OSVersion the version of the operating system + type: string + provisioningConfig: + description: + ProvisioningConfig is used for provisioning the worker + node. + properties: + files: + description: + Files is a list of files that should exist in the + instance + items: + description: + File is a file that should get written to the host's + file system. The content can either be inlined or referenced + from a secret in the same namespace. + properties: + content: + description: Content describe the file's content. + properties: + inline: + description: + Inline is a struct that contains information + about the inlined data. + properties: + data: + description: Data is the file's data. + type: string + encoding: + description: + Encoding is the file's encoding (e.g. + base64). 
+ type: string + required: + - data + type: object + type: object + path: + description: + Path is the path of the file system where the + file should get written to. type: string - type: object - description: YumRepos adds yum repository configuration to - the system. + permissions: + default: 644 + description: + Permissions describes with which permissions + the file should get written to the file system. Should + be in decimal base and without any leading zeroes. + format: int32 + type: integer + required: + - content + - path type: object - type: object - units: - description: Units a list of the systemd unit files which will - run on the instance - items: - description: Unit is a systemd unit used for the operating system - config. + type: array + modules: + description: + CloudInitModules contains the supported cloud-init + modules properties: - content: - description: Content is the unit's content. - type: string - dropIns: - description: DropIns is a list of drop-ins for this unit. + bootcmd: + description: + BootCMD module runs arbitrary commands very early + in the boot process, only slightly after a boothook would + run. items: - description: DropIn is a drop-in configuration for a systemd - unit. - properties: - content: - description: Content is the content of the drop-in. - type: string - name: - description: Name is the name of the drop-in. - type: string - required: - - content - - name - type: object + type: string type: array - enable: - description: Enable describes whether the unit is enabled - or not. - type: boolean - mask: - description: Mask describes whether the unit is masked or - not. - type: boolean - name: - description: Name is the name of a unit. 
+ rh_subscription: + additionalProperties: + type: string + description: + RHSubscription registers a Red Hat system either + by username and password or activation and org + type: object + runcmd: + description: + RunCMD Run arbitrary commands at a rc.local like + level with output to the console. + items: + type: string + type: array + yum_repo_dir: + description: + "YumRepoDir the repo parts directory where individual + yum repo config files will be written. Default: /etc/yum.repos.d" type: string - required: - - name + yum_repos: + additionalProperties: + additionalProperties: + type: string + type: object + description: + YumRepos adds yum repository configuration to + the system. + type: object type: object - type: array - userSSHKeys: - description: UserSSHKeys is a list of attached user ssh keys - items: - type: string - type: array - type: object - required: - - bootstrapConfig - - cloudProvider - - osName - - osVersion - - provisioningConfig - type: object - required: - - spec - type: object - served: true - storage: true + units: + description: + Units a list of the systemd unit files which will + run on the instance + items: + description: + Unit is a systemd unit used for the operating system + config. + properties: + content: + description: Content is the unit's content. + type: string + dropIns: + description: DropIns is a list of drop-ins for this unit. + items: + description: + DropIn is a drop-in configuration for a systemd + unit. + properties: + content: + description: Content is the content of the drop-in. + type: string + name: + description: Name is the name of the drop-in. + type: string + required: + - content + - name + type: object + type: array + enable: + description: + Enable describes whether the unit is enabled + or not. + type: boolean + mask: + description: + Mask describes whether the unit is masked or + not. + type: boolean + name: + description: Name is the name of a unit. 
+ type: string + required: + - name + type: object + type: array + userSSHKeys: + description: UserSSHKeys is a list of attached user ssh keys + items: + type: string + type: array + type: object + provisioningUtility: + default: cloud-init + description: + ProvisioningUtility used for configuring the worker node. + Defaults to cloud-init. + enum: + - cloud-init + - ignition + type: string + required: + - bootstrapConfig + - cloudProvider + - osName + - osVersion + - provisioningConfig + type: object + required: + - spec + type: object + served: true + storage: true --- apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: annotations: - controller-gen.kubebuilder.io/version: v0.9.2 - creationTimestamp: null + controller-gen.kubebuilder.io/version: v0.13.0 name: operatingsystemprofiles.operatingsystemmanager.k8c.io spec: group: operatingsystemmanager.k8c.io @@ -375,496 +423,568 @@ spec: listKind: OperatingSystemProfileList plural: operatingsystemprofiles shortNames: - - osp + - osp singular: operatingsystemprofile scope: Namespaced versions: - - name: v1alpha1 - schema: - openAPIV3Schema: - description: OperatingSystemProfile is the object that represents the OperatingSystemProfile - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: OperatingSystemProfileSpec represents the operating system - configuration spec. - properties: - bootstrapConfig: - description: BootstrapConfig is used for initial configuration of - machine and to fetch the kubernetes secret that contains the provisioning - config. - properties: - files: - description: Files is a list of files that should exist in the - instance - items: - description: File is a file that should get written to the host's - file system. The content can either be inlined or referenced - from a secret in the same namespace. - properties: - content: - description: Content describe the file's content. - properties: - inline: - description: Inline is a struct that contains information - about the inlined data. - properties: - data: - description: Data is the file's data. - type: string - encoding: - description: Encoding is the file's encoding (e.g. - base64). - type: string - required: - - data - type: object - type: object - path: - description: Path is the path of the file system where the - file should get written to. - type: string - permissions: - default: 644 - description: Permissions describes with which permissions - the file should get written to the file system. Should - be in decimal base and without any leading zeroes. - format: int32 - type: integer - required: - - content - - path - type: object - type: array - modules: - description: CloudInitModules field contains the optional cloud-init - modules which are supported by OSM - properties: - bootcmd: - description: BootCMD module runs arbitrary commands very early - in the boot process, only slightly after a boothook would - run. 
- items: - type: string - type: array - rh_subscription: - additionalProperties: - type: string - description: RHSubscription registers a Red Hat system either - by username and password or activation and org - type: object - runcmd: - description: RunCMD Run arbitrary commands at a rc.local like - level with output to the console. - items: - type: string - type: array - yum_repo_dir: - description: 'YumRepoDir the repo parts directory where individual - yum repo config files will be written. Default: /etc/yum.repos.d' - type: string - yum_repos: - additionalProperties: - additionalProperties: - type: string - type: object - description: YumRepos adds yum repository configuration to - the system. - type: object - type: object - supportedContainerRuntimes: - description: SupportedContainerRuntimes represents the container - runtimes supported by the given OS - items: - description: ContainerRuntimeSpec aggregates information about - a specific container runtime - properties: - files: - description: Files to add to the main files list when the - containerRuntime is selected - items: - description: File is a file that should get written to - the host's file system. The content can either be inlined - or referenced from a secret in the same namespace. + - name: v1alpha1 + schema: + openAPIV3Schema: + description: OperatingSystemProfile is the object that represents the OperatingSystemProfile + properties: + apiVersion: + description: + "APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources" + type: string + kind: + description: + "Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + type: string + metadata: + type: object + spec: + description: + OperatingSystemProfileSpec represents the operating system + configuration spec. + properties: + bootstrapConfig: + description: + BootstrapConfig is used for initial configuration of + machine and to fetch the kubernetes secret that contains the provisioning + config. + properties: + files: + description: + Files is a list of files that should exist in the + instance + items: + description: + File is a file that should get written to the host's + file system. The content can either be inlined or referenced + from a secret in the same namespace. + properties: + content: + description: Content describe the file's content. properties: - content: - description: Content describe the file's content. + inline: + description: + Inline is a struct that contains information + about the inlined data. properties: - inline: - description: Inline is a struct that contains - information about the inlined data. - properties: - data: - description: Data is the file's data. - type: string - encoding: - description: Encoding is the file's encoding - (e.g. base64). - type: string - required: - - data - type: object + data: + description: Data is the file's data. + type: string + encoding: + description: + Encoding is the file's encoding (e.g. + base64). + type: string + required: + - data type: object - path: - description: Path is the path of the file system where - the file should get written to. - type: string - permissions: - default: 644 - description: Permissions describes with which permissions - the file should get written to the file system. - Should be in decimal base and without any leading - zeroes. - format: int32 - type: integer - required: - - content - - path type: object + path: + description: + Path is the path of the file system where the + file should get written to. 
+ type: string + permissions: + default: 644 + description: + Permissions describes with which permissions + the file should get written to the file system. Should + be in decimal base and without any leading zeroes. + format: int32 + type: integer + required: + - content + - path + type: object + type: array + modules: + description: + CloudInitModules field contains the optional cloud-init + modules which are supported by OSM + properties: + bootcmd: + description: + BootCMD module runs arbitrary commands very early + in the boot process, only slightly after a boothook would + run. + items: + type: string type: array - name: - description: Name of the Container runtime - enum: - - docker - - containerd - type: string - templates: + rh_subscription: additionalProperties: type: string - description: Templates to add to the available templates - when the containerRuntime is selected + description: + RHSubscription registers a Red Hat system either + by username and password or activation and org type: object - required: - - files - - name - type: object - type: array - templates: - additionalProperties: - type: string - description: Templates to be included in units and files - type: object - units: - description: Units a list of the systemd unit files which will - run on the instance - items: - description: Unit is a systemd unit used for the operating system - config. - properties: - content: - description: Content is the unit's content. - type: string - dropIns: - description: DropIns is a list of drop-ins for this unit. + runcmd: + description: + RunCMD Run arbitrary commands at a rc.local like + level with output to the console. items: - description: DropIn is a drop-in configuration for a systemd - unit. - properties: - content: - description: Content is the content of the drop-in. - type: string - name: - description: Name is the name of the drop-in. 
- type: string - required: - - content - - name - type: object + type: string type: array - enable: - description: Enable describes whether the unit is enabled - or not. - type: boolean - mask: - description: Mask describes whether the unit is masked or - not. - type: boolean - name: - description: Name is the name of a unit. + yum_repo_dir: + description: + "YumRepoDir the repo parts directory where individual + yum repo config files will be written. Default: /etc/yum.repos.d" type: string - required: - - name + yum_repos: + additionalProperties: + additionalProperties: + type: string + type: object + description: + YumRepos adds yum repository configuration to + the system. + type: object type: object - type: array - type: object - osName: - description: 'OSType represent the operating system name e.g: ubuntu' - enum: - - flatcar - - rhel - - centos - - ubuntu - - sles - - amzn2 - - rockylinux - type: string - osVersion: - description: OSVersion the version of the operating system - type: string - provisioningConfig: - description: ProvisioningConfig is used for provisioning the worker - node. - properties: - files: - description: Files is a list of files that should exist in the - instance - items: - description: File is a file that should get written to the host's - file system. The content can either be inlined or referenced - from a secret in the same namespace. - properties: - content: - description: Content describe the file's content. - properties: - inline: - description: Inline is a struct that contains information - about the inlined data. + supportedContainerRuntimes: + description: + SupportedContainerRuntimes represents the container + runtimes supported by the given OS. Docker has been deprecated + and is no-op. 
+ items: + description: + ContainerRuntimeSpec aggregates information about + a specific container runtime + properties: + files: + description: + Files to add to the main files list when the + containerRuntime is selected + items: + description: + File is a file that should get written to + the host's file system. The content can either be inlined + or referenced from a secret in the same namespace. properties: - data: - description: Data is the file's data. - type: string - encoding: - description: Encoding is the file's encoding (e.g. - base64). + content: + description: Content describe the file's content. + properties: + inline: + description: + Inline is a struct that contains + information about the inlined data. + properties: + data: + description: Data is the file's data. + type: string + encoding: + description: + Encoding is the file's encoding + (e.g. base64). + type: string + required: + - data + type: object + type: object + path: + description: + Path is the path of the file system where + the file should get written to. type: string + permissions: + default: 644 + description: + Permissions describes with which permissions + the file should get written to the file system. + Should be in decimal base and without any leading + zeroes. + format: int32 + type: integer required: - - data + - content + - path type: object - type: object - path: - description: Path is the path of the file system where the - file should get written to. - type: string - permissions: - default: 644 - description: Permissions describes with which permissions - the file should get written to the file system. Should - be in decimal base and without any leading zeroes. 
- format: int32 - type: integer - required: - - content - - path - type: object - type: array - modules: - description: CloudInitModules field contains the optional cloud-init - modules which are supported by OSM - properties: - bootcmd: - description: BootCMD module runs arbitrary commands very early - in the boot process, only slightly after a boothook would - run. - items: - type: string - type: array - rh_subscription: - additionalProperties: - type: string - description: RHSubscription registers a Red Hat system either - by username and password or activation and org + type: array + name: + description: Name of the Container runtime + enum: + - docker + - containerd + type: string + templates: + additionalProperties: + type: string + description: + Templates to add to the available templates + when the containerRuntime is selected + type: object + required: + - files + - name type: object - runcmd: - description: RunCMD Run arbitrary commands at a rc.local like - level with output to the console. - items: - type: string - type: array - yum_repo_dir: - description: 'YumRepoDir the repo parts directory where individual - yum repo config files will be written. Default: /etc/yum.repos.d' + type: array + templates: + additionalProperties: type: string - yum_repos: - additionalProperties: - additionalProperties: + description: Templates to be included in units and files + type: object + units: + description: + Units a list of the systemd unit files which will + run on the instance + items: + description: + Unit is a systemd unit used for the operating system + config. + properties: + content: + description: Content is the unit's content. type: string - type: object - description: YumRepos adds yum repository configuration to - the system. + dropIns: + description: DropIns is a list of drop-ins for this unit. + items: + description: + DropIn is a drop-in configuration for a systemd + unit. + properties: + content: + description: Content is the content of the drop-in. 
+ type: string + name: + description: Name is the name of the drop-in. + type: string + required: + - content + - name + type: object + type: array + enable: + description: + Enable describes whether the unit is enabled + or not. + type: boolean + mask: + description: + Mask describes whether the unit is masked or + not. + type: boolean + name: + description: Name is the name of a unit. + type: string + required: + - name type: object - type: object - supportedContainerRuntimes: - description: SupportedContainerRuntimes represents the container - runtimes supported by the given OS - items: - description: ContainerRuntimeSpec aggregates information about - a specific container runtime - properties: - files: - description: Files to add to the main files list when the - containerRuntime is selected - items: - description: File is a file that should get written to - the host's file system. The content can either be inlined - or referenced from a secret in the same namespace. + type: array + type: object + osName: + description: "OSType represent the operating system name e.g: ubuntu" + enum: + - flatcar + - rhel + - ubuntu + - amzn2 + - rockylinux + type: string + osVersion: + description: OSVersion the version of the operating system + type: string + provisioningConfig: + description: + ProvisioningConfig is used for provisioning the worker + node. + properties: + files: + description: + Files is a list of files that should exist in the + instance + items: + description: + File is a file that should get written to the host's + file system. The content can either be inlined or referenced + from a secret in the same namespace. + properties: + content: + description: Content describe the file's content. properties: - content: - description: Content describe the file's content. + inline: + description: + Inline is a struct that contains information + about the inlined data. 
properties: - inline: - description: Inline is a struct that contains - information about the inlined data. - properties: - data: - description: Data is the file's data. - type: string - encoding: - description: Encoding is the file's encoding - (e.g. base64). - type: string - required: - - data - type: object + data: + description: Data is the file's data. + type: string + encoding: + description: + Encoding is the file's encoding (e.g. + base64). + type: string + required: + - data type: object - path: - description: Path is the path of the file system where - the file should get written to. - type: string - permissions: - default: 644 - description: Permissions describes with which permissions - the file should get written to the file system. - Should be in decimal base and without any leading - zeroes. - format: int32 - type: integer - required: - - content - - path type: object + path: + description: + Path is the path of the file system where the + file should get written to. + type: string + permissions: + default: 644 + description: + Permissions describes with which permissions + the file should get written to the file system. Should + be in decimal base and without any leading zeroes. + format: int32 + type: integer + required: + - content + - path + type: object + type: array + modules: + description: + CloudInitModules field contains the optional cloud-init + modules which are supported by OSM + properties: + bootcmd: + description: + BootCMD module runs arbitrary commands very early + in the boot process, only slightly after a boothook would + run. 
+ items: + type: string type: array - name: - description: Name of the Container runtime - enum: - - docker - - containerd - type: string - templates: + rh_subscription: additionalProperties: type: string - description: Templates to add to the available templates - when the containerRuntime is selected + description: + RHSubscription registers a Red Hat system either + by username and password or activation and org type: object - required: - - files - - name - type: object - type: array - templates: - additionalProperties: - type: string - description: Templates to be included in units and files - type: object - units: - description: Units a list of the systemd unit files which will - run on the instance - items: - description: Unit is a systemd unit used for the operating system - config. - properties: - content: - description: Content is the unit's content. - type: string - dropIns: - description: DropIns is a list of drop-ins for this unit. + runcmd: + description: + RunCMD Run arbitrary commands at a rc.local like + level with output to the console. items: - description: DropIn is a drop-in configuration for a systemd - unit. - properties: - content: - description: Content is the content of the drop-in. - type: string - name: - description: Name is the name of the drop-in. - type: string - required: - - content - - name - type: object + type: string type: array - enable: - description: Enable describes whether the unit is enabled - or not. - type: boolean - mask: - description: Mask describes whether the unit is masked or - not. - type: boolean - name: - description: Name is the name of a unit. + yum_repo_dir: + description: + "YumRepoDir the repo parts directory where individual + yum repo config files will be written. Default: /etc/yum.repos.d" type: string - required: - - name + yum_repos: + additionalProperties: + additionalProperties: + type: string + type: object + description: + YumRepos adds yum repository configuration to + the system. 
+ type: object type: object - type: array - type: object - supportedCloudProviders: - description: SupportedCloudProviders represent the cloud providers - that support the given operating system version - items: - description: CloudProviderSpec contains the os/image reference for - a specific supported cloud provider - properties: - name: - description: Name represents the name of the supported cloud - provider - enum: - - aws - - azure - - digitalocean - - gce - - hetzner - - kubevirt - - linode - - nutanix - - openstack - - equinixmetal - - vsphere - - fake - - alibaba - - anexia - - scaleway - - baremetal - - external - - vmware-cloud-director - type: string - spec: - description: Spec represents the os/image reference in the supported - cloud provider + supportedContainerRuntimes: + description: + SupportedContainerRuntimes represents the container + runtimes supported by the given OS. Docker has been deprecated + and is no-op. + items: + description: + ContainerRuntimeSpec aggregates information about + a specific container runtime + properties: + files: + description: + Files to add to the main files list when the + containerRuntime is selected + items: + description: + File is a file that should get written to + the host's file system. The content can either be inlined + or referenced from a secret in the same namespace. + properties: + content: + description: Content describe the file's content. + properties: + inline: + description: + Inline is a struct that contains + information about the inlined data. + properties: + data: + description: Data is the file's data. + type: string + encoding: + description: + Encoding is the file's encoding + (e.g. base64). + type: string + required: + - data + type: object + type: object + path: + description: + Path is the path of the file system where + the file should get written to. 
+ type: string + permissions: + default: 644 + description: + Permissions describes with which permissions + the file should get written to the file system. + Should be in decimal base and without any leading + zeroes. + format: int32 + type: integer + required: + - content + - path + type: object + type: array + name: + description: Name of the Container runtime + enum: + - docker + - containerd + type: string + templates: + additionalProperties: + type: string + description: + Templates to add to the available templates + when the containerRuntime is selected + type: object + required: + - files + - name + type: object + type: array + templates: + additionalProperties: + type: string + description: Templates to be included in units and files type: object - x-kubernetes-preserve-unknown-fields: true - required: - - name + units: + description: + Units a list of the systemd unit files which will + run on the instance + items: + description: + Unit is a systemd unit used for the operating system + config. + properties: + content: + description: Content is the unit's content. + type: string + dropIns: + description: DropIns is a list of drop-ins for this unit. + items: + description: + DropIn is a drop-in configuration for a systemd + unit. + properties: + content: + description: Content is the content of the drop-in. + type: string + name: + description: Name is the name of the drop-in. + type: string + required: + - content + - name + type: object + type: array + enable: + description: + Enable describes whether the unit is enabled + or not. + type: boolean + mask: + description: + Mask describes whether the unit is masked or + not. + type: boolean + name: + description: Name is the name of a unit. 
+ type: string + required: + - name + type: object + type: array type: object - type: array - version: - description: Version is the version of the operating System Profile - pattern: v(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ - type: string - required: - - bootstrapConfig - - osName - - osVersion - - provisioningConfig - - supportedCloudProviders - - version - type: object - required: - - spec - type: object - served: true - storage: true + provisioningUtility: + default: cloud-init + description: + ProvisioningUtility used for configuring the worker node. + Defaults to cloud-init. + enum: + - cloud-init + - ignition + type: string + supportedCloudProviders: + description: + SupportedCloudProviders represent the cloud providers + that support the given operating system version + items: + description: + CloudProviderSpec contains the os/image reference for + a specific supported cloud provider + properties: + name: + description: + Name represents the name of the supported cloud + provider + enum: + - aws + - azure + - digitalocean + - edge + - gce + - hetzner + - kubevirt + - linode + - nutanix + - openstack + - equinixmetal + - vsphere + - fake + - alibaba + - anexia + - scaleway + - baremetal + - external + - vmware-cloud-director + - opennebula + type: string + spec: + description: + Spec represents the os/image reference in the supported + cloud provider + type: object + x-kubernetes-preserve-unknown-fields: true + required: + - name + type: object + type: array + version: + description: Version is the version of the operating System Profile + pattern: v(0|[1-9]\d*)\.(0|[1-9]\d*)\.(0|[1-9]\d*)(?:-((?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*)(?:\.(?:0|[1-9]\d*|\d*[a-zA-Z-][0-9a-zA-Z-]*))*))?(?:\+([0-9a-zA-Z-]+(?:\.[0-9a-zA-Z-]+)*))?$ + type: string + required: + - bootstrapConfig + - osName + - osVersion + - provisioningConfig + - 
supportedCloudProviders + - version + type: object + required: + - spec + type: object + served: true + storage: true --- apiVersion: cert-manager.io/v1 kind: Issuer @@ -975,12 +1095,12 @@ spec: serviceAccountName: operating-system-manager-webhook containers: - image: quay.io/kubermatic/operating-system-manager:latest - imagePullPolicy: IfNotPresent + imagePullPolicy: Always name: webhook command: - /usr/local/bin/webhook - - -logtostderr - - -v=6 + - -log-debug=false + - -log-format=json - -namespace=kube-system volumeMounts: - name: operating-system-manager-admission-cert @@ -1303,12 +1423,12 @@ spec: serviceAccountName: operating-system-manager containers: - image: quay.io/kubermatic/operating-system-manager:latest - imagePullPolicy: IfNotPresent + imagePullPolicy: Always name: operating-system-manager command: - /usr/local/bin/osm-controller - - -logtostderr - - -v=5 + - -log-debug=false + - -log-format=json - -worker-count=5 - -cluster-dns=10.10.10.10 - -metrics-address=0.0.0.0:8080 diff --git a/examples/scaleway-machinedeployment.yaml b/examples/scaleway-machinedeployment.yaml index da66040ff..ad56eb180 100644 --- a/examples/scaleway-machinedeployment.yaml +++ b/examples/scaleway-machinedeployment.yaml @@ -56,9 +56,8 @@ spec: ipv6: false tags: - "machine-controller" - # Can be 'ubuntu' or 'centos' operatingSystem: "ubuntu" operatingSystemSpec: disableAutoUpdate: true versions: - kubelet: 1.24.9 + kubelet: 1.33.4 diff --git a/examples/vmware-cloud-director-machinedeployment.yaml b/examples/vmware-cloud-director-machinedeployment.yaml index db9507890..1e011aacd 100644 --- a/examples/vmware-cloud-director-machinedeployment.yaml +++ b/examples/vmware-cloud-director-machinedeployment.yaml @@ -48,6 +48,13 @@ spec: namespace: kube-system name: machine-controller-vmware-cloud-director key: password + # Can also be set via the env var 'VCD_API_TOKEN' on the machine-controller + # Either username, password or apiToken should be used for authentication. 
+ apiToken: + secretKeyRef: + namespace: kube-system + name: machine-controller-vmware-cloud-director + key: apiToken # Can also be set via the env var 'VCD_ORG' on the machine-controller organization: "<< VCD_ORG >>" # Can also be set via the env var 'VCD_VDC' on the machine-controller @@ -72,8 +79,14 @@ spec: diskBusType: "paravirtual" diskIOPS: 0 storageProfile: "*" + # Optional: SizingPolicy is the sizing policy to be used for machines created by this machine deployment. + # If left empty, default sizing policy if specified at OVDC/organization level is used. + sizingPolicy: "" + # Optional: PlacementPolicy is the placement policy to be used for machines created by this machine deployment. + # If left empty, default placement policy if specified at OVDC/organization level is used. + placementPolicy: "" operatingSystem: "ubuntu" operatingSystemSpec: distUpgradeOnBoot: false versions: - kubelet: 1.24.9 + kubelet: 1.33.4 diff --git a/examples/vsphere-datastore-cluster-machinedeployment.yaml b/examples/vsphere-datastore-cluster-machinedeployment.yaml index 9018db197..aa5e92386 100644 --- a/examples/vsphere-datastore-cluster-machinedeployment.yaml +++ b/examples/vsphere-datastore-cluster-machinedeployment.yaml @@ -51,12 +51,17 @@ spec: datacenter: datacenter1 templateVMName: ubuntu-template # Optional. Sets the networks on the VM. If no network is specified, the template default will be used. - vmNetName: network1 + networks: + - network1 # Optional folder: folder1 datastoreCluster: datastorecluster1 # Can also be set via the env var 'VSPHERE_ALLOW_INSECURE' on the machine-controller allowInsecure: true + # Cluster to configure vm anti affinity rules + cluster: cl-1 + # Automatically create anti affinity rules for machines + vmAntiAffinity: true cpus: 2 memoryMB: 2048 # Optional: Resize the root disk to this size. 
Must be bigger than the existing size @@ -74,4 +79,4 @@ spec: # provided the rhsm will be disabled and any created subscription won't be removed automatically rhsmOfflineToken: "<< REDHAT_SUBSCRIPTIONS_OFFLINE_TOKEN >>" versions: - kubelet: 1.24.9 + kubelet: 1.33.4 diff --git a/examples/vsphere-machinedeployment.yaml b/examples/vsphere-machinedeployment.yaml index 59ad3072a..16c662751 100644 --- a/examples/vsphere-machinedeployment.yaml +++ b/examples/vsphere-machinedeployment.yaml @@ -51,12 +51,19 @@ spec: datacenter: datacenter1 templateVMName: ubuntu-template # Optional. Sets the networks on the VM. If no network is specified, the template default will be used. - vmNetName: network1 + networks: + - network1 # Optional folder: folder1 datastore: datastore1 # Can also be set via the env var 'VSPHERE_ALLOW_INSECURE' on the machine-controller allowInsecure: true + # Cluster to configure vm anti affinity rules + cluster: cl-1 + # Automatically create anti affinity rules for machines + vmAntiAffinity: true + # Optional. Sets the VM group for the Machines in the MachineDeployment. + # vmGroup: "vmgroup-name" cpus: 2 memoryMB: 2048 # Optional: Resize the root disk to this size. 
Must be bigger than the existing size @@ -74,4 +81,4 @@ spec: # provided the rhsm will be disabled and any created subscription won't be removed automatically rhsmOfflineToken: "<< REDHAT_SUBSCRIPTIONS_OFFLINE_TOKEN >>" versions: - kubelet: 1.24.9 + kubelet: 1.33.4 diff --git a/examples/vultr-machinedeployment.yaml b/examples/vultr-machinedeployment.yaml index 390c88453..68416da54 100644 --- a/examples/vultr-machinedeployment.yaml +++ b/examples/vultr-machinedeployment.yaml @@ -43,9 +43,22 @@ spec: namespace: kube-system name: machine-controller-vultr key: apiKey + # Default is false meaning a virtual machine instance is created + # If true, a bare metal instance is created + physicalMachine: false region: blr - plan: 'vhf-8c-32gb' + plan: "vhf-8c-32gb" + # This takes precedence over enableVPC + vpcId: + - + # For more reference, see + # https://www.vultr.com/api/#tag/instances/operation/create-instance + enableVPC: false + enableVPC2: true + vpc2Id: + - # Required: app_id, image_id, os_id, snapshot_id, or iso_id must be provided. Currently only os_id is supported. 
+ # This takes precedence over operatingSystem osId: 215 # Optional tags: @@ -57,4 +70,4 @@ spec: distUpgradeOnBoot: false disableAutoUpdate: true versions: - kubelet: 1.24.9 + kubelet: 1.33.4 diff --git a/go.mod b/go.mod index eb6a40591..09fb67071 100644 --- a/go.mod +++ b/go.mod @@ -1,188 +1,190 @@ -module github.com/kubermatic/machine-controller +module k8c.io/machine-controller -go 1.19 +go 1.25.0 + +replace k8c.io/machine-controller/sdk => ./sdk require ( - cloud.google.com/go/logging v1.6.1 - cloud.google.com/go/monitoring v1.9.1 - github.com/Azure/azure-sdk-for-go v65.0.0+incompatible - github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 + cloud.google.com/go/logging v1.11.0 + cloud.google.com/go/monitoring v1.21.1 + github.com/Azure/azure-sdk-for-go v68.0.0+incompatible + github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 github.com/Azure/go-autorest/autorest/to v0.4.0 - github.com/BurntSushi/toml v1.2.1 - github.com/Masterminds/semver/v3 v3.2.0 - github.com/Masterminds/sprig/v3 v3.2.3 - github.com/aliyun/alibaba-cloud-sdk-go v1.62.112 - github.com/aws/aws-sdk-go-v2 v1.17.3 - github.com/aws/aws-sdk-go-v2/config v1.18.7 - github.com/aws/aws-sdk-go-v2/credentials v1.13.7 - github.com/aws/aws-sdk-go-v2/service/ec2 v1.77.0 - github.com/aws/aws-sdk-go-v2/service/sts v1.17.7 - github.com/aws/smithy-go v1.13.5 - github.com/coreos/container-linux-config-transpiler v0.9.0 - github.com/davecgh/go-spew v1.1.1 - github.com/digitalocean/godo v1.93.0 - github.com/ghodss/yaml v1.0.0 - github.com/go-test/deep v1.0.8 - github.com/google/uuid v1.3.0 - github.com/gophercloud/gophercloud v1.1.1 + github.com/Masterminds/semver/v3 v3.4.0 + github.com/OpenNebula/one/src/oca/go/src/goca v0.0.0-20240905143811-b2ab5b7c9c14 + github.com/aliyun/alibaba-cloud-sdk-go v1.63.15 + github.com/aws/aws-sdk-go-v2 v1.30.5 + github.com/aws/aws-sdk-go-v2/config v1.27.33 + github.com/aws/aws-sdk-go-v2/credentials v1.17.32 + github.com/aws/aws-sdk-go-v2/service/ec2 v1.177.3 + 
github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 + github.com/aws/smithy-go v1.20.4 + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc + github.com/digitalocean/godo v1.124.0 + github.com/equinix/equinix-sdk-go v0.46.0 + github.com/go-logr/logr v1.4.3 + github.com/go-logr/zapr v1.3.0 + github.com/go-test/deep v1.1.0 + github.com/google/go-cmp v0.7.0 + github.com/google/uuid v1.6.0 + github.com/gophercloud/gophercloud v1.14.0 github.com/heptiolabs/healthcheck v0.0.0-20211123025425-613501dd5deb - github.com/hetznercloud/hcloud-go v1.39.0 - github.com/linode/linodego v1.10.0 - github.com/nutanix-cloud-native/prism-go-client v0.3.4 - github.com/packethost/packngo v0.29.0 + github.com/hetznercloud/hcloud-go/v2 v2.13.1 + github.com/linode/linodego v1.40.0 + github.com/nutanix-cloud-native/prism-go-client v0.5.1 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/pborman/uuid v1.2.1 github.com/pkg/errors v0.9.1 - github.com/pmezard/go-difflib v1.0.0 - github.com/prometheus/client_golang v1.14.0 - github.com/scaleway/scaleway-sdk-go v1.0.0-beta.10 - github.com/sethvargo/go-password v0.2.0 - github.com/tinkerbell/tink v0.8.0 - github.com/vmware/go-vcloud-director/v2 v2.18.0 - github.com/vmware/govmomi v0.30.0 - github.com/vultr/govultr/v2 v2.17.2 - go.anx.io/go-anxcloud v0.5.0 - golang.org/x/crypto v0.4.0 - golang.org/x/oauth2 v0.3.0 - gomodules.xyz/jsonpatch/v2 v2.2.0 - google.golang.org/api v0.105.0 - google.golang.org/grpc v1.51.0 - gopkg.in/gcfg.v1 v1.2.3 + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 + github.com/prometheus/client_golang v1.23.2 + github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 + github.com/spf13/pflag v1.0.9 + github.com/tinkerbell/tink v0.10.1 + github.com/vmware/go-vcloud-director/v2 v2.25.0 + github.com/vmware/govmomi v0.43.0 + github.com/vultr/govultr/v3 v3.9.1 + go.anx.io/go-anxcloud v0.7.3 + go.uber.org/zap v1.27.0 + golang.org/x/crypto v0.46.0 + golang.org/x/oauth2 v0.34.0 + 
gomodules.xyz/jsonpatch/v2 v2.4.0 + google.golang.org/api v0.197.0 gopkg.in/yaml.v3 v3.0.1 - k8s.io/api v0.26.0 - k8s.io/apiextensions-apiserver v0.26.0 - k8s.io/apimachinery v0.26.0 - k8s.io/client-go v12.0.0+incompatible + k8c.io/machine-controller/sdk v0.0.0-00010101000000-000000000000 + k8s.io/api v0.35.0 + k8s.io/apiextensions-apiserver v0.35.0 + k8s.io/apimachinery v0.35.0 + k8s.io/client-go v0.35.0 + k8s.io/cloud-provider v0.35.0 k8s.io/klog v1.0.0 - k8s.io/kubelet v0.26.0 - k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 - kubevirt.io/api v0.58.0 - kubevirt.io/containerized-data-importer-api v1.55.2 - sigs.k8s.io/controller-runtime v0.14.1 - sigs.k8s.io/yaml v1.3.0 + k8s.io/kubectl v0.35.0 + k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 + kubevirt.io/api v1.3.1 + kubevirt.io/containerized-data-importer-api v1.60.3 + sigs.k8s.io/controller-runtime v0.23.1 ) require ( - cloud.google.com/go v0.107.0 // indirect - cloud.google.com/go/compute v1.14.0 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/longrunning v0.3.0 // indirect + cloud.google.com/go v0.115.1 // indirect + cloud.google.com/go/auth v0.9.4 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect + cloud.google.com/go/compute/metadata v0.9.0 // indirect + cloud.google.com/go/longrunning v0.6.1 // indirect github.com/Azure/go-autorest v14.2.0+incompatible // indirect - github.com/Azure/go-autorest/autorest v0.11.28 // indirect - github.com/Azure/go-autorest/autorest/adal v0.9.21 // indirect + github.com/Azure/go-autorest/autorest v0.11.29 // indirect + github.com/Azure/go-autorest/autorest/adal v0.9.24 // indirect github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 // indirect github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect github.com/Azure/go-autorest/logger v0.2.1 // indirect github.com/Azure/go-autorest/tracing v0.6.0 // indirect - 
github.com/Masterminds/goutils v1.1.1 // indirect - github.com/PaesslerAG/gval v1.2.1 // indirect + github.com/PaesslerAG/gval v1.2.2 // indirect github.com/PaesslerAG/jsonpath v0.1.1 // indirect - github.com/ajeddeloh/go-json v0.0.0-20200220154158-5ae607161559 // indirect - github.com/ajeddeloh/yaml v0.0.0-20170912190910-6b94386aeefd // indirect - github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 // indirect github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.3.28 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.11.28 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.11 // indirect + github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/blang/semver/v4 v4.0.0 // indirect - github.com/cespare/xxhash/v2 v2.2.0 // indirect - github.com/coreos/go-semver v0.3.0 // indirect - github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect - github.com/coreos/ignition v0.35.0 // indirect + 
github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect - github.com/docker/distribution v2.8.1+incompatible // indirect - github.com/emicklei/go-restful/v3 v3.10.1 // indirect - github.com/evanphx/json-patch v5.6.0+incompatible // indirect - github.com/evanphx/json-patch/v5 v5.6.0 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect - github.com/go-logr/logr v1.2.3 // indirect + github.com/emicklei/go-restful/v3 v3.12.2 // indirect + github.com/evanphx/json-patch v5.9.0+incompatible // indirect + github.com/evanphx/json-patch/v5 v5.9.11 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.9.0 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-logr/zapr v1.2.3 // indirect - github.com/go-openapi/jsonpointer v0.19.6 // indirect - github.com/go-openapi/jsonreference v0.20.1 // indirect - github.com/go-openapi/swag v0.22.3 // indirect - github.com/go-resty/resty/v2 v2.7.0 // indirect - github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-jwt/jwt/v4 v4.4.3 // indirect + github.com/go-openapi/analysis v0.23.0 // indirect + github.com/go-openapi/errors v0.22.0 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/loads v0.22.0 // indirect + github.com/go-openapi/spec v0.21.0 // indirect + github.com/go-openapi/strfmt v0.23.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-openapi/validate v0.24.0 // indirect + github.com/go-resty/resty/v2 v2.14.0 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect + github.com/golang-jwt/jwt/v4 v4.5.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.2 // indirect - github.com/google/gnostic v0.6.9 // indirect - github.com/google/go-cmp v0.5.9 // indirect + 
github.com/google/btree v1.1.3 // indirect + github.com/google/gnostic-models v0.7.0 // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect - github.com/googleapis/gax-go/v2 v2.7.0 // indirect + github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 // indirect + github.com/google/s2a-go v0.1.8 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect + github.com/googleapis/gax-go/v2 v2.13.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-retryablehttp v0.7.2 // indirect - github.com/hashicorp/go-version v1.6.0 // indirect - github.com/huandu/xstrings v1.4.0 // indirect - github.com/imdario/mergo v0.3.13 // indirect - github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/hashicorp/go-retryablehttp v0.7.7 // indirect + github.com/hashicorp/go-version v1.7.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b // indirect github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mitchellh/copystructure v1.2.0 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/reflectwalk v1.0.2 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect - github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/onsi/ginkgo/v2 v2.6.0 // indirect - github.com/onsi/gomega v1.24.1 // indirect - 
github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/openshift/api v0.0.0-20211217221424-8779abfbd571 // indirect + github.com/oklog/ulid v1.3.1 // indirect + github.com/onsi/ginkgo/v2 v2.27.2 // indirect + github.com/onsi/gomega v1.38.2 // indirect github.com/openshift/custom-resource-status v1.1.2 // indirect github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b // indirect - github.com/packethost/pkg v0.0.0-20211110202003-387414657e83 // indirect github.com/peterhellberg/link v1.2.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.39.0 // indirect - github.com/prometheus/procfs v0.9.0 // indirect - github.com/rogpeppe/go-internal v1.9.0 // indirect - github.com/shopspring/decimal v1.3.1 // indirect - github.com/spf13/cast v1.5.0 // indirect - github.com/spf13/cobra v1.6.1 // indirect - github.com/spf13/pflag v1.0.5 // indirect - github.com/vincent-petithory/dataurl v1.0.0 // indirect + github.com/prometheus/client_model v0.6.2 // indirect + github.com/prometheus/common v0.66.1 // indirect + github.com/prometheus/procfs v0.16.1 // indirect + github.com/rogpeppe/go-internal v1.14.1 // indirect + github.com/shopspring/decimal v1.4.0 // indirect + github.com/x448/float16 v0.8.4 // indirect + go.mongodb.org/mongo-driver v1.16.1 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0 // indirect - go.opentelemetry.io/otel v1.11.2 // indirect - go.opentelemetry.io/otel/metric v0.34.0 // indirect - go.opentelemetry.io/otel/trace v1.11.2 // indirect - go.uber.org/atomic v1.10.0 // indirect - go.uber.org/multierr v1.9.0 // indirect - go.uber.org/zap v1.24.0 // indirect - go4.org v0.0.0-20201209231011-d4a079459e60 // indirect - golang.org/x/net v0.7.0 // indirect - golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.5.0 // indirect - golang.org/x/term v0.5.0 // indirect - golang.org/x/text v0.7.0 // 
indirect - golang.org/x/time v0.3.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect - google.golang.org/protobuf v1.28.1 // indirect + go.opentelemetry.io/auto/sdk v1.2.1 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect + go.opentelemetry.io/otel v1.39.0 // indirect + go.opentelemetry.io/otel/metric v1.39.0 // indirect + go.opentelemetry.io/otel/trace v1.39.0 // indirect + go.uber.org/atomic v1.11.0 // indirect + go.uber.org/multierr v1.11.0 // indirect + go.yaml.in/yaml/v2 v2.4.3 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 // indirect + golang.org/x/mod v0.30.0 // indirect + golang.org/x/net v0.48.0 // indirect + golang.org/x/sync v0.19.0 // indirect + golang.org/x/sys v0.39.0 // indirect + golang.org/x/term v0.38.0 // indirect + golang.org/x/text v0.32.0 // indirect + golang.org/x/time v0.11.0 // indirect + golang.org/x/tools v0.39.0 // indirect + google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect + google.golang.org/grpc v1.79.3 // indirect + google.golang.org/protobuf v1.36.10 // indirect gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 // indirect + gopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect - gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/component-base v0.26.0 // indirect - k8s.io/klog/v2 v2.80.1 // indirect - k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi 
v0.0.0-20250910181357-589584f1c912 // indirect kubevirt.io/controller-lifecycle-operator-sdk/api v0.2.4 // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect + sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 // indirect + sigs.k8s.io/yaml v1.6.0 // indirect ) - -replace k8s.io/client-go => k8s.io/client-go v0.26.0 diff --git a/go.sum b/go.sum index dcb2aa01d..372ad422f 100644 --- a/go.sum +++ b/go.sum @@ -1,64 +1,34 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= -cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= -cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= -cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= -cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= -cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= -cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.107.0 h1:qkj22L7bgkl6vIeZDlOY2po43Mx/TIa2Wsa7VR+PEww= -cloud.google.com/go 
v0.107.0/go.mod h1:wpc2eNrD7hXUTy8EKS10jkxpZBjASrORK7goS+3YX2I= -cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= -cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= -cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= -cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= -cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.14.0 h1:hfm2+FfxVmnRlh6LpB7cg1ZNU+5edAHmW679JePztk0= -cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= -cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= -cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v0.8.0 h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk= -cloud.google.com/go/logging v1.6.1 h1:ZBsZK+JG+oCDT+vaxwqF2egKNRjz8soXiS6Xv79benI= -cloud.google.com/go/logging v1.6.1/go.mod h1:5ZO0mHHbvm8gEmeEUHrmDlTDSu5imF6MUP9OfilNXBw= -cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs= -cloud.google.com/go/longrunning v0.3.0/go.mod h1:qth9Y41RRSUE69rDcOn6DdK3HfQfsUI0YSmW3iIlLJc= -cloud.google.com/go/monitoring v1.9.1 h1:y9g09cWAQaX3ZYscR/nfaFUXtuyRqD2+i0jTOw0BZFI= -cloud.google.com/go/monitoring v1.9.1/go.mod h1:iFzRDMSDMvvf/z30Ge1jwtuEe/jlPPAFusmvCkUdo+o= -cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= -cloud.google.com/go/pubsub 
v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= -cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= -cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= -cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= -cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= +cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= +cloud.google.com/go/auth v0.9.4 h1:DxF7imbEbiFu9+zdKC6cKBko1e8XeJnipNqIbWZ+kDI= +cloud.google.com/go/auth v0.9.4/go.mod h1:SHia8n6//Ya940F1rLimhJCjjx7KE17t0ctFEci3HkA= +cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= +cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= +cloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs= +cloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10= +cloud.google.com/go/iam v1.2.0 h1:kZKMKVNk/IsSSc/udOb83K0hL/Yh/Gcqpz+oAkoIFN8= +cloud.google.com/go/iam v1.2.0/go.mod h1:zITGuWgsLZxd8OwAlX+eMFgZDXzBm7icj1PVTYG766Q= +cloud.google.com/go/logging v1.11.0 h1:v3ktVzXMV7CwHq1MBF65wcqLMA7i+z3YxbUsoK7mOKs= +cloud.google.com/go/logging v1.11.0/go.mod h1:5LDiJC/RxTt+fHc1LAt20R9TKiUTReDg6RuuFOZ67+A= +cloud.google.com/go/longrunning v0.6.1 h1:lOLTFxYpr8hcRtcwWir5ITh1PAKUD/sG2lKrTSYjyMc= +cloud.google.com/go/longrunning v0.6.1/go.mod h1:nHISoOZpBcmlwbJmiVk5oDRz0qG/ZxPynEGs1iZ79s0= +cloud.google.com/go/monitoring v1.21.1 h1:zWtbIoBMnU5LP9A/fz8LmWMGHpk4skdfeiaa66QdFGc= +cloud.google.com/go/monitoring v1.21.1/go.mod 
h1:Rj++LKrlht9uBi8+Eb530dIrzG/cU/lB8mt+lbeFK1c= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/99designs/gqlgen v0.15.1 h1:48bRXecwlCNTa/n2bMSp2rQsXNxwZ54QHbiULNf78ec= -github.com/99designs/gqlgen v0.15.1/go.mod h1:nbeSjFkqphIqpZsYe1ULVz0yfH8hjpJdJIQoX/e0G2I= -github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= -github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible h1:fcYLmCpyNYRnvJbPerq7U0hS+6+I79yEDJBqVNcqUzU= +github.com/Azure/azure-sdk-for-go v68.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.11.24/go.mod h1:G6kyRlFnTuSbEYkQGawPfsCswgme4iYf6rfSKUDzbCc= -github.com/Azure/go-autorest/autorest v0.11.28 h1:ndAExarwr5Y+GaHE6VCaY1kyS/HwwGGyuimVhWsHOEM= github.com/Azure/go-autorest/autorest v0.11.28/go.mod h1:MrkzG3Y3AH668QyF9KRk5neJnGgmhQ6krbhR8Q5eMvA= +github.com/Azure/go-autorest/autorest v0.11.29 h1:I4+HL/JDvErx2LjyzaVxllw2lRDB5/BT2Bm4g20iqYw= +github.com/Azure/go-autorest/autorest v0.11.29/go.mod h1:ZtEzC4Jy2JDrZLxvWs8LrBWEBycl1hbT1eknI8MtfAs= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= -github.com/Azure/go-autorest/autorest/adal v0.9.21 h1:jjQnVFXPfekaqb8vIsv2G1lxshoW+oGv4MDlhRtnYZk= -github.com/Azure/go-autorest/autorest/adal v0.9.21/go.mod h1:zua7mBUaCc5YnSLKYgGJR/w5ePdMDA6H56upLsHzA9U= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.11 h1:P6bYXFoao05z5uhOQzbC3Qd8JqF3jUoocoTeIxkp2cA= -github.com/Azure/go-autorest/autorest/azure/auth v0.5.11/go.mod 
h1:84w/uV8E37feW2NCJ08uT9VBfjfUHpgLVnG2InYD6cg= -github.com/Azure/go-autorest/autorest/azure/cli v0.4.5/go.mod h1:ADQAXrkgm7acgWVUNamOgh8YNrv4p27l3Wc55oVfpzg= +github.com/Azure/go-autorest/autorest/adal v0.9.22/go.mod h1:XuAbAEUv2Tta//+voMI038TrJBqjKam0me7qR+L8Cmk= +github.com/Azure/go-autorest/autorest/adal v0.9.24 h1:BHZfgGsGwdkHDyZdtQRQk1WeUdW0m2WPAwuHZwUi5i4= +github.com/Azure/go-autorest/autorest/adal v0.9.24/go.mod h1:7T1+g0PYFmACYW5LlG2fcoPiPlFHjClyRGL7dRlP5c8= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.13 h1:Ov8avRZi2vmrE2JcXw+tu5K/yB41r7xK9GZDiBF7NdM= +github.com/Azure/go-autorest/autorest/azure/auth v0.5.13/go.mod h1:5BAVfWLWXihP47vYrPuBKKf4cS0bXI+KM9Qx6ETDJYo= github.com/Azure/go-autorest/autorest/azure/cli v0.4.6 h1:w77/uPk80ZET2F+AfQExZyEWtn+0Rk/uw17m9fv5Ajc= github.com/Azure/go-autorest/autorest/azure/cli v0.4.6/go.mod h1:piCfgPho7BiIDdEQ1+g4VmKyD5y+p/XtSNqE6Hc4QD0= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= @@ -75,255 +45,193 @@ github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/BurntSushi/toml v1.2.1 h1:9F2/+DoOYIOksmaJFPw1tGFy1eDnIJXg+UHjuD8lTak= -github.com/BurntSushi/toml v1.2.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= -github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= -github.com/Masterminds/semver/v3 v3.2.0/go.mod 
h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= -github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= +github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0= +github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/OpenNebula/one/src/oca/go/src/goca v0.0.0-20240905143811-b2ab5b7c9c14 h1:9uqKGeUuok/9Q5B5DzDM+bVgyEZVruzaflXw8WiaZ+Y= +github.com/OpenNebula/one/src/oca/go/src/goca v0.0.0-20240905143811-b2ab5b7c9c14/go.mod h1:dvAwZi1Aol7eu6BENzHtl8ztGBkacB9t/fJj+fYk+Xg= github.com/PaesslerAG/gval v1.0.0/go.mod h1:y/nm5yEyTeX6av0OfKJNp9rBNj2XrGhAf5+v24IBN1I= -github.com/PaesslerAG/gval v1.2.1 h1:Ggwtej1xCyt1994VuDCSjycybIDo3duDCDghK/xc/A0= -github.com/PaesslerAG/gval v1.2.1/go.mod h1:XRFLwvmkTEdYziLdaCeCa5ImcGVrfQbeNUbVR+C6xac= +github.com/PaesslerAG/gval v1.2.2 h1:Y7iBzhgE09IGTt5QgGQ2IdaYYYOU134YGHBThD+wm9E= +github.com/PaesslerAG/gval v1.2.2/go.mod h1:XRFLwvmkTEdYziLdaCeCa5ImcGVrfQbeNUbVR+C6xac= github.com/PaesslerAG/jsonpath v0.1.0/go.mod h1:4BzmtoM/PI8fPO4aQGIusjGxGir2BzcV0grWtFzq1Y8= github.com/PaesslerAG/jsonpath v0.1.1 h1:c1/AToHQMVsduPAa4Vh6xp2U0evy4t8SWp8imEsylIk= github.com/PaesslerAG/jsonpath v0.1.1/go.mod h1:lVboNxFGal/VwW6d9JzIy56bUsYAP6tH/x80vjnCseY= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= -github.com/agnivade/levenshtein v1.0.1/go.mod 
h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= -github.com/agnivade/levenshtein v1.1.0 h1:n6qGwyHG61v3ABce1rPVZklEYRT8NFpCMrpZdBUbYGM= -github.com/agnivade/levenshtein v1.1.0/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo= -github.com/ajeddeloh/go-json v0.0.0-20200220154158-5ae607161559 h1:4SPQljF/GJ8Q+QlCWMWxRBepub4DresnOm4eI2ebFGc= -github.com/ajeddeloh/go-json v0.0.0-20200220154158-5ae607161559/go.mod h1:otnto4/Icqn88WCcM4bhIJNSgsh9VLBuspyyCfvof9c= -github.com/ajeddeloh/yaml v0.0.0-20170912190910-6b94386aeefd h1:NlKlOv3aVJ5ODMC0JWPvddw05KENkL3cZttIuu8kJRo= -github.com/ajeddeloh/yaml v0.0.0-20170912190910-6b94386aeefd/go.mod h1:idhzw68Q7v4j+rQ2AGyq3OlZW2Jij9mdmGA4/Sk6J0E= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= -github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= -github.com/aliyun/alibaba-cloud-sdk-go v1.62.112 h1:49S6VGQeYyk2KIw85CHbAVaVF2lSgi8xrWDwSw0GCBM= -github.com/aliyun/alibaba-cloud-sdk-go v1.62.112/go.mod h1:Api2AkmMgGaSUAhmk76oaFObkoeCPc/bKAqcyplPODs= -github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= -github.com/antihax/optional v1.0.0/go.mod 
h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= +github.com/aliyun/alibaba-cloud-sdk-go v1.63.15 h1:r2uwBUQhLhcPzaWz9tRJqc8MjYwHb+oF2+Q6467BF14= +github.com/aliyun/alibaba-cloud-sdk-go v1.63.15/go.mod h1:SOSDHfe1kX91v3W5QiBsWSLqeLxImobbMX1mxrFHsVQ= github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de h1:FxWPpzIjnTlhPwqqXc4/vE0f7GvRjuAsbW+HOIe8KnA= github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de/go.mod h1:DCaWoUhZrYW9p1lxo/cm8EmUOOzAPSEZNGF2DK1dJgw= -github.com/arbovm/levenshtein v0.0.0-20160628152529-48b4e1c0c4d0/go.mod h1:t2tdKJDJF9BV14lnkjHmOQgcvEKgtqs5a1N3LNdJhGE= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/aws/aws-sdk-go v1.42.23/go.mod h1:gyRszuZ/icHmHAVE4gc/r+cfCmhA1AD+vqfWbgI+eHs= -github.com/aws/aws-sdk-go-v2 v1.17.3 h1:shN7NlnVzvDUgPQ+1rLMSxY8OWRNDRYtiqe0p/PgrhY= -github.com/aws/aws-sdk-go-v2 v1.17.3/go.mod h1:uzbQtefpm44goOPmdKyAlXSNcwlRgF3ePWVW6EtJvvw= -github.com/aws/aws-sdk-go-v2/config v1.18.7 h1:V94lTcix6jouwmAsgQMAEBozVAGJMFhVj+6/++xfe3E= -github.com/aws/aws-sdk-go-v2/config v1.18.7/go.mod h1:OZYsyHFL5PB9UpyS78NElgKs11qI/B5KJau2XOJDXHA= -github.com/aws/aws-sdk-go-v2/credentials v1.13.7 h1:qUUcNS5Z1092XBFT66IJM7mYkMwgZ8fcC8YDIbEwXck= -github.com/aws/aws-sdk-go-v2/credentials v1.13.7/go.mod h1:AdCcbZXHQCjJh6NaH3pFaw8LUeBFn5+88BZGMVGuBT8= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21 h1:j9wi1kQ8b+e0FBVHxCqCGo4kxDU175hoDHcWAi0sauU= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.12.21/go.mod h1:ugwW57Z5Z48bpvUyZuaPy4Kv+vEfJWnIrky7RmkBvJg= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27 h1:I3cakv2Uy1vNmmhRQmFptYDxOvBnwCdNwyw63N0RaRU= 
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.1.27/go.mod h1:a1/UpzeyBBerajpnP5nGZa9mGzsBn5cOKxm6NWQsvoI= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21 h1:5NbbMrIzmUn/TXFqAle6mgrH5m9cOvMLRGL7pnG8tRE= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.4.21/go.mod h1:+Gxn8jYn5k9ebfHEqlhrMirFjSW0v0C9fI+KN5vk2kE= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.28 h1:KeTxcGdNnQudb46oOl4d90f2I33DF/c6q3RnZAmvQdQ= -github.com/aws/aws-sdk-go-v2/internal/ini v1.3.28/go.mod h1:yRZVr/iT0AqyHeep00SZ4YfBAKojXz08w3XMBscdi0c= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.77.0 h1:m6HYlpZlTWb9vHuuRHpWRieqPHWlS0mvQ90OJNrG/Nk= -github.com/aws/aws-sdk-go-v2/service/ec2 v1.77.0/go.mod h1:mV0E7631M1eXdB+tlGFIw6JxfsC7Pz7+7Aw15oLVhZw= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21 h1:5C6XgTViSb0bunmU57b3CT+MhxULqHH2721FVA+/kDM= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.9.21/go.mod h1:lRToEJsn+DRA9lW4O9L9+/3hjTkUzlzyzHqn8MTds5k= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.28 h1:gItLq3zBYyRDPmqAClgzTH8PBjDQGeyptYGHIwtYYNA= -github.com/aws/aws-sdk-go-v2/service/sso v1.11.28/go.mod h1:wo/B7uUm/7zw/dWhBJ4FXuw1sySU5lyIhVg1Bu2yL9A= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.11 h1:KCacyVSs/wlcPGx37hcbT3IGYO8P8Jx+TgSDhAXtQMY= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.13.11/go.mod h1:TZSH7xLO7+phDtViY/KUp9WGCJMQkLJ/VpgkTFd5gh8= -github.com/aws/aws-sdk-go-v2/service/sts v1.17.7 h1:9Mtq1KM6nD8/+HStvWcvYnixJ5N85DX+P+OY3kI3W2k= -github.com/aws/aws-sdk-go-v2/service/sts v1.17.7/go.mod h1:+lGbb3+1ugwKrNTWcf2RT05Xmp543B06zDFTwiTLp7I= -github.com/aws/smithy-go v1.13.5 h1:hgz0X/DX0dGqTYpGALqXJoRKRj5oQ7150i5FdTePzO8= -github.com/aws/smithy-go v1.13.5/go.mod h1:Tg+OJXh4MB2R/uN61Ko2f6hTZwB/ZYGOtib8J3gBHzA= -github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= -github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= -github.com/beorn7/perks 
v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= +github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= +github.com/aws/aws-sdk-go-v2 v1.30.5 h1:mWSRTwQAb0aLE17dSzztCVJWI9+cRMgqebndjwDyK0g= +github.com/aws/aws-sdk-go-v2 v1.30.5/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= +github.com/aws/aws-sdk-go-v2/config v1.27.33 h1:Nof9o/MsmH4oa0s2q9a0k7tMz5x/Yj5k06lDODWz3BU= +github.com/aws/aws-sdk-go-v2/config v1.27.33/go.mod h1:kEqdYzRb8dd8Sy2pOdEbExTTF5v7ozEXX0McgPE7xks= +github.com/aws/aws-sdk-go-v2/credentials v1.17.32 h1:7Cxhp/BnT2RcGy4VisJ9miUPecY+lyE9I8JvcZofn9I= +github.com/aws/aws-sdk-go-v2/credentials v1.17.32/go.mod h1:P5/QMF3/DCHbXGEGkdbilXHsyTBX5D3HSwcrSc9p20I= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13 h1:pfQ2sqNpMVK6xz2RbqLEL0GH87JOwSxPV2rzm8Zsb74= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.13/go.mod h1:NG7RXPUlqfsCLLFfi0+IpKN4sCB9D9fw/qTaSB+xRoU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17 h1:pI7Bzt0BJtYA0N/JEC6B8fJ4RBrEMi1LBrkMdFYNSnQ= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.17/go.mod h1:Dh5zzJYMtxfIjYW+/evjQ8uj2OyR/ve2KROHGHlSFqE= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17 h1:Mqr/V5gvrhA2gvgnF42Zh5iMiQNcOYthFYwCyrnuWlc= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.17/go.mod h1:aLJpZlCmjE+V+KtN1q1uyZkfnUWpQGpbsn89XPKyzfU= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= +github.com/aws/aws-sdk-go-v2/service/ec2 v1.177.3 h1:dqdCh1M8h+j8OGNUpxTs7eBPFr6lOdLpdlE6IPLLSq4= 
+github.com/aws/aws-sdk-go-v2/service/ec2 v1.177.3/go.mod h1:TFSALWR7Xs7+KyMM87ZAYxncKFBvzEt2rpK/BJCH2ps= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 h1:KypMCbLPPHEmf9DgMGw51jMj77VfGPAN2Kv4cfhlfgI= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19 h1:rfprUlsdzgl7ZL2KlXiUAoJnI/VxfHCvDFr2QDFj6u4= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.19/go.mod h1:SCWkEdRq8/7EK60NcvvQ6NXKuTcchAD4ROAsC37VEZE= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.7 h1:pIaGg+08llrP7Q5aiz9ICWbY8cqhTkyy+0SHvfzQpTc= +github.com/aws/aws-sdk-go-v2/service/sso v1.22.7/go.mod h1:eEygMHnTKH/3kNp9Jr1n3PdejuSNcgwLe1dWgQtO0VQ= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7 h1:/Cfdu0XV3mONYKaOt1Gr0k1KvQzkzPyiKUdlWJqy+J4= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.7/go.mod h1:bCbAxKDqNvkHxRaIMnyVPXPo+OaPRwvmgzMxbz1VKSA= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.7 h1:NKTa1eqZYw8tiHSRGpP0VtTdub/8KNk8sDkNPFaOKDE= +github.com/aws/aws-sdk-go-v2/service/sts v1.30.7/go.mod h1:NXi1dIAGteSaRLqYgarlhP/Ij0cFT+qmCwiJqWh/U5o= +github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4= +github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= -github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= -github.com/bnkamalesh/webgo/v4 v4.1.11/go.mod h1:taIAonQTzao8G5rnB22WgKmQuIOWHpQ0n/YLAidBXlM= -github.com/bnkamalesh/webgo/v6 v6.2.2/go.mod h1:2Y+dEdTp1xC/ra+3PAVZV6hh4sCI+iPK7mcHt+t9bfM= -github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0= 
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= -github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/coreos/container-linux-config-transpiler 
v0.9.0 h1:UBGpT8qWqzi48hNLrzMAgAUNJsR0LW8Gk5/dR/caI8U= -github.com/coreos/container-linux-config-transpiler v0.9.0/go.mod h1:SlcxXZQ2c42knj8pezMiQsM1f+ADxFMjGetuMKR/YSQ= -github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= -github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/ignition v0.35.0 h1:UFodoYq1mOPrbEjtxIsZbThcDyQwAI1owczRDqWmKkQ= -github.com/coreos/ignition v0.35.0/go.mod h1:WJQapxzEn9DE0ryxsGvm8QnBajm/XsS/PkrDqSpz+bA= -github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 h1:6xNmx7iTtyBRev0+D/Tv1FZd4SCg8axKApyNyRsAt/w= +github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creasty/defaults v1.5.2 h1:/VfB6uxpyp6h0fr7SPp7n8WJBoV8jfxQXPCnkVSjyls= -github.com/creasty/defaults v1.5.2/go.mod h1:FPZ+Y0WNrbqOVw+c6av63eyHUAl6pMHZwqLPvXUZGfY= -github.com/dave/dst v0.26.2/go.mod h1:UMDJuIRPfyUCC78eFuB+SV/WI8oDeyFDvM/JR6NI3IU= -github.com/dave/gopackages v0.0.0-20170318123100-46e7023ec56e/go.mod h1:i00+b/gKdIDIxuLDFob7ustLAVqhsZRk2qVZrArELGQ= -github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg= -github.com/dave/kerr v0.0.0-20170318121727-bc25dd6abe8e/go.mod h1:qZqlPyPvfsDJt+3wHJ1EvSXDuVjFTK0j2p/ca+gtsb8= -github.com/dave/rebecca v0.9.1/go.mod 
h1:N6XYdMD/OKw3lkF3ywh8Z6wPGuwNFDNtWYEMFWEmXBA= +github.com/creasty/defaults v1.6.0 h1:ltuE9cfphUtlrBeomuu8PEyISTXnxqkBIoQfXgv7BSc= +github.com/creasty/defaults v1.6.0/go.mod h1:iGzKe6pbEHnpMPtfDXZEr0NVxWnPTjb1bbDy08fPzYM= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc= -github.com/decred/dcrd/dcrec/secp256k1/v4 v4.0.0-20210816181553-5444fa50b93d/go.mod h1:tmAIfUFEirG/Y8jhZ9M+h36obRZAk/1fcSpXwAVlfqE= -github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= -github.com/digitalocean/godo v1.93.0 h1:N0K9z2yssZVP7nBHQ32P1Wemd5yeiJdH4ROg+7ySRxY= -github.com/digitalocean/godo v1.93.0/go.mod h1:NRpFznZFvhHjBoqZAaOD3khVzsJ3EibzKqFL4R60dmA= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/digitalocean/godo v1.124.0 h1:qroI1QdtcgnXF/pefq9blZRbXqBw1Ry/aHh2pnu/328= +github.com/digitalocean/godo v1.124.0/go.mod h1:WQVH83OHUy6gC4gXpEVQKtxTd4L5oCp+5OialidkPLY= github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi/U= github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= -github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= -github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= -github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= 
+github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ= -github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= +github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= -github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= +github.com/envoyproxy/go-control-plane 
v0.14.0 h1:hbG2kr4RuFj222B6+7T83thSPqLjwBIfQawTkC++2HA= +github.com/envoyproxy/go-control-plane/envoy v1.36.0 h1:yg/JjO5E7ubRyKX3m07GF3reDNEnfOboJ0QySbH736g= +github.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9OtKeEkQLTb+Lkz0k8v9W0Oxsv98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/envoyproxy/protoc-gen-validate v1.3.0 h1:TvGH1wof4H33rezVKWSpqKz5NXWg5VPuZ0uONDT6eb4= +github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA= +github.com/equinix/equinix-sdk-go v0.46.0 h1:ldQo4GtXNr+0XsThQJf/pUdx5wcLFe9QpLFtAwonqH8= +github.com/equinix/equinix-sdk-go v0.46.0/go.mod h1:hEb3XLaedz7xhl/dpPIS6eOIiXNPeqNiVoyDrT6paIg= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= -github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= -github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= -github.com/flowstack/go-jsonschema v0.1.1/go.mod h1:yL7fNggx1o8rm9RlgXv7hTBWxdBM0rVwpMwimd3F3N0= -github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/evanphx/json-patch v5.9.0+incompatible h1:fBXyNpNMuTTDdquAq/uisOr2lShz4oaXpDTX2bLe7ls= +github.com/evanphx/json-patch v5.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/fatih/color v1.16.0 
h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= +github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fullstorydev/grpcurl v1.8.7 h1:xJWosq3BQovQ4QrdPO72OrPiWuGgEsxY8ldYsJbPrqI= +github.com/fullstorydev/grpcurl v1.8.7/go.mod h1:pVtM4qe3CMoLaIzYS8uvTuDj2jVYmXqMUkZeijnXp/E= +github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= +github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.7.7/go.mod h1:axIBovoeJpVj8S3BwE0uPMTeReE4+AfFtqpqaZ1qq1U= -github.com/go-chi/chi v1.5.4 h1:QHdzF2szwjqVV4wmByUnTcsbIg7UGaQ0tPF2t5GcAIs= -github.com/go-chi/chi v1.5.4/go.mod h1:uaf8YgoFazUOkPBG7fxPftUylNumIev9awIWOENIuEg= -github.com/go-chi/cors v1.2.0/go.mod 
h1:sSbTewc+6wYHBBCW7ytsFSn836hqM7JxpglAy2Vzc58= -github.com/go-chi/render v1.0.1 h1:4/5tis2cKaNdnv9zFLfXzcquC9HbeZgCnxGnKrltBS8= -github.com/go-chi/render v1.0.1/go.mod h1:pq4Rr7HbnsdaeHagklXub+p6Wd16Af5l9koip1OvJns= +github.com/gkampitakis/ciinfo v0.3.2 h1:JcuOPk8ZU7nZQjdUhctuhQofk7BGHuIy0c9Ez8BNhXs= +github.com/gkampitakis/ciinfo v0.3.2/go.mod h1:1NIwaOcFChN4fa/B0hEBdAb6npDlFL8Bwx4dfRLRqAo= +github.com/gkampitakis/go-diff v1.3.2 h1:Qyn0J9XJSDTgnsgHRdz9Zp24RaJeKMUHg2+PDZZdC4M= +github.com/gkampitakis/go-diff v1.3.2/go.mod h1:LLgOrpqleQe26cte8s36HTWcTmMEur6OPYerdAAS9tk= +github.com/gkampitakis/go-snaps v0.5.15 h1:amyJrvM1D33cPHwVrjo9jQxX8g/7E2wYdZ+01KS3zGE= +github.com/gkampitakis/go-snaps v0.5.15/go.mod h1:HNpx/9GoKisdhw9AFOBT1N7DBs9DiHo/hGheFGBZ+mc= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= 
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= -github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= -github.com/go-logr/zapr v1.2.3/go.mod h1:eIauM6P8qSvTw5o2ez6UEAfGjQKrxQTl5EoK+Qa2oG4= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU= +github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo= +github.com/go-openapi/errors v0.22.0 h1:c4xY/OLxUBSTiepAg3j/MHuAv5mJhnf53LLMWFB+u/w= +github.com/go-openapi/errors v0.22.0/go.mod h1:J3DmZScxCDufmIMsdOuDHxJbdOGC0xtUynjIx092vXE= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/jsonreference 
v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= -github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco= +github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs= +github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY= +github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk= +github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c= +github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= 
+github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58= +github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ= github.com/go-playground/locales v0.14.0 h1:u50s323jtVGugKlcYeyzC0etD1HifMjqmJqb8WugfUU= github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= github.com/go-playground/universal-translator v0.18.0 h1:82dyy6p4OuJq4/CByFNOn/jYrnRPArHwAcmLoJZxyho= github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA= -github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= github.com/go-playground/validator/v10 v10.10.1 h1:uA0+amWMiglNZKZ9FJRKUAe9U3RX91eVn1JYXMWt7ig= github.com/go-playground/validator/v10 v10.10.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4dMGDBiPU55YFDl0WbKdWU= -github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY= -github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-resty/resty/v2 v2.14.0 h1:/rhkzsAqGQkozwfKS5aFAbb6TyKd3zyFRWcdRXLPCAU= +github.com/go-resty/resty/v2 v2.14.0/go.mod h1:IW6mekUOsElt9C7oWr0XRt9BNSD6D5rr9mhk6NjmNHg= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= -github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= -github.com/go-test/deep v1.0.8/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= -github.com/goccy/go-json v0.9.4/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= 
+github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-test/deep v1.1.0 h1:WOcxcdHcvdgThNXjw0t76K42FXTU7HpNQWHpA2HHNlg= +github.com/go-test/deep v1.1.0/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw= +github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptGaCkuDUx6wNykzzlUixGxvkme+H/lnzb+A= -github.com/golang-jwt/jwt v3.2.2+incompatible/go.mod h1:8pz2t5EyA70fFQQSrl6XZXzqecmYZeUEB8OUGHkxJ+I= github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= github.com/golang-jwt/jwt/v4 v4.2.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg= -github.com/golang-jwt/jwt/v4 v4.4.3 h1:Hxl6lhQFj4AnOX6MLrsCb/+7tCj7DxP7VA+2rDIq5AU= -github.com/golang-jwt/jwt/v4 v4.4.3/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI= +github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= -github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -333,799 +241,494 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod 
h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= -github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= -github.com/google/gnostic v0.6.9 h1:ZK/5VhkoX835RikCHpSUJV9a+S3e1zLh59YnyWeBW+0= -github.com/google/gnostic v0.6.9/go.mod h1:Nm8234We1lq6iB9OmlgNv3nH91XLLVZHCDayfA3xq+E= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= +github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/pprof v0.0.0-20181127221834-b4f47329b966/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod 
h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= -github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8= +github.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= +github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM= +github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.2.1 h1:RY7tHKZcRlk788d5WSo/e83gOyyy742E8GSs771ySpg= -github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= 
-github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= -github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= -github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= +github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= +github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= -github.com/gophercloud/gophercloud v1.1.1 h1:MuGyqbSxiuVBqkPZ3+Nhbytk1xZxhmfCB2Rg1cJWFWM= -github.com/gophercloud/gophercloud v1.1.1/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= +github.com/gophercloud/gophercloud v1.14.0 h1:Bt9zQDhPrbd4qX7EILGmy+i7GP35cc+AAL2+wIJpUE8= +github.com/gophercloud/gophercloud v1.14.0/go.mod h1:aAVqcocTSXh2vYFZ1JTvx4EQmfgzxRcNupUfxZbBNDM= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= -github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod 
h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= -github.com/hashicorp/go-retryablehttp v0.7.2 h1:AcYqCvkpalPnPF2pn0KamgwamS42TqUDDYFRKq/RAd0= -github.com/hashicorp/go-retryablehttp v0.7.2/go.mod h1:Jy/gPYAdjqffZ/yFGCFV2doI5wjtH1ewM9u8iYVjtX8= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= +github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= +github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/heptiolabs/healthcheck v0.0.0-20211123025425-613501dd5deb h1:tsEKRC3PU9rMw18w/uAptoijhgG4EvlA5kfJPtwrMDk= github.com/heptiolabs/healthcheck v0.0.0-20211123025425-613501dd5deb/go.mod h1:NtmN9h8vrTveVQRLHcX2HQ5wIPBDCsZ351TGbZWgg38= 
-github.com/hetznercloud/hcloud-go v1.39.0 h1:RUlzI458nGnPR6dlcZlrsGXYC1hQlFbKdm8tVtEQQB0= -github.com/hetznercloud/hcloud-go v1.39.0/go.mod h1:mepQwR6va27S3UQthaEPGS86jtzSY9xWL1e9dyxXpgA= -github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= +github.com/hetznercloud/hcloud-go/v2 v2.13.1 h1:jq0GP4QaYE5d8xR/Zw17s9qoaESRJMXfGmtD1a/qckQ= +github.com/hetznercloud/hcloud-go/v2 v2.13.1/go.mod h1:dhix40Br3fDiBhwaSG/zgaYOFFddpfBm/6R1Zz0IiF0= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= -github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= -github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= -github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jarcoal/httpmock v1.3.1 
h1:iUx3whfZWVf3jT01hQTO/Eo5sAYtB2/rqaUuOtpInww= +github.com/jarcoal/httpmock v1.3.1/go.mod h1:3yb8rc4BI7TCBhFY8ng0gjuLKJNquuDNiPaZjnENuYg= +github.com/jhump/protoreflect v1.14.0 h1:MBbQK392K3u8NTLbKOCIi3XdI+y+c6yt5oMq0X3xviw= +github.com/jhump/protoreflect v1.14.0/go.mod h1:JytZfP5d0r8pVNLZvai7U/MCuTWITgrI4tTg7puQFKI= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/joshdk/go-junit v1.0.0 h1:S86cUKIdwBHWwA6xCmFlf3RTLfVXYQfvanM5Uh+K6GE= +github.com/joshdk/go-junit v1.0.0/go.mod h1:TiiV0PqkaNfFXjEiyjWM3XXrhVyCa1K4Zfga6W52ung= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= -github.com/jstemmer/go-junit-report v0.9.1/go.mod 
h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= -github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= -github.com/keploy/go-sdk v0.4.3 h1:dCsmfANlZH94It+JKWx8/JEEC6dn8W7KIRRKRZwCPZQ= -github.com/keploy/go-sdk v0.4.3/go.mod h1:tn62gQ8a/AD7mY51DvQfhudiBPTlD+w3XtXemDcbON4= -github.com/kevinmbeaulieu/eq-go v1.0.0/go.mod h1:G3S8ajA56gKBZm4UB9AOyoOS37JO3roToPzKNM8dtdM= +github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= +github.com/k0kubun/pp/v3 v3.1.0 h1:ifxtqJkRZhw3h554/z/8zm6AAbyO4LLKDlA5eV+9O8Q= +github.com/k0kubun/pp/v3 v3.1.0/go.mod h1:vIrP5CF0n78pKHm2Ku6GVerpZBJvscg48WepUYEk2gw= +github.com/keploy/go-sdk v0.9.0 h1:kpSNcCTDdELsa1gWyhoD9oV57SgSMbG/wq6Cjp4y7cY= +github.com/keploy/go-sdk v0.9.0/go.mod h1:vNKXoFd2MaK+Gly/K6XeP1Hs9dP834C74szH+vtBPwg= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/klauspost/compress 
v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/kolo/xmlrpc v0.0.0-20190717152603-07c4ee3fd181/go.mod h1:o03bZfuBwAXHetKXuInt4S7omeXUu62/A845kiycsSQ= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b h1:udzkj9S/zlT5X367kqJis0QP7YMxobob6zhzq6Yre00= +github.com/kolo/xmlrpc v0.0.0-20220921171641-a4b6fa1dd06b/go.mod h1:pcaDhQK0/NJZEvtCO0qQPPropqV0sJOJ6YW7X+9kRwM= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/labstack/echo/v4 v4.6.1/go.mod h1:RnjgMWNDB9g/HucVWhQYNQP9PvbYf6adqftqryo7s9k= -github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w= github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY= -github.com/lestrrat-go/backoff/v2 v2.0.8/go.mod 
h1:rHP/q/r9aT27n24JQLa7JhSQZCKBBOiM/uP402WwN8Y= -github.com/lestrrat-go/blackmagic v1.0.0/go.mod h1:TNgH//0vYSs8VXDCfkZLgIrVTTXQELZffUV0tz3MtdQ= -github.com/lestrrat-go/httpcc v1.0.0/go.mod h1:tGS/u00Vh5N6FHNkExqGGNId8e0Big+++0Gf8MBnAvE= -github.com/lestrrat-go/iter v1.0.1/go.mod h1:zIdgO1mRKhn8l9vrZJZz9TUMMFbQbLeTsbqPDrJ/OJc= -github.com/lestrrat-go/jwx v1.2.20/go.mod h1:tLE1XszaFgd7zaS5wHe4NxA+XVhu7xgdRvDpNyi3kNM= -github.com/lestrrat-go/option v1.0.0/go.mod h1:5ZHFbivi4xwXxhxY9XHDe2FHo6/Z7WWmtT7T5nBBp3I= -github.com/linode/linodego v1.10.0 h1:nH/BffTBQEZr48q/9UszuB5dhWpGKuVuJs/uE9Nweuc= -github.com/linode/linodego v1.10.0/go.mod h1:lRWOfS3HmRV63U6Rt+llKziobIwpySYGlCdTIHoIgps= -github.com/logrusorgru/aurora/v3 v3.0.0/go.mod h1:vsR12bk5grlLvLXAYrBsb5Oc/N+LxAlxggSjiwMnCUc= +github.com/linode/linodego v1.40.0 h1:7ESY0PwK94hoggoCtIroT1Xk6b1flrFBNZ6KwqbTqlI= +github.com/linode/linodego v1.40.0/go.mod h1:NsUw4l8QrLdIofRg1NYFBbW5ZERnmbZykVBszPZLORM= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/matryer/moq v0.2.3/go.mod h1:9RtPYjTnH1bSBIkpvtHkFN7nbWAnO7oRpdJkEIn6UtE= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= 
-github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/maruel/natural v1.1.1 h1:Hja7XhhmvEFhcByqDoHz9QZbkWey+COd9xWfCfn1ioo= +github.com/maruel/natural v1.1.1/go.mod h1:v+Rfd79xlw1AgVBjbO0BEQmptqb5HvL/k9GRHB7ZKEg= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= -github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= +github.com/mfridman/tparse v0.18.0 h1:wh6dzOKaIwkUGyKgOntDW4liXSo37qg5AXbIhkMV3vE= +github.com/mfridman/tparse v0.18.0/go.mod h1:gEvqZTuCgEhPbYk/2lS3Kcxg1GmTxxU7kTC8DvP0i/A= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.2.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk 
v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8= +github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= 
-github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nutanix-cloud-native/prism-go-client v0.3.4 h1:bHY3VPrHHYnbRtkpGaKK+2ZmvUjNVRC55CYZbXIfnOk= -github.com/nutanix-cloud-native/prism-go-client v0.3.4/go.mod h1:tTIH02E6o6AWSShr98QChoxuZl+jBhkXFixom9+fd1Y= +github.com/nutanix-cloud-native/prism-go-client v0.5.1 h1:ykiXPCILzEMORHz7XvI8KXNomChsdLIpOAlT/YqBCmo= +github.com/nutanix-cloud-native/prism-go-client v0.5.1/go.mod h1:QhLX+sEep0cStzHVYU6mPgIlnA8U3DySskagrbDprRk= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= -github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= -github.com/onsi/ginkgo/v2 
v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= -github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= -github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0= -github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= -github.com/onsi/ginkgo/v2 v2.6.0 h1:9t9b9vRUbFq3C4qKFCGkVuq/fIHji802N1nrtkh1mNc= -github.com/onsi/ginkgo/v2 v2.6.0/go.mod h1:63DOGlLAH8+REH8jUGdL3YpCpu7JODesutUjdENfUAc= +github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns= +github.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= -github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= -github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= -github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= -github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM= -github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= -github.com/onsi/gomega v1.24.1 h1:KORJXNNTzJXzu4ScJWssJfJMnJ+2QJqhoQSRwNlze9E= -github.com/onsi/gomega v1.24.1/go.mod h1:3AOiACssS3/MajrniINInwbfOOtfZvplPzuRSmvt1jM= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/openshift/api v0.0.0-20211217221424-8779abfbd571 
h1:+ShYlGoPriGahTTFTjQ0RtNXW0srxDodk2STdc238Rk= -github.com/openshift/api v0.0.0-20211217221424-8779abfbd571/go.mod h1:F/eU6jgr6Q2VhMu1mSpMmygxAELd7+BUxs3NHZ25jV4= -github.com/openshift/build-machinery-go v0.0.0-20211213093930-7e33a7eb4ce3/go.mod h1:b1BuldmJlbA/xYtdZvKi+7j5YGB44qJUJDZ9zwiNCfE= +github.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A= +github.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k= github.com/openshift/custom-resource-status v1.1.2 h1:C3DL44LEbvlbItfd8mT5jWrqPfHnSOQoQf/sypqA6A4= github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b h1:FfH+VrHHk6Lxt9HdVS0PXzSXFyS2NbZKXv33FYPol0A= github.com/opentracing/opentracing-go v1.2.1-0.20220228012449-10b1cf09e00b/go.mod h1:AC62GU6hc0BrNm+9RK9VSiwa/EUe1bkIeFORAMcHvJU= -github.com/packethost/packngo v0.29.0 h1:gRIhciVZQ/zLNrIdIdbOUyB/Tw5IgoaXyhP4bvE+D2s= -github.com/packethost/packngo v0.29.0/go.mod h1:/UHguFdPs6Lf6FOkkSEPnRY5tgS0fsVM+Zv/bvBrmt0= -github.com/packethost/pkg v0.0.0-20211110202003-387414657e83 h1:uhBvTY/Hnm7rLz7gPkA83JU4EQf4A2YZUBry6+Gyn9g= -github.com/packethost/pkg v0.0.0-20211110202003-387414657e83/go.mod h1:iF7Mj6XXQ6O+bCfrBCrsJrIGxG7ptrZwb0bW91+wzm8= github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ= github.com/pborman/uuid v1.2.1 h1:+ZZIw58t/ozdjRaXh/3awHfmWRbzYxJoAdNJxe/3pvw= github.com/pborman/uuid v1.2.1/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/peterhellberg/link v1.2.0 h1:UA5pg3Gp/E0F2WdX7GERiNrPQrM1K6CVJUUWfHa4t6c= 
github.com/peterhellberg/link v1.2.0/go.mod h1:gYfAh+oJgQu2SrZHg5hROVRQe1ICoK0/HHJTcE0edxc= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o= +github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.31.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI= -github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk= +github.com/prometheus/client_model 
v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE= +github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs= +github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA= +github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg= +github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= -github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= -github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rollbar/rollbar-go v1.4.2/go.mod h1:kLQ9gP3WCRGrvJmF0ueO3wK9xWocej8GRX98D8sa39w= -github.com/rollbar/rollbar-go/errors v0.0.0-20210929193720-32947096267e/go.mod h1:Ie0xEc1Cyj+T4XMO8s0Vf7pMfvSAAy1sb4AYc8aJsao= -github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/rwcarlsen/goexif v0.0.0-20190401172101-9e8deecbddbd/go.mod h1:hPqNNc0+uJM6H+SuU8sEs5K5IQeKccPqeSjfgcKGgPk= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b h1:gQZ0qzfKHQIybLANtM3mBXNUtOfsCFXeTsnBqCsx1KM= 
-github.com/scaleway/scaleway-sdk-go v1.0.0-beta.10 h1:wsfMs0iv+MJiViM37qh5VEKISi3/ZUq2nNKNdqmumAs= -github.com/scaleway/scaleway-sdk-go v1.0.0-beta.10/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= +github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30 h1:yoKAVkEVwAqbGbR8n87rHQ1dulL25rKloGadb3vm770= +github.com/scaleway/scaleway-sdk-go v1.0.0-beta.30/go.mod h1:sH0u6fq6x4R5M7WxkoQFY/o7UaiItec0o1LinLCJNq8= github.com/scylladb/termtables v0.0.0-20191203121021-c4c0b6d42ff4/go.mod h1:C1a7PQSMz9NShzorzCiG2fk9+xuCgLkPeCvMHYR2OWg= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/sethvargo/go-password v0.2.0 h1:BTDl4CC/gjf/axHMaDQtw507ogrXLci6XRiLc7i/UHI= -github.com/sethvargo/go-password v0.2.0/go.mod h1:Ym4Mr9JXLBycr02MFuVQ/0JHidNetSgbzutTr3zsYXE= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k= +github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME= github.com/spf13/afero v1.2.2/go.mod 
h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= -github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= -github.com/spf13/cobra v1.6.1 h1:o94oiPyS4KD1mPy2fmcYYHHfCxLqYjJOhGsCHFZtEzA= -github.com/spf13/cobra v1.6.1/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod 
h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tinkerbell/lint-install v0.0.0-20211012174934-5ee5ab01db76/go.mod h1:0h2KsALaQLNkoVeV+G+HjBWWCnp0COFYhJdRd5WCQPM= -github.com/tinkerbell/tink v0.8.0 h1:qgl/rglpO5Rvq6UKZd29O6X9mDgZZYgf841+Y0IYWak= -github.com/tinkerbell/tink v0.8.0/go.mod h1:bfAkSH7J/QQYIyqZRR6IQp8w78aac6l8Z2Lws5uXz6A= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY= +github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= +github.com/tidwall/match v1.1.1 h1:+Ho715JplO36QYgwN9PGYNhgZvoUSc9X2c80KVTi+GA= +github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4= +github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY= +github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/tinkerbell/tink v0.10.1 h1:mxdPQf7n4nB/AVdjbqCm5c98vsITU35g7Yw5cdOWmCw= +github.com/tinkerbell/tink v0.10.1/go.mod h1:yULdVrzAfPnA8KdOkjvo8qDn6pw0JD6kBzF94gtXMjA= github.com/uber/jaeger-client-go v2.30.0+incompatible 
h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= -github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= -github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasthttp v1.35.0/go.mod h1:t/G+3rLek+CyY9bnIE+YlMRddxVAAGjhxndDB4i4C0I= -github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= -github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= -github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= -github.com/vektah/gqlparser/v2 v2.2.0 h1:bAc3slekAAJW6sZTi07aGq0OrfaCjj4jxARAaC7g2EM= -github.com/vektah/gqlparser/v2 v2.2.0/go.mod h1:i3mQIGIrbK2PD1RrCeMTlVbkF2FJ6WkU1KJlJlC+3F4= -github.com/vincent-petithory/dataurl v1.0.0 h1:cXw+kPto8NLuJtlMsI152irrVw9fRDX8AbShPRpg2CI= -github.com/vincent-petithory/dataurl v1.0.0/go.mod h1:FHafX5vmDzyP+1CQATJn7WFKc9CvnvxyvZy6I1MrG/U= -github.com/vmware/go-vcloud-director/v2 v2.18.0 h1:3kXfaLyYObVBn7SsGxPPiIcqogwnHF0FpH5oY3KVSow= -github.com/vmware/go-vcloud-director/v2 v2.18.0/go.mod h1:KjnB8t5l1bRrc+jLKDJbx0vZLRzz2RPzNQ7xzg7yI3o= -github.com/vmware/govmomi v0.30.0 h1:Fm8ugPnnlMSTSceDKY9goGvjmqc6eQLPUSUeNXdpeXA= -github.com/vmware/govmomi v0.30.0/go.mod h1:F7adsVewLNHsW/IIm7ziFURaXDaHEwcc+ym4r3INMdY= -github.com/vultr/govultr/v2 v2.17.2 h1:gej/rwr91Puc/tgh+j33p/BLR16UrIPnSr+AIwYWZQs= -github.com/vultr/govultr/v2 v2.17.2/go.mod 
h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= -github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= -github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= -github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= -github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= -github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/vmware/go-vcloud-director/v2 v2.25.0 h1:RcJ5FQRku3FvQktTi8YOZsRfvhfLm315Cme50M9x9MQ= +github.com/vmware/go-vcloud-director/v2 v2.25.0/go.mod h1:7Of1qJja+LLNKVegjZG7uuhhy6xgGg3q7Fkw2CEP+Tw= +github.com/vmware/govmomi v0.43.0 h1:7Kg3Bkdly+TrE67BYXzRq7ZrDnn7xqpKX95uEh2f9Go= +github.com/vmware/govmomi v0.43.0/go.mod h1:IOv5nTXCPqH9qVJAlRuAGffogaLsNs8aF+e7vLgsHJU= +github.com/vultr/govultr/v3 v3.9.1 h1:uxSIb8Miel7tqTs3ee+z3t+JelZikwqBBsZzCOPBy/8= +github.com/vultr/govultr/v3 v3.9.1/go.mod h1:Rd8ebpXm7jxH3MDmhnEs+zrlYW212ouhx+HeUMfHm2o= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.0/go.mod 
h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.anx.io/go-anxcloud v0.5.0 h1:kKzAY+CRAXmQYCr+/lbEoO6JvPEVi5qjR2XgT0CMwx4= -go.anx.io/go-anxcloud v0.5.0/go.mod h1:IjUqXU0829myWH9015ES2KG2fBUnWNF5FChLwi5tUig= -go.keploy.io/server v0.1.8 h1:b50vAt1+WKMscYVP5Bm8gx/iSaR7mpHox8VpaxjrQ88= -go.keploy.io/server v0.1.8/go.mod h1:ZqhwTZOBb+dzx5t30Wt6eUGI6kO5QizvPg6coNPtbow= -go.mongodb.org/mongo-driver v1.8.0/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= -go.mongodb.org/mongo-driver v1.8.1/go.mod h1:0sQWfOeY63QTntERDJJ/0SuKK0T1uVSgKCuAROlKEPY= -go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.anx.io/go-anxcloud v0.7.3 h1:NWkm4KAg0GyJALBbSgp++J2K563lHQGDDVJAcM6CgUU= +go.anx.io/go-anxcloud v0.7.3/go.mod h1:RpJvC8ZmXNu9dSygIgZ0ossqPz0+6n9xDX9weeATmSo= +go.keploy.io/server v0.8.6 h1:czE9jaliyAkMMJcYnMPNuu6tun7UgwFbokxEG95vLN4= +go.keploy.io/server v0.8.6/go.mod h1:t7BPuZQSiC3PNHZ9dbn3e3VB61HNWwiqVmaRujfDFUg= +go.mongodb.org/mongo-driver v1.16.1 h1:rIVLL3q0IHM39dvE+z2ulZLp9ENZKThVfuvN/IiN4l8= +go.mongodb.org/mongo-driver v1.16.1/go.mod h1:oB6AhJQvFQL4LEHyXi6aJzQJtBiTQHiAd83l0GdFaiw= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.25.0/go.mod h1:E5NNboN0UqSAki0Atn9kVwaN7I+l25gGxDqBueo/74E= 
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0 h1:+uFejS4DCfNH6d3xODVIGsdhzgzhh45p9gpbHQMbdZI= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0/go.mod h1:HSmzQvagH8pS2/xrK7ScWsk0vAMtRTGbMFgInXCi8Tc= -go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU= -go.opentelemetry.io/otel v1.11.2 h1:YBZcQlsVekzFsFbjygXMOXSs6pialIZxcjfO/mBDmR0= -go.opentelemetry.io/otel v1.11.2/go.mod h1:7p4EUV+AqgdlNV9gL97IgUZiVR3yrFXYo53f9BM3tRI= -go.opentelemetry.io/otel/metric v0.34.0 h1:MCPoQxcg/26EuuJwpYN1mZTeCYAUGx8ABxfW07YkjP8= -go.opentelemetry.io/otel/metric v0.34.0/go.mod h1:ZFuI4yQGNCupurTXCwkeD/zHBt+C2bR7bw5JqUm/AP8= -go.opentelemetry.io/otel/trace v1.0.1/go.mod h1:5g4i4fKLaX2BQpSBsxw8YYcgKpMMSW3x7ZTuYBr3sUk= -go.opentelemetry.io/otel/trace v1.11.2 h1:Xf7hWSF2Glv0DE3MH7fBHvtpSBsjcBUe5MYAmZM/+y0= -go.opentelemetry.io/otel/trace v1.11.2/go.mod h1:4N+yC7QEz7TTsG9BSRLNAa63eg5E06ObSbKPmxQ/pKA= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= +go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0 h1:x7wzEgXfnzJcHDwStJT+mxOz4etr2EcexjqhBvmoakw= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.60.0/go.mod h1:rg+RlpR5dKwaS95IyyZqj5Wd4E13lk/msnTS0Xl9lJM= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q= +go.opentelemetry.io/otel v1.39.0 
h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48= +go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8= +go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0= +go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs= +go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18= +go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE= +go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8= +go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew= +go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI= +go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= -go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= -go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI= -go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.19.0/go.mod 
h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= -go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= -go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= -go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -go4.org v0.0.0-20201209231011-d4a079459e60 h1:iqAGo78tVOJXELHQFRjR6TMwItrvXH4hrGJ32I/NFF8= -go4.org v0.0.0-20201209231011-d4a079459e60/go.mod h1:CIiUVy99QCPfoE13bO4EZaz5GZMZXMSBGhxRdsvzbkg= -golang.org/x/arch v0.0.0-20180920145803-b19384d3c130/go.mod h1:cYlCBUl1MsqxdiKgmc4uh7TxZfWSFLOGSRR090WDxt8= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0= +go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto 
v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200420201142-3c4aac89819a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= -golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8= -golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= 
+golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= +golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= -golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= -golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= -golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= -golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20250305212735-054e65f0b394 h1:nDVHiLt8aIbd/VzvPWN6kSOPE7+F/fNFDSXLVYkE/Iw= +golang.org/x/exp v0.0.0-20250305212735-054e65f0b394/go.mod 
h1:sIifuuw/Yco/y6yb6+bDNfyeQ/MdPUy/hKEMYQV17cM= +golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= -golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= -golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod 
v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= -golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk= +golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 
-golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210913180222-943fd674d43e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.3.1-0.20221206200815-1e63c2f08a10/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk= +golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= +golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 
-golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.3.0 h1:6l90koy8/LaBLmLu8jpHeHexzMwEita0zFfYlggy2F8= -golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk= +golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= +golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= +golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180903190138-2b024373dcd9/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210910150752-751e447fb3d0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211015200801-69063c4bb744/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= +golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= -golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU= +golang.org/x/term v0.15.0/go.mod 
h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= +golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time 
v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU= +golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= +golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= +golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools 
v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools 
v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= -golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools 
v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200815165600-90abf76919f3/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= -golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ= +golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.2.0 h1:4pT439QV83L+G9FkcCriY6EkpcK6r6bK+A5FBUMI7qY= -gomodules.xyz/jsonpatch/v2 v2.2.0/go.mod h1:WXp+iVDkoLQqPudfQ9GBlwB2eZ5DKOnjQZCYdOS8GPY= -google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= -google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= -google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= -google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= -google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= -google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.105.0 h1:t6P9Jj+6XTn4U9I2wycQai6Q/Kz7iOT+QzjJ3G2V4x8= -google.golang.org/api v0.105.0/go.mod h1:qh7eD5FJks5+BcE+cjBIm6Gz8vioK7EHvnlniqXBnqI= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod 
h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo= +gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= +google.golang.org/api v0.197.0 h1:x6CwqQLsFiA5JKAiGyGBjc2bNtHtLddhJCE2IKuhhcQ= +google.golang.org/api v0.197.0/go.mod h1:AuOuo20GoQ331nq7DquGHlU6d+2wN2fZ8O0ta60nRNw= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= 
-google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20211018162055-cf77aa76bad2/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef h1:uQ2vjV/sHTsWSqdKeLqmwitzgvjMl7o4IdtHwUDXSJY= -google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1 h1:BulPr26Jqjnd4eYDVe+YvyR7Yc2vJGkO5/0UxD0/jZU= +google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod 
h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4= +google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls= +google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.41.0/go.mod 
h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= -google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= -google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= -google.golang.org/grpc/examples v0.0.0-20210728214646-ad0a2a847cdf/go.mod h1:bF8wuZSAZTcbF7ZPKrDI/qY52toTP/yxLpRRY4Eu9Js= +google.golang.org/grpc v1.79.3 h1:sybAEdRIEtvcD68Gx7dmnwjZKlyfuc61Dyo9pGXXkKE= +google.golang.org/grpc v1.79.3/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1139,36 +742,27 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= -google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0 h1:FVCohIoYO7IJoDDVpV2pdq7SgrMH6wHnuTyrdrxJNoY= gopkg.in/DATA-DOG/go-sqlmock.v1 v1.3.0/go.mod h1:OdE7CF6DbADk7lN8LIKRzRJTTZXIjtWgA5THM5lhBAw= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod 
h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo= +gopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/src-d/go-billy.v4 v4.3.0/go.mod h1:tm33zBoOwxjYHZIE+OV8bxTWFMJLrconzFMd38aARFk= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.2.1/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -1180,80 +774,59 @@ gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg= k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= -k8s.io/api v0.24.2/go.mod h1:AHqbSkTm6YrQ0ObxjO3Pmp/ubFF/KuM7jU+3khoBsOg= -k8s.io/api v0.26.0 h1:IpPlZnxBpV1xl7TGk/X6lFtpgjgntCg8PJ+qrPHAC7I= -k8s.io/api v0.26.0/go.mod h1:k6HDTaIFC8yn1i6pSClSqIwLABIcLV9l5Q4EcngKnQg= -k8s.io/apiextensions-apiserver v0.26.0 
h1:Gy93Xo1eg2ZIkNX/8vy5xviVSxwQulsnUdQ00nEdpDo= -k8s.io/apiextensions-apiserver v0.26.0/go.mod h1:7ez0LTiyW5nq3vADtK6C3kMESxadD51Bh6uz3JOlqWQ= -k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc= +k8s.io/api v0.35.0 h1:iBAU5LTyBI9vw3L5glmat1njFK34srdLmktWwLTprlY= +k8s.io/api v0.35.0/go.mod h1:AQ0SNTzm4ZAczM03QH42c7l3bih1TbAXYo0DkF8ktnA= +k8s.io/apiextensions-apiserver v0.35.0 h1:3xHk2rTOdWXXJM+RDQZJvdx0yEOgC0FgQ1PlJatA5T4= +k8s.io/apiextensions-apiserver v0.35.0/go.mod h1:E1Ahk9SADaLQ4qtzYFkwUqusXTcaV2uw3l14aqpL2LU= k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= -k8s.io/apimachinery v0.24.2/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= -k8s.io/apimachinery v0.24.3/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= -k8s.io/apimachinery v0.26.0 h1:1feANjElT7MvPqp0JT6F3Ss6TWDwmcjLypwoPpEf7zg= -k8s.io/apimachinery v0.26.0/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= -k8s.io/client-go v0.26.0 h1:lT1D3OfO+wIi9UFolCrifbjUUgu7CpLca0AD8ghRLI8= -k8s.io/client-go v0.26.0/go.mod h1:I2Sh57A79EQsDmn7F7ASpmru1cceh3ocVT9KlX2jEZg= -k8s.io/code-generator v0.23.0/go.mod h1:vQvOhDXhuzqiVfM/YHp+dmg10WDZCchJVObc9MvowsE= +k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8= +k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns= +k8s.io/client-go v0.35.0 h1:IAW0ifFbfQQwQmga0UdoH0yvdqrbwMdq9vIFEhRpxBE= +k8s.io/client-go v0.35.0/go.mod h1:q2E5AAyqcbeLGPdoRB+Nxe3KYTfPce1Dnu1myQdqz9o= +k8s.io/cloud-provider v0.35.0 h1:syiBCQbKh2gho/S1BkIl006Dc44pV8eAtGZmv5NMe7M= +k8s.io/cloud-provider v0.35.0/go.mod h1:7grN+/Nt5Hf7tnSGPT3aErt4K7aQpygyCrGpbrQbzNc= k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= -k8s.io/component-base v0.26.0 h1:0IkChOCohtDHttmKuz+EP3j3+qKmV55rM9gIFTXA7Vs= -k8s.io/component-base v0.26.0/go.mod h1:lqHwlfV1/haa14F/Z5Zizk5QmzaVf23nQzCwVOQpfC8= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod 
h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.10.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= -k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715 h1:tBEbstoM+K0FiBV5KGAKQ0kuvf54v/hwpldiJt69w1s= -k8s.io/kube-openapi v0.0.0-20221207184640-f3cff1453715/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/kubelet v0.26.0 h1:08bDb5IoUH/1K1t2NUwnGIIWxjm9LSqn6k3FWw1tJGI= -k8s.io/kubelet v0.26.0/go.mod h1:DluF+d8jS2nE/Hs7CC3QM+OZlIEb22NTOihQ3EDwCQ4= +k8s.io/kube-openapi v0.0.0-20250910181357-589584f1c912 h1:Y3gxNAuB0OBLImH611+UDZcmKS3g6CthxToOb37KgwE= +k8s.io/kube-openapi 
v0.0.0-20250910181357-589584f1c912/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ= +k8s.io/kubectl v0.35.0 h1:cL/wJKHDe8E8+rP3G7avnymcMg6bH6JEcR5w5uo06wc= +k8s.io/kubectl v0.35.0/go.mod h1:VR5/TSkYyxZwrRwY5I5dDq6l5KXmiCb+9w8IKplk3Qo= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y= -k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -kubevirt.io/api v0.58.0 h1:qeNeRtD6AIJ5WVJuRXajmmXtnrO5dYchy+hpCm6QwhE= -kubevirt.io/api v0.58.0/go.mod h1:U0CQlZR0JoJCaC+Va0wz4dMOtYDdVywJ98OT1KmOkzI= -kubevirt.io/containerized-data-importer-api v1.55.2 h1:AzYnKIUFkKwO6c0uCQZYlAIxfzbiPkJXP29hFhauaQ8= -kubevirt.io/containerized-data-importer-api v1.55.2/go.mod h1:92HiQEyzPoeMiCbgfG5Qe10JQVbtWMZOXucy56dKdGg= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= +k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +kubevirt.io/api v1.3.1 h1:MoTNo/zvDlZ44c2ocXLPln8XTaQOeUodiYbEKrTCqv4= +kubevirt.io/api v1.3.1/go.mod h1:tCn7VAZktEvymk490iPSMPCmKM9UjbbfH2OsFR/IOLU= +kubevirt.io/containerized-data-importer-api v1.60.3 h1:kQEXi7scpzUa0RPf3/3MKk1Kmem0ZlqqiuK3kDF5L2I= +kubevirt.io/containerized-data-importer-api v1.60.3/go.mod h1:8mwrkZIdy8j/LmCyKt2wFXbiMavLUIqDaegaIF67CZs= kubevirt.io/controller-lifecycle-operator-sdk/api v0.2.4 h1:fZYvD3/Vnitfkx6IJxjLAk8ugnZQ7CXVYcRfkSKmuZY= 
kubevirt.io/controller-lifecycle-operator-sdk/api v0.2.4/go.mod h1:018lASpFYBsYN6XwmA2TIrPCx6e0gviTd/ZNtSitKgc= -mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= -rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= -rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/controller-runtime v0.14.1 h1:vThDes9pzg0Y+UbCPY3Wj34CGIYPgdmspPm2GIpxpzM= -sigs.k8s.io/controller-runtime v0.14.1/go.mod h1:GaRkrY8a7UZF0kqFFbUKG7n9ICiTY5T55P1RiE3UZlU= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +sigs.k8s.io/controller-runtime v0.23.1 h1:TjJSM80Nf43Mg21+RCy3J70aj/W6KyvDtOlpKf+PupE= +sigs.k8s.io/controller-runtime v0.23.1/go.mod h1:B6COOxKptp+YaUT5q4l6LqUJTRpizbgf9KSRNdQGns0= sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= -sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg= +sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= -sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= 
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= +sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482 h1:2WOzJpHUBVrrkDjU4KBT8n5LDcj824eX0I5UKcgeRUs= +sigs.k8s.io/structured-merge-diff/v6 v6.3.2-0.20260122202528-d9cc6641c482/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= +sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4= diff --git a/hack/build-kubevirt-images.sh b/hack/build-kubevirt-images.sh index 440a44217..2028e2151 100755 --- a/hack/build-kubevirt-images.sh +++ b/hack/build-kubevirt-images.sh @@ -20,7 +20,7 @@ BUILD_NUM=2 cd $(dirname $0)/kubevirt_dockerfiles -for flavor in ubuntu centos; do +for flavor in ubuntu; do docker build \ -t quay.io/kubermatic/machine-controller-kubevirt:$flavor-$BUILD_NUM \ -f dockerfile.$flavor . diff --git a/hack/ci/calico.yaml b/hack/ci/calico.yaml index f64820f0f..666a3a9f1 100644 --- a/hack/ci/calico.yaml +++ b/hack/ci/calico.yaml @@ -325,7 +325,7 @@ spec: numAllowedLocalASNumbers: description: Maximum number of local AS numbers that are allowed in the AS path for received routes. This removes BGP loop prevention - and should only be used if absolutely necesssary. + and should only be used if absolutely necessary. 
format: int32 type: integer password: diff --git a/hack/ci/download-gocache.sh b/hack/ci/download-gocache.sh index d9a94d119..8b1244349 100755 --- a/hack/ci/download-gocache.sh +++ b/hack/ci/download-gocache.sh @@ -50,7 +50,7 @@ GIT_BRANCH="${PULL_BASE_REF:-}" CACHE_VERSION="${PULL_BASE_SHA:-}" # Periodics just use their head ref -if [[ -z "${CACHE_VERSION}" ]]; then +if [[ -z ${CACHE_VERSION} ]]; then CACHE_VERSION="$(git rev-parse HEAD)" GIT_BRANCH="main" fi diff --git a/hack/ci/run-e2e-tests.sh b/hack/ci/run-e2e-tests.sh index 2b235c06b..7c592da63 100755 --- a/hack/ci/run-e2e-tests.sh +++ b/hack/ci/run-e2e-tests.sh @@ -37,7 +37,6 @@ trap cleanup EXIT export GIT_HEAD_HASH="$(git rev-parse HEAD)" export MC_VERSION="${GIT_HEAD_HASH}" -export OPERATING_SYSTEM_MANAGER="${OPERATING_SYSTEM_MANAGER:-true}" TEST_NAME="Pre-warm Go build cache" echodate "Attempting to pre-warm Go build cache" @@ -51,13 +50,8 @@ echodate "Building machine-controller and webhook..." make all pushElapsed binary_build_duration_milliseconds $beforeBuild -# Copy userdata plugins. -echodate "Copying machine-controller plugins..." -cp machine-controller-userdata-* /usr/local/bin -ls -l /usr/local/bin - # Install genisoimage, this is required for generating user-data for vSphere -if [[ "${JOB_NAME:-}" = *"pull-machine-controller-e2e-vsphere"* ]]; then +if [[ ${JOB_NAME:-} == *"pull-machine-controller-e2e-vsphere"* ]]; then echo "Installing genisoimage..." apt install -y genisoimage fi diff --git a/hack/ci/setup-cni-in-kind.sh b/hack/ci/setup-cni-in-kind.sh index 50b075a8d..430a0042d 100755 --- a/hack/ci/setup-cni-in-kind.sh +++ b/hack/ci/setup-cni-in-kind.sh @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-CNI_VERSION="${CNI_VERSION:-v0.8.7}" +CNI_VERSION="${CNI_VERSION:-v1.2.0}" cni_bin_dir=/opt/cni/bin mkdir -p /etc/cni/net.d "$cni_bin_dir" diff --git a/hack/ci/setup-kind-cluster.sh b/hack/ci/setup-kind-cluster.sh index b30c78820..14b29aed0 100755 --- a/hack/ci/setup-kind-cluster.sh +++ b/hack/ci/setup-kind-cluster.sh @@ -131,7 +131,7 @@ EOF # unwrap the socket inside the kind cluster and make it available on a TCP port, # because containerd/Docker doesn't support sockets for mirrors. - docker exec $KIND_CLUSTER_NAME-control-plane bash -c 'socat TCP4-LISTEN:5001,fork,reuseaddr UNIX:/mirror/mirror.sock &' + docker exec $KIND_CLUSTER_NAME-control-plane bash -c 'apt update --quiet; apt install --quiet socat; socat TCP4-LISTEN:5001,fork,reuseaddr UNIX:/mirror/mirror.sock &' else kind create cluster --config kind-config.yaml fi @@ -150,22 +150,9 @@ if [ -z "${DISABLE_CLUSTER_EXPOSER:-}" ]; then # Start cluster exposer, which will expose services from within kind as # a NodePort service on the host - echodate "Starting cluster exposer" - ( - # Clone kubermatic repo to build clusterexposer - mkdir -p /tmp/kubermatic - cd /tmp/kubermatic - echodate "Cloning cluster exposer" - KKP_REPO_URL="${KKP_REPO_URL:-https://github.com/kubermatic/kubermatic.git}" - KKP_REPO_TAG="${KKP_REPO_BRANCH:-main}" - git clone --depth 1 --branch "${KKP_REPO_TAG}" "${KKP_REPO_URL}" . - - echodate "Building cluster exposer" - CGO_ENABLED=0 go build --tags ce -v -o /tmp/clusterexposer ./pkg/test/clusterexposer/cmd - ) - export KUBECONFIG=~/.kube/config - /tmp/clusterexposer \ + echodate "Starting cluster exposer" + clusterexposer \ --kubeconfig-inner "$KUBECONFIG" \ --kubeconfig-outer "/etc/kubeconfig/kubeconfig" \ --build-id "$PROW_JOB_ID" &> /var/log/clusterexposer.log & @@ -199,12 +186,15 @@ if [ -z "${DISABLE_CLUSTER_EXPOSER:-}" ]; then echodate "Successfully set up iptables rules for nodeports" + # Wait for 10 seconds before checking if the apiserver is reachable. 
+ sleep 10 + # Compute external kube-apiserver address # If svc is not found then we need to check cluster-exposer logs PORT=$(kubectl --kubeconfig /etc/kubeconfig/kubeconfig get svc -l prow.k8s.io/id=$PROW_JOB_ID -o jsonpath="{.items..spec.ports[0].nodePort}") - if [ -z "$PORT" ] || [ -z "$NODE_NAME" ] || [ -z "$NODE_IP" ]; then - echodate "This script was unable to determine the external IP for kube-apiserver." + if [ -z "$PORT" ]; then + echodate "This script was unable to determine the nodeport for kube-apiserver." exit 1 fi diff --git a/hack/ci/setup-machine-controller-in-kind.sh b/hack/ci/setup-machine-controller-in-kind.sh index d34d5b934..535346ff9 100755 --- a/hack/ci/setup-machine-controller-in-kind.sh +++ b/hack/ci/setup-machine-controller-in-kind.sh @@ -22,7 +22,18 @@ if [ -z "${KIND_CLUSTER_NAME:-}" ]; then fi export MC_VERSION="${MC_VERSION:-$(git rev-parse HEAD)}" -export OPERATING_SYSTEM_MANAGER="${OPERATING_SYSTEM_MANAGER:-true}" +OSM_REPO_URL="${OSM_REPO_URL:-https://github.com/kubermatic/operating-system-manager.git}" +OSM_REPO_TAG="${OSM_REPO_TAG:-main}" + +# cert-manager is required by OSM for generating TLS Certificates +echodate "Installing cert-manager" +( + kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.11.2/cert-manager.yaml + # Wait for cert-manager to be ready + kubectl -n cert-manager rollout status deploy/cert-manager + kubectl -n cert-manager rollout status deploy/cert-manager-cainjector + kubectl -n cert-manager rollout status deploy/cert-manager-webhook +) # Build the Docker image for machine-controller beforeDockerBuild=$(nowms) @@ -39,7 +50,7 @@ echodate "Successfully built and loaded machine-controller image" if [ ! 
-f machine-controller-deployed ]; then # The 10 minute window given by default for the node to appear is too short # when we upgrade the instance during the upgrade test - if [[ ${LC_JOB_NAME:-} = "pull-machine-controller-e2e-ubuntu-upgrade" ]]; then + if [[ ${LC_JOB_NAME:-} == "pull-machine-controller-e2e-ubuntu-upgrade" ]]; then sed -i '/.*join-cluster-timeout=.*/d' examples/machine-controller.yaml fi sed -i -e 's/-worker-count=5/-worker-count=50/g' examples/machine-controller.yaml @@ -47,36 +58,39 @@ if [ ! -f machine-controller-deployed ]; then url="-override-bootstrap-kubelet-apiserver=$MASTER_URL" sed -i "s;-node-csr-approver=true;$url;g" examples/machine-controller.yaml - # Ensure that we update `use-osm` flag if OSM is disabled - if [[ "$OPERATING_SYSTEM_MANAGER" == "false" ]]; then - sed -i "s;-use-osm=true;-use-osm=false;g" examples/machine-controller.yaml - fi + # e2e tests logs are primarily read by humans, if ever + sed -i 's/log-format=json/log-format=console/g' examples/machine-controller.yaml - make deploy + kubectl apply -f examples/machine-controller.yaml touch machine-controller-deployed -fi -if [[ "$OPERATING_SYSTEM_MANAGER" == "true" ]]; then - # cert-manager is required by OSM for generating TLS Certificates - echodate "Installing cert-manager" - ( - kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.7.1/cert-manager.yaml - # Wait for cert-manager to be ready - kubectl -n cert-manager rollout status deploy/cert-manager - kubectl -n cert-manager rollout status deploy/cert-manager-cainjector - kubectl -n cert-manager rollout status deploy/cert-manager-webhook - ) - - echodate "Installing operating-system-manager" - ( - # This is required for running e2e tests in KIND - url="-override-bootstrap-kubelet-apiserver=$MASTER_URL" - sed -i "s;-container-runtime=containerd;$url;g" examples/operating-system-manager.yaml - sed -i -e 's/-worker-count=5/-worker-count=50/g' examples/operating-system-manager.yaml - kubectl 
apply -f examples/operating-system-manager.yaml - ) + protokol --kubeconfig "$KUBECONFIG" --flat --output "$ARTIFACTS/logs" --namespace kube-system 'machine-controller-*' > /dev/null 2>&1 & fi +OSM_TMP_DIR=/tmp/osm +( + # Clone OSM repo + mkdir -p $OSM_TMP_DIR + echodate "Cloning OSM repository" + git clone --depth 1 --branch "${OSM_REPO_TAG}" "${OSM_REPO_URL}" $OSM_TMP_DIR +) + +( + OSM_TAG="$(git -C $OSM_TMP_DIR rev-parse HEAD)" + echodate "Installing operating-system-manager with image: $OSM_TAG" + + # In release branches we'll have this pinned to a specific semver instead of latest. + sed -i "s;:latest;:$OSM_TAG;g" examples/operating-system-manager.yaml + + # This is required for running e2e tests in KIND + url="-override-bootstrap-kubelet-apiserver=$MASTER_URL" + sed -i "s;-container-runtime=containerd;$url;g" examples/operating-system-manager.yaml + sed -i -e 's/-worker-count=5/-worker-count=50/g' examples/operating-system-manager.yaml + kubectl apply -f examples/operating-system-manager.yaml +) + +protokol --kubeconfig "$KUBECONFIG" --flat --output "$ARTIFACTS/logs" --namespace kube-system 'operating-system-manager-*' > /dev/null 2>&1 & + sleep 10 retry 10 check_all_deployments_ready kube-system diff --git a/hack/e2e-setup-openstack-images.sh b/hack/e2e-setup-openstack-images.sh index d67c3825f..309b9bf04 100755 --- a/hack/e2e-setup-openstack-images.sh +++ b/hack/e2e-setup-openstack-images.sh @@ -20,6 +20,5 @@ set -o pipefail cd $(dirname $0)/ export UBUNTU_IMAGE_NAME="machine-controller-e2e-ubuntu" -export CENTOS_IMAGE_NAME="machine-controller-e2e-centos" ./setup-openstack-images.sh diff --git a/hack/kubevirt_dockerfiles/dockerfile.centos b/hack/kubevirt_dockerfiles/dockerfile.centos deleted file mode 100644 index c26389521..000000000 --- a/hack/kubevirt_dockerfiles/dockerfile.centos +++ /dev/null @@ -1,3 +0,0 @@ -FROM kubevirt/registry-disk-v1alpha:v0.10.0 - -RUN curl -L -o /disk/centos7.img 
https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2 diff --git a/hack/lib.sh b/hack/lib.sh index 5fc9131c4..cfeb7bb52 100644 --- a/hack/lib.sh +++ b/hack/lib.sh @@ -52,7 +52,7 @@ is_containerized() { containerize() { local cmd="$1" - local image="${CONTAINERIZE_IMAGE:-quay.io/kubermatic/util:2.2.0}" + local image="${CONTAINERIZE_IMAGE:-quay.io/kubermatic/util:2.7.0}" local gocache="${CONTAINERIZE_GOCACHE:-/tmp/.gocache}" local gomodcache="${CONTAINERIZE_GOMODCACHE:-/tmp/.gomodcache}" local skip="${NO_CONTAINERIZE:-}" @@ -140,7 +140,7 @@ retry() { actual_retry $@ rc=$? set -e - elapsed_time=$(($(date +%s) - $start_time)) + elapsed_time=$(($(date +%s) - start_time)) write_junit "$rc" "$elapsed_time" return $rc } diff --git a/hack/run-machine-controller.sh b/hack/run-machine-controller.sh index 7718af663..a1d2f68e8 100755 --- a/hack/run-machine-controller.sh +++ b/hack/run-machine-controller.sh @@ -21,17 +21,12 @@ set -e MC_KUBECONFIG=${MC_KUBECONFIG:-$(dirname $0)/../.kubeconfig} # If you want to use the default kubeconfig `export MC_KUBECONFIG=$KUBECONFIG` -# `-use-osm` flag can be removed to use legacy userdata that is generated by machine-controller. - make -C $(dirname $0)/.. 
build-machine-controller $(dirname $0)/../machine-controller \ -kubeconfig=$MC_KUBECONFIG \ -worker-count=50 \ - -logtostderr \ - -v=6 \ + -log-debug \ -cluster-dns=169.254.20.10 \ -enable-profiling \ -metrics-address=0.0.0.0:8080 \ - -health-probe-address=0.0.0.0:8085 \ - -use-osm=true \ - -node-container-runtime=containerd + -health-probe-address=0.0.0.0:8085 diff --git a/hack/setup-openstack-images.sh b/hack/setup-openstack-images.sh index 52ab771ae..28d51b005 100755 --- a/hack/setup-openstack-images.sh +++ b/hack/setup-openstack-images.sh @@ -18,7 +18,6 @@ set -o nounset set -o pipefail UBUNTU_IMAGE_NAME=${UBUNTU_IMAGE_NAME:-"machine-controller-ubuntu"} -CENTOS_IMAGE_NAME=${CENTOS_IMAGE_NAME:-"machine-controller-centos"} echo "Downloading Ubuntu 18.04 image from upstream..." curl -L -o ubuntu.img http://cloud-images.ubuntu.com/bionic/current/bionic-server-cloudimg-amd64.img @@ -30,14 +29,3 @@ openstack image create \ ${UBUNTU_IMAGE_NAME} rm ubuntu.img echo "Successfully uploaded ${UBUNTU_IMAGE_NAME} to OpenStack..." - -echo "Downloading CentOS 7 image from upstream..." -curl -L -o centos.qcow2 http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2 -echo "Uploading CentOS 7 image to OpenStack..." -openstack image create \ - --disk-format qcow2 \ - --container-format bare \ - --file centos.qcow2 \ - ${CENTOS_IMAGE_NAME} -rm centos.qcow2 -echo "Successfully uploaded ${CENTOS_IMAGE_NAME} to OpenStack..." diff --git a/hack/update-fixtures.sh b/hack/update-fixtures.sh index b17ff3596..1e238dca6 100755 --- a/hack/update-fixtures.sh +++ b/hack/update-fixtures.sh @@ -19,7 +19,7 @@ set -euo pipefail cd $(dirname $0)/.. source hack/lib.sh -CONTAINERIZE_IMAGE=golang:1.19.4 containerize ./hack/update-fixtures.sh +CONTAINERIZE_IMAGE=quay.io/kubermatic/build:go-1.25-node-22-8 containerize ./hack/update-fixtures.sh go test ./... -v -update || go test ./... 
diff --git a/hack/verify-boilerplate.sh b/hack/verify-boilerplate.sh index 101dbdc31..70e2169e0 100755 --- a/hack/verify-boilerplate.sh +++ b/hack/verify-boilerplate.sh @@ -20,6 +20,4 @@ cd $(dirname $0)/.. boilerplate \ -boilerplates hack/boilerplate \ - -exclude pkg/machines/v1alpha1 \ - -exclude pkg/signals \ - -exclude pkg/userdata/scripts + -exclude sdk/apis/machines/v1alpha1 diff --git a/hack/verify-licenses.sh b/hack/verify-licenses.sh index 7d71c7113..0ab22be74 100755 --- a/hack/verify-licenses.sh +++ b/hack/verify-licenses.sh @@ -19,7 +19,7 @@ set -euo pipefail cd $(dirname $0)/.. source hack/lib.sh -CONTAINERIZE_IMAGE=quay.io/kubermatic/build:go-1.19-node-18-kind-0.17-5 containerize ./hack/verify-licenses.sh +CONTAINERIZE_IMAGE=quay.io/kubermatic/build:go-1.25-node-22-8 containerize ./hack/verify-licenses.sh go mod vendor diff --git a/image-builder/README.md b/image-builder/README.md index 613132bac..743cc7adc 100644 --- a/image-builder/README.md +++ b/image-builder/README.md @@ -4,19 +4,17 @@ Currently supported operating systems: * RedHat CoreOS - * CentOS 7 * Debian 9 ### Usage -`./build.sh --target-os centos7|debian9 [--release K8S-RELEASE]` +`./build.sh --target-os debian9 [--release K8S-RELEASE]` Parameters: * `--target-os` is mandatory and specifies the Linux distribution image to be built. Possible values: - * `centos7` * `debian9` * `--release` specifies the Kubernetes release to be added to the image, e.g. `v1.10.2`. If not provided, the script will look up the latest stable release and use that. ### Output -The script will generate a VMDK disk image with the filename `TARGET_OS-output.vmdk`, e.g. `centos7-output.vmdk`. +The script will generate a VMDK disk image with the filename `TARGET_OS-output.vmdk`. 
diff --git a/image-builder/build.sh b/image-builder/build.sh index 40eb535e2..97bff8688 100755 --- a/image-builder/build.sh +++ b/image-builder/build.sh @@ -22,56 +22,56 @@ K8S_RELEASE="" TARGET_OS="" usage() { - echo -e "usage:" - echo -e "\t$0 --target-os centos7|debian9|ubuntu-xenial|ubuntu-bionic [--release K8S-RELEASE]" + echo -e "usage:" + echo -e "\t$0 --target-os debian9|ubuntu-xenial|ubuntu-bionic [--release K8S-RELEASE]" } while [ $# -gt 0 ]; do - case "$1" in - --release) - K8S_RELEASE="$2" - shift - ;; - --target-os) - if [[ -z "$2" ]]; then - echo "You must specify target OS. Currently 'centos7' is supported." - exit 1 - fi - TARGET_OS="$2" - shift - ;; - *) - echo "Unknown parameter \"$1\"" - usage - exit 1 - ;; - esac - shift + case "$1" in + --release) + K8S_RELEASE="$2" + shift + ;; + --target-os) + if [[ -z $2 ]]; then + echo "You must specify target OS." + exit 1 + fi + TARGET_OS="$2" + shift + ;; + *) + echo "Unknown parameter \"$1\"" + usage + exit 1 + ;; + esac + shift done -if [[ -z "$TARGET_OS" ]]; then - usage - exit 1 +if [[ -z $TARGET_OS ]]; then + usage + exit 1 fi if ! which guestmount &>/dev/null; then - echo "guestmount is not available. On Ubuntu, you need to install libguestfs-tools" - exit 1 + echo "guestmount is not available. On Ubuntu, you need to install libguestfs-tools" + exit 1 fi if ! which qemu-img &>/dev/null; then - echo "qemu-img is not available. On Ubuntu, you need to install qemu-utils" - exit 1 + echo "qemu-img is not available. On Ubuntu, you need to install qemu-utils" + exit 1 fi if ! which gpg2 &>/dev/null; then - echo "gpg2 is not available. On Ubuntu, you need to install gnupg2" - exit 1 + echo "gpg2 is not available. 
On Ubuntu, you need to install gnupg2" + exit 1 fi # if no K8S version has was specified on the command line, get the latest stable -if [[ -z "$K8S_RELEASE" ]]; then - K8S_RELEASE="$(curl -sSL https://dl.k8s.io/release/stable.txt)" +if [[ -z $K8S_RELEASE ]]; then + K8S_RELEASE="$(curl -sSL https://dl.k8s.io/release/stable.txt)" fi TEMPDIR="$(mktemp -d)" @@ -80,132 +80,105 @@ mkdir -p "$TARGETFS" "$SCRIPT_DIR/downloads" # on failure unmount target filesystem (if mounted) and delete the temporary directory trap "sudo mountpoint --quiet $TARGETFS && sudo umount --recursive $TARGETFS; rm -rf $TEMPDIR" EXIT SIGINT -get_centos7_image() { - CENTOS7_BUILD="1802" - echo " * Downloading vanilla CentOS image." - wget "https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-$CENTOS7_BUILD.qcow2.xz" -P "$TEMPDIR" - - echo " * Verifying GPG signature" - wget --quiet "https://cloud.centos.org/centos/7/images/sha256sum.txt.asc" -O "$TEMPDIR/centos7-sha256sum.txt.asc" - gpg2 --quiet --import "$SCRIPT_DIR/RPM-GPG-KEY-CentOS-7" - gpg2 "$TEMPDIR/centos7-sha256sum.txt.asc" - - echo " * Verifying SHA256 digest" - EXPECTED_SHA256="$(grep "CentOS-7-x86_64-GenericCloud-$CENTOS7_BUILD.qcow2.xz$" < "$TEMPDIR/centos7-sha256sum.txt" | cut -f1 -d ' ')" - CALCULATED_SHA256="$(sha256sum "$TEMPDIR/CentOS-7-x86_64-GenericCloud-$CENTOS7_BUILD.qcow2.xz" | cut -f1 -d ' ')" - if [[ "$CALCULATED_SHA256" != "$EXPECTED_SHA256" ]]; then - echo " * SHA256 digest verification failed. '$CALCULATED_SHA256' != '$EXPECTED_SHA256'" - exit 1 - fi - - echo " * Decompressing" - unxz --keep "$TEMPDIR/CentOS-7-x86_64-GenericCloud-$CENTOS7_BUILD.qcow2.xz" - mv "$TEMPDIR/CentOS-7-x86_64-GenericCloud-$CENTOS7_BUILD.qcow2" "$SCRIPT_DIR/downloads/CentOS-7-x86_64-GenericCloud.qcow2" -} - get_debian9_image() { - DEBIAN_CD_SIGNING_KEY_FINGERPRINT="DF9B9C49EAA9298432589D76DA87E80D6294BE9B" - - echo " * Downloading vanilla Debian image." 
- wget "https://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2" -P "$TEMPDIR" - - echo " * Verifying GPG signature" - wget --quiet "https://cdimage.debian.org/cdimage/openstack/current-9/SHA512SUMS" -O "$TEMPDIR/Debian-SHA512SUMS" - wget --quiet "https://cdimage.debian.org/cdimage/openstack/current-9/SHA512SUMS.sign" -O "$TEMPDIR/Debian-SHA512SUMS.sign" - gpg2 --quiet --recv-keys "$DEBIAN_CD_SIGNING_KEY_FINGERPRINT" - gpg2 --quiet --verify "$TEMPDIR/Debian-SHA512SUMS.sign" - - echo " * Verifying SHA512 digest" - EXPECTED_SHA512="$(grep 'debian-9-openstack-amd64.qcow2$' < "$TEMPDIR/Debian-SHA512SUMS" | cut -f1 -d ' ')" - CALCULATED_SHA512="$(sha512sum "$TEMPDIR/debian-9-openstack-amd64.qcow2" | cut -f1 -d ' ')" - if [[ "$CALCULATED_SHA512" != "$EXPECTED_SHA512" ]]; then - echo " * SHA512 digest verification failed. '$CALCULATED_SHA512' != '$EXPECTED_SHA512'" - exit 1 - fi - - echo " * Finalizing" - mv "$TEMPDIR/debian-9-openstack-amd64.qcow2" "$SCRIPT_DIR/downloads/debian-9-openstack-amd64.qcow2" + DEBIAN_CD_SIGNING_KEY_FINGERPRINT="DF9B9C49EAA9298432589D76DA87E80D6294BE9B" + + echo " * Downloading vanilla Debian image." 
+ wget "https://cdimage.debian.org/cdimage/openstack/current-9/debian-9-openstack-amd64.qcow2" -P "$TEMPDIR" + + echo " * Verifying GPG signature" + wget --quiet "https://cdimage.debian.org/cdimage/openstack/current-9/SHA512SUMS" -O "$TEMPDIR/Debian-SHA512SUMS" + wget --quiet "https://cdimage.debian.org/cdimage/openstack/current-9/SHA512SUMS.sign" -O "$TEMPDIR/Debian-SHA512SUMS.sign" + gpg2 --quiet --recv-keys "$DEBIAN_CD_SIGNING_KEY_FINGERPRINT" + gpg2 --quiet --verify "$TEMPDIR/Debian-SHA512SUMS.sign" + + echo " * Verifying SHA512 digest" + EXPECTED_SHA512="$(grep 'debian-9-openstack-amd64.qcow2$' <"$TEMPDIR/Debian-SHA512SUMS" | cut -f1 -d ' ')" + CALCULATED_SHA512="$(sha512sum "$TEMPDIR/debian-9-openstack-amd64.qcow2" | cut -f1 -d ' ')" + if [[ $CALCULATED_SHA512 != "$EXPECTED_SHA512" ]]; then + echo " * SHA512 digest verification failed. '$CALCULATED_SHA512' != '$EXPECTED_SHA512'" + exit 1 + fi + + echo " * Finalizing" + mv "$TEMPDIR/debian-9-openstack-amd64.qcow2" "$SCRIPT_DIR/downloads/debian-9-openstack-amd64.qcow2" } get_ubuntu_image() { - local UBUNTU_CLOUD_IMAGE_SIGNING_KEY_FINGERPRINT="D2EB44626FDDC30B513D5BB71A5D6C4C7DB87C81" - local RELEASE="$1" - local IMG_NAME - if [[ $RELEASE == "xenial" ]]; then - IMG_NAME="$RELEASE-server-cloudimg-amd64-disk1.vmdk" - else - IMG_NAME="$RELEASE-server-cloudimg-amd64.vmdk" - fi - - echo " * Downloading vanilla Ubuntu image." 
- wget "https://cloud-images.ubuntu.com/$RELEASE/current/$IMG_NAME" -P "$TEMPDIR" - - echo " * Verifying GPG signature" - wget --quiet "https://cloud-images.ubuntu.com/$RELEASE/current/SHA256SUMS" -O "$TEMPDIR/Ubuntu-SHA256SUMS" - wget --quiet "https://cloud-images.ubuntu.com/$RELEASE/current/SHA256SUMS.gpg" -O "$TEMPDIR/Ubuntu-SHA256SUMS.gpg" - gpg2 --quiet --recv-keys $UBUNTU_CLOUD_IMAGE_SIGNING_KEY_FINGERPRINT - gpg2 --quiet --verify "$TEMPDIR/Ubuntu-SHA256SUMS.gpg" "$TEMPDIR/Ubuntu-SHA256SUMS" - - echo " * Verifying SHA256 digest" - EXPECTED_SHA256="$(grep "$IMG_NAME\$" < "$TEMPDIR/Ubuntu-SHA256SUMS" | cut -f1 -d ' ')" - CALCULATED_SHA256="$(sha256sum "$TEMPDIR/$IMG_NAME" | cut -f1 -d ' ')" - if [[ "$CALCULATED_SHA256" != "$EXPECTED_SHA256" ]]; then - echo " * SHA256 digest verification failed. '$CALCULATED_SHA256' != '$EXPECTED_SHA256'" - exit 1 - fi - - # This is needed because Ubuntu cloud images come in a Read-Only format - # that can only be used for linked-base VMs. - echo " * Converting to a read-write enabled image" - qemu-img convert -O vmdk "$TEMPDIR/$IMG_NAME" "$TEMPDIR/$IMG_NAME-rw" - - echo " * Finalizing" - mv "$TEMPDIR/$IMG_NAME-rw" "$SCRIPT_DIR/downloads/$RELEASE-server-cloudimg-amd64.vmdk" + local UBUNTU_CLOUD_IMAGE_SIGNING_KEY_FINGERPRINT="D2EB44626FDDC30B513D5BB71A5D6C4C7DB87C81" + local RELEASE="$1" + local IMG_NAME + if [[ $RELEASE == "xenial" ]]; then + IMG_NAME="$RELEASE-server-cloudimg-amd64-disk1.vmdk" + else + IMG_NAME="$RELEASE-server-cloudimg-amd64.vmdk" + fi + + echo " * Downloading vanilla Ubuntu image." 
+ wget "https://cloud-images.ubuntu.com/$RELEASE/current/$IMG_NAME" -P "$TEMPDIR" + + echo " * Verifying GPG signature" + wget --quiet "https://cloud-images.ubuntu.com/$RELEASE/current/SHA256SUMS" -O "$TEMPDIR/Ubuntu-SHA256SUMS" + wget --quiet "https://cloud-images.ubuntu.com/$RELEASE/current/SHA256SUMS.gpg" -O "$TEMPDIR/Ubuntu-SHA256SUMS.gpg" + gpg2 --quiet --recv-keys $UBUNTU_CLOUD_IMAGE_SIGNING_KEY_FINGERPRINT + gpg2 --quiet --verify "$TEMPDIR/Ubuntu-SHA256SUMS.gpg" "$TEMPDIR/Ubuntu-SHA256SUMS" + + echo " * Verifying SHA256 digest" + EXPECTED_SHA256="$(grep "$IMG_NAME\$" <"$TEMPDIR/Ubuntu-SHA256SUMS" | cut -f1 -d ' ')" + CALCULATED_SHA256="$(sha256sum "$TEMPDIR/$IMG_NAME" | cut -f1 -d ' ')" + if [[ $CALCULATED_SHA256 != "$EXPECTED_SHA256" ]]; then + echo " * SHA256 digest verification failed. '$CALCULATED_SHA256' != '$EXPECTED_SHA256'" + exit 1 + fi + + # This is needed because Ubuntu cloud images come in a Read-Only format + # that can only be used for linked-base VMs. + echo " * Converting to a read-write enabled image" + qemu-img convert -O vmdk "$TEMPDIR/$IMG_NAME" "$TEMPDIR/$IMG_NAME-rw" + + echo " * Finalizing" + mv "$TEMPDIR/$IMG_NAME-rw" "$SCRIPT_DIR/downloads/$RELEASE-server-cloudimg-amd64.vmdk" } mount_rootfs() { - local IMAGE="$1" - local FOLDER="$2" - case $TARGET_OS in - debian9|centos7|ubuntu-*) - echo " * /" - sudo guestmount -a "$IMAGE" -m "/dev/sda1" "$TARGETFS" - ;; - *) - echo "mount_rootfs(): unknown OS \"$TARGET_OS\"" - usage - exit 1 - esac + local IMAGE="$1" + local FOLDER="$2" + case $TARGET_OS in + debian9 | ubuntu-*) + echo " * /" + sudo guestmount -a "$IMAGE" -m "/dev/sda1" "$TARGETFS" + ;; + *) + echo "mount_rootfs(): unknown OS \"$TARGET_OS\"" + usage + exit 1 + ;; + esac } case $TARGET_OS in - centos7) - CLEAN_IMAGE="$SCRIPT_DIR/downloads/CentOS-7-x86_64-GenericCloud.qcow2" - if [[ ! -f "$CLEAN_IMAGE" ]]; then - get_centos7_image - fi - ;; - debian9) - CLEAN_IMAGE="$SCRIPT_DIR/downloads/debian-9-openstack-amd64.qcow2" - if [[ ! 
-f "$CLEAN_IMAGE" ]]; then - get_debian9_image - fi - ;; - ubuntu-xenial) - CLEAN_IMAGE="$SCRIPT_DIR/downloads/xenial-server-cloudimg-amd64.vmdk" - if [[ ! -f "$CLEAN_IMAGE" ]]; then - get_ubuntu_image xenial - fi - ;; - ubuntu-bionic) - CLEAN_IMAGE="$SCRIPT_DIR/downloads/bionic-server-cloudimg-amd64.vmdk" - if [[ ! -f "$CLEAN_IMAGE" ]]; then - get_ubuntu_image bionic - fi - ;; - *) - usage - exit 1 +debian9) + CLEAN_IMAGE="$SCRIPT_DIR/downloads/debian-9-openstack-amd64.qcow2" + if [[ ! -f $CLEAN_IMAGE ]]; then + get_debian9_image + fi + ;; +ubuntu-xenial) + CLEAN_IMAGE="$SCRIPT_DIR/downloads/xenial-server-cloudimg-amd64.vmdk" + if [[ ! -f $CLEAN_IMAGE ]]; then + get_ubuntu_image xenial + fi + ;; +ubuntu-bionic) + CLEAN_IMAGE="$SCRIPT_DIR/downloads/bionic-server-cloudimg-amd64.vmdk" + if [[ ! -f $CLEAN_IMAGE ]]; then + get_ubuntu_image bionic + fi + ;; +*) + usage + exit 1 + ;; esac echo " * Verifying/Downloading kubernetes" @@ -224,11 +197,11 @@ sudo cp "$SCRIPT_DIR/downloads/kubelet-$K8S_RELEASE" "$TARGETFS/opt/bin/kubelet" echo " * Finalizing" sudo umount --recursive "$TARGETFS" EXTENSION="${CLEAN_IMAGE##*.}" -if [[ "$EXTENSION" == "vmdk" ]]; then - cp "$TEMPDIR/work-in-progress-image" "$SCRIPT_DIR/$TARGET_OS-output.vmdk" +if [[ $EXTENSION == "vmdk" ]]; then + cp "$TEMPDIR/work-in-progress-image" "$SCRIPT_DIR/$TARGET_OS-output.vmdk" else - echo " * Converting to VMDK" - qemu-img convert -O vmdk "$TEMPDIR/work-in-progress-image" "$SCRIPT_DIR/$TARGET_OS-output.vmdk" + echo " * Converting to VMDK" + qemu-img convert -O vmdk "$TEMPDIR/work-in-progress-image" "$SCRIPT_DIR/$TARGET_OS-output.vmdk" fi echo "$(realpath "$SCRIPT_DIR/$TARGET_OS-output.vmdk") ready." 
diff --git a/image-builder/download_kubernetes.sh b/image-builder/download_kubernetes.sh index 14fc74e7a..ddcd277ad 100755 --- a/image-builder/download_kubernetes.sh +++ b/image-builder/download_kubernetes.sh @@ -26,49 +26,49 @@ mkdir -p "$SCRIPT_DIR/downloads" K8S_RELEASE="" while [ $# -gt 0 ]; do - case "$1" in - --release) - K8S_RELEASE="$2" - shift - ;; - *) - echo "Unknown parameter \"$1\"" - exit 1 - ;; - esac - shift + case "$1" in + --release) + K8S_RELEASE="$2" + shift + ;; + *) + echo "Unknown parameter \"$1\"" + exit 1 + ;; + esac + shift done -if [[ -z "$K8S_RELEASE" ]]; then - K8S_RELEASE="$(curl -sSL https://dl.k8s.io/release/stable.txt)" - echo " * Latest stable version is $K8S_RELEASE" +if [[ -z $K8S_RELEASE ]]; then + K8S_RELEASE="$(curl -sSL https://dl.k8s.io/release/stable.txt)" + echo " * Latest stable version is $K8S_RELEASE" else - echo " * Using version $K8S_RELEASE" + echo " * Using version $K8S_RELEASE" fi -wget --quiet https://storage.googleapis.com/kubernetes-release/release/$K8S_RELEASE/bin/linux/amd64/{kubeadm,kubelet,kubectl}.sha1 -P "$TEMPDIR" +wget --quiet https://dl.k8s.io/$K8S_RELEASE/bin/linux/amd64/{kubeadm,kubelet,kubectl}.sha1 -P "$TEMPDIR" for util in kubeadm kubelet kubectl; do - echo " * $util" - if [[ -x "$SCRIPT_DIR/downloads/$util-$K8S_RELEASE" ]]; then - CALCULATED_SHA1="$(sha1sum "$SCRIPT_DIR/downloads/$util-$K8S_RELEASE" | cut -f1 -d ' ')" - EXPECTED_SHA1="$(<"$TEMPDIR/$util.sha1")" - if [[ "$CALCULATED_SHA1" != "$EXPECTED_SHA1" ]]; then - echo " * SHA1 digest verification failed. $CALCULATED_SHA1 != $EXPECTED_SHA1" - echo " * The downloaded $util is either corrupted or out of date. Check your downloads and remove manually to continue." 
- exit 1 - fi - else - wget "https://storage.googleapis.com/kubernetes-release/release/$K8S_RELEASE/bin/linux/amd64/$util" -P "$TEMPDIR" + echo " * $util" + if [[ -x "$SCRIPT_DIR/downloads/$util-$K8S_RELEASE" ]]; then + CALCULATED_SHA1="$(sha1sum "$SCRIPT_DIR/downloads/$util-$K8S_RELEASE" | cut -f1 -d ' ')" + EXPECTED_SHA1="$(<"$TEMPDIR/$util.sha1")" + if [[ $CALCULATED_SHA1 != "$EXPECTED_SHA1" ]]; then + echo " * SHA1 digest verification failed. $CALCULATED_SHA1 != $EXPECTED_SHA1" + echo " * The downloaded $util is either corrupted or out of date. Check your downloads and remove manually to continue." + exit 1 + fi + else + wget "https://dl.k8s.io/$K8S_RELEASE/bin/linux/amd64/$util" -P "$TEMPDIR" - CALCULATED_SHA1="$(sha1sum "$TEMPDIR/$util" | cut -f1 -d ' ')" - EXPECTED_SHA1="$(<"$TEMPDIR/$util.sha1")" - if [[ "$CALCULATED_SHA1" != "$EXPECTED_SHA1" ]]; then - echo " * SHA1 digest verification failed. $CALCULATED_SHA1 != $EXPECTED_SHA1. Download failed." - exit 1 - fi + CALCULATED_SHA1="$(sha1sum "$TEMPDIR/$util" | cut -f1 -d ' ')" + EXPECTED_SHA1="$(<"$TEMPDIR/$util.sha1")" + if [[ $CALCULATED_SHA1 != "$EXPECTED_SHA1" ]]; then + echo " * SHA1 digest verification failed. $CALCULATED_SHA1 != $EXPECTED_SHA1. Download failed." 
+ exit 1 + fi - mv "$TEMPDIR/$util" "$SCRIPT_DIR/downloads/$util-$K8S_RELEASE" - chmod +x "$SCRIPT_DIR/downloads/$util-$K8S_RELEASE" - fi + mv "$TEMPDIR/$util" "$SCRIPT_DIR/downloads/$util-$K8S_RELEASE" + chmod +x "$SCRIPT_DIR/downloads/$util-$K8S_RELEASE" + fi done diff --git a/pkg/admission/admission.go b/pkg/admission/admission.go index cf5ae12ea..a8ed057ab 100644 --- a/pkg/admission/admission.go +++ b/pkg/admission/admission.go @@ -22,75 +22,102 @@ import ( "errors" "fmt" "io" + "net" "net/http" "reflect" - "time" + "strconv" "github.com/Masterminds/semver/v3" + "go.uber.org/zap" "gomodules.xyz/jsonpatch/v2" - machinecontroller "github.com/kubermatic/machine-controller/pkg/controller/machine" - "github.com/kubermatic/machine-controller/pkg/node" - userdatamanager "github.com/kubermatic/machine-controller/pkg/userdata/manager" + machinecontroller "k8c.io/machine-controller/pkg/controller/machine" + "k8c.io/machine-controller/pkg/node" admissionv1 "k8s.io/api/admission/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/klog" ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/healthz" + "sigs.k8s.io/controller-runtime/pkg/webhook" ) type admissionData struct { - client ctrlruntimeclient.Client - workerClient ctrlruntimeclient.Client - userDataManager *userdatamanager.Manager - nodeSettings machinecontroller.NodeSettings - useExternalBootstrap bool - namespace string - constraints *semver.Constraints + log *zap.SugaredLogger + client ctrlruntimeclient.Client + workerClient ctrlruntimeclient.Client + nodeSettings machinecontroller.NodeSettings + namespace string + constraints *semver.Constraints } var jsonPatch = admissionv1.PatchTypeJSONPatch type Builder struct { - ListenAddress string - Client ctrlruntimeclient.Client - WorkerClient ctrlruntimeclient.Client - UserdataManager *userdatamanager.Manager - UseExternalBootstrap 
bool - NodeFlags *node.Flags - Namespace string - VersionConstraints *semver.Constraints + ListenAddress string + Log *zap.SugaredLogger + Client ctrlruntimeclient.Client + WorkerClient ctrlruntimeclient.Client + NodeFlags *node.Flags + Namespace string + VersionConstraints *semver.Constraints + + CertDir string + CertName string + KeyName string } -func (build Builder) Build() (*http.Server, error) { - mux := http.NewServeMux() +func (build Builder) Build() (webhook.Server, error) { ad := &admissionData{ - client: build.Client, - workerClient: build.WorkerClient, - userDataManager: build.UserdataManager, - useExternalBootstrap: build.UseExternalBootstrap, - namespace: build.Namespace, - constraints: build.VersionConstraints, + log: build.Log, + client: build.Client, + workerClient: build.WorkerClient, + namespace: build.Namespace, + constraints: build.VersionConstraints, } if err := build.NodeFlags.UpdateNodeSettings(&ad.nodeSettings); err != nil { return nil, fmt.Errorf("error updating nodeSettings, %w", err) } - mux.HandleFunc("/machinedeployments", handleFuncFactory(ad.mutateMachineDeployments)) - mux.HandleFunc("/machines", handleFuncFactory(ad.mutateMachines)) - mux.HandleFunc("/healthz", healthZHandler) + options := webhook.Options{ + CertDir: build.CertDir, + CertName: build.CertName, + KeyName: build.KeyName, + } - return &http.Server{ - Addr: build.ListenAddress, - Handler: http.TimeoutHandler(mux, 25*time.Second, "timeout"), - }, nil -} + if build.ListenAddress != "" { + host, port, err := net.SplitHostPort(build.ListenAddress) + if err != nil { + return nil, fmt.Errorf("error parsing ListenAddress: %w", err) + } + + options.Host = host + + if port != "" { + port, err := strconv.ParseInt(port, 10, 16) + if err != nil { + return nil, fmt.Errorf("error parsing port from ListenAddress: %w", err) + } + + options.Port = int(port) + } + } + + server := webhook.NewServer(options) + + server.Register("/machinedeployments", handleFuncFactory(build.Log, 
ad.mutateMachineDeployments)) + server.Register("/machines", handleFuncFactory(build.Log, ad.mutateMachines)) + + checkers := healthz.Handler{ + Checks: map[string]healthz.Checker{ + "ping": healthz.Ping, + }, + } + server.Register("/healthz/", http.StripPrefix("/healthz/", &checkers)) -func healthZHandler(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) + return server, nil } func newJSONPatch(original, current runtime.Object) ([]jsonpatch.JsonPatchOperation, error) { @@ -103,16 +130,14 @@ func newJSONPatch(original, current runtime.Object) ([]jsonpatch.JsonPatchOperat if err != nil { return nil, err } - klog.V(6).Infof("jsonpatch: Marshaled original: %s", string(ori)) cur, err := json.Marshal(current) if err != nil { return nil, err } - klog.V(6).Infof("jsonpatch: Marshaled target: %s", string(cur)) return jsonpatch.CreatePatch(ori, cur) } -func createAdmissionResponse(original, mutated runtime.Object) (*admissionv1.AdmissionResponse, error) { +func createAdmissionResponse(log *zap.SugaredLogger, original, mutated runtime.Object) (*admissionv1.AdmissionResponse, error) { response := &admissionv1.AdmissionResponse{} response.Allowed = true if !apiequality.Semantic.DeepEqual(original, mutated) { @@ -125,7 +150,7 @@ func createAdmissionResponse(original, mutated runtime.Object) (*admissionv1.Adm if err != nil { return nil, fmt.Errorf("failed to marshal json patch: %w", err) } - klog.V(3).Infof("Produced jsonpatch: %s", string(patchRaw)) + log.Debugw("Produced jsonpatch", "patch", string(patchRaw)) response.Patch = patchRaw response.PatchType = &jsonPatch @@ -135,17 +160,17 @@ func createAdmissionResponse(original, mutated runtime.Object) (*admissionv1.Adm type mutator func(context.Context, admissionv1.AdmissionRequest) (*admissionv1.AdmissionResponse, error) -func handleFuncFactory(mutate mutator) func(http.ResponseWriter, *http.Request) { +func handleFuncFactory(log *zap.SugaredLogger, mutate mutator) http.HandlerFunc { return func(w 
http.ResponseWriter, r *http.Request) { review, err := readReview(r) if err != nil { - klog.Warningf("invalid admission review: %v", err) + log.Errorw("Invalid admission review", zap.Error(err)) // proper AdmissionReview responses require metadata that is not available // in broken requests, so we return a basic failure response w.WriteHeader(http.StatusBadRequest) - if _, err := w.Write([]byte(fmt.Sprintf("invalid request: %v", err))); err != nil { - klog.Errorf("failed to write badRequest: %v", err) + if _, err := fmt.Fprintf(w, "invalid request: %v", err); err != nil { + log.Errorw("Failed to write badRequest", zap.Error(err)) } return } @@ -166,12 +191,12 @@ func handleFuncFactory(mutate mutator) func(http.ResponseWriter, *http.Request) Response: response, }) if err != nil { - klog.Errorf("failed to marshal admissionResponse: %v", err) + log.Errorw("Failed to marshal admissionResponse", zap.Error(err)) return } if _, err := w.Write(resp); err != nil { - klog.Errorf("failed to write admissionResponse: %v", err) + log.Errorw("Failed to write admissionResponse", zap.Error(err)) } } } diff --git a/pkg/admission/machinedeployments.go b/pkg/admission/machinedeployments.go index a20fa5f53..ec3d9f5cb 100644 --- a/pkg/admission/machinedeployments.go +++ b/pkg/admission/machinedeployments.go @@ -21,10 +21,11 @@ import ( "encoding/json" "fmt" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" admissionv1 "k8s.io/api/admission/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) func (ad *admissionData) mutateMachineDeployments(ctx context.Context, ar admissionv1.AdmissionRequest) (*admissionv1.AdmissionResponse, error) { @@ -34,6 +35,9 @@ func (ad *admissionData) mutateMachineDeployments(ctx context.Context, ar admiss } machineDeploymentOriginal := machineDeployment.DeepCopy() + log := 
ad.log.With("machinedeployment", ctrlruntimeclient.ObjectKeyFromObject(&machineDeployment)) + log.Debug("Defaulting and validating machine deployment") + machineDeploymentDefaultingFunction(&machineDeployment) if err := mutationsForMachineDeployment(&machineDeployment); err != nil { @@ -62,5 +66,5 @@ func (ad *admissionData) mutateMachineDeployments(ctx context.Context, ar admiss } } - return createAdmissionResponse(machineDeploymentOriginal, &machineDeployment) + return createAdmissionResponse(log, machineDeploymentOriginal, &machineDeployment) } diff --git a/pkg/admission/machinedeployments_test.go b/pkg/admission/machinedeployments_test.go index 865be9151..763473f10 100644 --- a/pkg/admission/machinedeployments_test.go +++ b/pkg/admission/machinedeployments_test.go @@ -19,7 +19,7 @@ package admission import ( "testing" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) diff --git a/pkg/admission/machinedeployments_validation.go b/pkg/admission/machinedeployments_validation.go index dd4a9c1d6..2e58d7da5 100644 --- a/pkg/admission/machinedeployments_validation.go +++ b/pkg/admission/machinedeployments_validation.go @@ -20,9 +20,9 @@ import ( "encoding/json" "fmt" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + "k8c.io/machine-controller/sdk/apis/cluster/common" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + providerconfigtypes "k8c.io/machine-controller/sdk/providerconfig" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" @@ -32,13 +32,13 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" ) -func validateMachineDeployment(md 
v1alpha1.MachineDeployment) field.ErrorList { +func validateMachineDeployment(md clusterv1alpha1.MachineDeployment) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, validateMachineDeploymentSpec(&md.Spec, field.NewPath("spec"))...) return allErrs } -func validateMachineDeploymentSpec(spec *v1alpha1.MachineDeploymentSpec, fldPath *field.Path) field.ErrorList { +func validateMachineDeploymentSpec(spec *clusterv1alpha1.MachineDeploymentSpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, metav1validation.ValidateLabelSelector(&spec.Selector, metav1validation.LabelSelectorValidationOptions{}, fldPath.Child("selector"))...) if len(spec.Selector.MatchLabels)+len(spec.Selector.MatchExpressions) == 0 { @@ -60,7 +60,7 @@ func validateMachineDeploymentSpec(spec *v1alpha1.MachineDeploymentSpec, fldPath return allErrs } -func validateMachineDeploymentStrategy(strategy *v1alpha1.MachineDeploymentStrategy, fldPath *field.Path) field.ErrorList { +func validateMachineDeploymentStrategy(strategy *clusterv1alpha1.MachineDeploymentStrategy, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} switch strategy.Type { case common.RollingUpdateMachineDeploymentStrategyType: @@ -73,7 +73,7 @@ func validateMachineDeploymentStrategy(strategy *v1alpha1.MachineDeploymentStrat return allErrs } -func validateMachineRollingUpdateDeployment(rollingUpdate *v1alpha1.MachineRollingUpdateDeployment, fldPath *field.Path) field.ErrorList { +func validateMachineRollingUpdateDeployment(rollingUpdate *clusterv1alpha1.MachineRollingUpdateDeployment, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} var maxUnavailable int var maxSurge int @@ -110,11 +110,11 @@ func getIntOrPercent(s *intstr.IntOrString, roundUp bool) (int, error) { return intstr.GetValueFromIntOrPercent(s, 100, roundUp) } -func machineDeploymentDefaultingFunction(md *v1alpha1.MachineDeployment) { - 
v1alpha1.PopulateDefaultsMachineDeployment(md) +func machineDeploymentDefaultingFunction(md *clusterv1alpha1.MachineDeployment) { + clusterv1alpha1.PopulateDefaultsMachineDeployment(md) } -func mutationsForMachineDeployment(md *v1alpha1.MachineDeployment) error { +func mutationsForMachineDeployment(md *clusterv1alpha1.MachineDeployment) error { providerConfig, err := providerconfigtypes.GetConfig(md.Spec.Template.Spec.ProviderSpec) if err != nil { return fmt.Errorf("failed to read MachineDeployment.Spec.Template.Spec.ProviderSpec: %w", err) @@ -128,6 +128,14 @@ func mutationsForMachineDeployment(md *v1alpha1.MachineDeployment) error { } } + // Migrate + if providerConfig.CloudProvider == providerconfigtypes.CloudProviderVMwareCloudDirector { + err := migrateVMwareCloudDirector(providerConfig) + if err != nil { + return fmt.Errorf("failed to migrate VMware Cloud Director Network Settings: %w", err) + } + } + // Update value in original object md.Spec.Template.Spec.ProviderSpec.Value.Raw, err = json.Marshal(providerConfig) if err != nil { diff --git a/pkg/admission/machines.go b/pkg/admission/machines.go index 4d7df978f..50f81dd4c 100644 --- a/pkg/admission/machines.go +++ b/pkg/admission/machines.go @@ -19,21 +19,22 @@ package admission import ( "context" "encoding/json" + "errors" "fmt" "github.com/Masterminds/semver/v3" "golang.org/x/crypto/ssh" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/cloudprovider" - controllerutil "github.com/kubermatic/machine-controller/pkg/controller/util" - "github.com/kubermatic/machine-controller/pkg/providerconfig" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + "k8c.io/machine-controller/pkg/cloudprovider" + "k8c.io/machine-controller/sdk/apis/cluster/common" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + 
"k8c.io/machine-controller/sdk/providerconfig" + "k8c.io/machine-controller/sdk/providerconfig/configvar" + "k8c.io/machine-controller/sdk/userdata" admissionv1 "k8s.io/api/admission/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/klog" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) // BypassSpecNoModificationRequirementAnnotation is used to bypass the "no machine.spec modification" allowed @@ -48,7 +49,8 @@ func (ad *admissionData) mutateMachines(ctx context.Context, ar admissionv1.Admi } machineOriginal := machine.DeepCopy() - klog.V(3).Infof("Defaulting and validating machine %s/%s", machine.Namespace, machine.Name) + log := ad.log.With("machine", ctrlruntimeclient.ObjectKeyFromObject(&machine)) + log.Debug("Defaulting and validating machine") // Mutating .Spec is never allowed // Only hidden exception: the machine-controller may set the .Spec.Name to .Metadata.Name @@ -63,6 +65,16 @@ func (ad *admissionData) mutateMachines(ctx context.Context, ar admissionv1.Admi if oldMachine.Spec.Name != machine.Spec.Name && machine.Spec.Name == machine.Name { oldMachine.Spec.Name = machine.Spec.Name } + + if oldMachine.Spec.ProviderID != nil && machine.Spec.ProviderID != nil && *oldMachine.Spec.ProviderID != *machine.Spec.ProviderID { + return nil, fmt.Errorf("providerID is immutable") + } + + // Allow mutation of the ProviderID field, as it can only be computed after the machine is created. 
+ if oldMachine.Spec.ProviderID == nil && machine.Spec.ProviderID != nil { + oldMachine.Spec.ProviderID = machine.Spec.ProviderID + } + // Allow mutation when: // * machine has the `MigrationBypassSpecNoModificationRequirementAnnotation` annotation (used for type migration) bypassValidationForMigration := machine.Annotations[BypassSpecNoModificationRequirementAnnotation] == "true" @@ -91,7 +103,7 @@ func (ad *admissionData) mutateMachines(ctx context.Context, ar admissionv1.Admi common.SetKubeletFlags(&machine, map[common.KubeletFlags]string{ common.ExternalCloudProviderKubeletFlag: fmt.Sprintf("%t", ad.nodeSettings.ExternalCloudProvider), }) - providerConfig, err := providerconfigtypes.GetConfig(machine.Spec.ProviderSpec) + providerConfig, err := providerconfig.GetConfig(machine.Spec.ProviderSpec) if err != nil { return nil, err } @@ -102,18 +114,11 @@ func (ad *admissionData) mutateMachines(ctx context.Context, ar admissionv1.Admi machine.Labels = make(map[string]string) } - // Set LegacyMachineControllerUserDataLabel to false if external bootstrapping is expected for managing the machine configuration. 
- if ad.useExternalBootstrap { - machine.Labels[controllerutil.LegacyMachineControllerUserDataLabel] = "false" - } else { - machine.Labels[controllerutil.LegacyMachineControllerUserDataLabel] = "true" - } - - return createAdmissionResponse(machineOriginal, &machine) + return createAdmissionResponse(log, machineOriginal, &machine) } func (ad *admissionData) defaultAndValidateMachineSpec(ctx context.Context, spec *clusterv1alpha1.MachineSpec) error { - providerConfig, err := providerconfigtypes.GetConfig(spec.ProviderSpec) + providerConfig, err := providerconfig.GetConfig(spec.ProviderSpec) if err != nil { return fmt.Errorf("failed to read machine.spec.providerSpec: %w", err) } @@ -126,20 +131,27 @@ func (ad *admissionData) defaultAndValidateMachineSpec(ctx context.Context, spec } } - skg := providerconfig.NewConfigVarResolver(ctx, ad.workerClient) - prov, err := cloudprovider.ForProvider(providerConfig.CloudProvider, skg) + // For KubeVirt we need to initialize the annotations for MachineDeployment, to enable setting of the needed annotations. + if providerConfig.CloudProvider == providerconfig.CloudProviderKubeVirt { + if spec.Annotations == nil { + spec.Annotations = make(map[string]string) + } + } + + configResolver := configvar.NewResolver(ctx, ad.workerClient) + prov, err := cloudprovider.ForProvider(providerConfig.CloudProvider, configResolver) if err != nil { return fmt.Errorf("failed to get cloud provider %q: %w", providerConfig.CloudProvider, err) } // Verify operating system. 
- if _, err := ad.userDataManager.ForOS(providerConfig.OperatingSystem); err != nil { + if err := providerConfig.OperatingSystem.Validate(); err != nil { return fmt.Errorf("failed to get OS '%s': %w", providerConfig.OperatingSystem, err) } // Check kubelet version if spec.Versions.Kubelet == "" { - return fmt.Errorf("Kubelet version must be set") + return errors.New("kubelet version must be set") } kubeletVer, err := semver.NewVersion(spec.Versions.Kubelet) @@ -158,14 +170,12 @@ func (ad *admissionData) defaultAndValidateMachineSpec(ctx context.Context, spec // Validate SSH keys if err := validatePublicKeys(providerConfig.SSHPublicKeys); err != nil { - return fmt.Errorf("Invalid public keys specified: %w", err) + return fmt.Errorf("invalid public keys specified: %w", err) } - defaultedOperatingSystemSpec, err := providerconfig.DefaultOperatingSystemSpec( + defaultedOperatingSystemSpec, err := userdata.DefaultOperatingSystemSpec( providerConfig.OperatingSystem, - providerConfig.CloudProvider, providerConfig.OperatingSystemSpec, - ad.useExternalBootstrap, ) if err != nil { return err @@ -177,13 +187,13 @@ func (ad *admissionData) defaultAndValidateMachineSpec(ctx context.Context, spec return fmt.Errorf("failed to json marshal machine.spec.providerSpec: %w", err) } - defaultedSpec, err := prov.AddDefaults(*spec) + defaultedSpec, err := prov.AddDefaults(ad.log, *spec) if err != nil { return fmt.Errorf("failed to default machineSpec: %w", err) } spec = &defaultedSpec - if err := prov.Validate(ctx, *spec); err != nil { + if err := prov.Validate(ctx, ad.log, *spec); err != nil { return fmt.Errorf("validation failed: %w", err) } diff --git a/pkg/admission/machines_test.go b/pkg/admission/machines_test.go index 3d92870d7..e6cd322e7 100644 --- a/pkg/admission/machines_test.go +++ b/pkg/admission/machines_test.go @@ -60,7 +60,7 @@ func TestValidatePublicKeys(t *testing.T) { { name: "invalid key", keys: []string{"some invalid key"}, - err: errors.New(`invalid public key "some 
invalid key": ssh: no key found`), + err: errors.New(`invalid public key "some invalid key": ssh: no key found; last parsing error for ignored line: illegal base64 data at input byte 0`), }, { name: "one of many is invalid", @@ -68,7 +68,7 @@ func TestValidatePublicKeys(t *testing.T) { validRSA1024Key, "some invalid key", }, - err: errors.New(`invalid public key "some invalid key": ssh: no key found`), + err: errors.New(`invalid public key "some invalid key": ssh: no key found; last parsing error for ignored line: illegal base64 data at input byte 0`), }, } diff --git a/pkg/admission/util.go b/pkg/admission/util.go index 8e95017a4..1e49dbf26 100644 --- a/pkg/admission/util.go +++ b/pkg/admission/util.go @@ -20,7 +20,8 @@ import ( "encoding/json" "fmt" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + vcdtypes "k8c.io/machine-controller/sdk/cloudprovider/vmwareclouddirector" + providerconfigtypes "k8c.io/machine-controller/sdk/providerconfig" ) const cloudProviderPacket = "packet" @@ -49,3 +50,41 @@ func migrateToEquinixMetal(providerConfig *providerconfigtypes.Config) (err erro } return nil } + +func migrateVMwareCloudDirector(providerConfig *providerconfigtypes.Config) (err error) { + config, err := vcdtypes.GetConfig(*providerConfig) + if err != nil { + return fmt.Errorf("failed to get vcd config: %w", err) + } + + if config.Network.Value != "" { + config.Networks = append([]providerconfigtypes.ConfigVarString{config.Network}, config.Networks...) 
+ config.Network.Value = "" + p := &providerconfigtypes.ConfigVarString{Value: ""} + config.Network = *p + } + + config.Networks = Deduplicate(config.Networks) + + cloudProviderSpecRaw, err := json.Marshal(config) + if err != nil { + return fmt.Errorf("failed to marshal cloudProviderConfig: %w", err) + } + + providerConfig.CloudProviderSpec.Raw = cloudProviderSpecRaw + return nil +} + +func Deduplicate[T comparable](slice []T) []T { + seen := make(map[T]struct{}) + result := []T{} + + for _, val := range slice { + if _, exists := seen[val]; !exists { + seen[val] = struct{}{} + result = append(result, val) + } + } + + return result +} diff --git a/pkg/apis/plugin/plugin.go b/pkg/apis/plugin/plugin.go deleted file mode 100644 index 2ed8ea5fb..000000000 --- a/pkg/apis/plugin/plugin.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// -// Environment and serialisation types for UserData plugins. -// - -package plugin - -import ( - "net" - - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/containerruntime" - - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" -) - -const ( - // EnvUserDataRequest names the environment variable containing - // the user data request. 
- EnvUserDataRequest = "MACHINE_CONTROLLER_USER_DATA_REQUEST" - - // EnvPluginDir names the environment variable containing - // a user defined location of the plugins. - EnvPluginDir = "MACHINE_CONTROLLER_USERDATA_PLUGIN_DIR" -) - -// UserDataRequest requests user data with the given arguments. -type UserDataRequest struct { - MachineSpec clusterv1alpha1.MachineSpec - Kubeconfig *clientcmdapi.Config - CloudProviderName string - CloudConfig string - DNSIPs []net.IP - ExternalCloudProvider bool - HTTPProxy string - NoProxy string - PauseImage string - KubeletCloudProviderName string - KubeletFeatureGates map[string]bool - KubeletConfigs map[string]string - ContainerRuntime containerruntime.Config - NodePortRange string -} - -// UserDataResponse contains the responded user data. -type UserDataResponse struct { - UserData string - Err string -} - -// ErrorResponse contains a single responded error. -type ErrorResponse struct { - Err string -} diff --git a/pkg/cloudprovider/cache/cloudprovidercache.go b/pkg/cloudprovider/cache/cloudprovidercache.go index 7b2c576fe..f9d6c46fc 100644 --- a/pkg/cloudprovider/cache/cloudprovidercache.go +++ b/pkg/cloudprovider/cache/cloudprovidercache.go @@ -24,7 +24,7 @@ import ( gocache "github.com/patrickmn/go-cache" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" ) type CloudproviderCache struct { diff --git a/pkg/cloudprovider/cache/cloudprovidercache_test.go b/pkg/cloudprovider/cache/cloudprovidercache_test.go index 1f948642c..b7013109c 100644 --- a/pkg/cloudprovider/cache/cloudprovidercache_test.go +++ b/pkg/cloudprovider/cache/cloudprovidercache_test.go @@ -20,7 +20,7 @@ import ( "errors" "testing" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" corev1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" 
diff --git a/pkg/cloudprovider/errors/errors.go b/pkg/cloudprovider/errors/errors.go index d0df77409..7182c5033 100644 --- a/pkg/cloudprovider/errors/errors.go +++ b/pkg/cloudprovider/errors/errors.go @@ -20,7 +20,7 @@ import ( "errors" "fmt" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" + "k8c.io/machine-controller/sdk/apis/cluster/common" ) var ( diff --git a/pkg/cloudprovider/instance/instance.go b/pkg/cloudprovider/instance/instance.go index f97c327a9..6bee0865d 100644 --- a/pkg/cloudprovider/instance/instance.go +++ b/pkg/cloudprovider/instance/instance.go @@ -16,7 +16,7 @@ limitations under the License. package instance -import v1 "k8s.io/api/core/v1" +import corev1 "k8s.io/api/core/v1" // Instance represents a instance on the cloud provider. type Instance interface { @@ -27,7 +27,7 @@ type Instance interface { // ProviderID returns the expected providerID for the instance ProviderID() string // Addresses returns a list of addresses associated with the instance. - Addresses() map[string]v1.NodeAddressType + Addresses() map[string]corev1.NodeAddressType // Status returns the instance status. 
Status() Status } diff --git a/pkg/cloudprovider/provider.go b/pkg/cloudprovider/provider.go index 55546bf90..c334b3a84 100644 --- a/pkg/cloudprovider/provider.go +++ b/pkg/cloudprovider/provider.go @@ -19,28 +19,30 @@ package cloudprovider import ( "errors" - cloudprovidercache "github.com/kubermatic/machine-controller/pkg/cloudprovider/cache" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/alibaba" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/anexia" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/aws" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/azure" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/baremetal" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/digitalocean" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/equinixmetal" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/fake" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/gce" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/hetzner" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/kubevirt" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/linode" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/nutanix" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/openstack" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/scaleway" - vcd "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/vmwareclouddirector" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/vsphere" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/vultr" - cloudprovidertypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/types" - "github.com/kubermatic/machine-controller/pkg/providerconfig" - providerconfigtypes 
"github.com/kubermatic/machine-controller/pkg/providerconfig/types" + cloudprovidercache "k8c.io/machine-controller/pkg/cloudprovider/cache" + "k8c.io/machine-controller/pkg/cloudprovider/provider/alibaba" + "k8c.io/machine-controller/pkg/cloudprovider/provider/anexia" + "k8c.io/machine-controller/pkg/cloudprovider/provider/aws" + "k8c.io/machine-controller/pkg/cloudprovider/provider/azure" + "k8c.io/machine-controller/pkg/cloudprovider/provider/baremetal" + "k8c.io/machine-controller/pkg/cloudprovider/provider/digitalocean" + "k8c.io/machine-controller/pkg/cloudprovider/provider/edge" + "k8c.io/machine-controller/pkg/cloudprovider/provider/equinixmetal" + "k8c.io/machine-controller/pkg/cloudprovider/provider/external" + "k8c.io/machine-controller/pkg/cloudprovider/provider/fake" + "k8c.io/machine-controller/pkg/cloudprovider/provider/gce" + "k8c.io/machine-controller/pkg/cloudprovider/provider/hetzner" + "k8c.io/machine-controller/pkg/cloudprovider/provider/kubevirt" + "k8c.io/machine-controller/pkg/cloudprovider/provider/linode" + "k8c.io/machine-controller/pkg/cloudprovider/provider/nutanix" + "k8c.io/machine-controller/pkg/cloudprovider/provider/opennebula" + "k8c.io/machine-controller/pkg/cloudprovider/provider/openstack" + "k8c.io/machine-controller/pkg/cloudprovider/provider/scaleway" + vcd "k8c.io/machine-controller/pkg/cloudprovider/provider/vmwareclouddirector" + "k8c.io/machine-controller/pkg/cloudprovider/provider/vsphere" + "k8c.io/machine-controller/pkg/cloudprovider/provider/vultr" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + "k8c.io/machine-controller/sdk/providerconfig" ) var ( @@ -49,76 +51,93 @@ var ( // ErrProviderNotFound tells that the requested cloud provider was not found. 
ErrProviderNotFound = errors.New("cloudprovider not found") - providers = map[providerconfigtypes.CloudProvider]func(cvr *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider{ - providerconfigtypes.CloudProviderDigitalocean: func(cvr *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { + providers = map[providerconfig.CloudProvider]func(cvr providerconfig.ConfigVarResolver) cloudprovidertypes.Provider{ + providerconfig.CloudProviderDigitalocean: func(cvr providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return digitalocean.New(cvr) }, - providerconfigtypes.CloudProviderAWS: func(cvr *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { + providerconfig.CloudProviderAWS: func(cvr providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return aws.New(cvr) }, - providerconfigtypes.CloudProviderOpenstack: func(cvr *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { + providerconfig.CloudProviderOpenstack: func(cvr providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return openstack.New(cvr) }, - providerconfigtypes.CloudProviderGoogle: func(cvr *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { + providerconfig.CloudProviderGoogle: func(cvr providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return gce.New(cvr) }, - providerconfigtypes.CloudProviderHetzner: func(cvr *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { + providerconfig.CloudProviderHetzner: func(cvr providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return hetzner.New(cvr) }, - providerconfigtypes.CloudProviderLinode: func(cvr *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { - return linode.New(cvr) - }, - providerconfigtypes.CloudProviderVsphere: func(cvr *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { + providerconfig.CloudProviderVsphere: func(cvr providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return vsphere.New(cvr) }, - 
providerconfigtypes.CloudProviderAzure: func(cvr *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { + providerconfig.CloudProviderAzure: func(cvr providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return azure.New(cvr) }, - providerconfigtypes.CloudProviderEquinixMetal: func(cvr *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { + providerconfig.CloudProviderEquinixMetal: func(cvr providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return equinixmetal.New(cvr) }, - providerconfigtypes.CloudProviderVultr: func(cvr *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { - return vultr.New(cvr) - }, // NB: This is explicitly left to allow old Packet machines to be deleted. // We can handle those machines in the same way as Equinix Metal machines // because there are no API changes. // TODO: Remove this after deprecation period. - providerconfigtypes.CloudProviderPacket: func(cvr *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { + providerconfig.CloudProviderPacket: func(cvr providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return equinixmetal.New(cvr) }, - providerconfigtypes.CloudProviderFake: func(cvr *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { + providerconfig.CloudProviderFake: func(cvr providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return fake.New(cvr) }, - providerconfigtypes.CloudProviderKubeVirt: func(cvr *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { + providerconfig.CloudProviderEdge: func(cvr providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { + return edge.New(cvr) + }, + providerconfig.CloudProviderKubeVirt: func(cvr providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return kubevirt.New(cvr) }, - providerconfigtypes.CloudProviderAlibaba: func(cvr *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { + providerconfig.CloudProviderAlibaba: func(cvr 
providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return alibaba.New(cvr) }, - providerconfigtypes.CloudProviderScaleway: func(cvr *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { + providerconfig.CloudProviderScaleway: func(cvr providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return scaleway.New(cvr) }, - providerconfigtypes.CloudProviderAnexia: func(cvr *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { + providerconfig.CloudProviderAnexia: func(cvr providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return anexia.New(cvr) }, - providerconfigtypes.CloudProviderBaremetal: func(cvr *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { + providerconfig.CloudProviderBaremetal: func(cvr providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { // TODO(MQ): add a baremetal driver. return baremetal.New(cvr) }, - providerconfigtypes.CloudProviderNutanix: func(cvr *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { + providerconfig.CloudProviderNutanix: func(cvr providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return nutanix.New(cvr) }, - providerconfigtypes.CloudProviderVMwareCloudDirector: func(cvr *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { + providerconfig.CloudProviderVMwareCloudDirector: func(cvr providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return vcd.New(cvr) }, + providerconfig.CloudProviderExternal: func(cvr providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { + return external.New(cvr) + }, + } + + // communityProviders holds a map of cloud providers that have been implemented by community members and + // contributed to machine-controller. They are not end-to-end tested by the machine-controller development team. 
+ communityProviders = map[providerconfig.CloudProvider]func(cvr providerconfig.ConfigVarResolver) cloudprovidertypes.Provider{ + providerconfig.CloudProviderLinode: func(cvr providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { + return linode.New(cvr) + }, + providerconfig.CloudProviderVultr: func(cvr providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { + return vultr.New(cvr) + }, + providerconfig.CloudProviderOpenNebula: func(cvr providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { + return opennebula.New(cvr) + }, } ) // ForProvider returns a CloudProvider actuator for the requested provider. -func ForProvider(p providerconfigtypes.CloudProvider, cvr *providerconfig.ConfigVarResolver) (cloudprovidertypes.Provider, error) { +func ForProvider(p providerconfig.CloudProvider, cvr providerconfig.ConfigVarResolver) (cloudprovidertypes.Provider, error) { if p, found := providers[p]; found { return NewValidationCacheWrappingCloudProvider(p(cvr)), nil } + if p, found := communityProviders[p]; found { + return NewValidationCacheWrappingCloudProvider(p(cvr)), nil + } return nil, ErrProviderNotFound } diff --git a/pkg/cloudprovider/provider/alibaba/provider.go b/pkg/cloudprovider/provider/alibaba/provider.go index f70d42c45..0b473b14f 100644 --- a/pkg/cloudprovider/provider/alibaba/provider.go +++ b/pkg/cloudprovider/provider/alibaba/provider.go @@ -25,25 +25,24 @@ import ( "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" "github.com/aliyun/alibaba-cloud-sdk-go/services/ecs" - - "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - cloudprovidererrors "github.com/kubermatic/machine-controller/pkg/cloudprovider/errors" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/instance" - alibabatypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/alibaba/types" - cloudprovidertypes 
"github.com/kubermatic/machine-controller/pkg/cloudprovider/types" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/util" - kuberneteshelper "github.com/kubermatic/machine-controller/pkg/kubernetes" - "github.com/kubermatic/machine-controller/pkg/providerconfig" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" - - v1 "k8s.io/api/core/v1" + "go.uber.org/zap" + + cloudprovidererrors "k8c.io/machine-controller/pkg/cloudprovider/errors" + "k8c.io/machine-controller/pkg/cloudprovider/instance" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + "k8c.io/machine-controller/pkg/cloudprovider/util" + kuberneteshelper "k8c.io/machine-controller/pkg/kubernetes" + "k8c.io/machine-controller/sdk/apis/cluster/common" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + alibabatypes "k8c.io/machine-controller/sdk/cloudprovider/alibaba" + "k8c.io/machine-controller/sdk/providerconfig" + + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" ) const ( machineUIDTag = "machine_uid" - centosImageName = "CentOS 7.9 64 bit" ubuntuImageName = "Ubuntu 22.04 64 bit" finalizerInstance = "kubermatic.io/cleanup-alibaba-instance" @@ -57,7 +56,7 @@ const ( ) type provider struct { - configVarResolver *providerconfig.ConfigVarResolver + configVarResolver providerconfig.ConfigVarResolver } type Config struct { @@ -91,10 +90,10 @@ func (a *alibabaInstance) ProviderID() string { return "" } -func (a *alibabaInstance) Addresses() map[string]v1.NodeAddressType { - primaryIPAddresses := map[string]v1.NodeAddressType{} +func (a *alibabaInstance) Addresses() map[string]corev1.NodeAddressType { + primaryIPAddresses := map[string]corev1.NodeAddressType{} for _, networkInterface := range a.instance.NetworkInterfaces.NetworkInterface { - primaryIPAddresses[networkInterface.PrimaryIpAddress] = v1.NodeInternalIP + primaryIPAddresses[networkInterface.PrimaryIpAddress] = corev1.NodeInternalIP } return 
primaryIPAddresses @@ -105,15 +104,15 @@ func (a *alibabaInstance) Status() instance.Status { } // New returns an Alibaba cloud provider. -func New(configVarResolver *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { +func New(configVarResolver providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return &provider{configVarResolver: configVarResolver} } -func (p *provider) AddDefaults(spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { +func (p *provider) AddDefaults(_ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { return spec, nil } -func (p *provider) Validate(_ context.Context, machineSpec clusterv1alpha1.MachineSpec) error { +func (p *provider) Validate(_ context.Context, _ *zap.SugaredLogger, machineSpec clusterv1alpha1.MachineSpec) error { c, pc, err := p.getConfig(machineSpec.ProviderSpec) if err != nil { return fmt.Errorf("failed to parse config: %w", err) @@ -154,7 +153,7 @@ func (p *provider) Validate(_ context.Context, machineSpec clusterv1alpha1.Machi return nil } -func (p *provider) Get(_ context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (instance.Instance, error) { +func (p *provider) Get(_ context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { c, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return nil, cloudprovidererrors.TerminalError{ @@ -199,11 +198,7 @@ func (p *provider) Get(_ context.Context, machine *clusterv1alpha1.Machine, data return nil, fmt.Errorf("instance %v is not ready", foundInstance.InstanceId) } -func (p *provider) GetCloudConfig(spec clusterv1alpha1.MachineSpec) (config string, name string, err error) { - return "", "", nil -} - -func (p *provider) Create(_ context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { +func (p 
*provider) Create(_ context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { c, pc, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return nil, cloudprovidererrors.TerminalError{ @@ -263,8 +258,8 @@ func (p *provider) Create(_ context.Context, machine *clusterv1alpha1.Machine, d return &alibabaInstance{instance: foundInstance}, nil } -func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (bool, error) { - foundInstance, err := p.Get(ctx, machine, data) +func (p *provider) Cleanup(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (bool, error) { + foundInstance, err := p.Get(ctx, log, machine, data) if err != nil { if errors.Is(err, cloudprovidererrors.ErrInstanceNotFound) { return util.RemoveFinalizerOnInstanceNotFound(finalizerInstance, machine, data) @@ -308,7 +303,7 @@ func (p *provider) MachineMetricsLabels(machine *clusterv1alpha1.Machine) (map[s return labels, err } -func (p *provider) MigrateUID(_ context.Context, machine *clusterv1alpha1.Machine, newUID types.UID) error { +func (p *provider) MigrateUID(_ context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, newUID types.UID) error { c, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return fmt.Errorf("failed to decode providerconfig: %w", err) @@ -341,16 +336,12 @@ func (p *provider) MigrateUID(_ context.Context, machine *clusterv1alpha1.Machin return nil } -func (p *provider) SetMetricsForMachines(machines clusterv1alpha1.MachineList) error { +func (p *provider) SetMetricsForMachines(_ clusterv1alpha1.MachineList) error { return nil } -func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *providerconfigtypes.Config, error) { - if provSpec.Value == nil { - return nil, nil, 
errors.New("machine.spec.providerconfig.value is nil") - } - - pconfig, err := providerconfigtypes.GetConfig(provSpec) +func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *providerconfig.Config, error) { + pconfig, err := providerconfig.GetConfig(provSpec) if err != nil { return nil, nil, fmt.Errorf("failed to decode providers config: %w", err) } @@ -365,40 +356,40 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p } c := Config{} - c.AccessKeyID, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.AccessKeyID, "ALIBABA_ACCESS_KEY_ID") + c.AccessKeyID, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.AccessKeyID, "ALIBABA_ACCESS_KEY_ID") if err != nil { return nil, nil, fmt.Errorf("failed to get the value of \"AccessKeyID\" field, error = %w", err) } - c.AccessKeySecret, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.AccessKeySecret, "ALIBABA_ACCESS_KEY_SECRET") + c.AccessKeySecret, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.AccessKeySecret, "ALIBABA_ACCESS_KEY_SECRET") if err != nil { return nil, nil, fmt.Errorf("failed to get the value of \"AccessKeySecret\" field, error = %w", err) } - c.InstanceType, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.InstanceType) + c.InstanceType, err = p.configVarResolver.GetStringValue(rawConfig.InstanceType) if err != nil { return nil, nil, fmt.Errorf("failed to get the value of \"instanceType\" field, error = %w", err) } - c.RegionID, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.RegionID) + c.RegionID, err = p.configVarResolver.GetStringValue(rawConfig.RegionID) if err != nil { return nil, nil, fmt.Errorf("failed to get the value of \"regionID\" field, error = %w", err) } - c.VSwitchID, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.VSwitchID) + c.VSwitchID, err = p.configVarResolver.GetStringValue(rawConfig.VSwitchID) if err != nil { return nil, nil, fmt.Errorf("failed to get 
the value of \"vSwitchID\" field, error = %w", err) } - c.ZoneID, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.ZoneID) + c.ZoneID, err = p.configVarResolver.GetStringValue(rawConfig.ZoneID) if err != nil { return nil, nil, fmt.Errorf("failed to get the value of \"zoneID\" field, error = %w", err) } - c.InternetMaxBandwidthOut, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.InternetMaxBandwidthOut) + c.InternetMaxBandwidthOut, err = p.configVarResolver.GetStringValue(rawConfig.InternetMaxBandwidthOut) if err != nil { return nil, nil, fmt.Errorf("failed to get the value of \"internetMaxBandwidthOut\" field, error = %w", err) } c.Labels = rawConfig.Labels - c.DiskType, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.DiskType) + c.DiskType, err = p.configVarResolver.GetStringValue(rawConfig.DiskType) if err != nil { return nil, nil, fmt.Errorf("failed to get the value of \"diskType\" field, error = %w", err) } - c.DiskSize, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.DiskSize) + c.DiskSize, err = p.configVarResolver.GetStringValue(rawConfig.DiskSize) if err != nil { return nil, nil, fmt.Errorf("failed to get the value of \"diskSize\" field, error = %w", err) } @@ -430,16 +421,14 @@ func getInstance(client *ecs.Client, instanceName string, uid string) (*ecs.Inst return nil, fmt.Errorf("failed to describe instance with instanceName: %s: %w", instanceName, err) } - if response.Instances.Instance == nil || - len(response.Instances.Instance) == 0 || - response.GetHttpStatus() == http.StatusNotFound { + if len(response.Instances.Instance) == 0 || response.GetHttpStatus() == http.StatusNotFound { return nil, cloudprovidererrors.ErrInstanceNotFound } return &response.Instances.Instance[0], nil } -func (p *provider) getImageIDForOS(machineSpec clusterv1alpha1.MachineSpec, os providerconfigtypes.OperatingSystem) (string, error) { +func (p *provider) getImageIDForOS(machineSpec clusterv1alpha1.MachineSpec, os 
providerconfig.OperatingSystem) (string, error) { c, _, err := p.getConfig(machineSpec.ProviderSpec) if err != nil { return "", fmt.Errorf("failed to get alibaba client: %w", err) @@ -460,13 +449,11 @@ func (p *provider) getImageIDForOS(machineSpec clusterv1alpha1.MachineSpec, os p return "", fmt.Errorf("failed to describe alibaba images: %w", err) } - var availableImage = map[providerconfigtypes.OperatingSystem]string{} + var availableImage = map[providerconfig.OperatingSystem]string{} for _, image := range response.Images.Image { switch image.OSNameEn { case ubuntuImageName: - availableImage[providerconfigtypes.OperatingSystemUbuntu] = image.ImageId - case centosImageName: - availableImage[providerconfigtypes.OperatingSystemCentOS] = image.ImageId + availableImage[providerconfig.OperatingSystemUbuntu] = image.ImageId } } @@ -474,5 +461,5 @@ func (p *provider) getImageIDForOS(machineSpec clusterv1alpha1.MachineSpec, os p return imageID, nil } - return "", providerconfigtypes.ErrOSNotSupported + return "", providerconfig.ErrOSNotSupported } diff --git a/pkg/cloudprovider/provider/alibaba/types/types.go b/pkg/cloudprovider/provider/alibaba/types/types.go deleted file mode 100644 index 9e58cd401..000000000 --- a/pkg/cloudprovider/provider/alibaba/types/types.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package types - -import ( - "github.com/kubermatic/machine-controller/pkg/jsonutil" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" -) - -type RawConfig struct { - AccessKeyID providerconfigtypes.ConfigVarString `json:"accessKeyID,omitempty"` - AccessKeySecret providerconfigtypes.ConfigVarString `json:"accessKeySecret,omitempty"` - RegionID providerconfigtypes.ConfigVarString `json:"regionID,omitempty"` - InstanceName providerconfigtypes.ConfigVarString `json:"instanceName,omitempty"` - InstanceType providerconfigtypes.ConfigVarString `json:"instanceType,omitempty"` - VSwitchID providerconfigtypes.ConfigVarString `json:"vSwitchID,omitempty"` - InternetMaxBandwidthOut providerconfigtypes.ConfigVarString `json:"internetMaxBandwidthOut,omitempty"` - Labels map[string]string `json:"labels,omitempty"` - ZoneID providerconfigtypes.ConfigVarString `json:"zoneID,omitempty"` - DiskType providerconfigtypes.ConfigVarString `json:"diskType,omitempty"` - DiskSize providerconfigtypes.ConfigVarString `json:"diskSize,omitempty"` -} - -func GetConfig(pconfig providerconfigtypes.Config) (*RawConfig, error) { - rawConfig := &RawConfig{} - - return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) -} diff --git a/pkg/cloudprovider/provider/anexia/helper_test.go b/pkg/cloudprovider/provider/anexia/helper_test.go index 38c3a37ef..1bcfec34f 100644 --- a/pkg/cloudprovider/provider/anexia/helper_test.go +++ b/pkg/cloudprovider/provider/anexia/helper_test.go @@ -22,20 +22,29 @@ import ( "github.com/gophercloud/gophercloud/testhelper" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - anxtypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/anexia/types" - "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + 
anxtypes "k8c.io/machine-controller/sdk/cloudprovider/anexia" + providerconfigtypes "k8c.io/machine-controller/sdk/providerconfig" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" ) +type jsonObject = map[string]interface{} + +type ProvisionVMTestCase struct { + ReconcileContext reconcileContext + AssertJSONBody func(jsonBody jsonObject) +} + type ConfigTestCase struct { Config anxtypes.RawConfig Error error } type ValidateCallTestCase struct { - Spec v1alpha1.MachineSpec + Spec clusterv1alpha1.MachineSpec ExpectedError error } @@ -45,14 +54,14 @@ func getSpecsForValidationTest(t *testing.T, configCases []ConfigTestCase) []Val for _, configCase := range configCases { jsonConfig, err := json.Marshal(configCase.Config) testhelper.AssertNoErr(t, err) - jsonProviderConfig, err := json.Marshal(types.Config{ + jsonProviderConfig, err := json.Marshal(providerconfigtypes.Config{ CloudProviderSpec: runtime.RawExtension{Raw: jsonConfig}, OperatingSystemSpec: runtime.RawExtension{Raw: []byte("{}")}, }) testhelper.AssertNoErr(t, err) testCases = append(testCases, ValidateCallTestCase{ - Spec: v1alpha1.MachineSpec{ - ProviderSpec: v1alpha1.ProviderSpec{ + Spec: clusterv1alpha1.MachineSpec{ + ProviderSpec: clusterv1alpha1.ProviderSpec{ Value: &runtime.RawExtension{Raw: jsonProviderConfig}, }, }, @@ -62,8 +71,93 @@ func getSpecsForValidationTest(t *testing.T, configCases []ConfigTestCase) []Val return testCases } -func newConfigVarString(str string) types.ConfigVarString { - return types.ConfigVarString{ +func newConfigVarString(str string) providerconfigtypes.ConfigVarString { + return providerconfigtypes.ConfigVarString{ Value: str, } } + +// this generates a full config and allows hooking into it to e.g. remove a value. 
+func hookableConfig(hook func(*anxtypes.RawConfig)) anxtypes.RawConfig { + config := anxtypes.RawConfig{ + CPUs: 1, + + Memory: 2, + + Disks: []anxtypes.RawDisk{ + {Size: 5, PerformanceType: newConfigVarString("ENT6")}, + }, + + Networks: []anxtypes.RawNetwork{ + {VlanID: newConfigVarString("test-vlan"), PrefixIDs: []providerconfigtypes.ConfigVarString{newConfigVarString("test-prefix")}}, + }, + + Token: newConfigVarString("test-token"), + LocationID: newConfigVarString("test-location"), + TemplateID: newConfigVarString("test-template-id"), + } + + if hook != nil { + hook(&config) + } + + return config +} + +// this generates a full reconcileContext with some default values and allows hooking into it to e.g. remove/overwrite a value. +func hookableReconcileContext(locationID string, templateID string, hook func(*reconcileContext)) reconcileContext { + context := reconcileContext{ + Machine: &clusterv1alpha1.Machine{ + ObjectMeta: metav1.ObjectMeta{Name: "TestMachine"}, + }, + Status: &anxtypes.ProviderStatus{}, + UserData: "", + Config: resolvedConfig{ + LocationID: locationID, + TemplateID: templateID, + Disks: []resolvedDisk{ + { + RawDisk: anxtypes.RawDisk{ + Size: 5, + }, + }, + }, + Networks: []resolvedNetwork{ + { + VlanID: "VLAN-ID", + Prefixes: []string{ + "Prefix-ID", + }, + }, + }, + RawConfig: anxtypes.RawConfig{ + CPUs: 5, + Memory: 5, + }, + }, + ProviderData: &cloudprovidertypes.ProviderData{ + Update: func(*clusterv1alpha1.Machine, ...cloudprovidertypes.MachineModifier) error { + return nil + }, + }, + ProviderConfig: &providerconfigtypes.Config{ + Network: &providerconfigtypes.NetworkConfig{ + DNS: providerconfigtypes.DNSConfig{ + Servers: []string{ + "1.1.1.1", + "", + "192.168.0.1", + "192.168.0.2", + "192.168.0.3", + }, + }, + }, + }, + } + + if hook != nil { + hook(&context) + } + + return context +} diff --git a/pkg/cloudprovider/provider/anexia/instance.go b/pkg/cloudprovider/provider/anexia/instance.go index cd67d80c5..9bb212802 100644 --- 
a/pkg/cloudprovider/provider/anexia/instance.go +++ b/pkg/cloudprovider/provider/anexia/instance.go @@ -21,14 +21,15 @@ import ( "go.anx.io/go-anxcloud/pkg/vsphere/info" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/instance" - anxtypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/anexia/types" + "k8c.io/machine-controller/pkg/cloudprovider/instance" + anxtypes "k8c.io/machine-controller/sdk/cloudprovider/anexia" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) type anexiaInstance struct { isCreating bool + isDeleting bool info *info.Info reservedAddresses []string } @@ -50,25 +51,28 @@ func (ai *anexiaInstance) ID() string { } func (ai *anexiaInstance) ProviderID() string { + if ai == nil || ai.ID() == "" { + return "" + } return ai.ID() } -func (ai *anexiaInstance) Addresses() map[string]v1.NodeAddressType { - addresses := map[string]v1.NodeAddressType{} +func (ai *anexiaInstance) Addresses() map[string]corev1.NodeAddressType { + addresses := map[string]corev1.NodeAddressType{} if ai.reservedAddresses != nil { for _, reservedIP := range ai.reservedAddresses { - addresses[reservedIP] = v1.NodeExternalIP + addresses[reservedIP] = corev1.NodeExternalIP } } if ai.info != nil { for _, network := range ai.info.Network { for _, ip := range network.IPv4 { - addresses[ip] = v1.NodeExternalIP + addresses[ip] = corev1.NodeExternalIP } for _, ip := range network.IPv6 { - addresses[ip] = v1.NodeExternalIP + addresses[ip] = corev1.NodeExternalIP } } } @@ -76,9 +80,9 @@ func (ai *anexiaInstance) Addresses() map[string]v1.NodeAddressType { for ip := range addresses { parsed := net.ParseIP(ip) if parsed.IsPrivate() { - addresses[ip] = v1.NodeInternalIP + addresses[ip] = corev1.NodeInternalIP } else { - addresses[ip] = v1.NodeExternalIP + addresses[ip] = corev1.NodeExternalIP } } @@ -86,6 +90,9 @@ func (ai *anexiaInstance) Addresses() map[string]v1.NodeAddressType { } func (ai *anexiaInstance) Status() instance.Status { + if 
ai.isDeleting { + return instance.StatusDeleting + } if ai.isCreating { return instance.StatusCreating } diff --git a/pkg/cloudprovider/provider/anexia/instance_test.go b/pkg/cloudprovider/provider/anexia/instance_test.go index 8340752a9..dbda1c43e 100644 --- a/pkg/cloudprovider/provider/anexia/instance_test.go +++ b/pkg/cloudprovider/provider/anexia/instance_test.go @@ -22,11 +22,11 @@ import ( "github.com/gophercloud/gophercloud/testhelper" "go.anx.io/go-anxcloud/pkg/vsphere/info" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) func TestAnexiaInstance(t *testing.T) { - addressCheck := func(t *testing.T, testcase string, instance *anexiaInstance, expected map[string]v1.NodeAddressType) { + addressCheck := func(t *testing.T, testcase string, instance *anexiaInstance, expected map[string]corev1.NodeAddressType) { t.Run(testcase, func(t *testing.T) { addresses := instance.Addresses() @@ -36,7 +36,7 @@ func TestAnexiaInstance(t *testing.T) { t.Run("empty instance", func(t *testing.T) { instance := anexiaInstance{} - addressCheck(t, "no addresses", &instance, map[string]v1.NodeAddressType{}) + addressCheck(t, "no addresses", &instance, map[string]corev1.NodeAddressType{}) }) t.Run("instance with only reservedAddresses set", func(t *testing.T) { @@ -44,11 +44,11 @@ func TestAnexiaInstance(t *testing.T) { reservedAddresses: []string{"10.0.0.2", "fda0:23::2", "8.8.8.8", "2001:db8::2"}, } - addressCheck(t, "expected addresses", &instance, map[string]v1.NodeAddressType{ - "10.0.0.2": v1.NodeInternalIP, - "fda0:23::2": v1.NodeInternalIP, - "8.8.8.8": v1.NodeExternalIP, - "2001:db8::2": v1.NodeExternalIP, + addressCheck(t, "expected addresses", &instance, map[string]corev1.NodeAddressType{ + "10.0.0.2": corev1.NodeInternalIP, + "fda0:23::2": corev1.NodeInternalIP, + "8.8.8.8": corev1.NodeExternalIP, + "2001:db8::2": corev1.NodeExternalIP, }) }) @@ -68,11 +68,11 @@ func TestAnexiaInstance(t *testing.T) { }, } - addressCheck(t, "expected addresses", &instance, 
map[string]v1.NodeAddressType{ - "10.0.0.2": v1.NodeInternalIP, - "fda0:23::2": v1.NodeInternalIP, - "8.8.8.8": v1.NodeExternalIP, - "2001:db8::2": v1.NodeExternalIP, + addressCheck(t, "expected addresses", &instance, map[string]corev1.NodeAddressType{ + "10.0.0.2": corev1.NodeInternalIP, + "fda0:23::2": corev1.NodeInternalIP, + "8.8.8.8": corev1.NodeExternalIP, + "2001:db8::2": corev1.NodeExternalIP, }) }) @@ -93,11 +93,11 @@ func TestAnexiaInstance(t *testing.T) { }, } - addressCheck(t, "expected addresses", &instance, map[string]v1.NodeAddressType{ - "10.0.0.2": v1.NodeInternalIP, - "fda0:23::2": v1.NodeInternalIP, - "8.8.8.8": v1.NodeExternalIP, - "2001:db8::2": v1.NodeExternalIP, + addressCheck(t, "expected addresses", &instance, map[string]corev1.NodeAddressType{ + "10.0.0.2": corev1.NodeInternalIP, + "fda0:23::2": corev1.NodeInternalIP, + "8.8.8.8": corev1.NodeExternalIP, + "2001:db8::2": corev1.NodeExternalIP, }) }) @@ -117,11 +117,11 @@ func TestAnexiaInstance(t *testing.T) { }, } - addressCheck(t, "expected addresses", &instance, map[string]v1.NodeAddressType{ - "10.0.0.2": v1.NodeInternalIP, - "fda0:23::2": v1.NodeInternalIP, - "8.8.8.8": v1.NodeExternalIP, - "2001:db8::2": v1.NodeExternalIP, + addressCheck(t, "expected addresses", &instance, map[string]corev1.NodeAddressType{ + "10.0.0.2": corev1.NodeInternalIP, + "fda0:23::2": corev1.NodeInternalIP, + "8.8.8.8": corev1.NodeExternalIP, + "2001:db8::2": corev1.NodeExternalIP, }) }) } diff --git a/pkg/cloudprovider/provider/anexia/network_provisioning.go b/pkg/cloudprovider/provider/anexia/network_provisioning.go new file mode 100644 index 000000000..a133509a3 --- /dev/null +++ b/pkg/cloudprovider/provider/anexia/network_provisioning.go @@ -0,0 +1,159 @@ +/* +Copyright 2024 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package anexia + +import ( + "context" + "sync" + "time" + + anxclient "go.anx.io/go-anxcloud/pkg/client" + anxaddr "go.anx.io/go-anxcloud/pkg/ipam/address" + anxvm "go.anx.io/go-anxcloud/pkg/vsphere/provisioning/vm" + "go.uber.org/zap" + + "k8c.io/machine-controller/sdk/apis/cluster/common" + anxtypes "k8c.io/machine-controller/sdk/cloudprovider/anexia" +) + +func networkInterfacesForProvisioning(ctx context.Context, log *zap.SugaredLogger, client anxclient.Client) ([]anxvm.Network, error) { + reconcileContext := getReconcileContext(ctx) + + config := reconcileContext.Config + status := reconcileContext.Status + + // make sure we have the status.Networks array allocated to fill it with + // data, warning if we already have something but not matching the + // configuration. + if len(status.Networks) != len(config.Networks) { + if len(status.Networks) != 0 { + log.Warn("size of status.Networks != config.Networks, this should not happen in normal operation - ignoring existing status") + } + + status.Networks = make([]anxtypes.NetworkStatus, len(config.Networks)) + } + + ret := make([]anxvm.Network, len(config.Networks)) + for netIndex, network := range config.Networks { + networkStatus := &status.Networks[netIndex] + addresses := make([]string, len(network.Prefixes)) + + for prefixIndex, prefix := range network.Prefixes { + // make sure we have the address status array allocated to fill it + // with our IP reserve status, warning if we already have something + // there but not matching the configuration. 
+ if len(networkStatus.Addresses) != len(network.Prefixes) { + if len(networkStatus.Addresses) != 0 { + log.Warnf("size of status.Networks[%[1]v].Addresses != config.Networks[%[1]v].Prefixes, this should not happen in normal operation - ignoring existing status", netIndex) + } + + networkStatus.Addresses = make([]anxtypes.NetworkAddressStatus, len(network.Prefixes)) + } + + reservedIP, err := getIPAddress(ctx, log, &network, prefix, &networkStatus.Addresses[prefixIndex], client) + if err != nil { + return nil, newError(common.CreateMachineError, "failed to reserve IP: %v", err) + } + + addresses[prefixIndex] = reservedIP + } + + ret[netIndex] = anxvm.Network{ + VLAN: network.VlanID, + IPs: addresses, + + // the one NIC type supported by the ADC API + NICType: anxtypes.VmxNet3NIC, + } + } + + return ret, nil +} + +// ENGSUP-3404 is about a race condition when reserving IPs - two calls for one +// IP each, coming in at "nearly the same millisecond", can result in both +// reserving the same IP. +// +// The proposed fix was to reserve n IPs in one call, but that would require +// lots of architecture changes - we can't really do the "reserve IPs for all +// the Machines we want to create and then create the Machines" here. +// +// This mutex alleviates the issue enough, that we didn't see it in a long +// time. It's not impossible this race condition was fixed in some other change +// and we weren't told, but I'd rather not test this and risk having problems +// again.. it's not too expensive of a Mutex. 
+var _engsup3404mutex sync.Mutex + +func getIPAddress(ctx context.Context, log *zap.SugaredLogger, network *resolvedNetwork, prefix string, status *anxtypes.NetworkAddressStatus, client anxclient.Client) (string, error) { + reconcileContext := getReconcileContext(ctx) + + // only use IP if it is still unbound + if status.ReservedIP != "" && status.IPState == anxtypes.IPStateUnbound && (!status.IPProvisioningExpires.IsZero() && status.IPProvisioningExpires.After(time.Now())) { + log.Infow("Re-using already provisioned IP", "ip", status.ReservedIP) + return status.ReservedIP, nil + } + + _engsup3404mutex.Lock() + defer _engsup3404mutex.Unlock() + + log.Info("Creating a new IP for machine") + addrAPI := anxaddr.NewAPI(client) + config := reconcileContext.Config + + res, err := addrAPI.ReserveRandom(ctx, anxaddr.ReserveRandom{ + LocationID: config.LocationID, + VlanID: network.VlanID, + PrefixID: prefix, + ReservationPeriod: uint(anxtypes.IPProvisioningExpires / time.Second), + Count: 1, + }) + if err != nil { + return "", newError(common.InvalidConfigurationMachineError, "failed to reserve an ip address: %v", err) + } + + if len(res.Data) < 1 { + return "", newError(common.InsufficientResourcesMachineError, "no ip address is available for this machine") + } + + ip := res.Data[0].Address + status.ReservedIP = ip + status.IPState = anxtypes.IPStateUnbound + status.IPProvisioningExpires = time.Now().Add(anxtypes.IPProvisioningExpires) + + return ip, nil +} + +func networkReservedAddresses(status *anxtypes.ProviderStatus) []string { + ret := make([]string, 0) + for _, network := range status.Networks { + for _, address := range network.Addresses { + if address.ReservedIP != "" && address.IPState == anxtypes.IPStateBound { + ret = append(ret, address.ReservedIP) + } + } + } + + return ret +} + +func networkStatusMarkIPsBound(status *anxtypes.ProviderStatus) { + for network := range status.Networks { + for addr := range status.Networks[network].Addresses { + 
status.Networks[network].Addresses[addr].IPState = anxtypes.IPStateBound + } + } +} diff --git a/pkg/cloudprovider/provider/anexia/provider.go b/pkg/cloudprovider/provider/anexia/provider.go index b036d9149..e0a5070ba 100644 --- a/pkg/cloudprovider/provider/anexia/provider.go +++ b/pkg/cloudprovider/provider/anexia/provider.go @@ -24,92 +24,62 @@ import ( "fmt" "net/http" "strings" - "sync" "time" "go.anx.io/go-anxcloud/pkg/api" - corev1 "go.anx.io/go-anxcloud/pkg/apis/core/v1" - vspherev1 "go.anx.io/go-anxcloud/pkg/apis/vsphere/v1" - "go.anx.io/go-anxcloud/pkg/client" anxclient "go.anx.io/go-anxcloud/pkg/client" - anxaddr "go.anx.io/go-anxcloud/pkg/ipam/address" "go.anx.io/go-anxcloud/pkg/vsphere" "go.anx.io/go-anxcloud/pkg/vsphere/provisioning/progress" anxvm "go.anx.io/go-anxcloud/pkg/vsphere/provisioning/vm" - - "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/common/ssh" - cloudprovidererrors "github.com/kubermatic/machine-controller/pkg/cloudprovider/errors" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/instance" - anxtypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/anexia/types" - cloudprovidertypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/types" - "github.com/kubermatic/machine-controller/pkg/providerconfig" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + "go.uber.org/zap" + + "k8c.io/machine-controller/pkg/cloudprovider/common/ssh" + cloudprovidererrors "k8c.io/machine-controller/pkg/cloudprovider/errors" + "k8c.io/machine-controller/pkg/cloudprovider/instance" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + cloudproviderutil "k8c.io/machine-controller/pkg/cloudprovider/util" + "k8c.io/machine-controller/sdk/apis/cluster/common" + clusterv1alpha1 
"k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + anxtypes "k8c.io/machine-controller/sdk/cloudprovider/anexia" + "k8c.io/machine-controller/sdk/providerconfig" "k8s.io/apimachinery/pkg/api/meta" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" k8stypes "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" + kerrors "k8s.io/apimachinery/pkg/util/errors" ) const ( ProvisionedType = "Provisioned" ) -var ( - // ErrConfigDiskSizeAndDisks is returned when the config has both DiskSize and Disks set, which is unsupported. - ErrConfigDiskSizeAndDisks = errors.New("both the deprecated DiskSize and new Disks attribute are set") - - // ErrMultipleDisksNotYetImplemented is returned when multiple disks are configured. - ErrMultipleDisksNotYetImplemented = errors.New("multiple disks configured, but this feature is not yet implemented") -) - type provider struct { - configVarResolver *providerconfig.ConfigVarResolver -} - -// resolvedDisk contains the resolved values from types.RawDisk. -type resolvedDisk struct { - anxtypes.RawDisk - - PerformanceType string + configVarResolver providerconfig.ConfigVarResolver } -// resolvedConfig contains the resolved values from types.RawConfig. 
-type resolvedConfig struct { - anxtypes.RawConfig - - Token string - VlanID string - LocationID string - TemplateID string - - Disks []resolvedDisk -} - -func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance instance.Instance, retErr error) { - status := getProviderStatus(machine) - klog.V(3).Infof(fmt.Sprintf("'%s' has status %#v", machine.Name, status)) +func (p *provider) Create(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance instance.Instance, retErr error) { + status := getProviderStatus(log, machine) + log.Debugw("Machine status", "status", status) // ensure conditions are present on machine ensureConditions(&status) - config, _, err := p.getConfig(ctx, machine.Spec.ProviderSpec) + config, providerCfg, err := p.getConfig(ctx, log, machine.Spec.ProviderSpec) if err != nil { - return nil, fmt.Errorf("unable to get provider config: %w", err) + return nil, fmt.Errorf("failed to get provider config: %w", err) } ctx = createReconcileContext(ctx, reconcileContext{ - Status: &status, - UserData: userdata, - Config: *config, - ProviderData: data, - Machine: machine, + Status: &status, + UserData: userdata, + Config: *config, + ProviderData: data, + ProviderConfig: providerCfg, + Machine: machine, }) - _, client, err := getClient(config.Token) + _, client, err := getClient(config.Token, &machine.Name) if err != nil { return nil, err } @@ -117,18 +87,18 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, // make sure status is reflected in Machine Object defer func() { // if error occurs during updating the machine object don't override the original error - retErr = anxtypes.NewMultiError(retErr, updateMachineStatus(machine, status, data.Update)) + retErr = kerrors.NewAggregate([]error{retErr, updateMachineStatus(machine, status, data.Update)}) }() // 
provision machine - err = provisionVM(ctx, client) + err = provisionVM(ctx, log, client) if err != nil { return nil, anexiaErrorToTerminalError(err, "failed waiting for vm provisioning") } - return p.Get(ctx, machine, data) + return p.Get(ctx, log, machine, data) } -func provisionVM(ctx context.Context, client anxclient.Client) error { +func provisionVM(ctx context.Context, log *zap.SugaredLogger, client anxclient.Client) error { reconcileContext := getReconcileContext(ctx) vmAPI := vsphere.NewAPI(client) @@ -137,19 +107,13 @@ func provisionVM(ctx context.Context, client anxclient.Client) error { status := reconcileContext.Status if status.ProvisioningID == "" { - klog.V(2).Info(fmt.Sprintf("Machine '%s' does not contain a provisioningID yet. Starting to provision", - reconcileContext.Machine.Name)) + log.Info("Machine does not contain a provisioningID yet. Starting to provision") config := reconcileContext.Config - reservedIP, err := getIPAddress(ctx, client) + networkInterfaces, err := networkInterfacesForProvisioning(ctx, log, client) if err != nil { - return newError(common.CreateMachineError, "failed to reserve IP: %v", err) + return fmt.Errorf("error generating network config for machine: %w", err) } - networkInterfaces := []anxvm.Network{{ - NICType: anxtypes.VmxNet3NIC, - IPs: []string{reservedIP}, - VLAN: config.VlanID, - }} vm := vmAPI.Provisioning().VM().NewDefinition( config.LocationID, @@ -164,8 +128,35 @@ func provisionVM(ctx context.Context, client anxclient.Client) error { vm.DiskType = config.Disks[0].PerformanceType + if config.CPUPerformanceType != "" { + vm.CPUPerformanceType = config.CPUPerformanceType + } + + for _, disk := range config.Disks[1:] { + vm.AdditionalDisks = append(vm.AdditionalDisks, anxvm.AdditionalDisk{ + SizeGBs: disk.Size, + Type: disk.PerformanceType, + }) + } + vm.Script = base64.StdEncoding.EncodeToString([]byte(reconcileContext.UserData)) + providerCfg := reconcileContext.ProviderConfig + if providerCfg.Network != nil { + 
for index, dnsServer := range providerCfg.Network.DNS.Servers { + switch index { + case 0: + vm.DNS1 = dnsServer + case 1: + vm.DNS2 = dnsServer + case 2: + vm.DNS3 = dnsServer + case 3: + vm.DNS4 = dnsServer + } + } + } + // We generate a fresh SSH key but will never actually use it - we just want a valid public key to disable password authentication for our fresh VM. sshKey, err := ssh.NewKey() if err != nil { @@ -174,9 +165,9 @@ func provisionVM(ctx context.Context, client anxclient.Client) error { vm.SSH = sshKey.PublicKey provisionResponse, err := vmAPI.Provisioning().VM().Provision(ctx, vm, false) - meta.SetStatusCondition(&status.Conditions, v1.Condition{ + meta.SetStatusCondition(&status.Conditions, metav1.Condition{ Type: ProvisionedType, - Status: v1.ConditionFalse, + Status: metav1.ConditionFalse, Reason: "Provisioning", Message: "provisioning request was sent", }) @@ -185,7 +176,7 @@ func provisionVM(ctx context.Context, client anxclient.Client) error { } // we successfully sent a VM provisioning request to the API, we consider the IP as 'Bound' now - status.IPState = anxtypes.IPStateBound + networkStatusMarkIPsBound(status) status.ProvisioningID = provisionResponse.Identifier err = updateMachineStatus(reconcileContext.Machine, *status, reconcileContext.ProviderData.Update) @@ -194,12 +185,11 @@ func provisionVM(ctx context.Context, client anxclient.Client) error { } } - klog.V(2).Info(fmt.Sprintf("Using provisionID from machine '%s' to await completion", - reconcileContext.Machine.Name)) + log.Info("Using provisionID from machine to await completion") - meta.SetStatusCondition(&status.Conditions, v1.Condition{ + meta.SetStatusCondition(&status.Conditions, metav1.Condition{ Type: ProvisionedType, - Status: v1.ConditionTrue, + Status: metav1.ConditionTrue, Reason: "Provisioned", Message: "Machine has been successfully created", }) @@ -207,63 +197,26 @@ func provisionVM(ctx context.Context, client anxclient.Client) error { return 
updateMachineStatus(reconcileContext.Machine, *status, reconcileContext.ProviderData.Update) } -var _engsup3404mutex sync.Mutex - -func getIPAddress(ctx context.Context, client anxclient.Client) (string, error) { - reconcileContext := getReconcileContext(ctx) - status := reconcileContext.Status - - // only use IP if it is still unbound - if status.ReservedIP != "" && status.IPState == anxtypes.IPStateUnbound { - klog.Infof("reusing already provisioned ip %q", status.ReservedIP) - return status.ReservedIP, nil - } - - _engsup3404mutex.Lock() - defer _engsup3404mutex.Unlock() - - klog.Info(fmt.Sprintf("Creating a new IP for machine %q", reconcileContext.Machine.Name)) - addrAPI := anxaddr.NewAPI(client) - config := reconcileContext.Config - res, err := addrAPI.ReserveRandom(ctx, anxaddr.ReserveRandom{ - LocationID: config.LocationID, - VlanID: config.VlanID, - Count: 1, - }) - if err != nil { - return "", newError(common.InvalidConfigurationMachineError, "failed to reserve an ip address: %v", err) - } - if len(res.Data) < 1 { - return "", newError(common.InsufficientResourcesMachineError, "no ip address is available for this machine") - } - - ip := res.Data[0].Address - status.ReservedIP = ip - status.IPState = anxtypes.IPStateUnbound - - return ip, nil -} - func isAlreadyProvisioning(ctx context.Context) bool { status := getReconcileContext(ctx).Status condition := meta.FindStatusCondition(status.Conditions, ProvisionedType) lastChange := condition.LastTransitionTime.Time const reasonInProvisioning = "InProvisioning" if condition.Reason == reasonInProvisioning && time.Since(lastChange) > 5*time.Minute { - meta.SetStatusCondition(&status.Conditions, v1.Condition{ + meta.SetStatusCondition(&status.Conditions, metav1.Condition{ Type: ProvisionedType, Reason: "ReInitialising", Message: "Could not find ongoing VM provisioning", - Status: v1.ConditionFalse, + Status: metav1.ConditionFalse, }) } - return condition.Status == v1.ConditionFalse && condition.Reason == 
reasonInProvisioning + return condition.Status == metav1.ConditionFalse && condition.Reason == reasonInProvisioning } func ensureConditions(status *anxtypes.ProviderStatus) { - conditions := [...]v1.Condition{ - {Type: ProvisionedType, Message: "", Status: v1.ConditionUnknown, Reason: "Initialising"}, + conditions := [...]metav1.Condition{ + {Type: ProvisionedType, Message: "", Status: metav1.ConditionUnknown, Reason: "Initialising"}, } for _, condition := range conditions { if meta.FindStatusCondition(status.Conditions, condition.Type) == nil { @@ -272,100 +225,8 @@ func ensureConditions(status *anxtypes.ProviderStatus) { } } -func resolveTemplateID(ctx context.Context, a api.API, config anxtypes.RawConfig, configVarResolver *providerconfig.ConfigVarResolver, locationID string) (string, error) { - templateName, err := configVarResolver.GetConfigVarStringValue(config.Template) - if err != nil { - return "", fmt.Errorf("failed to get 'template': %w", err) - } - - templateBuild, err := configVarResolver.GetConfigVarStringValue(config.TemplateBuild) - if err != nil { - return "", fmt.Errorf("failed to get 'templateBuild': %w", err) - } - - template, err := vspherev1.FindNamedTemplate(ctx, a, templateName, templateBuild, corev1.Location{Identifier: locationID}) - if err != nil { - return "", fmt.Errorf("failed to retrieve named template: %w", err) - } - - return template.Identifier, nil -} - -func (p *provider) resolveConfig(ctx context.Context, config anxtypes.RawConfig) (*resolvedConfig, error) { - var err error - ret := resolvedConfig{ - RawConfig: config, - } - - ret.Token, err = p.configVarResolver.GetConfigVarStringValueOrEnv(config.Token, anxtypes.AnxTokenEnv) - if err != nil { - return nil, fmt.Errorf("failed to get 'token': %w", err) - } - - ret.LocationID, err = p.configVarResolver.GetConfigVarStringValue(config.LocationID) - if err != nil { - return nil, fmt.Errorf("failed to get 'locationID': %w", err) - } - - ret.TemplateID, err = 
p.configVarResolver.GetConfigVarStringValue(config.TemplateID) - if err != nil { - return nil, fmt.Errorf("failed to get 'templateID': %w", err) - } - - // when "templateID" is not set, we expect "template" to be - if ret.TemplateID == "" { - a, _, err := getClient(ret.Token) - if err != nil { - return nil, fmt.Errorf("failed initializing API clients: %w", err) - } - - templateID, err := resolveTemplateID(ctx, a, config, p.configVarResolver, ret.LocationID) - if err != nil { - return nil, fmt.Errorf("failed retrieving template id from named template: %w", err) - } - - ret.TemplateID = templateID - } - - ret.VlanID, err = p.configVarResolver.GetConfigVarStringValue(config.VlanID) - if err != nil { - return nil, fmt.Errorf("failed to get 'vlanID': %w", err) - } - - if config.DiskSize != 0 { - if len(config.Disks) != 0 { - return nil, ErrConfigDiskSizeAndDisks - } - - klog.Warningf("Configuration uses the deprecated DiskSize attribute, please migrate to the Disks array instead.") - - config.Disks = []anxtypes.RawDisk{ - { - Size: config.DiskSize, - }, - } - config.DiskSize = 0 - } - - ret.Disks = make([]resolvedDisk, len(config.Disks)) - - for idx, disk := range config.Disks { - ret.Disks[idx].RawDisk = disk - - ret.Disks[idx].PerformanceType, err = p.configVarResolver.GetConfigVarStringValue(disk.PerformanceType) - if err != nil { - return nil, fmt.Errorf("failed to get 'performanceType' of disk %v: %w", idx, err) - } - } - - return &ret, nil -} - -func (p *provider) getConfig(ctx context.Context, provSpec clusterv1alpha1.ProviderSpec) (*resolvedConfig, *providerconfigtypes.Config, error) { - if provSpec.Value == nil { - return nil, nil, fmt.Errorf("machine.spec.providerSpec.value is nil") - } - pconfig, err := providerconfigtypes.GetConfig(provSpec) +func (p *provider) getConfig(ctx context.Context, log *zap.SugaredLogger, provSpec clusterv1alpha1.ProviderSpec) (*resolvedConfig, *providerconfig.Config, error) { + pconfig, err := providerconfig.GetConfig(provSpec) if 
err != nil { return nil, nil, err } @@ -379,7 +240,7 @@ func (p *provider) getConfig(ctx context.Context, provSpec clusterv1alpha1.Provi return nil, nil, fmt.Errorf("error parsing provider config: %w", err) } - resolvedConfig, err := p.resolveConfig(ctx, *rawConfig) + resolvedConfig, err := p.resolveConfig(ctx, log, *rawConfig) if err != nil { return nil, nil, fmt.Errorf("error resolving config: %w", err) } @@ -388,18 +249,18 @@ func (p *provider) getConfig(ctx context.Context, provSpec clusterv1alpha1.Provi } // New returns an Anexia provider. -func New(configVarResolver *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { +func New(configVarResolver providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return &provider{configVarResolver: configVarResolver} } // AddDefaults adds omitted optional values to the given MachineSpec. -func (p *provider) AddDefaults(spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { +func (p *provider) AddDefaults(_ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { return spec, nil } // Validate returns success or failure based according to its ProviderSpec. 
-func (p *provider) Validate(ctx context.Context, machinespec clusterv1alpha1.MachineSpec) error { - config, _, err := p.getConfig(ctx, machinespec.ProviderSpec) +func (p *provider) Validate(ctx context.Context, log *zap.SugaredLogger, machinespec clusterv1alpha1.MachineSpec) error { + config, _, err := p.getConfig(ctx, log, machinespec.ProviderSpec) if err != nil { return fmt.Errorf("failed to parse config: %w", err) } @@ -416,10 +277,6 @@ func (p *provider) Validate(ctx context.Context, machinespec clusterv1alpha1.Mac return errors.New("no disks configured") } - if len(config.Disks) > 1 { - return ErrMultipleDisksNotYetImplemented - } - for _, disk := range config.Disks { if disk.Size == 0 { return errors.New("disk size is missing") @@ -438,34 +295,47 @@ func (p *provider) Validate(ctx context.Context, machinespec clusterv1alpha1.Mac return errors.New("no valid template configured") } - if config.VlanID == "" { - return errors.New("vlan id is missing") + if len(config.Networks) == 0 { + return errors.New("no networks configured") + } + + atLeastOneAddressSourceConfigured := false + for _, network := range config.Networks { + if len(network.Prefixes) > 0 { + atLeastOneAddressSourceConfigured = true + break + } + } + if !atLeastOneAddressSourceConfigured { + return errors.New("none of the configured networks define an address source, cannot create Machines without any IP") } return nil } -func (p *provider) Get(ctx context.Context, machine *clusterv1alpha1.Machine, pd *cloudprovidertypes.ProviderData) (instance.Instance, error) { - config, _, err := p.getConfig(ctx, machine.Spec.ProviderSpec) +func (p *provider) Get(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, pd *cloudprovidertypes.ProviderData) (instance.Instance, error) { + config, _, err := p.getConfig(ctx, log, machine.Spec.ProviderSpec) if err != nil { return nil, newError(common.InvalidConfigurationMachineError, "failed to retrieve config: %v", err) } - _, cli, err := 
getClient(config.Token) + _, cli, err := getClient(config.Token, &machine.Name) if err != nil { return nil, newError(common.InvalidConfigurationMachineError, "failed to create Anexia client: %v", err) } vsphereAPI := vsphere.NewAPI(cli) - status := getProviderStatus(machine) - if err != nil { - return nil, newError(common.InvalidConfigurationMachineError, "failed to get machine status: %v", err) - } + status := getProviderStatus(log, machine) if status.InstanceID == "" && status.ProvisioningID == "" { return nil, cloudprovidererrors.ErrInstanceNotFound } + if status.DeprovisioningID != "" { + // info endpoint no longer available for vm -> stop here + return &anexiaInstance{isDeleting: true}, nil + } + if status.InstanceID == "" { progress, err := vsphereAPI.Provisioning().Progress().Get(ctx, status.ProvisioningID) if err != nil { @@ -486,10 +356,7 @@ func (p *provider) Get(ctx context.Context, machine *clusterv1alpha1.Machine, pd } instance := anexiaInstance{} - - if status.IPState == anxtypes.IPStateBound && status.ReservedIP != "" { - instance.reservedAddresses = []string{status.ReservedIP} - } + instance.reservedAddresses = networkReservedAddresses(&status) timeoutCtx, cancel := context.WithTimeout(ctx, anxtypes.GetRequestTimeout) defer cancel() @@ -503,33 +370,37 @@ func (p *provider) Get(ctx context.Context, machine *clusterv1alpha1.Machine, pd return &instance, nil } -func (p *provider) GetCloudConfig(_ clusterv1alpha1.MachineSpec) (string, string, error) { - return "", "", nil -} +func (p *provider) Cleanup(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (isDeleted bool, retErr error) { + if inst, err := p.Get(ctx, log, machine, data); err != nil { + if cloudprovidererrors.IsNotFound(err) { + return true, nil + } + + return false, err + } else if inst.Status() == instance.StatusCreating { + log.Error("Failed to cleanup machine: instance is still creating") + return false, nil + } -func (p 
*provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (isDeleted bool, retErr error) { - status := getProviderStatus(machine) + status := getProviderStatus(log, machine) // make sure status is reflected in Machine Object defer func() { // if error occurs during updating the machine object don't override the original error - retErr = anxtypes.NewMultiError(retErr, updateMachineStatus(machine, status, data.Update)) + retErr = kerrors.NewAggregate([]error{retErr, updateMachineStatus(machine, status, data.Update)}) }() ensureConditions(&status) - config, _, err := p.getConfig(ctx, machine.Spec.ProviderSpec) + config, _, err := p.getConfig(ctx, log, machine.Spec.ProviderSpec) if err != nil { return false, newError(common.InvalidConfigurationMachineError, "failed to parse MachineSpec: %v", err) } - _, cli, err := getClient(config.Token) + _, cli, err := getClient(config.Token, &machine.Name) if err != nil { return false, newError(common.InvalidConfigurationMachineError, "failed to create Anexia client: %v", err) } - vsphereAPI := vsphere.NewAPI(cli) - if err != nil { - return false, newError(common.InvalidConfigurationMachineError, "failed to get machine status: %v", err) - } + vsphereAPI := vsphere.NewAPI(cli) deleteCtx, cancel := context.WithTimeout(ctx, anxtypes.DeleteRequestTimeout) defer cancel() @@ -539,10 +410,20 @@ func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine response, err := vsphereAPI.Provisioning().VM().Deprovision(deleteCtx, status.InstanceID, false) if err != nil { var respErr *anxclient.ResponseError + // Only error if the error was not "not found" - if !(errors.As(err, &respErr) && respErr.ErrorData.Code == http.StatusNotFound) { + if !errors.As(err, &respErr) || respErr.ErrorData.Code != http.StatusNotFound { return false, newError(common.DeleteMachineError, "failed to delete machine: %v", err) } + + // good thinking checking for a "not found" error, but 
go-anxcloud does only + // return >= 500 && < 600 errors (: + // since that's the legacy client in go-anxcloud and the new one is not yet available, + // this will not be fixed there but we have a nice workaround here: + + if response.Identifier == "" { + return true, nil + } } status.DeprovisioningID = response.Identifier } @@ -568,7 +449,7 @@ func isTaskDone(ctx context.Context, cli anxclient.Client, progressIdentifier st return false, nil } -func (p *provider) MigrateUID(_ context.Context, _ *clusterv1alpha1.Machine, _ k8stypes.UID) error { +func (p *provider) MigrateUID(_ context.Context, _ *zap.SugaredLogger, _ *clusterv1alpha1.Machine, _ k8stypes.UID) error { return nil } @@ -580,16 +461,29 @@ func (p *provider) SetMetricsForMachines(_ clusterv1alpha1.MachineList) error { return nil } -func getClient(token string) (api.API, anxclient.Client, error) { - tokenOpt := anxclient.TokenFromString(token) - client := anxclient.HTTPClient(&http.Client{Timeout: 120 * time.Second}) +func getClient(token string, machineName *string) (api.API, anxclient.Client, error) { + logPrefix := "[Anexia API]" + + if machineName != nil { + logPrefix = fmt.Sprintf("[Anexia API for Machine %q]", *machineName) + } + + httpClient := cloudproviderutil.HTTPClientConfig{ + Timeout: 120 * time.Second, + LogPrefix: logPrefix, + }.New() + + legacyClientOptions := []anxclient.Option{ + anxclient.TokenFromString(token), + anxclient.HTTPClient(&httpClient), + } - a, err := api.NewAPI(api.WithClientOptions(client, tokenOpt)) + a, err := api.NewAPI(api.WithClientOptions(legacyClientOptions...)) if err != nil { return nil, nil, fmt.Errorf("error creating generic API client: %w", err) } - legacyClient, err := anxclient.New(tokenOpt, client) + legacyClient, err := anxclient.New(legacyClientOptions...) 
if err != nil { return nil, nil, fmt.Errorf("error creating legacy client: %w", err) } @@ -597,12 +491,12 @@ func getClient(token string) (api.API, anxclient.Client, error) { return a, legacyClient, nil } -func getProviderStatus(machine *clusterv1alpha1.Machine) anxtypes.ProviderStatus { +func getProviderStatus(log *zap.SugaredLogger, machine *clusterv1alpha1.Machine) anxtypes.ProviderStatus { var providerStatus anxtypes.ProviderStatus status := machine.Status.ProviderStatus if status != nil && status.Raw != nil { if err := json.Unmarshal(status.Raw, &providerStatus); err != nil { - klog.Warningf("Unable to parse status from machine object. status was discarded for machine") + log.Error("Failed to parse status from machine object; status was discarded for machine") return anxtypes.ProviderStatus{} } } @@ -646,7 +540,7 @@ func anexiaErrorToTerminalError(err error, msg string) error { } } - var responseError *client.ResponseError + var responseError *anxclient.ResponseError if errors.As(err, &responseError) && (responseError.ErrorData.Code == http.StatusForbidden || responseError.ErrorData.Code == http.StatusUnauthorized) { return cloudprovidererrors.TerminalError{ Reason: common.InvalidConfigurationMachineError, diff --git a/pkg/cloudprovider/provider/anexia/provider_test.go b/pkg/cloudprovider/provider/anexia/provider_test.go index 4bda59f26..24b51c416 100644 --- a/pkg/cloudprovider/provider/anexia/provider_test.go +++ b/pkg/cloudprovider/provider/anexia/provider_test.go @@ -20,6 +20,7 @@ import ( "context" "encoding/json" "errors" + "fmt" "net/http" "net/http/httptest" "net/url" @@ -30,24 +31,26 @@ import ( "github.com/gophercloud/gophercloud/testhelper" "go.anx.io/go-anxcloud/pkg/api" "go.anx.io/go-anxcloud/pkg/api/mock" - corev1 "go.anx.io/go-anxcloud/pkg/apis/core/v1" - vspherev1 "go.anx.io/go-anxcloud/pkg/apis/vsphere/v1" - "go.anx.io/go-anxcloud/pkg/client" + anxcorev1 "go.anx.io/go-anxcloud/pkg/apis/core/v1" + anxvspherev1 
"go.anx.io/go-anxcloud/pkg/apis/vsphere/v1" anxclient "go.anx.io/go-anxcloud/pkg/client" "go.anx.io/go-anxcloud/pkg/core" "go.anx.io/go-anxcloud/pkg/ipam/address" "go.anx.io/go-anxcloud/pkg/vsphere/provisioning/progress" "go.anx.io/go-anxcloud/pkg/vsphere/provisioning/vm" + "go.uber.org/zap" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - cloudprovidererrors "github.com/kubermatic/machine-controller/pkg/cloudprovider/errors" - anxtypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/anexia/types" - cloudprovidertypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/types" + cloudprovidererrors "k8c.io/machine-controller/pkg/cloudprovider/errors" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + anxtypes "k8c.io/machine-controller/sdk/cloudprovider/anexia" + providerconfigtypes "k8c.io/machine-controller/sdk/providerconfig" + "k8c.io/machine-controller/sdk/providerconfig/configvar" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" ) const ( @@ -58,11 +61,14 @@ const ( func TestAnexiaProvider(t *testing.T) { testhelper.SetupHTTP() client, server := anxclient.NewTestClient(nil, testhelper.Mux) + log := zap.NewNop().Sugar() a := mock.NewMockAPI() - a.FakeExisting(&vspherev1.Template{Identifier: "TEMPLATE-ID-OLD-BUILD", Name: testTemplateName, Build: "b01"}) - a.FakeExisting(&vspherev1.Template{Identifier: "TEMPLATE-ID", Name: testTemplateName, Build: "b02"}) - a.FakeExisting(&vspherev1.Template{Identifier: "WRONG-TEMPLATE-NAME", Name: "Wrong Template Name", Build: "b02"}) + a.FakeExisting(&anxvspherev1.Template{Identifier: "TEMPLATE-ID-OLD-BUILD", Name: testTemplateName, Build: "b01"}) + 
a.FakeExisting(&anxvspherev1.Template{Identifier: "TEMPLATE-ID", Name: testTemplateName, Build: "b02"}) + a.FakeExisting(&anxvspherev1.Template{Identifier: "WRONG-TEMPLATE-NAME", Name: "Wrong Template Name", Build: "b02"}) + a.FakeExisting(&anxvspherev1.Template{Identifier: "TEMPLATE-ID-NO-NETWORK-CONFIG", Name: "no-network-config", Build: "b03"}) + a.FakeExisting(&anxvspherev1.Template{Identifier: "TEMPLATE-ID-ADDITIONAL-DISKS", Name: "additional-disks", Build: "b03"}) t.Cleanup(func() { testhelper.TeardownHTTP() @@ -71,7 +77,77 @@ func TestAnexiaProvider(t *testing.T) { t.Run("Test provision VM", func(t *testing.T) { t.Parallel() - testhelper.Mux.HandleFunc("/api/ipam/v1/address/reserve/ip/count.json", func(writer http.ResponseWriter, request *http.Request) { + + testCases := []ProvisionVMTestCase{ + { + // Provision a generic VM with some custom dns entries + ReconcileContext: hookableReconcileContext("LOCATION-ID", "TEMPLATE-ID", func(rc *reconcileContext) { + rc.ProviderConfig = &providerconfigtypes.Config{ + Network: &providerconfigtypes.NetworkConfig{ + DNS: providerconfigtypes.DNSConfig{ + Servers: []string{ + "1.1.1.1", + "", + "192.168.0.1", + "192.168.0.2", + "192.168.0.3", + }, + }, + }, + } + }), + AssertJSONBody: func(jsonBody jsonObject) { + testhelper.AssertEquals(t, jsonBody["cpu_performance_type"], "performance") + testhelper.AssertEquals(t, jsonBody["hostname"], "TestMachine") + testhelper.AssertEquals(t, jsonBody["memory_mb"], json.Number("5")) + + testhelper.AssertEquals(t, jsonBody["dns1"], "1.1.1.1") + _, exists := jsonBody["dns2"] + testhelper.AssertEquals(t, exists, false) + testhelper.AssertEquals(t, jsonBody["dns3"], "192.168.0.1") + testhelper.AssertEquals(t, jsonBody["dns4"], "192.168.0.2") + + networkArray := jsonBody["network"].([]interface{}) + networkObject := networkArray[0].(jsonObject) + testhelper.AssertEquals(t, networkObject["vlan"], "VLAN-ID") + testhelper.AssertEquals(t, networkObject["nic_type"], "vmxnet3") + 
testhelper.AssertEquals(t, networkObject["ips"].([]interface{})[0], "8.8.8.8") + }, + }, + { + // Provision a VM without any ProviderConfig + ReconcileContext: hookableReconcileContext("LOCATION-ID", "TEMPLATE-ID-NO-NETWORK-CONFIG", func(rc *reconcileContext) { + rc.ProviderConfig = &providerconfigtypes.Config{} + }), + AssertJSONBody: func(jsonBody jsonObject) { + _, exists := jsonBody["dns1"] + testhelper.AssertEquals(t, exists, false) + _, exists = jsonBody["dns2"] + testhelper.AssertEquals(t, exists, false) + _, exists = jsonBody["dns3"] + testhelper.AssertEquals(t, exists, false) + _, exists = jsonBody["dns4"] + testhelper.AssertEquals(t, exists, false) + }, + }, + { + ReconcileContext: hookableReconcileContext("LOCATION-ID", "ADDITIONAL-DISKS", func(rc *reconcileContext) { + rc.Config.Disks = append(rc.Config.Disks, resolvedDisk{ + RawDisk: anxtypes.RawDisk{ + Size: 10, + }, + PerformanceType: "STD1", + }) + }), + + AssertJSONBody: func(jsonBody jsonObject) { + testhelper.AssertEquals(t, json.Number("5"), jsonBody["disk_gb"]) + testhelper.AssertJSONEquals(t, `[{"gb":10,"type":"STD1"}]`, jsonBody["additional_disks"]) + }, + }, + } + + testhelper.Mux.HandleFunc("/api/ipam/v1/address/reserve/ip/count.json", func(writer http.ResponseWriter, _ *http.Request) { err := json.NewEncoder(writer).Encode(address.ReserveRandomSummary{ Data: []address.ReservedIP{ { @@ -83,90 +159,46 @@ func TestAnexiaProvider(t *testing.T) { testhelper.AssertNoErr(t, err) }) - testhelper.Mux.HandleFunc("/api/vsphere/v1/provisioning/vm.json/LOCATION-ID/templates/TEMPLATE-ID", func(writer http.ResponseWriter, request *http.Request) { - testhelper.TestMethod(t, request, http.MethodPost) - type jsonObject = map[string]interface{} - expectedJSON := map[string]interface{}{ - "cpu_performance_type": "performance", - "hostname": "TestMachine", - "memory_mb": json.Number("5"), - "network": []jsonObject{ - { - "vlan": "VLAN-ID", - "nic_type": "vmxnet3", - "ips": []interface{}{"8.8.8.8"}, - }, - }, - 
} - var jsonBody jsonObject - decoder := json.NewDecoder(request.Body) - decoder.UseNumber() - testhelper.AssertNoErr(t, decoder.Decode(&jsonBody)) - testhelper.AssertEquals(t, expectedJSON["cpu_performance_type"], jsonBody["cpu_performance_type"]) - testhelper.AssertEquals(t, expectedJSON["hostname"], jsonBody["hostname"]) - testhelper.AssertEquals(t, expectedJSON["memory_mb"], jsonBody["memory_mb"]) - testhelper.AssertEquals(t, expectedJSON["count"], jsonBody["count"]) - - expectedNetwork := expectedJSON["network"].([]jsonObject)[0] - bodyNetwork := jsonBody["network"].([]interface{})[0].(jsonObject) - testhelper.AssertEquals(t, expectedNetwork["vlan"], bodyNetwork["vlan"]) - testhelper.AssertEquals(t, expectedNetwork["nic_type"], bodyNetwork["nic_type"]) - testhelper.AssertEquals(t, expectedNetwork["ips"].([]interface{})[0], bodyNetwork["ips"].([]interface{})[0]) - - err := json.NewEncoder(writer).Encode(vm.ProvisioningResponse{ - Progress: 100, - Errors: nil, - Identifier: "TEST-IDENTIFIER", - Queued: false, + for _, testCase := range testCases { + templateID := testCase.ReconcileContext.Config.TemplateID + locationID := testCase.ReconcileContext.Config.LocationID + + testhelper.Mux.HandleFunc(fmt.Sprintf("/api/vsphere/v1/provisioning/vm.json/%s/templates/%s", locationID, templateID), func(writer http.ResponseWriter, request *http.Request) { + testhelper.TestMethod(t, request, http.MethodPost) + var jsonBody jsonObject + decoder := json.NewDecoder(request.Body) + decoder.UseNumber() + testhelper.AssertNoErr(t, decoder.Decode(&jsonBody)) + + testCase.AssertJSONBody(jsonBody) + + err := json.NewEncoder(writer).Encode(vm.ProvisioningResponse{ + Progress: 100, + Errors: nil, + Identifier: templateID, + Queued: false, + }) + testhelper.AssertNoErr(t, err) }) - testhelper.AssertNoErr(t, err) - }) - testhelper.Mux.HandleFunc("/api/vsphere/v1/provisioning/progress.json/TEST-IDENTIFIER", func(writer http.ResponseWriter, request *http.Request) { - 
testhelper.TestMethod(t, request, http.MethodGet) + testhelper.Mux.HandleFunc(fmt.Sprintf("/api/vsphere/v1/provisioning/progress.json/%s", templateID), func(writer http.ResponseWriter, request *http.Request) { + testhelper.TestMethod(t, request, http.MethodGet) - err := json.NewEncoder(writer).Encode(progress.Progress{ - TaskIdentifier: "TEST-IDENTIFIER", - Queued: false, - Progress: 100, - VMIdentifier: "VM-IDENTIFIER", - Errors: nil, + err := json.NewEncoder(writer).Encode(progress.Progress{ + TaskIdentifier: templateID, + Queued: false, + Progress: 100, + VMIdentifier: "VM-IDENTIFIER", + Errors: nil, + }) + testhelper.AssertNoErr(t, err) }) - testhelper.AssertNoErr(t, err) - }) - providerStatus := anxtypes.ProviderStatus{} - ctx := createReconcileContext(context.Background(), reconcileContext{ - Machine: &v1alpha1.Machine{ - ObjectMeta: metav1.ObjectMeta{Name: "TestMachine"}, - }, - Status: &providerStatus, - UserData: "", - Config: resolvedConfig{ - VlanID: "VLAN-ID", - LocationID: "LOCATION-ID", - TemplateID: "TEMPLATE-ID", - Disks: []resolvedDisk{ - { - RawDisk: anxtypes.RawDisk{ - Size: 5, - }, - }, - }, - RawConfig: anxtypes.RawConfig{ - CPUs: 5, - Memory: 5, - }, - }, - ProviderData: &cloudprovidertypes.ProviderData{ - Update: func(m *clusterv1alpha1.Machine, mods ...cloudprovidertypes.MachineModifier) error { - return nil - }, - }, - }) + ctx := createReconcileContext(context.Background(), testCase.ReconcileContext) - err := provisionVM(ctx, client) - testhelper.AssertNoErr(t, err) + err := provisionVM(ctx, log, client) + testhelper.AssertNoErr(t, err) + } }) t.Run("Test resolve template", func(t *testing.T) { @@ -209,9 +241,9 @@ func TestAnexiaProvider(t *testing.T) { }, } - provider := New(nil).(*provider) + provider := New(configvar.NewResolver(context.Background(), fake.NewClientBuilder().Build())).(*provider) for _, testCase := range testCases { - templateID, err := resolveTemplateID(context.TODO(), a, testCase.config, provider.configVarResolver, 
"foo") + templateID, err := provider.resolveTemplateID(context.Background(), a, testCase.config, "foo") if testCase.expectedError != "" { if err != nil { testhelper.AssertErr(t, err) @@ -261,46 +293,31 @@ func TestAnexiaProvider(t *testing.T) { t.Run("Test getIPAddress", func(t *testing.T) { t.Parallel() providerStatus := &anxtypes.ProviderStatus{ - ReservedIP: "", - IPState: "", + Networks: []anxtypes.NetworkStatus{ + { + Addresses: []anxtypes.NetworkAddressStatus{ + { + ReservedIP: "", + IPState: "", + }, + }, + }, + }, } ctx := createReconcileContext(context.Background(), reconcileContext{Status: providerStatus}) t.Run("with unbound reserved IP", func(t *testing.T) { expectedIP := "8.8.8.8" - providerStatus.ReservedIP = expectedIP - providerStatus.IPState = anxtypes.IPStateUnbound - reservedIP, err := getIPAddress(ctx, client) + providerStatus.Networks[0].Addresses[0].ReservedIP = expectedIP + providerStatus.Networks[0].Addresses[0].IPState = anxtypes.IPStateUnbound + providerStatus.Networks[0].Addresses[0].IPProvisioningExpires = time.Now().Add(anxtypes.IPProvisioningExpires) + reservedIP, err := getIPAddress(ctx, log, &resolvedNetwork{}, "Prefix-ID", &providerStatus.Networks[0].Addresses[0], client) testhelper.AssertNoErr(t, err) testhelper.AssertEquals(t, expectedIP, reservedIP) }) }) } -// this generates a full config and allows hooking into it to e.g. remove a value. 
-func hookableConfig(hook func(*anxtypes.RawConfig)) anxtypes.RawConfig { - config := anxtypes.RawConfig{ - CPUs: 1, - - Memory: 2, - - Disks: []anxtypes.RawDisk{ - {Size: 5, PerformanceType: newConfigVarString("ENT6")}, - }, - - Token: newConfigVarString("test-token"), - VlanID: newConfigVarString("test-vlan"), - LocationID: newConfigVarString("test-location"), - TemplateID: newConfigVarString("test-template-id"), - } - - if hook != nil { - hook(&config) - } - - return config -} - func TestValidate(t *testing.T) { t.Parallel() @@ -320,11 +337,7 @@ func TestValidate(t *testing.T) { }, ConfigTestCase{ Config: hookableConfig(func(c *anxtypes.RawConfig) { c.DiskSize = 10 }), - Error: ErrConfigDiskSizeAndDisks, - }, - ConfigTestCase{ - Config: hookableConfig(func(c *anxtypes.RawConfig) { c.Disks = append(c.Disks, anxtypes.RawDisk{Size: 10}) }), - Error: ErrMultipleDisksNotYetImplemented, + Error: anxtypes.ErrConfigDiskSizeAndDisks, }, ConfigTestCase{ Config: hookableConfig(func(c *anxtypes.RawConfig) { c.Disks[0].Size = 0 }), @@ -338,9 +351,14 @@ func TestValidate(t *testing.T) { Config: hookableConfig(func(c *anxtypes.RawConfig) { c.LocationID.Value = "" }), Error: errors.New("location id is missing"), }, + + ConfigTestCase{ + Config: hookableConfig(func(c *anxtypes.RawConfig) { c.Networks = []anxtypes.RawNetwork{} }), + Error: errors.New("no networks configured"), + }, ConfigTestCase{ - Config: hookableConfig(func(c *anxtypes.RawConfig) { c.VlanID.Value = "" }), - Error: errors.New("vlan id is missing"), + Config: hookableConfig(func(c *anxtypes.RawConfig) { c.VlanID.Value = "legacy VLAN-ID" }), + Error: anxtypes.ErrConfigVlanIDAndNetworks, }, ConfigTestCase{ Config: hookableConfig(func(c *anxtypes.RawConfig) { c.DiskSize = 10; c.Disks = []anxtypes.RawDisk{} }), @@ -352,9 +370,9 @@ func TestValidate(t *testing.T) { }, ) - provider := New(nil) + provider := New(configvar.NewResolver(context.Background(), fake.NewClientBuilder().Build())) for _, testCase := range 
getSpecsForValidationTest(t, configCases) { - err := provider.Validate(context.Background(), testCase.Spec) + err := provider.Validate(context.Background(), zap.NewNop().Sugar(), testCase.Spec) if testCase.ExpectedError != nil { if !errors.Is(err, testCase.ExpectedError) { testhelper.AssertEquals(t, testCase.ExpectedError.Error(), err.Error()) @@ -382,7 +400,7 @@ func TestEnsureConditions(t *testing.T) { func TestGetProviderStatus(t *testing.T) { t.Parallel() - machine := &v1alpha1.Machine{} + machine := &clusterv1alpha1.Machine{} providerStatus := anxtypes.ProviderStatus{ InstanceID: "InstanceID", } @@ -390,14 +408,14 @@ func TestGetProviderStatus(t *testing.T) { testhelper.AssertNoErr(t, err) machine.Status.ProviderStatus = &runtime.RawExtension{Raw: providerStatusJSON} - returnedStatus := getProviderStatus(machine) + returnedStatus := getProviderStatus(zap.NewNop().Sugar(), machine) testhelper.AssertEquals(t, "InstanceID", returnedStatus.InstanceID) } func TestUpdateStatus(t *testing.T) { t.Parallel() - machine := &v1alpha1.Machine{} + machine := &clusterv1alpha1.Machine{} providerStatus := anxtypes.ProviderStatus{ InstanceID: "InstanceID", } @@ -406,10 +424,10 @@ func TestUpdateStatus(t *testing.T) { machine.Status.ProviderStatus = &runtime.RawExtension{Raw: providerStatusJSON} called := false - err = updateMachineStatus(machine, providerStatus, func(paramMachine *v1alpha1.Machine, modifier ...cloudprovidertypes.MachineModifier) error { + err = updateMachineStatus(machine, providerStatus, func(paramMachine *clusterv1alpha1.Machine, _ ...cloudprovidertypes.MachineModifier) error { called = true testhelper.AssertEquals(t, machine, paramMachine) - status := getProviderStatus(machine) + status := getProviderStatus(zap.NewNop().Sugar(), machine) testhelper.AssertEquals(t, status.InstanceID, providerStatus.InstanceID) return nil }) @@ -419,32 +437,32 @@ func TestUpdateStatus(t *testing.T) { } func Test_anexiaErrorToTerminalError(t *testing.T) { - forbiddenMockHandler 
:= http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + forbiddenMockHandler := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusForbidden) _, err := w.Write([]byte(`{"error": {"code": 403}}`)) testhelper.AssertNoErr(t, err) }) - unauthorizedMockHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + unauthorizedMockHandler := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.WriteHeader(http.StatusUnauthorized) _, err := w.Write([]byte(`{"error": {"code": 401}}`)) testhelper.AssertNoErr(t, err) }) legacyClientRun := func(url string) error { - client, err := client.New(client.BaseURL(url), client.IgnoreMissingToken(), client.ParseEngineErrors(true)) + client, err := anxclient.New(anxclient.BaseURL(url), anxclient.IgnoreMissingToken(), anxclient.ParseEngineErrors(true)) testhelper.AssertNoErr(t, err) - _, err = core.NewAPI(client).Location().List(context.TODO(), 1, 1, "", "") + _, err = core.NewAPI(client).Location().List(context.Background(), 1, 1, "", "") return err } apiClientRun := func(url string) error { - client, err := api.NewAPI(api.WithClientOptions( - client.BaseURL(url), - client.IgnoreMissingToken(), + api, err := api.NewAPI(api.WithClientOptions( + anxclient.BaseURL(url), + anxclient.IgnoreMissingToken(), )) testhelper.AssertNoErr(t, err) - return client.Get(context.TODO(), &corev1.Location{Identifier: "foo"}) + return api.Get(context.Background(), &anxcorev1.Location{Identifier: "foo"}) } testCases := []struct { @@ -495,7 +513,7 @@ func Test_anexiaErrorToTerminalError(t *testing.T) { }) t.Run("legacy api client unspecific ResponseError shouldn't convert to TerminalError", func(t *testing.T) { - var err error = &client.ResponseError{} + var err error = &anxclient.ResponseError{} err = anexiaErrorToTerminalError(err, "foo") if ok, _, _ := cloudprovidererrors.IsTerminalError(err); ok { t.Errorf("unexpected error %#v, expected no TerminalError", err) diff --git 
a/pkg/cloudprovider/provider/anexia/reconcile_context.go b/pkg/cloudprovider/provider/anexia/reconcile_context.go index dea3577c8..e30e44365 100644 --- a/pkg/cloudprovider/provider/anexia/reconcile_context.go +++ b/pkg/cloudprovider/provider/anexia/reconcile_context.go @@ -19,9 +19,10 @@ package anexia import ( "context" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - anxtypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/anexia/types" - cloudprovidertypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/types" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + anxtypes "k8c.io/machine-controller/sdk/cloudprovider/anexia" + providerconfigtypes "k8c.io/machine-controller/sdk/providerconfig" ) type contextKey byte @@ -29,11 +30,12 @@ type contextKey byte const machineReconcileContextKey contextKey = 0 type reconcileContext struct { - Machine *v1alpha1.Machine - Status *anxtypes.ProviderStatus - UserData string - Config resolvedConfig - ProviderData *cloudprovidertypes.ProviderData + Machine *clusterv1alpha1.Machine + Status *anxtypes.ProviderStatus + UserData string + Config resolvedConfig + ProviderData *cloudprovidertypes.ProviderData + ProviderConfig *providerconfigtypes.Config } func createReconcileContext(ctx context.Context, cc reconcileContext) context.Context { diff --git a/pkg/cloudprovider/provider/anexia/resolve_config.go b/pkg/cloudprovider/provider/anexia/resolve_config.go new file mode 100644 index 000000000..96a009a39 --- /dev/null +++ b/pkg/cloudprovider/provider/anexia/resolve_config.go @@ -0,0 +1,211 @@ +/* +Copyright 2024 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package anexia + +import ( + "context" + "fmt" + + "go.anx.io/go-anxcloud/pkg/api" + anxcorev1 "go.anx.io/go-anxcloud/pkg/apis/core/v1" + anxvspherev1 "go.anx.io/go-anxcloud/pkg/apis/vsphere/v1" + "go.uber.org/zap" + + anxtypes "k8c.io/machine-controller/sdk/cloudprovider/anexia" +) + +// resolvedDisk contains the resolved values from types.RawDisk. +type resolvedDisk struct { + anxtypes.RawDisk + + PerformanceType string +} + +// resolvedNetwork contains the resolved values from types.RawNetwork. +type resolvedNetwork struct { + anxtypes.RawNetwork + + VlanID string + + // List of prefixes to each reserve an IP address from. + // + // Legacy compatibility: may contain an empty string as entry to reserve an IP address from the given VLAN instead of a specific prefix. + Prefixes []string +} + +// resolvedConfig contains the resolved values from types.RawConfig. 
+type resolvedConfig struct { + anxtypes.RawConfig + + Token string + LocationID string + TemplateID string + + Disks []resolvedDisk + Networks []resolvedNetwork +} + +func (p *provider) resolveTemplateID(ctx context.Context, a api.API, config anxtypes.RawConfig, locationID string) (string, error) { + templateName, err := p.configVarResolver.GetStringValue(config.Template) + if err != nil { + return "", fmt.Errorf("failed to get 'template': %w", err) + } + + templateBuild, err := p.configVarResolver.GetStringValue(config.TemplateBuild) + if err != nil { + return "", fmt.Errorf("failed to get 'templateBuild': %w", err) + } + + template, err := anxvspherev1.FindNamedTemplate(ctx, a, templateName, templateBuild, anxcorev1.Location{Identifier: locationID}) + if err != nil { + return "", fmt.Errorf("failed to retrieve named template: %w", err) + } + + return template.Identifier, nil +} + +func (p *provider) resolveNetworkConfig(log *zap.SugaredLogger, config anxtypes.RawConfig) (*[]resolvedNetwork, error) { + legacyVlanIDConfig, _ := config.VlanID.MarshalJSON() + if string(legacyVlanIDConfig) != `""` { + if len(config.Networks) != 0 { + return nil, anxtypes.ErrConfigVlanIDAndNetworks + } + + log.Info("Configuration uses the deprecated VlanID attribute, please migrate to the Networks array instead.") + + vlanID, err := p.configVarResolver.GetStringValue(config.VlanID) + if err != nil { + return nil, fmt.Errorf("failed to get 'vlanID': %w", err) + } + + return &[]resolvedNetwork{ + { + VlanID: vlanID, + Prefixes: []string{""}, + }, + }, nil + } + + ret := make([]resolvedNetwork, len(config.Networks)) + for netIndex, net := range config.Networks { + vlanID, err := p.configVarResolver.GetStringValue(net.VlanID) + if err != nil { + return nil, fmt.Errorf("failed to get 'vlanID' for network %v: %w", netIndex, err) + } + + prefixes := make([]string, len(net.PrefixIDs)) + for prefixIndex, prefix := range net.PrefixIDs { + prefixID, err := 
p.configVarResolver.GetStringValue(prefix) + if err != nil { + return nil, fmt.Errorf("failed to get 'prefixID' for network %v, prefix %v: %w", netIndex, prefixIndex, err) + } + + prefixes[prefixIndex] = prefixID + } + + ret[netIndex] = resolvedNetwork{ + VlanID: vlanID, + Prefixes: prefixes, + } + } + + return &ret, nil +} + +func (p *provider) resolveDiskConfig(log *zap.SugaredLogger, config anxtypes.RawConfig) (*[]resolvedDisk, error) { + if config.DiskSize != 0 { + if len(config.Disks) != 0 { + return nil, anxtypes.ErrConfigDiskSizeAndDisks + } + + log.Info("Configuration uses the deprecated DiskSize attribute, please migrate to the Disks array instead.") + + config.Disks = []anxtypes.RawDisk{ + { + Size: config.DiskSize, + }, + } + config.DiskSize = 0 + } + + ret := make([]resolvedDisk, len(config.Disks)) + + for idx, disk := range config.Disks { + performanceType, err := p.configVarResolver.GetStringValue(disk.PerformanceType) + if err != nil { + return nil, fmt.Errorf("failed to get 'performanceType' of disk %v: %w", idx, err) + } + + ret[idx] = resolvedDisk{ + RawDisk: disk, + PerformanceType: performanceType, + } + } + + return &ret, nil +} + +func (p *provider) resolveConfig(ctx context.Context, log *zap.SugaredLogger, config anxtypes.RawConfig) (*resolvedConfig, error) { + var err error + ret := resolvedConfig{ + RawConfig: config, + } + + ret.Token, err = p.configVarResolver.GetStringValueOrEnv(config.Token, anxtypes.AnxTokenEnv) + if err != nil { + return nil, fmt.Errorf("failed to get 'token': %w", err) + } + + ret.LocationID, err = p.configVarResolver.GetStringValue(config.LocationID) + if err != nil { + return nil, fmt.Errorf("failed to get 'locationID': %w", err) + } + + ret.TemplateID, err = p.configVarResolver.GetStringValue(config.TemplateID) + if err != nil { + return nil, fmt.Errorf("failed to get 'templateID': %w", err) + } + + diskConfig, err := p.resolveDiskConfig(log, config) + if err != nil { + return nil, fmt.Errorf("failed to resolve 
disk config: %w", err)
+	}
+	ret.Disks = *diskConfig
+
+	networkConfig, err := p.resolveNetworkConfig(log, config)
+	if err != nil {
+		return nil, fmt.Errorf("failed to resolve network config: %w", err)
+	}
+	ret.Networks = *networkConfig
+
+	// when "templateID" is not set, we expect "template" to be set
+	if ret.TemplateID == "" {
+		a, _, err := getClient(ret.Token, nil)
+		if err != nil {
+			return nil, fmt.Errorf("failed initializing API clients: %w", err)
+		}
+
+		templateID, err := p.resolveTemplateID(ctx, a, config, ret.LocationID)
+		if err != nil {
+			return nil, fmt.Errorf("failed retrieving template id from named template: %w", err)
+		}
+
+		ret.TemplateID = templateID
+	}
+
+	return &ret, nil
+}
diff --git a/pkg/cloudprovider/provider/anexia/types/errors.go b/pkg/cloudprovider/provider/anexia/types/errors.go
deleted file mode 100644
index 65f7ab6d2..000000000
--- a/pkg/cloudprovider/provider/anexia/types/errors.go
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
-Copyright 2022 The Machine Controller Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package types
-
-import (
-	"fmt"
-	"strings"
-)
-
-// MultiError represent multiple errors at the same time.
-type MultiErrors []error - -func (r MultiErrors) Error() string { - errString := make([]string, len(r)) - for i, err := range r { - errString[i] = fmt.Sprintf("Error %d: %s", i, err) - } - return fmt.Sprintf("Multiple errors occurred:\n%s", strings.Join(errString, "\n")) -} - -func NewMultiError(errs ...error) error { - var combinedErr []error - for _, err := range errs { - if err == nil { - continue - } - combinedErr = append(combinedErr, err) - } - - if len(combinedErr) > 0 { - return MultiErrors(combinedErr) - } - - return nil -} diff --git a/pkg/cloudprovider/provider/anexia/types/types.go b/pkg/cloudprovider/provider/anexia/types/types.go deleted file mode 100644 index e6d8e9f22..000000000 --- a/pkg/cloudprovider/provider/anexia/types/types.go +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright 2020 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package types - -import ( - "time" - - "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" - cloudprovidererrors "github.com/kubermatic/machine-controller/pkg/cloudprovider/errors" - "github.com/kubermatic/machine-controller/pkg/jsonutil" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -const ( - AnxTokenEnv = "ANEXIA_TOKEN" - - CreateRequestTimeout = 15 * time.Minute - GetRequestTimeout = 1 * time.Minute - DeleteRequestTimeout = 1 * time.Minute - - IPStateBound = "Bound" - IPStateUnbound = "Unbound" - - VmxNet3NIC = "vmxnet3" - MachinePoweredOn = "poweredOn" -) - -var StatusUpdateFailed = cloudprovidererrors.TerminalError{ - Reason: common.UpdateMachineError, - Message: "Unable to update the machine status", -} - -// RawDisk specifies a single disk, with some values maybe being fetched from secrets. -type RawDisk struct { - Size int `json:"size"` - PerformanceType providerconfigtypes.ConfigVarString `json:"performanceType"` -} - -// RawConfig contains all the configuration values for VMs to create, with some values maybe being fetched from secrets. -type RawConfig struct { - Token providerconfigtypes.ConfigVarString `json:"token,omitempty"` - VlanID providerconfigtypes.ConfigVarString `json:"vlanID"` - LocationID providerconfigtypes.ConfigVarString `json:"locationID"` - - TemplateID providerconfigtypes.ConfigVarString `json:"templateID"` - Template providerconfigtypes.ConfigVarString `json:"template"` - TemplateBuild providerconfigtypes.ConfigVarString `json:"templateBuild"` - - CPUs int `json:"cpus"` - Memory int `json:"memory"` - - // Deprecated, use Disks instead. 
- DiskSize int `json:"diskSize"` - - Disks []RawDisk `json:"disks"` -} - -type ProviderStatus struct { - InstanceID string `json:"instanceID"` - ProvisioningID string `json:"provisioningID"` - DeprovisioningID string `json:"deprovisioningID"` - ReservedIP string `json:"reservedIP"` - IPState string `json:"ipState"` - Conditions []v1.Condition `json:"conditions,omitempty"` -} - -func GetConfig(pconfig providerconfigtypes.Config) (*RawConfig, error) { - rawConfig := &RawConfig{} - - return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) -} diff --git a/pkg/userdata/convert/gzip.go b/pkg/cloudprovider/provider/aws/gzip.go similarity index 93% rename from pkg/userdata/convert/gzip.go rename to pkg/cloudprovider/provider/aws/gzip.go index a15af3f60..689fd89d4 100644 --- a/pkg/userdata/convert/gzip.go +++ b/pkg/cloudprovider/provider/aws/gzip.go @@ -14,14 +14,14 @@ See the License for the specific language governing permissions and limitations under the License. */ -package convert +package aws import ( "bytes" "compress/gzip" ) -func GzipString(s string) (string, error) { +func gzipString(s string) (string, error) { var b bytes.Buffer gz := gzip.NewWriter(&b) diff --git a/pkg/cloudprovider/provider/aws/provider.go b/pkg/cloudprovider/provider/aws/provider.go index 9637476ff..8c48823d6 100644 --- a/pkg/cloudprovider/provider/aws/provider.go +++ b/pkg/cloudprovider/provider/aws/provider.go @@ -36,24 +36,22 @@ import ( "github.com/aws/smithy-go" gocache "github.com/patrickmn/go-cache" "github.com/prometheus/client_golang/prometheus" - - "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - cloudprovidererrors "github.com/kubermatic/machine-controller/pkg/cloudprovider/errors" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/instance" - awstypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/aws/types" - 
cloudprovidertypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/types" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/util" - "github.com/kubermatic/machine-controller/pkg/providerconfig" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" - "github.com/kubermatic/machine-controller/pkg/userdata/convert" - - v1 "k8s.io/api/core/v1" + "go.uber.org/zap" + + cloudprovidererrors "k8c.io/machine-controller/pkg/cloudprovider/errors" + "k8c.io/machine-controller/pkg/cloudprovider/instance" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + "k8c.io/machine-controller/sdk/apis/cluster/common" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + awstypes "k8c.io/machine-controller/sdk/cloudprovider/aws" + "k8c.io/machine-controller/sdk/net" + "k8c.io/machine-controller/sdk/providerconfig" + + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/klog" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" "sigs.k8s.io/controller-runtime/pkg/metrics" ) @@ -61,6 +59,11 @@ const ( // Interval and timeout for polling. pollInterval = 2 * time.Second pollTimeout = 5 * time.Minute + // The maximum number of hops that the metadata service can be forwarded to, defaults to 2. + // We need to set this to a higher value i.e. 3 to ensure that it is not blocked by extra hops that are introduced either by CNI or other networking components. With lower + // limits AWS metadata service is not reachable from the container network in such a scenario. + // For example: https://github.com/cilium/cilium/issues/25232 + awsMetadataHTTPPutResponseHopLimit = 3 ) var ( @@ -74,11 +77,11 @@ func init() { } type provider struct { - configVarResolver *providerconfig.ConfigVarResolver + configVarResolver providerconfig.ConfigVarResolver } // New returns a aws provider. 
-func New(configVarResolver *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { +func New(configVarResolver providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return &provider{configVarResolver: configVarResolver} } @@ -99,33 +102,20 @@ var ( ec2types.VolumeTypeSt1: nil, } - amiFilters = map[providerconfigtypes.OperatingSystem]map[awstypes.CPUArchitecture]amiFilter{ - // Source: https://wiki.centos.org/Cloud/AWS - providerconfigtypes.OperatingSystemCentOS: { + amiFilters = map[providerconfig.OperatingSystem]map[awstypes.CPUArchitecture]amiFilter{ + providerconfig.OperatingSystemRockyLinux: { awstypes.CPUArchitectureX86_64: { - description: "CentOS Linux 7* x86_64*", - // The AWS marketplace ID from CentOS Community Platform Engineering (CPE) - owner: "125523088429", - }, - awstypes.CPUArchitectureARM64: { - description: "CentOS Linux 7* aarch64*", - // The AWS marketplace ID from CentOS Community Platform Engineering (CPE) - owner: "125523088429", - }, - }, - providerconfigtypes.OperatingSystemRockyLinux: { - awstypes.CPUArchitectureX86_64: { - description: "Rocky-8-ec2-8*.x86_64", + description: "*Rocky-9-EC2-*.x86_64", // The AWS marketplace ID from Rocky Linux Community Platform Engineering (CPE) owner: "792107900819", }, awstypes.CPUArchitectureARM64: { - description: "Rocky-8-ec2-8*.aarch64", + description: "*Rocky-9-EC2-*.aarch64", // The AWS marketplace ID from Rocky Linux Community Platform Engineering (CPE) owner: "792107900819", }, }, - providerconfigtypes.OperatingSystemAmazonLinux2: { + providerconfig.OperatingSystemAmazonLinux2: { awstypes.CPUArchitectureX86_64: { description: "Amazon Linux 2 AMI * x86_64 HVM gp2", // The AWS marketplace ID from Amazon @@ -137,21 +127,21 @@ var ( owner: "137112412989", }, }, - providerconfigtypes.OperatingSystemUbuntu: { + providerconfig.OperatingSystemUbuntu: { awstypes.CPUArchitectureX86_64: { // Be as precise as possible - otherwise we might get a nightly dev build - description: 
"Canonical, Ubuntu, 22.04 LTS, amd64 jammy image build on ????-??-??", + description: "Canonical, Ubuntu, 24.04, amd64 noble image", // The AWS marketplace ID from Canonical owner: "099720109477", }, awstypes.CPUArchitectureARM64: { // Be as precise as possible - otherwise we might get a nightly dev build - description: "Canonical, Ubuntu, 22.04 LTS, arm64 jammy image build on ????-??-??", + description: "Canonical, Ubuntu, 24.04, arm64 noble image", // The AWS marketplace ID from Canonical owner: "099720109477", }, }, - providerconfigtypes.OperatingSystemRHEL: { + providerconfig.OperatingSystemRHEL: { awstypes.CPUArchitectureX86_64: { // Be as precise as possible - otherwise we might get a nightly dev build description: "Provided by Red Hat, Inc.", @@ -165,7 +155,7 @@ var ( owner: "309956199498", }, }, - providerconfigtypes.OperatingSystemFlatcar: { + providerconfig.OperatingSystemFlatcar: { awstypes.CPUArchitectureX86_64: { // Be as precise as possible - otherwise we might get a nightly dev build description: "Flatcar Container Linux stable *", @@ -216,7 +206,7 @@ type amiFilter struct { productCode string } -func getDefaultAMIID(ctx context.Context, client *ec2.Client, os providerconfigtypes.OperatingSystem, region string, cpuArchitecture awstypes.CPUArchitecture) (string, error) { +func getDefaultAMIID(ctx context.Context, log *zap.SugaredLogger, client *ec2.Client, os providerconfig.OperatingSystem, region string, cpuArchitecture awstypes.CPUArchitecture) (string, error) { cacheLock.Lock() defer cacheLock.Unlock() @@ -233,7 +223,7 @@ func getDefaultAMIID(ctx context.Context, client *ec2.Client, os providerconfigt cacheKey := fmt.Sprintf("ami-id-%s-%s-%s", region, os, cpuArchitecture) amiID, found := cache.Get(cacheKey) if found { - klog.V(3).Info("found AMI-ID in cache!") + log.Debugw("Found AMI-ID in cache", "key", cacheKey, "ami", amiID) return amiID.(string), nil } @@ -275,7 +265,7 @@ func getDefaultAMIID(ctx context.Context, client *ec2.Client, os 
providerconfigt return "", fmt.Errorf("could not find Image for '%s' with arch '%s'", os, cpuArchitecture) } - if os == providerconfigtypes.OperatingSystemRHEL { + if os == providerconfig.OperatingSystemRHEL { imagesOut.Images, err = filterSupportedRHELImages(imagesOut.Images) if err != nil { return "", err @@ -323,24 +313,22 @@ func getCPUArchitecture(ctx context.Context, client *ec2.Client, instanceType ec return "", errors.New("returned instance type data did not include supported architectures") } -func getDefaultRootDevicePath(os providerconfigtypes.OperatingSystem) (string, error) { +func getDefaultRootDevicePath(os providerconfig.OperatingSystem) (string, error) { const ( rootDevicePathSDA = "/dev/sda1" rootDevicePathXVDA = "/dev/xvda" ) switch os { - case providerconfigtypes.OperatingSystemUbuntu: - return rootDevicePathSDA, nil - case providerconfigtypes.OperatingSystemCentOS: + case providerconfig.OperatingSystemUbuntu: return rootDevicePathSDA, nil - case providerconfigtypes.OperatingSystemRockyLinux: + case providerconfig.OperatingSystemRockyLinux: return rootDevicePathSDA, nil - case providerconfigtypes.OperatingSystemRHEL: + case providerconfig.OperatingSystemRHEL: return rootDevicePathSDA, nil - case providerconfigtypes.OperatingSystemFlatcar: + case providerconfig.OperatingSystemFlatcar: return rootDevicePathXVDA, nil - case providerconfigtypes.OperatingSystemAmazonLinux2: + case providerconfig.OperatingSystemAmazonLinux2: return rootDevicePathXVDA, nil } @@ -348,8 +336,8 @@ func getDefaultRootDevicePath(os providerconfigtypes.OperatingSystem) (string, e } //gocyclo:ignore -func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *providerconfigtypes.Config, *awstypes.RawConfig, error) { - pconfig, err := providerconfigtypes.GetConfig(provSpec) +func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *providerconfig.Config, *awstypes.RawConfig, error) { + pconfig, err := providerconfig.GetConfig(provSpec) 
if err != nil { return nil, nil, nil, err } @@ -364,55 +352,55 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p } c := Config{} - c.AccessKeyID, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.AccessKeyID, "AWS_ACCESS_KEY_ID") + c.AccessKeyID, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.AccessKeyID, "AWS_ACCESS_KEY_ID") if err != nil { return nil, nil, nil, fmt.Errorf("failed to get the value of \"accessKeyId\" field, error = %w", err) } - c.SecretAccessKey, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.SecretAccessKey, "AWS_SECRET_ACCESS_KEY") + c.SecretAccessKey, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.SecretAccessKey, "AWS_SECRET_ACCESS_KEY") if err != nil { return nil, nil, nil, fmt.Errorf("failed to get the value of \"secretAccessKey\" field, error = %w", err) } - c.Region, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.Region) + c.Region, err = p.configVarResolver.GetStringValue(rawConfig.Region) if err != nil { return nil, nil, nil, err } - c.VpcID, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.VpcID) + c.VpcID, err = p.configVarResolver.GetStringValue(rawConfig.VpcID) if err != nil { return nil, nil, nil, err } - c.SubnetID, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.SubnetID) + c.SubnetID, err = p.configVarResolver.GetStringValue(rawConfig.SubnetID) if err != nil { return nil, nil, nil, err } - c.AvailabilityZone, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.AvailabilityZone) + c.AvailabilityZone, err = p.configVarResolver.GetStringValue(rawConfig.AvailabilityZone) if err != nil { return nil, nil, nil, err } for _, securityGroupIDRaw := range rawConfig.SecurityGroupIDs { - securityGroupID, err := p.configVarResolver.GetConfigVarStringValue(securityGroupIDRaw) + securityGroupID, err := p.configVarResolver.GetStringValue(securityGroupIDRaw) if err != nil { return nil, nil, nil, err } c.SecurityGroupIDs 
= append(c.SecurityGroupIDs, securityGroupID) } - c.InstanceProfile, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.InstanceProfile) + c.InstanceProfile, err = p.configVarResolver.GetStringValue(rawConfig.InstanceProfile) if err != nil { return nil, nil, nil, err } - instanceTypeStr, err := p.configVarResolver.GetConfigVarStringValue(rawConfig.InstanceType) + instanceTypeStr, err := p.configVarResolver.GetStringValue(rawConfig.InstanceType) if err != nil { return nil, nil, nil, err } c.InstanceType = ec2types.InstanceType(instanceTypeStr) - c.AMI, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.AMI) + c.AMI, err = p.configVarResolver.GetStringValue(rawConfig.AMI) if err != nil { return nil, nil, nil, err } c.DiskSize = rawConfig.DiskSize - diskTypeStr, err := p.configVarResolver.GetConfigVarStringValue(rawConfig.DiskType) + diskTypeStr, err := p.configVarResolver.GetStringValue(rawConfig.DiskType) if err != nil { return nil, nil, nil, err } @@ -420,12 +408,12 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p if c.DiskType == ec2types.VolumeTypeIo1 { if rawConfig.DiskIops == nil { - return nil, nil, nil, errors.New("Missing required field `diskIops`") + return nil, nil, nil, errors.New("missing required field `diskIops`") } iops := *rawConfig.DiskIops if iops < 100 || iops > 64000 { - return nil, nil, nil, errors.New("Invalid value for `diskIops` (min: 100, max: 64000)") + return nil, nil, nil, errors.New("invalid value for `diskIops` (min: 100, max: 64000)") } c.DiskIops = rawConfig.DiskIops @@ -434,13 +422,13 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p iops := *rawConfig.DiskIops if iops < 3000 || iops > 64000 { - return nil, nil, nil, errors.New("Invalid value for `diskIops` (min: 3000, max: 64000)") + return nil, nil, nil, errors.New("invalid value for `diskIops` (min: 3000, max: 64000)") } c.DiskIops = rawConfig.DiskIops } - c.EBSVolumeEncrypted, _, err = 
p.configVarResolver.GetConfigVarBoolValue(rawConfig.EBSVolumeEncrypted) + c.EBSVolumeEncrypted, _, err = p.configVarResolver.GetBoolValue(rawConfig.EBSVolumeEncrypted) if err != nil { return nil, nil, nil, fmt.Errorf("failed to get ebsVolumeEncrypted value: %w", err) } @@ -448,30 +436,30 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p c.AssignPublicIP = rawConfig.AssignPublicIP c.IsSpotInstance = rawConfig.IsSpotInstance if rawConfig.SpotInstanceConfig != nil && c.IsSpotInstance != nil && *c.IsSpotInstance { - maxPrice, err := p.configVarResolver.GetConfigVarStringValue(rawConfig.SpotInstanceConfig.MaxPrice) + maxPrice, err := p.configVarResolver.GetStringValue(rawConfig.SpotInstanceConfig.MaxPrice) if err != nil { return nil, nil, nil, err } - c.SpotMaxPrice = pointer.String(maxPrice) + c.SpotMaxPrice = ptr.To(maxPrice) - persistentRequest, _, err := p.configVarResolver.GetConfigVarBoolValue(rawConfig.SpotInstanceConfig.PersistentRequest) + persistentRequest, _, err := p.configVarResolver.GetBoolValue(rawConfig.SpotInstanceConfig.PersistentRequest) if err != nil { return nil, nil, nil, err } - c.SpotPersistentRequest = pointer.Bool(persistentRequest) + c.SpotPersistentRequest = ptr.To(persistentRequest) - interruptionBehavior, err := p.configVarResolver.GetConfigVarStringValue(rawConfig.SpotInstanceConfig.InterruptionBehavior) + interruptionBehavior, err := p.configVarResolver.GetStringValue(rawConfig.SpotInstanceConfig.InterruptionBehavior) if err != nil { return nil, nil, nil, err } - c.SpotInterruptionBehavior = pointer.String(interruptionBehavior) + c.SpotInterruptionBehavior = ptr.To(interruptionBehavior) } - assumeRoleARN, err := p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.AssumeRoleARN, "AWS_ASSUME_ROLE_ARN") + assumeRoleARN, err := p.configVarResolver.GetStringValueOrEnv(rawConfig.AssumeRoleARN, "AWS_ASSUME_ROLE_ARN") if err != nil { return nil, nil, nil, err } c.AssumeRoleARN = assumeRoleARN - 
assumeRoleExternalID, err := p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.AssumeRoleExternalID, "AWS_ASSUME_ROLE_EXTERNAL_ID") + assumeRoleExternalID, err := p.configVarResolver.GetStringValueOrEnv(rawConfig.AssumeRoleExternalID, "AWS_ASSUME_ROLE_EXTERNAL_ID") if err != nil { return nil, nil, nil, err } @@ -495,7 +483,7 @@ func getAwsConfig(ctx context.Context, id, secret, token, region, assumeRoleARN, stsSvc := sts.NewFromConfig(cfg) creds := stscreds.NewAssumeRoleProvider(stsSvc, assumeRoleARN, func(o *stscreds.AssumeRoleOptions) { - o.ExternalID = pointer.String(assumeRoleExternalID) + o.ExternalID = ptr.To(assumeRoleExternalID) }, ) @@ -514,7 +502,7 @@ func getEC2client(ctx context.Context, id, secret, region, assumeRoleArn, assume return ec2.NewFromConfig(cfg), nil } -func (p *provider) AddDefaults(spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { +func (p *provider) AddDefaults(_ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { _, _, rawConfig, err := p.getConfig(spec.ProviderSpec) if err != nil { return spec, err @@ -535,7 +523,7 @@ func (p *provider) AddDefaults(spec clusterv1alpha1.MachineSpec) (clusterv1alpha return spec, err } -func (p *provider) Validate(ctx context.Context, spec clusterv1alpha1.MachineSpec) error { +func (p *provider) Validate(ctx context.Context, _ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) error { config, pc, _, err := p.getConfig(spec.ProviderSpec) if err != nil { return fmt.Errorf("failed to parse config: %w", err) @@ -578,14 +566,23 @@ func (p *provider) Validate(ctx context.Context, spec clusterv1alpha1.MachineSpe } switch f := pc.Network.GetIPFamily(); f { - case util.IPFamilyUnspecified, util.IPFamilyIPv4: + case net.IPFamilyUnspecified, net.IPFamilyIPv4: // noop - case util.IPFamilyIPv6, util.IPFamilyIPv4IPv6, util.IPFamilyIPv6IPv4: + case net.IPFamilyIPv6, net.IPFamilyIPv4IPv6, net.IPFamilyIPv6IPv4: if 
len(vpc.Ipv6CidrBlockAssociationSet) == 0 { - return fmt.Errorf("vpc %s does not have IPv6 CIDR block", pointer.StringDeref(vpc.VpcId, "")) + return fmt.Errorf("vpc %s does not have IPv6 CIDR block", ptr.Deref(vpc.VpcId, "")) } default: - return fmt.Errorf(util.ErrUnknownNetworkFamily, f) + return fmt.Errorf(net.ErrUnknownNetworkFamily, f) + } + + dnsHostnames, err := areVpcDNSHostnamesEnabled(ctx, ec2Client, config.VpcID) + if err != nil { + return fmt.Errorf("failed to retrieve VPC attributes: %w", err) + } + + if !dnsHostnames { + return fmt.Errorf("vpc %s does not have the enableDnsHostname attribute enabled, new machines in this VPC would be incompatible with Kubernetes", config.VpcID) } _, err = ec2Client.DescribeAvailabilityZones(ctx, &ec2.DescribeAvailabilityZonesInput{ZoneNames: []string{config.AvailabilityZone}}) @@ -633,13 +630,30 @@ func getVpc(ctx context.Context, client *ec2.Client, id string) (*ec2types.Vpc, } if len(vpcOut.Vpcs) != 1 { - return nil, fmt.Errorf("unable to find specified vpc with id %q", id) + return nil, fmt.Errorf("failed to find specified vpc with id %q", id) } return &vpcOut.Vpcs[0], nil } -func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { +func areVpcDNSHostnamesEnabled(ctx context.Context, client *ec2.Client, id string) (bool, error) { + out, err := client.DescribeVpcAttribute(ctx, &ec2.DescribeVpcAttributeInput{ + VpcId: &id, + Attribute: ec2types.VpcAttributeNameEnableDnsHostnames, + }) + + if err != nil { + return false, awsErrorToTerminalError(err, "failed to describe vpc attributes") + } + + if out.EnableDnsHostnames == nil { + return false, errors.New("API response does not include expected field enableDnsHostnames") + } + + return *out.EnableDnsHostnames.Value, nil +} + +func (p *provider) Create(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ 
*cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { config, pc, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return nil, cloudprovidererrors.TerminalError{ @@ -670,7 +684,7 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, } } - if amiID, err = getDefaultAMIID(ctx, ec2Client, pc.OperatingSystem, config.Region, cpuArchitecture); err != nil { + if amiID, err = getDefaultAMIID(ctx, log, ec2Client, pc.OperatingSystem, config.Region, cpuArchitecture); err != nil { return nil, cloudprovidererrors.TerminalError{ Reason: common.InvalidConfigurationMachineError, Message: fmt.Sprintf("Failed to get AMI-ID for operating system %s in region %s: %v", pc.OperatingSystem, config.Region, err), @@ -678,9 +692,9 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, } } - if pc.OperatingSystem != providerconfigtypes.OperatingSystemFlatcar { + if pc.OperatingSystem != providerconfig.OperatingSystemFlatcar { // Gzip the userdata in case we don't use Flatcar - userdata, err = convert.GzipString(userdata) + userdata, err = gzipString(userdata) if err != nil { return nil, fmt.Errorf("failed to gzip the userdata") } @@ -734,6 +748,9 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, assignPublicIP := config.AssignPublicIP == nil || *config.AssignPublicIP instanceRequest := &ec2.RunInstancesInput{ + MetadataOptions: &ec2types.InstanceMetadataOptionsRequest{ + HttpPutResponseHopLimit: aws.Int32(awsMetadataHTTPPutResponseHopLimit), + }, ImageId: aws.String(amiID), InstanceMarketOptions: instanceMarketOptions, BlockDeviceMappings: []ec2types.BlockDeviceMapping{ @@ -744,7 +761,7 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, DeleteOnTermination: aws.Bool(true), VolumeType: config.DiskType, Iops: config.DiskIops, - Encrypted: pointer.Bool(config.EBSVolumeEncrypted), + Encrypted: ptr.To(config.EBSVolumeEncrypted), }, 
}, }, @@ -791,7 +808,7 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, return &awsInstance{instance: &runOut.Instances[0]}, nil } -func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (bool, error) { +func (p *provider) Cleanup(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (bool, error) { ec2instance, err := p.get(ctx, machine) if err != nil { if errors.Is(err, cloudprovidererrors.ErrInstanceNotFound) { @@ -800,7 +817,7 @@ func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine return false, err } - // (*Config, *providerconfigtypes.Config, *awstypes.RawConfig, error) + // (*Config, *providerconfig.Config, *awstypes.RawConfig, error) config, _, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { @@ -826,7 +843,7 @@ func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine } if cOut.CancelledSpotInstanceRequests[0].State == ec2types.CancelSpotInstanceRequestStateCancelled { - klog.V(3).Infof("successfully canceled spot instance request %s at aws", *ec2instance.instance.SpotInstanceRequestId) + log.Infow("Successfully canceled spot instance request", "request", *ec2instance.instance.SpotInstanceRequestId) } } @@ -838,13 +855,13 @@ func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine } if tOut.TerminatingInstances[0].PreviousState.Name != tOut.TerminatingInstances[0].CurrentState.Name { - klog.V(3).Infof("successfully triggered termination of instance %s at aws", ec2instance.ID()) + log.Infow("Successfully triggered termination of instance", "instance", ec2instance.ID()) } return false, nil } -func (p *provider) Get(ctx context.Context, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { +func (p *provider) Get(ctx context.Context, _ *zap.SugaredLogger, machine 
*clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { return p.get(ctx, machine) } @@ -891,28 +908,6 @@ func (p *provider) get(ctx context.Context, machine *clusterv1alpha1.Machine) (* return nil, cloudprovidererrors.ErrInstanceNotFound } -func (p *provider) GetCloudConfig(spec clusterv1alpha1.MachineSpec) (config string, name string, err error) { - c, _, _, err := p.getConfig(spec.ProviderSpec) - if err != nil { - return "", "", fmt.Errorf("failed to parse config: %w", err) - } - - cc := &awstypes.CloudConfig{ - Global: awstypes.GlobalOpts{ - VPC: c.VpcID, - SubnetID: c.SubnetID, - Zone: c.AvailabilityZone, - }, - } - - s, err := awstypes.CloudConfigToString(cc) - if err != nil { - return "", "", fmt.Errorf("failed to convert cloud-config to string: %w", err) - } - - return s, "aws", nil -} - func (p *provider) MachineMetricsLabels(machine *clusterv1alpha1.Machine) (map[string]string, error) { labels := make(map[string]string) @@ -927,7 +922,7 @@ func (p *provider) MachineMetricsLabels(machine *clusterv1alpha1.Machine) (map[s return labels, err } -func (p *provider) MigrateUID(ctx context.Context, machine *clusterv1alpha1.Machine, newUID types.UID) error { +func (p *provider) MigrateUID(ctx context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, newUID types.UID) error { machineInstance, err := p.get(ctx, machine) if err != nil { if errors.Is(err, cloudprovidererrors.ErrInstanceNotFound) { @@ -968,7 +963,7 @@ func (d *awsInstance) Name() string { } func (d *awsInstance) ID() string { - return pointer.StringDeref(d.instance.InstanceId, "") + return ptr.Deref(d.instance.InstanceId, "") } func (d *awsInstance) ProviderID() string { @@ -982,22 +977,22 @@ func (d *awsInstance) ProviderID() string { return "aws:///" + *d.instance.Placement.AvailabilityZone + "/" + *d.instance.InstanceId } -func (d *awsInstance) Addresses() map[string]v1.NodeAddressType { - addresses := map[string]v1.NodeAddressType{ - 
pointer.StringDeref(d.instance.PublicIpAddress, ""): v1.NodeExternalIP, - pointer.StringDeref(d.instance.PublicDnsName, ""): v1.NodeExternalDNS, - pointer.StringDeref(d.instance.PrivateIpAddress, ""): v1.NodeInternalIP, - pointer.StringDeref(d.instance.PrivateDnsName, ""): v1.NodeInternalDNS, +func (d *awsInstance) Addresses() map[string]corev1.NodeAddressType { + addresses := map[string]corev1.NodeAddressType{ + ptr.Deref(d.instance.PublicIpAddress, ""): corev1.NodeExternalIP, + ptr.Deref(d.instance.PublicDnsName, ""): corev1.NodeExternalDNS, + ptr.Deref(d.instance.PrivateIpAddress, ""): corev1.NodeInternalIP, + ptr.Deref(d.instance.PrivateDnsName, ""): corev1.NodeInternalDNS, } for _, netInterface := range d.instance.NetworkInterfaces { for _, addr := range netInterface.Ipv6Addresses { - ipAddr := pointer.StringDeref(addr.Ipv6Address, "") + ipAddr := ptr.Deref(addr.Ipv6Address, "") // link-local addresses not very useful in machine status // filter them out - if !util.IsLinkLocal(ipAddr) { - addresses[ipAddr] = v1.NodeExternalIP + if !net.IsLinkLocal(ipAddr) { + addresses[ipAddr] = corev1.NodeExternalIP } } } @@ -1073,11 +1068,7 @@ func awsErrorToTerminalError(err error, msg string) error { } func setProviderSpec(rawConfig awstypes.RawConfig, provSpec clusterv1alpha1.ProviderSpec) (*runtime.RawExtension, error) { - if provSpec.Value == nil { - return nil, fmt.Errorf("machine.spec.providerconfig.value is nil") - } - - pconfig, err := providerconfigtypes.GetConfig(provSpec) + pconfig, err := providerconfig.GetConfig(provSpec) if err != nil { return nil, err } @@ -1185,13 +1176,13 @@ func getInstanceCountForMachine(machine clusterv1alpha1.Machine, reservations [] func filterSupportedRHELImages(images []ec2types.Image) ([]ec2types.Image, error) { var filteredImages []ec2types.Image for _, image := range images { - if strings.HasPrefix(*image.Name, "RHEL-8") { + if strings.HasPrefix(*image.Name, "RHEL-9") { filteredImages = append(filteredImages, image) } } if 
filteredImages == nil { - return nil, errors.New("rhel 8 images are not found") + return nil, errors.New("rhel 9 images are not found") } return filteredImages, nil @@ -1205,7 +1196,7 @@ func filterSupportedRHELImages(images []ec2types.Image) ([]ec2types.Image, error // This happens more often in some AWS regions because some regions have // slower instance creation (e.g. us-east-1 and us-west-2). func (p *provider) waitForInstance(ctx context.Context, machine *clusterv1alpha1.Machine) error { - return wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { + return wait.PollUntilContextTimeout(ctx, pollInterval, pollTimeout, false, func(ctx context.Context) (bool, error) { _, err := p.get(ctx, machine) if errors.Is(err, cloudprovidererrors.ErrInstanceNotFound) { // Retry if instance is not found diff --git a/pkg/cloudprovider/provider/aws/types/cloudconfig.go b/pkg/cloudprovider/provider/aws/types/cloudconfig.go deleted file mode 100644 index 2fca4788e..000000000 --- a/pkg/cloudprovider/provider/aws/types/cloudconfig.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package types - -import ( - "bytes" - "fmt" - "text/template" - - "github.com/Masterminds/sprig/v3" - - "github.com/kubermatic/machine-controller/pkg/ini" -) - -const ( - cloudConfigTpl = `[global] -Zone={{ .Global.Zone | iniEscape }} -VPC={{ .Global.VPC | iniEscape }} -SubnetID={{ .Global.SubnetID | iniEscape }} -RouteTableID={{ .Global.RouteTableID | iniEscape }} -RoleARN={{ .Global.RoleARN | iniEscape }} -KubernetesClusterID={{ .Global.KubernetesClusterID | iniEscape }} -DisableSecurityGroupIngress={{ .Global.DisableSecurityGroupIngress }} -ElbSecurityGroup={{ .Global.ElbSecurityGroup | iniEscape }} -DisableStrictZoneCheck={{ .Global.DisableStrictZoneCheck }} -{{- range .Global.NodeIPFamilies }} -NodeIPFamilies={{ . | iniEscape}} -{{- end }} -` -) - -type CloudConfig struct { - Global GlobalOpts -} - -type GlobalOpts struct { - Zone string - VPC string - SubnetID string - RouteTableID string - RoleARN string - KubernetesClusterTag string - KubernetesClusterID string - ElbSecurityGroup string - DisableSecurityGroupIngress bool - DisableStrictZoneCheck bool - NodeIPFamilies []string -} - -func CloudConfigToString(c *CloudConfig) (string, error) { - funcMap := sprig.TxtFuncMap() - funcMap["iniEscape"] = ini.Escape - - tpl, err := template.New("cloud-config").Funcs(funcMap).Parse(cloudConfigTpl) - if err != nil { - return "", fmt.Errorf("failed to parse the cloud config template: %w", err) - } - - buf := &bytes.Buffer{} - if err := tpl.Execute(buf, c); err != nil { - return "", fmt.Errorf("failed to execute cloud config template: %w", err) - } - - return buf.String(), nil -} diff --git a/pkg/cloudprovider/provider/aws/types/cloudconfig_test.go b/pkg/cloudprovider/provider/aws/types/cloudconfig_test.go deleted file mode 100644 index f9eaa1cfb..000000000 --- a/pkg/cloudprovider/provider/aws/types/cloudconfig_test.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package types - -import ( - "flag" - "testing" - - "gopkg.in/gcfg.v1" - - testhelper "github.com/kubermatic/machine-controller/pkg/test" -) - -var update = flag.Bool("update", false, "update testdata files") - -func TestCloudConfigToString(t *testing.T) { - tests := []struct { - name string - config *CloudConfig - }{ - { - name: "simple-config", - config: &CloudConfig{ - Global: GlobalOpts{ - Zone: "some-zone", - VPC: "some-vpc", - SubnetID: "some-subnet", - KubernetesClusterID: "some-tag", - DisableSecurityGroupIngress: true, - DisableStrictZoneCheck: true, - ElbSecurityGroup: "some-sg", - KubernetesClusterTag: "some-tag", - RoleARN: "some-arn", - RouteTableID: "some-rt", - NodeIPFamilies: []string{"ipv4", "ipv6"}, - }, - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - s, err := CloudConfigToString(test.config) - if err != nil { - t.Fatal(err) - } - t.Logf("\n%s", s) - - nc := &CloudConfig{} - if err := gcfg.ReadStringInto(nc, s); err != nil { - t.Fatalf("failed to load string into config object: %v", err) - } - goldenName := test.name + ".golden" - testhelper.CompareOutput(t, goldenName, s, *update) - }) - } -} diff --git a/pkg/cloudprovider/provider/aws/types/testdata/simple-config.golden b/pkg/cloudprovider/provider/aws/types/testdata/simple-config.golden deleted file mode 100644 index 57bffe19e..000000000 --- a/pkg/cloudprovider/provider/aws/types/testdata/simple-config.golden +++ /dev/null 
@@ -1,12 +0,0 @@ -[global] -Zone="some-zone" -VPC="some-vpc" -SubnetID="some-subnet" -RouteTableID="some-rt" -RoleARN="some-arn" -KubernetesClusterID="some-tag" -DisableSecurityGroupIngress=true -ElbSecurityGroup="some-sg" -DisableStrictZoneCheck=true -NodeIPFamilies="ipv4" -NodeIPFamilies="ipv6" diff --git a/pkg/cloudprovider/provider/aws/types/types.go b/pkg/cloudprovider/provider/aws/types/types.go deleted file mode 100644 index 243eb209c..000000000 --- a/pkg/cloudprovider/provider/aws/types/types.go +++ /dev/null @@ -1,69 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package types - -import ( - "github.com/kubermatic/machine-controller/pkg/jsonutil" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" -) - -type RawConfig struct { - AccessKeyID providerconfigtypes.ConfigVarString `json:"accessKeyId,omitempty"` - SecretAccessKey providerconfigtypes.ConfigVarString `json:"secretAccessKey,omitempty"` - - AssumeRoleARN providerconfigtypes.ConfigVarString `json:"assumeRoleARN,omitempty"` - AssumeRoleExternalID providerconfigtypes.ConfigVarString `json:"assumeRoleExternalID,omitempty"` - - Region providerconfigtypes.ConfigVarString `json:"region"` - AvailabilityZone providerconfigtypes.ConfigVarString `json:"availabilityZone,omitempty"` - VpcID providerconfigtypes.ConfigVarString `json:"vpcId"` - SubnetID providerconfigtypes.ConfigVarString `json:"subnetId"` - SecurityGroupIDs []providerconfigtypes.ConfigVarString `json:"securityGroupIDs,omitempty"` - InstanceProfile providerconfigtypes.ConfigVarString `json:"instanceProfile,omitempty"` - InstanceType providerconfigtypes.ConfigVarString `json:"instanceType,omitempty"` - AMI providerconfigtypes.ConfigVarString `json:"ami,omitempty"` - DiskSize int32 `json:"diskSize"` - DiskType providerconfigtypes.ConfigVarString `json:"diskType,omitempty"` - DiskIops *int32 `json:"diskIops,omitempty"` - EBSVolumeEncrypted providerconfigtypes.ConfigVarBool `json:"ebsVolumeEncrypted"` - Tags map[string]string `json:"tags,omitempty"` - AssignPublicIP *bool `json:"assignPublicIP,omitempty"` - - IsSpotInstance *bool `json:"isSpotInstance,omitempty"` - SpotInstanceConfig *SpotInstanceConfig `json:"spotInstanceConfig,omitempty"` -} - -type SpotInstanceConfig struct { - MaxPrice providerconfigtypes.ConfigVarString `json:"maxPrice,omitempty"` - PersistentRequest providerconfigtypes.ConfigVarBool `json:"persistentRequest,omitempty"` - InterruptionBehavior providerconfigtypes.ConfigVarString `json:"interruptionBehavior,omitempty"` -} - -// CPUArchitecture defines 
processor architectures returned by the AWS API. -type CPUArchitecture string - -const ( - CPUArchitectureARM64 CPUArchitecture = "arm64" - CPUArchitectureX86_64 CPUArchitecture = "x86_64" - CPUArchitectureI386 CPUArchitecture = "i386" -) - -func GetConfig(pconfig providerconfigtypes.Config) (*RawConfig, error) { - rawConfig := &RawConfig{} - - return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) -} diff --git a/pkg/cloudprovider/provider/azure/config.go b/pkg/cloudprovider/provider/azure/config.go new file mode 100644 index 000000000..1984fe1db --- /dev/null +++ b/pkg/cloudprovider/provider/azure/config.go @@ -0,0 +1,47 @@ +/* +Copyright 2025 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure + +import ( + "fmt" + + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + azuretypes "k8c.io/machine-controller/sdk/cloudprovider/azure" + "k8c.io/machine-controller/sdk/providerconfig" +) + +// newCloudProviderSpec creates a cloud provider specification out of the +// given ProviderSpec. +func newCloudProviderSpec(provSpec clusterv1alpha1.ProviderSpec) (*azuretypes.RawConfig, *providerconfig.Config, error) { + // Retrieve provider configuration from machine specification. 
+ pConfig, err := providerconfig.GetConfig(provSpec) + if err != nil { + return nil, nil, fmt.Errorf("cannot unmarshal machine.spec.providerconfig.value: %w", err) + } + + if pConfig.OperatingSystemSpec.Raw == nil { + return nil, nil, fmt.Errorf("operatingSystemSpec in the MachineDeployment cannot be empty") + } + + // Retrieve cloud provider specification from cloud provider specification. + cpSpec, err := azuretypes.GetConfig(*pConfig) + if err != nil { + return nil, nil, fmt.Errorf("cannot unmarshal cloud provider specification: %w", err) + } + + return cpSpec, pConfig, nil +} diff --git a/pkg/cloudprovider/provider/azure/create_delete_resources.go b/pkg/cloudprovider/provider/azure/create_delete_resources.go index b751736a1..4e76f90b2 100644 --- a/pkg/cloudprovider/provider/azure/create_delete_resources.go +++ b/pkg/cloudprovider/provider/azure/create_delete_resources.go @@ -20,15 +20,15 @@ import ( "context" "fmt" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-11-01/compute" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-05-01/network" + "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute" + "github.com/Azure/azure-sdk-for-go/profiles/latest/network/mgmt/network" "github.com/Azure/go-autorest/autorest/azure/auth" "github.com/Azure/go-autorest/autorest/to" + "go.uber.org/zap" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/util" + "k8c.io/machine-controller/sdk/net" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" ) // deleteInterfacesByMachineUID will remove all network interfaces tagged with the specific machine's UID. 
@@ -152,7 +152,7 @@ func deleteDisksByMachineUID(ctx context.Context, c *config, machineUID types.UI return fmt.Errorf("failed to get disks client: %w", err) } - matchingDisks, err := getDisksByMachineUID(ctx, disksClient, c, machineUID) + matchingDisks, err := getDisksByMachineUID(ctx, disksClient, machineUID) if err != nil { return err } @@ -171,7 +171,7 @@ func deleteDisksByMachineUID(ctx context.Context, c *config, machineUID types.UI return nil } -func getDisksByMachineUID(ctx context.Context, disksClient *compute.DisksClient, c *config, UID types.UID) ([]compute.Disk, error) { +func getDisksByMachineUID(ctx context.Context, disksClient *compute.DisksClient, UID types.UID) ([]compute.Disk, error) { list, err := disksClient.List(ctx) if err != nil { return nil, fmt.Errorf("failed to list disks: %w", err) @@ -194,8 +194,8 @@ func getDisksByMachineUID(ctx context.Context, disksClient *compute.DisksClient, return matchingDisks, nil } -func createOrUpdatePublicIPAddress(ctx context.Context, ipName string, ipVersion network.IPVersion, sku network.PublicIPAddressSkuName, ipAllocationMethod network.IPAllocationMethod, machineUID types.UID, c *config) (*network.PublicIPAddress, error) { - klog.Infof("Creating public IP %q", ipName) +func createOrUpdatePublicIPAddress(ctx context.Context, log *zap.SugaredLogger, ipName string, ipVersion network.IPVersion, sku network.PublicIPAddressSkuName, ipAllocationMethod network.IPAllocationMethod, machineUID types.UID, c *config) (*network.PublicIPAddress, error) { + log.Infow("Creating public IP", "name", ipName) ipClient, err := getIPClient(c) if err != nil { return nil, err @@ -229,7 +229,7 @@ func createOrUpdatePublicIPAddress(ctx context.Context, ipName string, ipVersion return nil, fmt.Errorf("failed to create public IP address: %w", err) } - klog.Infof("Fetching info for IP address %q", ipName) + log.Infow("Fetching info for IP address", "name", ipName) ip, err := getPublicIPAddress(ctx, ipName, c.ResourceGroup, ipClient) 
if err != nil { return nil, fmt.Errorf("failed to fetch info about public IP %q: %w", ipName, err) @@ -256,14 +256,14 @@ func getSubnet(ctx context.Context, c *config) (network.Subnet, error) { return subnetsClient.Get(ctx, c.VNetResourceGroup, c.VNetName, c.SubnetName, "") } -func getSKU(ctx context.Context, c *config) (compute.ResourceSku, error) { +func getSKU(ctx context.Context, log *zap.SugaredLogger, c *config) (compute.ResourceSku, error) { cacheLock.Lock() defer cacheLock.Unlock() cacheKey := fmt.Sprintf("%s-%s", c.Location, c.VMSize) cacheSku, found := cache.Get(cacheKey) if found { - klog.V(3).Info("found SKU in cache!") + log.Debugw("Found SKU in cache", "key", cacheKey, "sku", cacheSku) return cacheSku.(compute.ResourceSku), nil } @@ -319,7 +319,7 @@ func getVirtualNetwork(ctx context.Context, c *config) (network.VirtualNetwork, return virtualNetworksClient.Get(ctx, c.VNetResourceGroup, c.VNetName, "") } -func createOrUpdateNetworkInterface(ctx context.Context, ifName string, machineUID types.UID, config *config, publicIP, publicIPv6 *network.PublicIPAddress, ipFamily util.IPFamily, enableAcceleratedNetworking *bool) (*network.Interface, error) { +func createOrUpdateNetworkInterface(ctx context.Context, log *zap.SugaredLogger, ifName string, machineUID types.UID, config *config, publicIP, publicIPv6 *network.PublicIPAddress, ipFamily net.IPFamily, enableAcceleratedNetworking *bool) (*network.Interface, error) { ifClient, err := getInterfacesClient(config) if err != nil { return nil, fmt.Errorf("failed to create interfaces client: %w", err) @@ -339,30 +339,30 @@ func createOrUpdateNetworkInterface(ctx context.Context, ifName string, machineU Tags: map[string]*string{machineUIDTag: to.StringPtr(string(machineUID))}, } - *ifSpec.InterfacePropertiesFormat.IPConfigurations = append(*ifSpec.InterfacePropertiesFormat.IPConfigurations, network.InterfaceIPConfiguration{ + *ifSpec.IPConfigurations = append(*ifSpec.IPConfigurations, 
network.InterfaceIPConfiguration{ Name: to.StringPtr("ip-config-1"), InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{ Subnet: &subnet, - PrivateIPAllocationMethod: network.IPAllocationMethodDynamic, + PrivateIPAllocationMethod: network.Dynamic, PublicIPAddress: publicIP, Primary: to.BoolPtr(true), }, }) if ipFamily.IsDualstack() { - *ifSpec.InterfacePropertiesFormat.IPConfigurations = append(*ifSpec.InterfacePropertiesFormat.IPConfigurations, network.InterfaceIPConfiguration{ + *ifSpec.IPConfigurations = append(*ifSpec.IPConfigurations, network.InterfaceIPConfiguration{ Name: to.StringPtr("ip-config-2"), InterfaceIPConfigurationPropertiesFormat: &network.InterfaceIPConfigurationPropertiesFormat{ - PrivateIPAllocationMethod: network.IPAllocationMethodDynamic, + PrivateIPAllocationMethod: network.Dynamic, Subnet: &subnet, PublicIPAddress: publicIPv6, Primary: to.BoolPtr(false), - PrivateIPAddressVersion: network.IPVersionIPv6, + PrivateIPAddressVersion: network.IPv6, }, }) } - ifSpec.InterfacePropertiesFormat.EnableAcceleratedNetworking = enableAcceleratedNetworking + ifSpec.EnableAcceleratedNetworking = enableAcceleratedNetworking if config.SecurityGroupName != "" { authorizer, err := auth.NewClientCredentialsConfig(config.ClientID, config.ClientSecret, config.TenantID).Authorizer() @@ -377,7 +377,7 @@ func createOrUpdateNetworkInterface(ctx context.Context, ifName string, machineU } ifSpec.NetworkSecurityGroup = &secGroup } - klog.Infof("Creating/Updating public network interface %q", ifName) + log.Infow("Creating/Updating public network interface", "interface", ifName) future, err := ifClient.CreateOrUpdate(ctx, config.ResourceGroup, ifName, ifSpec) if err != nil { return nil, fmt.Errorf("failed to create interface: %w", err) @@ -393,7 +393,7 @@ func createOrUpdateNetworkInterface(ctx context.Context, ifName string, machineU return nil, fmt.Errorf("failed to get interface creation result: %w", err) } - 
klog.Infof("Fetching info about network interface %q", ifName) + log.Infow("Fetching info about network interface", "interface", ifName) iface, err := ifClient.Get(ctx, config.ResourceGroup, ifName, "") if err != nil { return nil, fmt.Errorf("failed to fetch info about interface %q: %w", ifName, err) diff --git a/pkg/cloudprovider/provider/azure/get_client.go b/pkg/cloudprovider/provider/azure/get_client.go index a4ee34021..c79a0eeba 100644 --- a/pkg/cloudprovider/provider/azure/get_client.go +++ b/pkg/cloudprovider/provider/azure/get_client.go @@ -19,8 +19,8 @@ package azure import ( "fmt" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-11-01/compute" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-05-01/network" + "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute" + "github.com/Azure/azure-sdk-for-go/profiles/latest/network/mgmt/network" "github.com/Azure/go-autorest/autorest/azure/auth" ) diff --git a/pkg/cloudprovider/provider/azure/provider.go b/pkg/cloudprovider/provider/azure/provider.go index 85def986b..143be9a8f 100644 --- a/pkg/cloudprovider/provider/azure/provider.go +++ b/pkg/cloudprovider/provider/azure/provider.go @@ -19,6 +19,7 @@ package azure import ( "context" "encoding/base64" + "encoding/json" "errors" "fmt" "strings" @@ -27,27 +28,26 @@ import ( "unicode" "unicode/utf8" - "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2021-11-01/compute" - "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2021-05-01/network" + "github.com/Azure/azure-sdk-for-go/profiles/latest/compute/mgmt/compute" + "github.com/Azure/azure-sdk-for-go/profiles/latest/network/mgmt/network" "github.com/Azure/go-autorest/autorest/to" gocache "github.com/patrickmn/go-cache" - - "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/common/ssh" - 
cloudprovidererrors "github.com/kubermatic/machine-controller/pkg/cloudprovider/errors" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/instance" - azuretypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/azure/types" - cloudprovidertypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/types" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/util" - kuberneteshelper "github.com/kubermatic/machine-controller/pkg/kubernetes" - "github.com/kubermatic/machine-controller/pkg/providerconfig" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" - - v1 "k8s.io/api/core/v1" + "go.uber.org/zap" + + "k8c.io/machine-controller/pkg/cloudprovider/common/ssh" + cloudprovidererrors "k8c.io/machine-controller/pkg/cloudprovider/errors" + "k8c.io/machine-controller/pkg/cloudprovider/instance" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + kuberneteshelper "k8c.io/machine-controller/pkg/kubernetes" + "k8c.io/machine-controller/sdk/apis/cluster/common" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + azuretypes "k8c.io/machine-controller/sdk/cloudprovider/azure" + "k8c.io/machine-controller/sdk/net" + "k8c.io/machine-controller/sdk/providerconfig" + + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) const ( @@ -73,7 +73,7 @@ const ( ) type provider struct { - configVarResolver *providerconfig.ConfigVarResolver + configVarResolver providerconfig.ConfigVarResolver } type config struct { @@ -112,11 +112,11 @@ type config struct { type azureVM struct { vm *compute.VirtualMachine - ipAddresses map[string]v1.NodeAddressType + ipAddresses map[string]corev1.NodeAddressType status instance.Status } -func (vm *azureVM) Addresses() map[string]v1.NodeAddressType { +func (vm *azureVM) Addresses() map[string]corev1.NodeAddressType { return vm.ipAddresses } @@ -140,54 +140,46 @@ 
func (vm *azureVM) Status() instance.Status { return vm.status } -var imageReferences = map[providerconfigtypes.OperatingSystem]compute.ImageReference{ - providerconfigtypes.OperatingSystemCentOS: { - Publisher: to.StringPtr("OpenLogic"), - Offer: to.StringPtr("CentOS"), - Sku: to.StringPtr("7_9"), // https://docs.microsoft.com/en-us/azure/virtual-machines/linux/using-cloud-init - Version: to.StringPtr("latest"), - }, - providerconfigtypes.OperatingSystemUbuntu: { +const SKUGen2Ubuntu = "server" + +var imageReferences = map[providerconfig.OperatingSystem]compute.ImageReference{ + providerconfig.OperatingSystemUbuntu: { Publisher: to.StringPtr("Canonical"), - Offer: to.StringPtr("0001-com-ubuntu-server-jammy"), - Sku: to.StringPtr("22_04-lts"), + Offer: to.StringPtr("ubuntu-24_04-lts"), + Sku: to.StringPtr("server-gen1"), Version: to.StringPtr("latest"), }, - providerconfigtypes.OperatingSystemRHEL: { + providerconfig.OperatingSystemRHEL: { Publisher: to.StringPtr("RedHat"), Offer: to.StringPtr("rhel-byos"), - Sku: to.StringPtr("rhel-lvm85"), - Version: to.StringPtr("8.5.20220316"), + Sku: to.StringPtr("rhel-lvm95"), + Version: to.StringPtr("9.5.2024112215"), }, - providerconfigtypes.OperatingSystemFlatcar: { + providerconfig.OperatingSystemFlatcar: { Publisher: to.StringPtr("kinvolk"), - Offer: to.StringPtr("flatcar-container-linux"), - Sku: to.StringPtr("stable"), - Version: to.StringPtr("3374.2.0"), + // flatcar-container-linux-corevm-amd64 doesn't require a plan. 
For more info: https://www.flatcar.org/docs/latest/installing/cloud/azure/#corevm + Offer: to.StringPtr("flatcar-container-linux-corevm-amd64"), + Sku: to.StringPtr("stable"), + Version: to.StringPtr("4230.2.2"), }, - providerconfigtypes.OperatingSystemRockyLinux: { - Publisher: to.StringPtr("procomputers"), - Offer: to.StringPtr("rocky-linux-8-5"), - Sku: to.StringPtr("rocky-linux-8-5"), - Version: to.StringPtr("8.5.20211118"), + providerconfig.OperatingSystemRockyLinux: { + Publisher: to.StringPtr("resf"), + Offer: to.StringPtr("rockylinux-x86_64"), + Sku: to.StringPtr("9-base"), + Version: to.StringPtr("9.6.20250531"), }, } -var osPlans = map[providerconfigtypes.OperatingSystem]*compute.Plan{ - providerconfigtypes.OperatingSystemFlatcar: { - Name: pointer.String("stable"), - Publisher: pointer.String("kinvolk"), - Product: pointer.String("flatcar-container-linux"), - }, - providerconfigtypes.OperatingSystemRHEL: { - Name: pointer.String("rhel-lvm85"), - Publisher: pointer.String("redhat"), - Product: pointer.String("rhel-byos"), +var osPlans = map[providerconfig.OperatingSystem]*compute.Plan{ + providerconfig.OperatingSystemRHEL: { + Name: ptr.To("rhel-lvm95"), + Publisher: ptr.To("redhat"), + Product: ptr.To("rhel-byos"), }, - providerconfigtypes.OperatingSystemRockyLinux: { - Name: pointer.String("rocky-linux-8-5"), - Publisher: pointer.String("procomputers"), - Product: pointer.String("rocky-linux-8-5"), + providerconfig.OperatingSystemRockyLinux: { + Name: ptr.To("9-base"), + Publisher: ptr.To("resf"), + Product: ptr.To("rockylinux-x86_64"), }, } @@ -211,7 +203,7 @@ var ( cache = gocache.New(10*time.Minute, 10*time.Minute) ) -func getOSImageReference(c *config, os providerconfigtypes.OperatingSystem) (*compute.ImageReference, error) { +func getOSImageReference(c *config, os providerconfig.OperatingSystem) (*compute.ImageReference, error) { if c.ImageID != "" { return &compute.ImageReference{ ID: to.StringPtr(c.ImageID), @@ -236,56 +228,43 @@ func 
getOSImageReference(c *config, os providerconfigtypes.OperatingSystem) (*co } // New returns a new azure provider. -func New(configVarResolver *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { +func New(configVarResolver providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return &provider{configVarResolver: configVarResolver} } -func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*config, *providerconfigtypes.Config, error) { - if provSpec.Value == nil { - return nil, nil, fmt.Errorf("machine.spec.providerconfig.value is nil") - } - - pconfig, err := providerconfigtypes.GetConfig(provSpec) +func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*config, *providerconfig.Config, error) { + rawCfg, pConfig, err := newCloudProviderSpec(provSpec) if err != nil { - return nil, nil, err - } - - if pconfig.OperatingSystemSpec.Raw == nil { - return nil, nil, errors.New("operatingSystemSpec in the MachineDeployment cannot be empty") - } - - rawCfg, err := azuretypes.GetConfig(*pconfig) - if err != nil { - return nil, nil, err + return nil, nil, fmt.Errorf("failed to parse provider spec: %w", err) } c := config{} - c.SubscriptionID, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawCfg.SubscriptionID, envSubscriptionID) + c.SubscriptionID, err = p.configVarResolver.GetStringValueOrEnv(rawCfg.SubscriptionID, envSubscriptionID) if err != nil { return nil, nil, fmt.Errorf("failed to get the value of \"subscriptionID\" field, error = %w", err) } - c.TenantID, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawCfg.TenantID, envTenantID) + c.TenantID, err = p.configVarResolver.GetStringValueOrEnv(rawCfg.TenantID, envTenantID) if err != nil { return nil, nil, fmt.Errorf("failed to get the value of \"tenantID\" field, error = %w", err) } - c.ClientID, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawCfg.ClientID, envClientID) + c.ClientID, err = p.configVarResolver.GetStringValueOrEnv(rawCfg.ClientID, 
envClientID) if err != nil { return nil, nil, fmt.Errorf("failed to get the value of \"clientID\" field, error = %w", err) } - c.ClientSecret, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawCfg.ClientSecret, envClientSecret) + c.ClientSecret, err = p.configVarResolver.GetStringValueOrEnv(rawCfg.ClientSecret, envClientSecret) if err != nil { return nil, nil, fmt.Errorf("failed to get the value of \"clientSecret\" field, error = %w", err) } - c.ResourceGroup, err = p.configVarResolver.GetConfigVarStringValue(rawCfg.ResourceGroup) + c.ResourceGroup, err = p.configVarResolver.GetStringValue(rawCfg.ResourceGroup) if err != nil { return nil, nil, fmt.Errorf("failed to get the value of \"resourceGroup\" field, error = %w", err) } - c.VNetResourceGroup, err = p.configVarResolver.GetConfigVarStringValue(rawCfg.VNetResourceGroup) + c.VNetResourceGroup, err = p.configVarResolver.GetStringValue(rawCfg.VNetResourceGroup) if err != nil { return nil, nil, fmt.Errorf("failed to get the value of \"vnetResourceGroup\" field, error = %w", err) } @@ -294,37 +273,37 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*config, *p c.VNetResourceGroup = c.ResourceGroup } - c.Location, err = p.configVarResolver.GetConfigVarStringValue(rawCfg.Location) + c.Location, err = p.configVarResolver.GetStringValue(rawCfg.Location) if err != nil { return nil, nil, fmt.Errorf("failed to get the value of \"location\" field, error = %w", err) } - c.VMSize, err = p.configVarResolver.GetConfigVarStringValue(rawCfg.VMSize) + c.VMSize, err = p.configVarResolver.GetStringValue(rawCfg.VMSize) if err != nil { return nil, nil, fmt.Errorf("failed to get the value of \"vmSize\" field, error = %w", err) } - c.VNetName, err = p.configVarResolver.GetConfigVarStringValue(rawCfg.VNetName) + c.VNetName, err = p.configVarResolver.GetStringValue(rawCfg.VNetName) if err != nil { return nil, nil, fmt.Errorf("failed to get the value of \"vnetName\" field, error = %w", err) } - c.SubnetName, 
err = p.configVarResolver.GetConfigVarStringValue(rawCfg.SubnetName) + c.SubnetName, err = p.configVarResolver.GetStringValue(rawCfg.SubnetName) if err != nil { return nil, nil, fmt.Errorf("failed to get the value of \"subnetName\" field, error = %w", err) } - c.LoadBalancerSku, err = p.configVarResolver.GetConfigVarStringValue(rawCfg.LoadBalancerSku) + c.LoadBalancerSku, err = p.configVarResolver.GetStringValue(rawCfg.LoadBalancerSku) if err != nil { return nil, nil, fmt.Errorf("failed to get the value of \"loadBalancerSku\" field, error = %w", err) } - c.RouteTableName, err = p.configVarResolver.GetConfigVarStringValue(rawCfg.RouteTableName) + c.RouteTableName, err = p.configVarResolver.GetStringValue(rawCfg.RouteTableName) if err != nil { return nil, nil, fmt.Errorf("failed to get the value of \"routeTableName\" field, error = %w", err) } - c.AssignPublicIP, _, err = p.configVarResolver.GetConfigVarBoolValue(rawCfg.AssignPublicIP) + c.AssignPublicIP, _, err = p.configVarResolver.GetBoolValue(rawCfg.AssignPublicIP) if err != nil { return nil, nil, fmt.Errorf("failed to get the value of \"assignPublicIP\" field, error = %w", err) } @@ -336,12 +315,12 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*config, *p c.AssignAvailabilitySet = rawCfg.AssignAvailabilitySet c.EnableAcceleratedNetworking = rawCfg.EnableAcceleratedNetworking - c.AvailabilitySet, err = p.configVarResolver.GetConfigVarStringValue(rawCfg.AvailabilitySet) + c.AvailabilitySet, err = p.configVarResolver.GetStringValue(rawCfg.AvailabilitySet) if err != nil { return nil, nil, fmt.Errorf("failed to get the value of \"availabilitySet\" field, error = %w", err) } - c.SecurityGroupName, err = p.configVarResolver.GetConfigVarStringValue(rawCfg.SecurityGroupName) + c.SecurityGroupName, err = p.configVarResolver.GetStringValue(rawCfg.SecurityGroupName) if err != nil { return nil, nil, fmt.Errorf("failed to get the value of \"securityGroupName\" field, error = %w", err) } @@ -361,22 
+340,22 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*config, *p if rawCfg.ImagePlan != nil && rawCfg.ImagePlan.Name != "" { c.ImagePlan = &compute.Plan{ - Name: pointer.String(rawCfg.ImagePlan.Name), - Publisher: pointer.String(rawCfg.ImagePlan.Publisher), - Product: pointer.String(rawCfg.ImagePlan.Product), + Name: ptr.To(rawCfg.ImagePlan.Name), + Publisher: ptr.To(rawCfg.ImagePlan.Publisher), + Product: ptr.To(rawCfg.ImagePlan.Product), } } if rawCfg.ImageReference != nil { c.ImageReference = &compute.ImageReference{ - Publisher: pointer.String(rawCfg.ImageReference.Publisher), - Offer: pointer.String(rawCfg.ImageReference.Offer), - Sku: pointer.String(rawCfg.ImageReference.Sku), - Version: pointer.String(rawCfg.ImageReference.Version), + Publisher: ptr.To(rawCfg.ImageReference.Publisher), + Offer: ptr.To(rawCfg.ImageReference.Offer), + Sku: ptr.To(rawCfg.ImageReference.Sku), + Version: ptr.To(rawCfg.ImageReference.Version), } } - c.ImageID, err = p.configVarResolver.GetConfigVarStringValue(rawCfg.ImageID) + c.ImageID, err = p.configVarResolver.GetStringValue(rawCfg.ImageID) if err != nil { return nil, nil, fmt.Errorf("failed to get image id: %w", err) } @@ -385,12 +364,12 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*config, *p c.EnableBootDiagnostics = *rawCfg.EnableBootDiagnostics } - return &c, pconfig, nil + return &c, pConfig, nil } -func getVMIPAddresses(ctx context.Context, c *config, vm *compute.VirtualMachine, ipFamily util.IPFamily) (map[string]v1.NodeAddressType, error) { +func getVMIPAddresses(ctx context.Context, log *zap.SugaredLogger, c *config, vm *compute.VirtualMachine, ipFamily net.IPFamily) (map[string]corev1.NodeAddressType, error) { var ( - ipAddresses = map[string]v1.NodeAddressType{} + ipAddresses = map[string]corev1.NodeAddressType{} err error ) @@ -398,7 +377,7 @@ func getVMIPAddresses(ctx context.Context, c *config, vm *compute.VirtualMachine return nil, fmt.Errorf("machine is 
missing properties") } - if vm.VirtualMachineProperties.NetworkProfile == nil { + if vm.NetworkProfile == nil { return nil, fmt.Errorf("machine has no network profile") } @@ -413,7 +392,7 @@ func getVMIPAddresses(ctx context.Context, c *config, vm *compute.VirtualMachine splitIfaceID := strings.Split(*iface.ID, "/") ifaceName := splitIfaceID[len(splitIfaceID)-1] - ipAddresses, err = getNICIPAddresses(ctx, c, ipFamily, ifaceName) + ipAddresses, err = getNICIPAddresses(ctx, log, c, ipFamily, ifaceName) if err != nil || vm.NetworkProfile.NetworkInterfaces == nil { return nil, fmt.Errorf("failed to get addresses for interface %q: %w", ifaceName, err) } @@ -422,7 +401,7 @@ func getVMIPAddresses(ctx context.Context, c *config, vm *compute.VirtualMachine return ipAddresses, nil } -func getNICIPAddresses(ctx context.Context, c *config, ipFamily util.IPFamily, ifaceName string) (map[string]v1.NodeAddressType, error) { +func getNICIPAddresses(ctx context.Context, log *zap.SugaredLogger, c *config, ipFamily net.IPFamily, ifaceName string) (map[string]corev1.NodeAddressType, error) { ifClient, err := getInterfacesClient(c) if err != nil { return nil, fmt.Errorf("failed to create interfaces client: %w", err) @@ -433,7 +412,7 @@ func getNICIPAddresses(ctx context.Context, c *config, ipFamily util.IPFamily, i return nil, fmt.Errorf("failed to get interface %q: %w", ifaceName, err) } - ipAddresses := map[string]v1.NodeAddressType{} + ipAddresses := map[string]corev1.NodeAddressType{} if netIf.IPConfigurations == nil { return ipAddresses, nil @@ -444,7 +423,7 @@ func getNICIPAddresses(ctx context.Context, c *config, ipFamily util.IPFamily, i if conf.Name != nil { name = *conf.Name } else { - klog.Warningf("IP configuration of NIC %q was returned with no name, trying to dissect the ID.", ifaceName) + log.Infow("IP configuration of NIC was returned with no name, trying to dissect the ID.", "interface", ifaceName) if conf.ID == nil || len(*conf.ID) == 0 { return nil, fmt.Errorf("IP 
configuration of NIC %q was returned with no ID", ifaceName) } @@ -458,7 +437,7 @@ func getNICIPAddresses(ctx context.Context, c *config, ipFamily util.IPFamily, i return nil, fmt.Errorf("failed to retrieve IP string for IP %q: %w", name, err) } for _, ip := range publicIPs { - ipAddresses[ip] = v1.NodeExternalIP + ipAddresses[ip] = corev1.NodeExternalIP } if ipFamily.HasIPv6() { @@ -467,7 +446,7 @@ func getNICIPAddresses(ctx context.Context, c *config, ipFamily util.IPFamily, i return nil, fmt.Errorf("failed to retrieve IP string for IP %q: %w", name, err) } for _, ip := range publicIP6s { - ipAddresses[ip] = v1.NodeExternalIP + ipAddresses[ip] = corev1.NodeExternalIP } } } @@ -477,7 +456,7 @@ func getNICIPAddresses(ctx context.Context, c *config, ipFamily util.IPFamily, i return nil, fmt.Errorf("failed to retrieve internal IP string for IP %q: %w", name, err) } for _, ip := range internalIPs { - ipAddresses[ip] = v1.NodeInternalIP + ipAddresses[ip] = corev1.NodeInternalIP } } return ipAddresses, nil @@ -524,15 +503,86 @@ func getInternalIPAddresses(ctx context.Context, c *config, inetface, ipconfigNa if internalIP.PrivateIPAddress != nil { ipAddresses = append(ipAddresses, *internalIP.PrivateIPAddress) } - return ipAddresses, nil } -func (p *provider) AddDefaults(spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { +func (p *provider) AddDefaults(log *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { + rawConfig, pconfig, err := newCloudProviderSpec(spec.ProviderSpec) + if err != nil { + return spec, fmt.Errorf("failed to parse provider spec: %w", err) + } + + if rawConfig.ImageID.Value != "" { + return spec, nil + } + + // Skip if imageReference is already fully specified + if rawConfig.ImageReference != nil && rawConfig.ImageReference.Sku != "" { + return spec, nil + } + + vmSize := rawConfig.VMSize.Value + if vmSize == "" { + return spec, nil + } + if pconfig.OperatingSystem == 
providerconfig.OperatingSystemUbuntu { + if rawConfig.ImageReference == nil { + rawConfig.ImageReference = &azuretypes.ImageReference{} + } + + if rawConfig.ImageReference.Sku == "" { + config, _, err := p.getConfig(spec.ProviderSpec) + sku := *imageReferences[providerconfig.OperatingSystemUbuntu].Sku + + if err != nil { + log.Warnw("Failed to get Azure config for SKU lookup, defaulting sku heuristically", "error", err) + if vmSizeSupportsGen2(vmSize) { + sku = SKUGen2Ubuntu + } + } else { + vmSKU, err := getSKU(context.Background(), log, config) + if err != nil { + log.Warnw("Failed to get SKU from Azure API, defaulting sku heuristically", "error", err) + if vmSizeSupportsGen2(vmSize) { + sku = SKUGen2Ubuntu + } + } else { + if skuSupportsGen2(vmSKU) { + sku = SKUGen2Ubuntu + log.Debugw("Using Gen2 image SKU based on Azure API", "vmSize", vmSize) + } + } + } + + rawConfig.ImageReference.Sku = sku + } + + if rawConfig.ImageReference.Publisher == "" { + rawConfig.ImageReference.Publisher = *imageReferences[providerconfig.OperatingSystemUbuntu].Publisher + } + if rawConfig.ImageReference.Offer == "" { + rawConfig.ImageReference.Offer = *imageReferences[providerconfig.OperatingSystemUbuntu].Offer + } + if rawConfig.ImageReference.Version == "" { + rawConfig.ImageReference.Version = *imageReferences[providerconfig.OperatingSystemUbuntu].Version + } + } + + updatedCloudProviderSpec, err := json.Marshal(rawConfig) + if err != nil { + return spec, fmt.Errorf("failed to marshal updated Azure config: %w", err) + } + pconfig.CloudProviderSpec.Raw = updatedCloudProviderSpec + + spec.ProviderSpec.Value.Raw, err = json.Marshal(pconfig) + if err != nil { + return spec, fmt.Errorf("failed to marshal provider config: %w", err) + } + return spec, nil } -func getStorageProfile(config *config, providerCfg *providerconfigtypes.Config) (*compute.StorageProfile, error) { +func getStorageProfile(config *config, providerCfg *providerconfig.Config) (*compute.StorageProfile, error) {
osRef, err := getOSImageReference(config, providerCfg.OperatingSystem) if err != nil { return nil, fmt.Errorf("failed to get OSImageReference: %w", err) @@ -543,7 +593,7 @@ func getStorageProfile(config *config, providerCfg *providerconfigtypes.Config) } if config.OSDiskSize != 0 { sp.OsDisk = &compute.OSDisk{ - DiskSizeGB: pointer.Int32(config.OSDiskSize), + DiskSizeGB: ptr.To(config.OSDiskSize), CreateOption: compute.DiskCreateOptionTypesFromImage, } @@ -559,7 +609,7 @@ func getStorageProfile(config *config, providerCfg *providerconfigtypes.Config) { // this should be in range 0-63 and should be unique per datadisk, since we have only one datadisk, this should be fine Lun: new(int32), - DiskSizeGB: pointer.Int32(config.DataDiskSize), + DiskSizeGB: ptr.To(config.DataDiskSize), CreateOption: compute.DiskCreateOptionTypesEmpty, }, } @@ -573,7 +623,7 @@ func getStorageProfile(config *config, providerCfg *providerconfigtypes.Config) return sp, nil } -func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { +func (p *provider) Create(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { config, providerCfg, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return nil, cloudprovidererrors.TerminalError{ @@ -618,13 +668,13 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, }); err != nil { return nil, err } - publicIP, err = createOrUpdatePublicIPAddress(ctx, publicIPName(ifaceName(machine)), network.IPVersionIPv4, sku, network.IPAllocationMethodStatic, machine.UID, config) + publicIP, err = createOrUpdatePublicIPAddress(ctx, log, publicIPName(ifaceName(machine)), network.IPv4, sku, network.Static, machine.UID, config) if err != nil { return nil, fmt.Errorf("failed to create public IP: %w", err) } if 
ipFamily.IsDualstack() { - publicIPv6, err = createOrUpdatePublicIPAddress(ctx, publicIPv6Name(ifaceName(machine)), network.IPVersionIPv6, sku, network.IPAllocationMethodStatic, machine.UID, config) + publicIPv6, err = createOrUpdatePublicIPAddress(ctx, log, publicIPv6Name(ifaceName(machine)), network.IPv6, sku, network.Static, machine.UID, config) if err != nil { return nil, fmt.Errorf("failed to create public IP: %w", err) } @@ -639,7 +689,7 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, return nil, err } - iface, err := createOrUpdateNetworkInterface(ctx, ifaceName(machine), machine.UID, config, publicIP, publicIPv6, ipFamily, config.EnableAcceleratedNetworking) + iface, err := createOrUpdateNetworkInterface(ctx, log, ifaceName(machine), machine.UID, config, publicIP, publicIPv6, ipFamily, config.EnableAcceleratedNetworking) if err != nil { return nil, fmt.Errorf("failed to generate main network interface: %w", err) } @@ -700,18 +750,18 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, config.AssignAvailabilitySet != nil && *config.AssignAvailabilitySet && config.AvailabilitySet != "" { // Azure expects the full path to the resource asURI := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/availabilitySets/%s", config.SubscriptionID, config.ResourceGroup, config.AvailabilitySet) - vmSpec.VirtualMachineProperties.AvailabilitySet = &compute.SubResource{ID: to.StringPtr(asURI)} + vmSpec.AvailabilitySet = &compute.SubResource{ID: to.StringPtr(asURI)} } if config.EnableBootDiagnostics { vmSpec.DiagnosticsProfile = &compute.DiagnosticsProfile{ BootDiagnostics: &compute.BootDiagnostics{ - Enabled: pointer.Bool(config.EnableBootDiagnostics), + Enabled: ptr.To(config.EnableBootDiagnostics), }, } } - klog.Infof("Creating machine %q", machine.Name) + log.Info("Creating machine") if err := data.Update(machine, func(updatedMachine *clusterv1alpha1.Machine) { if 
!kuberneteshelper.HasFinalizer(updatedMachine, finalizerDisks) { updatedMachine.Finalizers = append(updatedMachine.Finalizers, finalizerDisks) @@ -744,12 +794,12 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, return nil, fmt.Errorf("failed to retrieve updated data for VM %q: %w", machine.Name, err) } - ipAddresses, err := getVMIPAddresses(ctx, config, &vm, ipFamily) + ipAddresses, err := getVMIPAddresses(ctx, log, config, &vm, ipFamily) if err != nil { return nil, fmt.Errorf("failed to retrieve IP addresses for VM %q: %w", machine.Name, err) } - status, err := getVMStatus(ctx, config, machine.Name) + status, err := getVMStatus(ctx, log, config, machine.Name) if err != nil { return nil, fmt.Errorf("failed to retrieve status for VM %q: %w", machine.Name, err) } @@ -757,14 +807,14 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, return &azureVM{vm: &vm, ipAddresses: ipAddresses, status: status}, nil } -func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (bool, error) { +func (p *provider) Cleanup(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (bool, error) { config, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return false, fmt.Errorf("failed to parse MachineSpec: %w", err) } if kuberneteshelper.HasFinalizer(machine, finalizerVM) { - klog.Infof("deleting VM %q", machine.Name) + log.Info("Deleting VM") if err = deleteVMsByMachineUID(ctx, config, machine.UID); err != nil { return false, fmt.Errorf("failed to delete instance for machine %q: %w", machine.Name, err) } @@ -777,7 +827,7 @@ func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine } if kuberneteshelper.HasFinalizer(machine, finalizerDisks) { - klog.Infof("deleting disks of VM %q", machine.Name) + log.Info("Deleting disks") if err := 
deleteDisksByMachineUID(ctx, config, machine.UID); err != nil { return false, fmt.Errorf("failed to remove disks of machine %q: %w", machine.Name, err) } @@ -789,7 +839,7 @@ func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine } if kuberneteshelper.HasFinalizer(machine, finalizerNIC) { - klog.Infof("deleting network interfaces of VM %q", machine.Name) + log.Info("Deleting network interfaces") if err := deleteInterfacesByMachineUID(ctx, config, machine.UID); err != nil { return false, fmt.Errorf("failed to remove network interfaces of machine %q: %w", machine.Name, err) } @@ -801,7 +851,7 @@ func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine } if kuberneteshelper.HasFinalizer(machine, finalizerPublicIP) { - klog.Infof("deleting public IP addresses of VM %q", machine.Name) + log.Info("Deleting public IP addresses") if err := deleteIPAddressesByMachineUID(ctx, config, machine.UID); err != nil { return false, fmt.Errorf("failed to remove public IP addresses of machine %q: %w", machine.Name, err) } @@ -844,7 +894,7 @@ func getVMByUID(ctx context.Context, c *config, uid types.UID) (*compute.Virtual return nil, cloudprovidererrors.ErrInstanceNotFound } -func getVMStatus(ctx context.Context, c *config, vmName string) (instance.Status, error) { +func getVMStatus(ctx context.Context, log *zap.SugaredLogger, c *config, vmName string) (instance.Status, error) { vmClient, err := getVMClient(c) if err != nil { return instance.StatusUnknown, err @@ -863,7 +913,7 @@ func getVMStatus(ctx context.Context, c *config, vmName string) (instance.Status if len(*iv.Statuses) < 2 { provisioningStatus := (*iv.Statuses)[0] if provisioningStatus.Code == nil { - klog.Warningf("azure provisioning status has missing code") + log.Info("Azure provisioning status has missing code") return instance.StatusUnknown, nil } @@ -873,7 +923,7 @@ func getVMStatus(ctx context.Context, c *config, vmName string) (instance.Status case 
"ProvisioningState/deleting": return instance.StatusDeleting, nil default: - klog.Warningf("unknown Azure provisioning status %q", *provisioningStatus.Code) + log.Errorw("Unknown Azure provisioning status", "code", *provisioningStatus.Code, "level", provisioningStatus.Level) return instance.StatusUnknown, nil } } @@ -882,7 +932,7 @@ func getVMStatus(ctx context.Context, c *config, vmName string) (instance.Status // https://docs.microsoft.com/en-us/azure/virtual-machines/windows/tutorial-manage-vm#vm-power-states powerStatus := (*iv.Statuses)[1] if powerStatus.Code == nil { - klog.Warningf("azure power status has missing code") + log.Info("Azure power status has missing code") return instance.StatusUnknown, nil } @@ -894,16 +944,16 @@ func getVMStatus(ctx context.Context, c *config, vmName string) (instance.Status case "PowerState/starting": return instance.StatusCreating, nil default: - klog.Warningf("unknown Azure power status %q", *powerStatus.Code) + log.Errorw("Unknown Azure power status", "code", *powerStatus.Code, "level", powerStatus.Level) return instance.StatusUnknown, nil } } -func (p *provider) Get(ctx context.Context, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { - return p.get(ctx, machine) +func (p *provider) Get(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { + return p.get(ctx, log, machine) } -func (p *provider) get(ctx context.Context, machine *clusterv1alpha1.Machine) (*azureVM, error) { +func (p *provider) get(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine) (*azureVM, error) { config, providerCfg, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return nil, fmt.Errorf("failed to parse MachineSpec: %w", err) @@ -919,12 +969,12 @@ func (p *provider) get(ctx context.Context, machine *clusterv1alpha1.Machine) (* } ipFamily := providerCfg.Network.GetIPFamily() 
- ipAddresses, err := getVMIPAddresses(ctx, config, vm, ipFamily) + ipAddresses, err := getVMIPAddresses(ctx, log, config, vm, ipFamily) if err != nil { return nil, fmt.Errorf("failed to retrieve IP addresses for VM %v: %w", vm.Name, err) } - status, err := getVMStatus(ctx, config, machine.Name) + status, err := getVMStatus(ctx, log, config, machine.Name) if err != nil { return nil, fmt.Errorf("failed to retrieve status for VM %v: %w", vm.Name, err) } @@ -932,45 +982,7 @@ func (p *provider) get(ctx context.Context, machine *clusterv1alpha1.Machine) (* return &azureVM{vm: vm, ipAddresses: ipAddresses, status: status}, nil } -func (p *provider) GetCloudConfig(spec clusterv1alpha1.MachineSpec) (config string, name string, err error) { - c, _, err := p.getConfig(spec.ProviderSpec) - if err != nil { - return "", "", fmt.Errorf("failed to parse config: %w", err) - } - - var avSet string - if c.AssignAvailabilitySet == nil && c.AvailabilitySet != "" || - c.AssignAvailabilitySet != nil && *c.AssignAvailabilitySet && c.AvailabilitySet != "" { - avSet = c.AvailabilitySet - } - - cc := &azuretypes.CloudConfig{ - Cloud: "AZUREPUBLICCLOUD", - TenantID: c.TenantID, - SubscriptionID: c.SubscriptionID, - AADClientID: c.ClientID, - AADClientSecret: c.ClientSecret, - ResourceGroup: c.ResourceGroup, - VnetResourceGroup: c.VNetResourceGroup, - Location: c.Location, - VNetName: c.VNetName, - SubnetName: c.SubnetName, - LoadBalancerSku: c.LoadBalancerSku, - RouteTableName: c.RouteTableName, - PrimaryAvailabilitySetName: avSet, - SecurityGroupName: c.SecurityGroupName, - UseInstanceMetadata: true, - } - - s, err := azuretypes.CloudConfigToString(cc) - if err != nil { - return "", "", fmt.Errorf("failed to convert cloud-config to string: %w", err) - } - - return s, "azure", nil -} - -func validateDiskSKUs(ctx context.Context, c *config, sku compute.ResourceSku) error { +func validateDiskSKUs(_ context.Context, c *config, sku compute.ResourceSku) error { if c.OSDiskSKU != nil || 
c.DataDiskSKU != nil { if c.OSDiskSKU != nil { if _, ok := osDiskSKUs[*c.OSDiskSKU]; !ok { @@ -1002,7 +1014,7 @@ func validateDiskSKUs(ctx context.Context, c *config, sku compute.ResourceSku) e return nil } -func validateSKUCapabilities(ctx context.Context, c *config, sku compute.ResourceSku) error { +func validateSKUCapabilities(_ context.Context, c *config, sku compute.ResourceSku) error { if c.EnableAcceleratedNetworking != nil && *c.EnableAcceleratedNetworking { if !SKUHasCapability(sku, capabilityAcceleratedNetworking) { return fmt.Errorf("VM size %q does not support accelerated networking", c.VMSize) @@ -1011,7 +1023,7 @@ func validateSKUCapabilities(ctx context.Context, c *config, sku compute.Resourc return nil } -func (p *provider) Validate(ctx context.Context, spec clusterv1alpha1.MachineSpec) error { +func (p *provider) Validate(ctx context.Context, log *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) error { c, providerConfig, err := p.getConfig(spec.ProviderSpec) if err != nil { return fmt.Errorf("failed to parse config: %w", err) @@ -1050,14 +1062,14 @@ func (p *provider) Validate(ctx context.Context, spec clusterv1alpha1.MachineSpe } switch f := providerConfig.Network.GetIPFamily(); f { - case util.IPFamilyUnspecified, util.IPFamilyIPv4: + case net.IPFamilyUnspecified, net.IPFamilyIPv4: //noop - case util.IPFamilyIPv6: - return fmt.Errorf(util.ErrIPv6OnlyUnsupported) - case util.IPFamilyIPv4IPv6, util.IPFamilyIPv6IPv4: + case net.IPFamilyIPv6: + return fmt.Errorf(net.ErrIPv6OnlyUnsupported) + case net.IPFamilyIPv4IPv6, net.IPFamilyIPv6IPv4: // validate default: - return fmt.Errorf(util.ErrUnknownNetworkFamily, f) + return fmt.Errorf(net.ErrUnknownNetworkFamily, f) } if c.PublicIPSKU != nil { @@ -1095,7 +1107,7 @@ func (p *provider) Validate(ctx context.Context, spec clusterv1alpha1.MachineSpe return fmt.Errorf("failed to get subnet: %w", err) } - sku, err := getSKU(ctx, c) + sku, err := getSKU(ctx, log, c) if err != nil { return 
fmt.Errorf("failed to get VM SKU: %w", err) } @@ -1124,7 +1136,7 @@ func publicIPv6Name(ifaceName string) string { return ifaceName + "-pubipv6" } -func (p *provider) MigrateUID(ctx context.Context, machine *clusterv1alpha1.Machine, newUID types.UID) error { +func (p *provider) MigrateUID(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, newUID types.UID) error { config, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return cloudprovidererrors.TerminalError{ @@ -1143,21 +1155,21 @@ func (p *provider) MigrateUID(ctx context.Context, machine *clusterv1alpha1.Mach if kuberneteshelper.HasFinalizer(machine, finalizerPublicIPv6) { sku = network.PublicIPAddressSkuNameStandard - _, err = createOrUpdatePublicIPAddress(ctx, publicIPv6Name(ifaceName(machine)), network.IPVersionIPv6, sku, network.IPAllocationMethodDynamic, newUID, config) + _, err = createOrUpdatePublicIPAddress(ctx, log, publicIPv6Name(ifaceName(machine)), network.IPv6, sku, network.Dynamic, newUID, config) if err != nil { return fmt.Errorf("failed to update UID on public IP: %w", err) } } if kuberneteshelper.HasFinalizer(machine, finalizerPublicIP) { - _, err = createOrUpdatePublicIPAddress(ctx, publicIPName(ifaceName(machine)), network.IPVersionIPv4, sku, network.IPAllocationMethodStatic, newUID, config) + _, err = createOrUpdatePublicIPAddress(ctx, log, publicIPName(ifaceName(machine)), network.IPv4, sku, network.Static, newUID, config) if err != nil { return fmt.Errorf("failed to update UID on public IP: %w", err) } } if kuberneteshelper.HasFinalizer(machine, finalizerNIC) { - _, err = createOrUpdateNetworkInterface(ctx, ifaceName(machine), newUID, config, publicIP, publicIPv6, util.IPFamilyUnspecified, config.EnableAcceleratedNetworking) + _, err = createOrUpdateNetworkInterface(ctx, log, ifaceName(machine), newUID, config, publicIP, publicIPv6, net.IPFamilyUnspecified, config.EnableAcceleratedNetworking) if err != nil { return fmt.Errorf("failed to update 
UID on main network interface: %w", err) } @@ -1169,7 +1181,7 @@ func (p *provider) MigrateUID(ctx context.Context, machine *clusterv1alpha1.Mach return fmt.Errorf("failed to get disks client: %w", err) } - disks, err := getDisksByMachineUID(ctx, disksClient, config, machine.UID) + disks, err := getDisksByMachineUID(ctx, disksClient, machine.UID) if err != nil { return fmt.Errorf("failed to get disks: %w", err) } @@ -1217,13 +1229,13 @@ func (p *provider) MachineMetricsLabels(machine *clusterv1alpha1.Machine) (map[s return labels, err } -func (p *provider) SetMetricsForMachines(machines clusterv1alpha1.MachineList) error { +func (p *provider) SetMetricsForMachines(_ clusterv1alpha1.MachineList) error { return nil } -func getOSUsername(os providerconfigtypes.OperatingSystem) string { +func getOSUsername(os providerconfig.OperatingSystem) string { switch os { - case providerconfigtypes.OperatingSystemFlatcar: + case providerconfig.OperatingSystemFlatcar: return "core" default: return string(os) @@ -1330,3 +1342,63 @@ func SKUHasCapability(sku compute.ResourceSku, name string) bool { } return false } + +// getHyperVGenerations returns the supported Hyper-V generations for a VM SKU. +// Returns a string like "V1,V2" or "V2" or "V1" from the Azure API. +func getHyperVGenerations(sku compute.ResourceSku) string { + if sku.Capabilities != nil { + for _, capability := range *sku.Capabilities { + if capability.Name != nil && *capability.Name == "HyperVGenerations" && capability.Value != nil { + return *capability.Value + } + } + } + return "" +} + +// skuSupportsGen2 checks if a VM SKU supports Generation 2 VMs using Azure API. +func skuSupportsGen2(sku compute.ResourceSku) bool { + generations := getHyperVGenerations(sku) + return strings.Contains(generations, "V2") +} + +// vmSizeSupportsGen2 checks if a VM size is known to support Generation 2 VMs using heuristics. 
+func vmSizeSupportsGen2(vmSize string) bool { + size := strings.ToLower(vmSize) + + if !strings.HasPrefix(size, "standard_") { + return false + } + + // A-family explicitly does NOT support Gen2 per Azure docs. + if strings.HasPrefix(size, "standard_a") { + return false + } + + // Families that have Gen2 support according to: + // https://learn.microsoft.com/azure/virtual-machines/generation-2 + // + // Actual availability still depends on the specific SKU and region. + gen2Families := []string{ + "standard_b", + "standard_d", + "standard_f", + "standard_e", + "standard_m", + "standard_l", + "standard_nc", + "standard_nd", + "standard_nv", + "standard_hb", + "standard_hc", + "standard_hx", + } + + for _, family := range gen2Families { + if strings.HasPrefix(size, family) { + return true + } + } + + return false +} diff --git a/pkg/cloudprovider/provider/azure/provider_test.go b/pkg/cloudprovider/provider/azure/provider_test.go new file mode 100644 index 000000000..535c0e650 --- /dev/null +++ b/pkg/cloudprovider/provider/azure/provider_test.go @@ -0,0 +1,89 @@ +/* +Copyright 2024 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package azure + +import ( + "testing" +) + +func TestVMSizeSupportsGen2(t *testing.T) { + tests := []struct { + name string + vmSize string + expected bool + }{ + { + name: "Standard_F2s_v2 should support Gen2", + vmSize: "Standard_F2s_v2", + expected: true, + }, + { + name: "Standard_D2s_v3 should support Gen2", + vmSize: "Standard_D2s_v3", + expected: true, + }, + { + name: "Standard_E2s_v4 should support Gen2", + vmSize: "Standard_E2s_v4", + expected: true, + }, + { + name: "Standard_B2ms should support Gen2", + vmSize: "Standard_B2ms", + expected: true, + }, + { + name: "Standard_D2_v2 should support Gen2", + vmSize: "Standard_D2_v2", + expected: true, + }, + { + name: "Standard_A2 should not support Gen2", + vmSize: "Standard_A2", + expected: false, + }, + { + name: "Standard_D2 (old) should support Gen2", + vmSize: "Standard_D2", + expected: true, + }, + { + name: "lowercase Standard_f2s_v2 should support Gen2", + vmSize: "standard_f2s_v2", + expected: true, + }, + { + name: "Standard_NC6s_v3 should support Gen2", + vmSize: "Standard_NC6s_v3", + expected: true, + }, + { + name: "Standard_NC40ads_H100_v5 should support Gen2", + vmSize: "Standard_NC40ads_H100_v5", + expected: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result := vmSizeSupportsGen2(tt.vmSize) + if result != tt.expected { + t.Errorf("vmSizeSupportsGen2(%s) = %v, expected %v", tt.vmSize, result, tt.expected) + } + }) + } +} diff --git a/pkg/cloudprovider/provider/azure/types/cloudconfig.go b/pkg/cloudprovider/provider/azure/types/cloudconfig.go deleted file mode 100644 index 6ddb8b5ca..000000000 --- a/pkg/cloudprovider/provider/azure/types/cloudconfig.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package types - -import ( - "encoding/json" - "fmt" -) - -type CloudConfig struct { - Cloud string `json:"cloud"` - TenantID string `json:"tenantId"` - SubscriptionID string `json:"subscriptionId"` - AADClientID string `json:"aadClientId"` - AADClientSecret string `json:"aadClientSecret"` - - ResourceGroup string `json:"resourceGroup"` - Location string `json:"location"` - VNetName string `json:"vnetName"` - SubnetName string `json:"subnetName"` - RouteTableName string `json:"routeTableName"` - SecurityGroupName string `json:"securityGroupName" yaml:"securityGroupName"` - PrimaryAvailabilitySetName string `json:"primaryAvailabilitySetName"` - VnetResourceGroup string `json:"vnetResourceGroup"` - UseInstanceMetadata bool `json:"useInstanceMetadata"` - LoadBalancerSku string `json:"loadBalancerSku"` -} - -func CloudConfigToString(c *CloudConfig) (string, error) { - b, err := json.Marshal(c) - if err != nil { - return "", fmt.Errorf("failed to unmarshal config: %w", err) - } - - return string(b), nil -} diff --git a/pkg/cloudprovider/provider/azure/types/types.go b/pkg/cloudprovider/provider/azure/types/types.go deleted file mode 100644 index 7b472689e..000000000 --- a/pkg/cloudprovider/provider/azure/types/types.go +++ /dev/null @@ -1,77 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package types - -import ( - "github.com/kubermatic/machine-controller/pkg/jsonutil" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" -) - -// RawConfig is a direct representation of an Azure machine object's configuration. -type RawConfig struct { - SubscriptionID providerconfigtypes.ConfigVarString `json:"subscriptionID,omitempty"` - TenantID providerconfigtypes.ConfigVarString `json:"tenantID,omitempty"` - ClientID providerconfigtypes.ConfigVarString `json:"clientID,omitempty"` - ClientSecret providerconfigtypes.ConfigVarString `json:"clientSecret,omitempty"` - - Location providerconfigtypes.ConfigVarString `json:"location"` - ResourceGroup providerconfigtypes.ConfigVarString `json:"resourceGroup"` - VNetResourceGroup providerconfigtypes.ConfigVarString `json:"vnetResourceGroup"` - VMSize providerconfigtypes.ConfigVarString `json:"vmSize"` - VNetName providerconfigtypes.ConfigVarString `json:"vnetName"` - SubnetName providerconfigtypes.ConfigVarString `json:"subnetName"` - LoadBalancerSku providerconfigtypes.ConfigVarString `json:"loadBalancerSku"` - RouteTableName providerconfigtypes.ConfigVarString `json:"routeTableName"` - AvailabilitySet providerconfigtypes.ConfigVarString `json:"availabilitySet"` - AssignAvailabilitySet *bool `json:"assignAvailabilitySet"` - SecurityGroupName providerconfigtypes.ConfigVarString `json:"securityGroupName"` - Zones []string `json:"zones"` - ImagePlan *ImagePlan `json:"imagePlan,omitempty"` - ImageReference *ImageReference `json:"imageReference,omitempty"` - EnableAcceleratedNetworking 
*bool `json:"enableAcceleratedNetworking"` - EnableBootDiagnostics *bool `json:"enableBootDiagnostics,omitempty"` - - ImageID providerconfigtypes.ConfigVarString `json:"imageID"` - OSDiskSize int32 `json:"osDiskSize"` - OSDiskSKU *string `json:"osDiskSKU,omitempty"` - DataDiskSize int32 `json:"dataDiskSize"` - DataDiskSKU *string `json:"dataDiskSKU,omitempty"` - AssignPublicIP providerconfigtypes.ConfigVarBool `json:"assignPublicIP"` - PublicIPSKU *string `json:"publicIPSKU,omitempty"` - Tags map[string]string `json:"tags,omitempty"` -} - -// ImagePlan contains azure OS Plan fields for the marketplace images. -type ImagePlan struct { - Name string `json:"name,omitempty"` - Publisher string `json:"publisher,omitempty"` - Product string `json:"product,omitempty"` -} - -// ImageReference specifies information about the image to use. -type ImageReference struct { - Publisher string `json:"publisher,omitempty"` - Offer string `json:"offer,omitempty"` - Sku string `json:"sku,omitempty"` - Version string `json:"version,omitempty"` -} - -func GetConfig(pconfig providerconfigtypes.Config) (*RawConfig, error) { - rawConfig := &RawConfig{} - - return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) -} diff --git a/pkg/cloudprovider/provider/baremetal/plugins/driver.go b/pkg/cloudprovider/provider/baremetal/plugins/driver.go index 0147e1907..e42090cb5 100644 --- a/pkg/cloudprovider/provider/baremetal/plugins/driver.go +++ b/pkg/cloudprovider/provider/baremetal/plugins/driver.go @@ -19,27 +19,18 @@ package plugins import ( "context" + "go.uber.org/zap" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" ) -type Driver string - -const Tinkerbell Driver = "tinkerbell" - -type CloudConfigSettings struct { - Token string - Namespace string - SecretName string - ClusterHost string -} - // PluginDriver manages the communications between the machine controller cloud provider and the bare 
metal env. type PluginDriver interface { - GetServer(context.Context, types.UID, runtime.RawExtension) (Server, error) + GetServer(context.Context) (Server, error) Validate(runtime.RawExtension) error - ProvisionServer(context.Context, types.UID, *CloudConfigSettings, runtime.RawExtension) (Server, error) - DeprovisionServer(context.Context, types.UID) error + ProvisionServer(context.Context, *zap.SugaredLogger, metav1.ObjectMeta, runtime.RawExtension, string) (Server, error) + DeprovisionServer(context.Context) error } // Server represents the server/instance which exists in the bare metal env. diff --git a/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/client/common.go b/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/client/common.go deleted file mode 100644 index 524168e09..000000000 --- a/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/client/common.go +++ /dev/null @@ -1,32 +0,0 @@ -/* -Copyright 2021 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package client contains a client wrapper for Tinkerbell. -package client - -import ( - "errors" -) - -// ErrNotFound is returned if a requested resource is not found. -var ErrNotFound = errors.New("resource not found") - -// than parsing for these specific error message. 
-const ( - sqlErrorString = "rpc error: code = Unknown desc = sql: no rows in result set" - sqlErrorStringAlt = "rpc error: code = Unknown desc = SELECT: sql: no rows in result set" - sqlErrorNotFound = "rpc error: code = NotFound desc = not found" -) diff --git a/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/client/hardware.go b/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/client/hardware.go index c8ccf6c98..387a7f157 100644 --- a/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/client/hardware.go +++ b/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/client/hardware.go @@ -1,5 +1,5 @@ /* -Copyright 2021 The Machine Controller Authors. +Copyright 2024 The Machine Controller Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,94 +18,78 @@ package client import ( "context" - "errors" "fmt" - "strings" - "github.com/google/uuid" - "github.com/tinkerbell/tink/protos/hardware" - "google.golang.org/grpc" + tinkv1alpha1 "github.com/tinkerbell/tink/api/v1alpha1" + + "k8c.io/machine-controller/pkg/cloudprovider/errors" + tbtypes "k8c.io/machine-controller/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/types" + + "k8s.io/apimachinery/pkg/types" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) -// Hardware client for Tinkerbell. -type Hardware struct { - client hardware.HardwareServiceClient +// HardwareClient manages Tinkerbell hardware resources across two clusters. +type HardwareClient struct { + TinkerbellClient ctrlruntimeclient.Client } -// NewHardwareClient returns a Hardware client. -func NewHardwareClient(client hardware.HardwareServiceClient) *Hardware { - return &Hardware{client: client} +// NewHardwareClient creates a new instance of HardwareClient. 
+func NewHardwareClient(tinkerbellClient ctrlruntimeclient.Client) *HardwareClient { + return &HardwareClient{ + TinkerbellClient: tinkerbellClient, + } } -// Create Tinkerbell Hardware. -func (t *Hardware) Create(ctx context.Context, h *hardware.Hardware) error { - if h == nil { - return errors.New("hardware should not be nil") +// GetHardware fetches a hardware object from the Tinkerbell cluster based on the hardware reference in the machine +// deployment object. +func (h *HardwareClient) GetHardware(ctx context.Context, hardwareRef types.NamespacedName) (*tinkv1alpha1.Hardware, error) { + hardware := &tinkv1alpha1.Hardware{} + if err := h.TinkerbellClient.Get(ctx, hardwareRef, hardware); err != nil { + return nil, fmt.Errorf("failed to get hardware '%v': %w", hardwareRef, err) } - if h.GetId() == "" { - h.Id = uuid.New().String() + return hardware, nil +} + +// SetHardwareID sets the ID of a specified Hardware object. +func (h *HardwareClient) SetHardwareID(ctx context.Context, hardware *tinkv1alpha1.Hardware, newID string) error { + if hardware.Spec.Metadata == nil { + hardware.Spec.Metadata = &tinkv1alpha1.HardwareMetadata{} } - if _, err := t.client.Push(ctx, &hardware.PushRequest{Data: h}); err != nil { - return fmt.Errorf("creating hardware in Tinkerbell: %w", err) + if hardware.Spec.Metadata.Instance == nil { + hardware.Spec.Metadata.Instance = &tinkv1alpha1.MetadataInstance{} } - return nil -} + hardware.Spec.Metadata.Instance.ID = newID + hardware.Spec.Metadata.State = tbtypes.Staged + if newID == "" { + // Machine has been deprovisioned + hardware.Spec.Metadata.State = tbtypes.Decommissioned + } -// Update Tinkerbell Hardware. 
-func (t *Hardware) Update(ctx context.Context, h *hardware.Hardware) error { - if _, err := t.client.Push(ctx, &hardware.PushRequest{Data: h}); err != nil { - return fmt.Errorf("updating template in Tinkerbell: %w", err) + // Update the hardware object in the cluster + if err := h.TinkerbellClient.Update(ctx, hardware); err != nil { + return fmt.Errorf("failed to update hardware ID for '%s': %w", hardware.Name, err) } return nil } -// Get returns a Tinkerbell Hardware. -func (t *Hardware) Get(ctx context.Context, id, ip, mac string) (*hardware.Hardware, error) { - var method func(context.Context, *hardware.GetRequest, ...grpc.CallOption) (*hardware.Hardware, error) - - req := &hardware.GetRequest{} - - switch { - case id != "": - req.Id = id - method = t.client.ByID - case mac != "": - req.Mac = mac - method = t.client.ByMAC - case ip != "": - req.Ip = ip - method = t.client.ByIP - default: - return nil, errors.New("need to specify either id, ip, or mac") +func (h *HardwareClient) GetHardwareWithID(ctx context.Context, uid string) (*tinkv1alpha1.Hardware, error) { + // List all hardware in the cluster + var hardwares tinkv1alpha1.HardwareList + if err := h.TinkerbellClient.List(ctx, &hardwares); err != nil { + return nil, fmt.Errorf("failed to list hardware: %w", err) } - tinkHardware, err := method(ctx, req) - if err != nil { - if err.Error() == sqlErrorString || err.Error() == sqlErrorStringAlt { - return nil, fmt.Errorf("hardware %w", ErrNotFound) + // Find the Hardware with the given ID + for _, hw := range hardwares.Items { + if hw.Spec.Metadata.Instance.ID == uid { + return &hw, nil } - - return nil, fmt.Errorf("getting hardware from Tinkerbell: %w", err) - } - - return tinkHardware, nil -} - -// Delete a Tinkerbell Hardware. 
-func (t *Hardware) Delete(ctx context.Context, id string) error { - if _, err := t.client.Delete(ctx, &hardware.DeleteRequest{Id: id}); err != nil { - if err.Error() == sqlErrorString || - err.Error() == sqlErrorStringAlt || - strings.Contains(err.Error(), sqlErrorNotFound) { - return fmt.Errorf("hardware %w", ErrNotFound) - } - - return fmt.Errorf("deleting hardware from Tinkerbell: %w", err) } - return nil + return nil, errors.ErrInstanceNotFound } diff --git a/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/client/interface.go b/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/client/interface.go deleted file mode 100644 index 00b5c6410..000000000 --- a/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/client/interface.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright 2021 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package client - -import ( - "context" - - "github.com/tinkerbell/tink/protos/hardware" - "github.com/tinkerbell/tink/protos/template" -) - -type HardwareClient interface { - Get(context.Context, string, string, string) (*hardware.Hardware, error) - Delete(context.Context, string) error - Create(context.Context, *hardware.Hardware) error -} - -type TemplateClient interface { - Get(context.Context, string, string) (*template.WorkflowTemplate, error) - Create(context.Context, *template.WorkflowTemplate) error -} - -type WorkflowClient interface { - Create(context.Context, string, string) (string, error) -} diff --git a/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/client/template.go b/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/client/template.go index 79e9f00be..6672a675c 100644 --- a/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/client/template.go +++ b/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/client/template.go @@ -1,5 +1,5 @@ /* -Copyright 2021 The Machine Controller Authors. +Copyright 2024 The Machine Controller Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,73 +20,275 @@ import ( "context" "fmt" - "github.com/tinkerbell/tink/protos/template" + tinkv1alpha1 "github.com/tinkerbell/tink/api/v1alpha1" + "gopkg.in/yaml.v3" + + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) -// Template client for Tinkerbell. +type Task struct { + Name string `json:"name"` + WorkerAddr string `json:"worker" yaml:"worker"` + Actions []Action `json:"actions"` + Volumes []string `json:"volumes,omitempty"` + Environment map[string]string `json:"environment,omitempty"` +} + +// Action represents a workflow action. 
+type Action struct { + Name string `json:"name,omitempty"` + Image string `json:"image,omitempty"` + Timeout int64 `json:"timeout,omitempty"` + Volumes []string `json:"volumes,omitempty"` + Pid string `json:"pid,omitempty"` + Environment map[string]string `json:"environment,omitempty"` + Command []string `json:"command,omitempty"` +} type Template struct { - client template.TemplateServiceClient + Version string `yaml:"version"` + Name string `yaml:"name"` + GlobalTimeout int64 `yaml:"global_timeout"` + Tasks []Task `yaml:"tasks"` } -// NewTemplateClient returns a Template client. -func NewTemplateClient(client template.TemplateServiceClient) *Template { - return &Template{client: client} +const ( + fsType = "ext4" + defaultInterpreter = "/bin/sh -c" + hardwareDisk1 = "{{ index .Hardware.Disks 0 }}" + hardwareName = "{{.hardware_name}}" + ProvisionWorkerNodeTemplate = "provision-worker-node" + PartitionNumber = "{{.partition_number}}" + OSImageURL = "{{.os_image}}" +) + +// TemplateClient handles interactions with the Tinkerbell Templates in the Tinkerbell cluster. +type TemplateClient struct { + tinkclient ctrlruntimeclient.Client } -// Get returns a Tinkerbell Template. -func (t *Template) Get(ctx context.Context, id, name string) (*template.WorkflowTemplate, error) { - req := &template.GetRequest{} - if id != "" { - req.GetBy = &template.GetRequest_Id{Id: id} - } else { - req.GetBy = &template.GetRequest_Name{Name: name} +// NewTemplateClient creates a new client for managing Tinkerbell Templates. 
+func NewTemplateClient(k8sClient ctrlruntimeclient.Client) *TemplateClient { + return &TemplateClient{ + tinkclient: k8sClient, } +} - tinkTemplate, err := t.client.GetTemplate(ctx, req) - if err != nil { - if err.Error() == sqlErrorString || err.Error() == sqlErrorStringAlt { - return nil, fmt.Errorf("template %w", ErrNotFound) - } +func (t *TemplateClient) Delete(ctx context.Context, namespacedName types.NamespacedName) error { + template := &tinkv1alpha1.Template{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespacedName.Name, + Namespace: namespacedName.Namespace, + }, + } - return nil, fmt.Errorf("getting template from Tinkerbell: %w", err) + if err := t.tinkclient.Delete(ctx, template); err != nil { + return fmt.Errorf("failed to delete Template in Tinkerbell cluster: %w", err) } - return tinkTemplate, nil + return nil } -// Update a Tinkerbell Template. -func (t *Template) Update(ctx context.Context, template *template.WorkflowTemplate) error { - if _, err := t.client.UpdateTemplate(ctx, template); err != nil { - return fmt.Errorf("updating template in Tinkerbefll: %w", err) +// CreateTemplate creates a Tinkerbell Template in the Kubernetes cluster. +func (t *TemplateClient) CreateTemplate(ctx context.Context, namespace string) error { + template := &tinkv1alpha1.Template{} + if err := t.tinkclient.Get(ctx, types.NamespacedName{ + Name: ProvisionWorkerNodeTemplate, + Namespace: namespace, + }, template); err != nil { + if apierrors.IsNotFound(err) { + data, err := getTemplate(OSImageURL) + if err != nil { + return err + } + + template.Name = ProvisionWorkerNodeTemplate + template.Namespace = namespace + template.Spec = tinkv1alpha1.TemplateSpec{ + Data: &data, // templateData is a string containing the YAML definition. 
+ } + + // Create the Template object in the Tinkerbell cluster + if err := t.tinkclient.Create(ctx, template); err != nil { + return fmt.Errorf("failed to create Template in Tinkerbell cluster: %w", err) + } + + return nil + } + + return fmt.Errorf("failed to get template %s: %w", ProvisionWorkerNodeTemplate, err) } return nil } -// Create a Tinkerbell Template. -func (t *Template) Create(ctx context.Context, template *template.WorkflowTemplate) error { - resp, err := t.client.CreateTemplate(ctx, template) +func getTemplate(osImageURL string) (string, error) { + actions := []Action{ + createWipeDiskAction(), + createStreamUbuntuImageAction(hardwareDisk1, osImageURL), + createGrowPartitionAction(hardwareDisk1), + createNetworkConfigAction(), + configureCloudInitAction(), + decodeCloudInitFile(hardwareName), + createRebootAction(), + } + + task := Task{ + Name: "os-installation", + WorkerAddr: "{{.device_1}}", + Volumes: []string{"/dev:/dev", "/dev/console:/dev/console", "/lib/firmware:/lib/firmware:ro"}, + Actions: actions, + } + + template := Template{ + Name: "ubuntu", + Version: "0.1", + GlobalTimeout: 1800, + Tasks: []Task{task}, + } + yamlData, err := yaml.Marshal(template) if err != nil { - return fmt.Errorf("creating template in Tinkerbell: %w", err) + return "", fmt.Errorf("error marshaling the template to YAML: %w", err) } - template.Id = resp.GetId() + return string(yamlData), nil +} - return nil +func createWipeDiskAction() Action { + wipeScript := `apk add --no-cache util-linux +disks="{{ .Hardware.Disks }}" +disks=${disks:1:-1} +for disk in $disks; do + for partition in $(ls ${disk}* 2>/dev/null); do + if [ -b "${partition}" ]; then + echo "Wiping ${partition}..." + wipefs -af "${partition}" + fi + done +done +echo "All partitions on ${disks} have been wiped." +` + return Action{ + Name: "wipe-disk", + Image: "alpine:3.23", + Timeout: 600, + Command: []string{"/bin/sh", "-c", wipeScript}, + } } -// Delete a Tinkerbell Template. 
-func (t *Template) Delete(ctx context.Context, id string) error { - req := &template.GetRequest{ - GetBy: &template.GetRequest_Id{Id: id}, +func createStreamUbuntuImageAction(destDisk, osImageURL string) Action { + return Action{ + Name: "stream-ubuntu-image", + Image: "quay.io/tinkerbell-actions/image2disk:v1.0.0", + Timeout: 600, + Environment: map[string]string{ + "DEST_DISK": destDisk, + "IMG_URL": osImageURL, + "COMPRESSED": "true", + }, } - if _, err := t.client.DeleteTemplate(ctx, req); err != nil { - if err.Error() == sqlErrorString || err.Error() == sqlErrorStringAlt { - return fmt.Errorf("template %w", ErrNotFound) - } +} - return fmt.Errorf("deleting template from Tinkerbell: %w", err) +func createGrowPartitionAction(destDisk string) Action { + return Action{ + Name: "grow-partition", + Image: "quay.io/tinkerbell/actions/cexec:c5bde803d9f6c90f1a9d5e06930d856d1481854c", + Timeout: 90, + Environment: map[string]string{ + "BLOCK_DEVICE": "{{ formatPartition ( index .Hardware.Disks 0 ) (.partition_number | int) }}", + "FS_TYPE": fsType, + "CHROOT": "y", + "DEFAULT_INTERPRETER": defaultInterpreter, + "CMD_LINE": fmt.Sprintf("growpart %s %s && resize2fs '{{ formatPartition ( index .Hardware.Disks 0 ) (.partition_number | int) }}'", destDisk, PartitionNumber), + }, } +} - return nil +func createNetworkConfigAction() Action { + netplanConfig := ` +network: + version: 2 + renderer: networkd + ethernets: + {{.interface_name}}: + dhcp4: no + addresses: + - {{.cidr}} + nameservers: + addresses: + - {{.ns}} + routes: + - to: default + via: {{.default_route}}` + return Action{ + Name: "add-netplan-config", + Image: "quay.io/tinkerbell-actions/writefile:v1.0.0", + Timeout: 90, + Environment: map[string]string{ + "DEST_DISK": "{{ formatPartition ( index .Hardware.Disks 0 ) (.partition_number | int) }}", + "FS_TYPE": fsType, + "DEST_PATH": "/etc/netplan/config.yaml", + "CONTENTS": netplanConfig, + "UID": "0", + "GID": "0", + "MODE": "0644", + "DIRMODE": "0755", + }, + } 
+} + +func configureCloudInitAction() Action { + commands := `mkdir -p /var/lib/cloud/seed/nocloud && chmod 755 /var/lib/cloud/seed/nocloud +echo 'datasource_list: [ NoCloud ]' > /etc/cloud/cloud.cfg.d/01_ds-identify.cfg +echo '{{.cloud_init_script}}' > /tmp/{{.hardware_name}}-bootstrap-config +echo 'instance-id: {{.hardware_name}}' > /var/lib/cloud/seed/nocloud/meta-data +echo 'local-hostname: {{.hardware_name}}' >> /var/lib/cloud/seed/nocloud/meta-data +` + + return Action{ + Name: "configure-cloud-init", + Image: "quay.io/tinkerbell-actions/cexec:v1.0.0", + Timeout: 90, + Environment: map[string]string{ + "BLOCK_DEVICE": "{{ formatPartition ( index .Hardware.Disks 0 ) (.partition_number | int) }}", + "FS_TYPE": fsType, + "CHROOT": "y", + "DEFAULT_INTERPRETER": defaultInterpreter, + "CMD_LINE": commands, + }, + } +} + +func decodeCloudInitFile(hardwareName string) Action { + return Action{ + Name: "decode-cloud-init-file", + Image: "quay.io/tinkerbell/actions/cexec:latest", + Timeout: 90, + Environment: map[string]string{ + "BLOCK_DEVICE": "{{ formatPartition ( index .Hardware.Disks 0 ) (.partition_number | int) }}", + "FS_TYPE": fsType, + "CHROOT": "y", + "DEFAULT_INTERPRETER": "/bin/sh -c", + "CMD_LINE": fmt.Sprintf("cat /tmp/%s-bootstrap-config | base64 -d > '/var/lib/cloud/seed/nocloud/user-data'", hardwareName), + }, + } +} + +func createRebootAction() Action { + return Action{ + Name: "reboot-action", + Image: "ghcr.io/jacobweinstock/waitdaemon:0.1.1", + Pid: "host", + Timeout: 90, + Command: []string{"reboot"}, + Environment: map[string]string{ + "IMAGE": "alpine", + "WAIT_SECONDS": "10", + }, + Volumes: []string{ + "/var/run/docker.sock:/var/run/docker.sock", + }, + } } diff --git a/pkg/ini/escape.go b/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/client/utils.go similarity index 54% rename from pkg/ini/escape.go rename to pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/client/utils.go index fa45b5854..f73e817db 100644 --- 
a/pkg/ini/escape.go +++ b/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/client/utils.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Machine Controller Authors. +Copyright 2024 The Machine Controller Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,19 +14,26 @@ See the License for the specific language governing permissions and limitations under the License. */ -package ini +package client import ( + "fmt" + "net" "strings" -) -// Allowed escaping characters by gopkg.in/gcfg.v1 - the lib kubernetes uses. -var escaper = strings.NewReplacer( - `\`, `\\`, - `"`, `\"`, + tinkv1alpha1 "github.com/tinkerbell/tink/api/v1alpha1" ) -// Escape escapes values in ini files correctly according to gopkg.in/gcfg.v1 - the lib kubernetes uses. -func Escape(s string) string { - return `"` + escaper.Replace(s) + `"` +func convertNetmaskToCIDR(ip *tinkv1alpha1.IP) string { + mask := net.IPMask(net.ParseIP(ip.Netmask).To4()) + length, _ := mask.Size() + + cidr := "" + parts := strings.Split(ip.Address, ".") + for i := 0; i < len(parts); i++ { + cidr += parts[i] + "." + } + cidr = strings.TrimSuffix(cidr, ".") + + return fmt.Sprintf("%s/%v", cidr, length) } diff --git a/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/client/workflow.go b/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/client/workflow.go index 0c6e682eb..7135356b9 100644 --- a/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/client/workflow.go +++ b/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/client/workflow.go @@ -1,5 +1,5 @@ /* -Copyright 2021 The Machine Controller Authors. +Copyright 2024 The Machine Controller Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -18,203 +18,126 @@ package client import ( "context" - "encoding/json" - "errors" + "encoding/base64" "fmt" - "io" + "time" - "github.com/tinkerbell/tink/protos/hardware" - "github.com/tinkerbell/tink/protos/workflow" -) + tinkv1alpha1 "github.com/tinkerbell/tink/api/v1alpha1" -// Workflow client for Tinkerbell. -type Workflow struct { - client workflow.WorkflowServiceClient - hardwareClient *Hardware -} + tink "k8c.io/machine-controller/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/types" -// NewWorkflowClient returns a Workflow client. -func NewWorkflowClient(client workflow.WorkflowServiceClient, hClient *Hardware) *Workflow { - return &Workflow{client: client, hardwareClient: hClient} -} + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" +) -// Get returns a Tinkerbell Workflow. -func (t *Workflow) Get(ctx context.Context, id string) (*workflow.Workflow, error) { - tinkWorkflow, err := t.client.GetWorkflow(ctx, &workflow.GetRequest{Id: id}) - if err != nil { - if err.Error() == sqlErrorString || err.Error() == sqlErrorStringAlt { - return nil, fmt.Errorf("workflow %w", ErrNotFound) - } +// DefaultPartitionNumber defines the default value for the "partition_number" field. +const DefaultPartitionNumber = "3" - return nil, fmt.Errorf("getting workflow from Tinkerbell: %w", err) - } +// PartitionNumberAnnotation is used to specify the main partition number of the disk device. +const PartitionNumberAnnotation = "hardware.kubermatic.io/partition-number" - return tinkWorkflow, nil +// WorkflowClient handles interactions with the Tinkerbell Workflows. +type WorkflowClient struct { + tinkclient ctrlruntimeclient.Client } -// GetMetadata returns the metadata for a given Tinkerbell Workflow. 
-func (t *Workflow) GetMetadata(ctx context.Context, id string) ([]byte, error) { - verReq := &workflow.GetWorkflowDataRequest{WorkflowId: id} - - verResp, err := t.client.GetWorkflowDataVersion(ctx, verReq) - if err != nil { - if err.Error() == sqlErrorString || err.Error() == sqlErrorStringAlt { - return nil, fmt.Errorf("workflow %w", ErrNotFound) - } - - return nil, fmt.Errorf("getting workflow version from Tinkerbell: %w", err) - } - - req := &workflow.GetWorkflowDataRequest{WorkflowId: id, Version: verResp.GetVersion()} - - resp, err := t.client.GetWorkflowMetadata(ctx, req) - if err != nil { - if err.Error() == sqlErrorString || err.Error() == sqlErrorStringAlt { - return nil, fmt.Errorf("workflow %w", ErrNotFound) - } - - return nil, fmt.Errorf("getting workflow metadata from Tinkerbell: %w", err) +// NewWorkflowClient creates a new client for managing Tinkerbell workflows. +func NewWorkflowClient(k8sClient ctrlruntimeclient.Client) *WorkflowClient { + return &WorkflowClient{ + tinkclient: k8sClient, } - - return resp.GetData(), nil } -// GetActions returns the actions for a given Tinkerbell Workflow. -func (t *Workflow) GetActions(ctx context.Context, id string) ([]*workflow.WorkflowAction, error) { - req := &workflow.WorkflowActionsRequest{WorkflowId: id} +// CreateWorkflow creates a new Tinkerbell Workflow resource in the cluster. 
+func (w *WorkflowClient) CreateWorkflow(ctx context.Context, userData, templateRef, osImageURL string, hardware tink.Hardware) error { + // Construct the Workflow object + ifaceConfig := hardware.Spec.Interfaces[0].DHCP + dnsNameservers := "1.1.1.1" - resp, err := t.client.GetWorkflowActions(ctx, req) - if err != nil { - if err.Error() == sqlErrorString || err.Error() == sqlErrorStringAlt { - return nil, fmt.Errorf("workflow %w", ErrNotFound) - } - - return nil, fmt.Errorf("getting workflow actions from Tinkerbell: %w", err) + for _, ns := range ifaceConfig.NameServers { + dnsNameservers = ns } - return resp.GetActionList(), nil -} - -// GetEvents returns the events for a given Tinkerbell Workflow. -func (t *Workflow) GetEvents(ctx context.Context, id string) ([]*workflow.WorkflowActionStatus, error) { - req := &workflow.GetRequest{Id: id} - - resp, err := t.client.ShowWorkflowEvents(ctx, req) - if err != nil { - if err.Error() == sqlErrorString || err.Error() == sqlErrorStringAlt { - return nil, fmt.Errorf("workflow %w", ErrNotFound) - } - - return nil, fmt.Errorf("getting workflow events from Tinkerbell: %w", err) + workflowName := fmt.Sprintf("%s-%s-%s", hardware.Name, templateRef, time.Now().Format("20060102150405")) + workflow := &tinkv1alpha1.Workflow{ + ObjectMeta: metav1.ObjectMeta{ + Name: workflowName, + Namespace: hardware.Namespace, + Labels: map[string]string{ + tink.HardwareRefLabel: hardware.Name, + }, + }, + Spec: tinkv1alpha1.WorkflowSpec{ + TemplateRef: templateRef, + HardwareRef: hardware.GetName(), + HardwareMap: map[string]string{ + "device_1": hardware.GetMACAddress(), + "hardware_name": hardware.GetName(), + "cloud_init_script": base64.StdEncoding.EncodeToString([]byte(userData)), + "interface_name": ifaceConfig.IfaceName, + "cidr": convertNetmaskToCIDR(ifaceConfig.IP), + "ns": dnsNameservers, + "default_route": ifaceConfig.IP.Gateway, + "partition_number": w.getPartitionNumber(hardware), + "os_image": osImageURL, + }, + }, } - result := 
[]*workflow.WorkflowActionStatus{} - - for { - e, err := resp.Recv() - if errors.Is(err, io.EOF) { - break - } - - if err != nil { - return nil, fmt.Errorf("getting workflow event from Tinkerbell: %w", err) - } - - result = append(result, e) + // Create the Workflow in the cluster + if err := w.tinkclient.Create(ctx, workflow); err != nil { + return fmt.Errorf("failed to create the workflow: %w", err) } - return result, nil + return nil } -// GetState returns the state for a given Tinkerbell Workflow. -func (t *Workflow) GetState(ctx context.Context, id string) (workflow.State, error) { - req := &workflow.GetRequest{Id: id} - - resp, err := t.client.GetWorkflowContext(ctx, req) - if err != nil { - if err.Error() == sqlErrorString || err.Error() == sqlErrorStringAlt { - return 0, fmt.Errorf("workflow %w", ErrNotFound) - } - - return 0, fmt.Errorf("getting workflow state from Tinkerbell: %w", err) - } - - currIndex := resp.GetCurrentActionIndex() - total := resp.GetTotalNumberOfActions() - currState := resp.GetCurrentActionState() - - switch { - case total == 0: - // If there are no actions, let's just call it pending - return workflow.State_STATE_PENDING, nil - case currIndex+1 == total: - // If we are on the last action, just report it's state - return currState, nil - case currState != workflow.State_STATE_SUCCESS: - // If the state of the last action is anything other than - // success, just report it's state. - return currState, nil - default: - // We are not on the last action, and the last action - // was successful, we should report pending - return workflow.State_STATE_PENDING, nil +// GetWorkflow retrieves a Tinkerbell Workflow resource from the cluster. 
+func (w *WorkflowClient) GetWorkflow(ctx context.Context, name string, namespace string) (*tinkv1alpha1.Workflow, error) { + workflow := &tinkv1alpha1.Workflow{} + if err := w.tinkclient.Get(ctx, ctrlruntimeclient.ObjectKey{Name: name, Namespace: namespace}, workflow); err != nil { + return nil, fmt.Errorf("failed to get workflow: %w", err) } + return workflow, nil } -// Create a Tinkerbell Workflow. -func (t *Workflow) Create(ctx context.Context, templateID, hardwareID string) (string, error) { - h, err := t.hardwareClient.Get(ctx, hardwareID, "", "") - if err != nil { - return "", err - } - - hardwareString, err := HardwareToJSON(h) - if err != nil { - return "", err - } - - req := &workflow.CreateRequest{ - Template: templateID, - Hardware: hardwareString, - } +// CleanupWorkflows would delete all workflows that are assigned to a de-provisioned hardware, and they are in a pending +// state, to avoid the situation of re-running a workflow for a de-provisioned machine. +func (w *WorkflowClient) CleanupWorkflows(ctx context.Context, hardwareName, namespace string) error { + workflows := &tinkv1alpha1.WorkflowList{} + if err := w.tinkclient.List(ctx, workflows, &ctrlruntimeclient.ListOptions{ + Namespace: namespace, + LabelSelector: labels.SelectorFromSet(map[string]string{ + tink.HardwareRefLabel: hardwareName, + }), + }); err != nil { + if apierrors.IsNotFound(err) { + return nil + } - resp, err := t.client.CreateWorkflow(ctx, req) - if err != nil { - return "", fmt.Errorf("creating workflow in Tinkerbell: %w", err) + return fmt.Errorf("failed to fetch workflows: %w", err) } - return resp.GetId(), nil -} - -// Delete a Tinkerbell Workflow. 
-func (t *Workflow) Delete(ctx context.Context, id string) error { - if _, err := t.client.DeleteWorkflow(ctx, &workflow.GetRequest{Id: id}); err != nil { - if err.Error() == sqlErrorString || err.Error() == sqlErrorStringAlt { - return fmt.Errorf("workflow %w", ErrNotFound) + for _, workflow := range workflows.Items { + if workflow.Status.State == tinkv1alpha1.WorkflowStatePending || + workflow.Status.State == tinkv1alpha1.WorkflowStateTimeout { + if err := w.tinkclient.Delete(ctx, &workflow); err != nil { + if !apierrors.IsNotFound(err) { + return fmt.Errorf("failed to delete workflow: %w", err) + } + } } - - return fmt.Errorf("deleting workflow from Tinkerbell: %w", err) } return nil } -// HardwareToJSON converts Hardware to a string suitable for use in a -// Workflow Request for the raw Tinkerbell client. -func HardwareToJSON(h *hardware.Hardware) (string, error) { - hardwareInterfaces := h.GetNetwork().GetInterfaces() - hardwareInfo := make(map[string]string, len(hardwareInterfaces)) - - for i, hi := range hardwareInterfaces { - if mac := hi.GetDhcp().GetMac(); mac != "" { - hardwareInfo[fmt.Sprintf("device_%d", i+1)] = mac - } +func (w *WorkflowClient) getPartitionNumber(hardware tink.Hardware) string { + partitionNumber, exists := hardware.Annotations[PartitionNumberAnnotation] + if !exists { + partitionNumber = DefaultPartitionNumber // Use the default value } - - hardwareJSON, err := json.Marshal(hardwareInfo) - if err != nil { - return "", fmt.Errorf("marshaling hardware info into json: %w", err) - } - - return string(hardwareJSON), nil + return partitionNumber } diff --git a/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/driver.go b/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/driver.go index 8c08ff8c4..941ba0148 100644 --- a/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/driver.go +++ b/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/driver.go @@ -18,205 +18,194 @@ package tinkerbell import ( "context" - 
"encoding/json" - "errors" + "encoding/base64" "fmt" - tinkclient "github.com/tinkerbell/tink/client" - tinkpkg "github.com/tinkerbell/tink/pkg" - "github.com/tinkerbell/tink/protos/hardware" - tinktmpl "github.com/tinkerbell/tink/protos/template" - "gopkg.in/yaml.v3" + "github.com/aws/smithy-go/ptr" + tinkv1alpha1 "github.com/tinkerbell/tink/api/v1alpha1" + "go.uber.org/zap" - cloudprovidererrors "github.com/kubermatic/machine-controller/pkg/cloudprovider/errors" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/baremetal/plugins" - tinkerbellclient "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/client" - metadataclient "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/metadata" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/util" + cloudprovidererrors "k8c.io/machine-controller/pkg/cloudprovider/errors" + "k8c.io/machine-controller/pkg/cloudprovider/provider/baremetal/plugins" + "k8c.io/machine-controller/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/client" + tinkerbelltypes "k8c.io/machine-controller/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/types" + tinktypes "k8c.io/machine-controller/sdk/cloudprovider/baremetal/plugins/tinkerbell" + providerconfigtypes "k8c.io/machine-controller/sdk/providerconfig" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/clientcmd" + "k8s.io/kubectl/pkg/scheme" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) -type ClientFactory func() (metadataclient.Client, tinkerbellclient.HardwareClient, tinkerbellclient.TemplateClient, tinkerbellclient.WorkflowClient) - type driver struct { - TinkServerAddress string - ImageRepoAddress string + ClusterName string + OSImageURL string + + HardwareRef types.NamespacedName - metadataClient metadataclient.Client - hardwareClient 
tinkerbellclient.HardwareClient - templateClient tinkerbellclient.TemplateClient - workflowClient tinkerbellclient.WorkflowClient + TinkClient ctrlruntimeclient.Client + HardwareClient client.HardwareClient + WorkflowClient client.WorkflowClient + TemplateClient client.TemplateClient +} + +func init() { + // Ensure the Tinkerbell API types are registered with the global scheme. + if err := tinkv1alpha1.SchemeBuilder.AddToScheme(scheme.Scheme); err != nil { + panic(fmt.Sprintf("failed to add tinkv1alpha1 to scheme: %v", err)) + } } // NewTinkerbellDriver returns a new TinkerBell driver with a configured tinkserver address and a client timeout. -func NewTinkerbellDriver(mdConfig *metadataclient.Config, factory ClientFactory, tinkServerAddress, imageRepoAddress string) (plugins.PluginDriver, error) { - if tinkServerAddress == "" || imageRepoAddress == "" { - return nil, errors.New("tink-server address, ImageRepoAddress cannot be empty") +func NewTinkerbellDriver(tinkConfig tinktypes.Config, tinkSpec *tinktypes.TinkerbellPluginSpec) (plugins.PluginDriver, error) { + tinkClient, err := ctrlruntimeclient.New(tinkConfig.RestConfig, ctrlruntimeclient.Options{Scheme: scheme.Scheme}) + if err != nil { + return nil, fmt.Errorf("failed to create k8s client: %w", err) } + hwClient := client.NewHardwareClient(tinkClient) - var ( - mdClient metadataclient.Client - hwClient tinkerbellclient.HardwareClient - tmplClient tinkerbellclient.TemplateClient - wflClient tinkerbellclient.WorkflowClient - err error - ) + wkClient := client.NewWorkflowClient(tinkClient) - if factory == nil { - mdClient, err = metadataclient.NewMetadataClient(mdConfig) - if err != nil { - return nil, fmt.Errorf("failed to create metadata client: %w", err) - } + tmplClient := client.NewTemplateClient(tinkClient) - if err := tinkclient.Setup(); err != nil { - return nil, fmt.Errorf("failed to setup tink-server client: %w", err) - } + d := driver{ + ClusterName: tinkSpec.ClusterName.Value, + TinkClient: 
tinkClient, + HardwareRef: tinkSpec.HardwareRef, + HardwareClient: *hwClient, + WorkflowClient: *wkClient, + TemplateClient: *tmplClient, + OSImageURL: tinkSpec.OSImageURL.Value, + } - hwClient = tinkerbellclient.NewHardwareClient(tinkclient.HardwareClient) - tmplClient = tinkerbellclient.NewTemplateClient(tinkclient.TemplateClient) - wflClient = tinkerbellclient.NewWorkflowClient(tinkclient.WorkflowClient, tinkerbellclient.NewHardwareClient(tinkclient.HardwareClient)) - } else { - mdClient, hwClient, tmplClient, wflClient = factory() + return &d, nil +} + +func (d *driver) GetServer(ctx context.Context) (plugins.Server, error) { + targetHardware, err := d.HardwareClient.GetHardware(ctx, d.HardwareRef) + if err != nil { + return nil, err } - d := &driver{ - TinkServerAddress: tinkServerAddress, - ImageRepoAddress: imageRepoAddress, - metadataClient: mdClient, - hardwareClient: hwClient, - templateClient: tmplClient, - workflowClient: wflClient, + if targetHardware.Spec.Metadata == nil || targetHardware.Spec.Metadata.State == "" { + return nil, cloudprovidererrors.ErrInstanceNotFound } - return d, nil + server := tinkerbelltypes.Hardware{Hardware: targetHardware} + return &server, nil } -func (d *driver) GetServer(ctx context.Context, uid types.UID, hwSpec runtime.RawExtension) (plugins.Server, error) { - hw := HardwareSpec{} - if err := json.Unmarshal(hwSpec.Raw, &hw); err != nil { - return nil, fmt.Errorf("failed to unmarshal tinkerbell hardware spec: %w", err) +func (d *driver) ProvisionServer(ctx context.Context, _ *zap.SugaredLogger, meta metav1.ObjectMeta, _ runtime.RawExtension, userdata string) (plugins.Server, error) { + // Get the hardware object from tinkerbell + hardware, err := d.HardwareClient.GetHardware(ctx, d.HardwareRef) + if err != nil { + return nil, err } - fetchedHW, err := d.hardwareClient.Get(ctx, string(uid), hw.GetIPAddress(), - hw.GetMACAddress()) - if err != nil { - if resourceNotFoundErr(err) { - return nil, 
cloudprovidererrors.ErrInstanceNotFound + var allowProvision bool + for _, iface := range hardware.Spec.Interfaces { + if iface.Netboot != nil && iface.Netboot.AllowPXE != nil && iface.Netboot.AllowPXE == ptr.Bool(false) { + continue } - return nil, fmt.Errorf("failed to get hardware: %w", err) - } + if iface.Netboot != nil && iface.Netboot.AllowWorkflow != nil && iface.Netboot.AllowWorkflow == ptr.Bool(false) { + continue + } - return &HardwareSpec{ - Hardware: tinkpkg.HardwareWrapper{ - Hardware: fetchedHW, - }, - }, nil -} + allowProvision = true + } -func (d *driver) ProvisionServer(ctx context.Context, uid types.UID, cfg *plugins.CloudConfigSettings, hwSpec runtime.RawExtension) (plugins.Server, error) { - hw := HardwareSpec{} - if err := json.Unmarshal(hwSpec.Raw, &hw); err != nil { - return nil, fmt.Errorf("failed to unmarshal tinkerbell hardware spec: %w", err) + if !allowProvision { + return nil, fmt.Errorf("server %s is not allowed to be provisioned; either hardware allowPXE or allowWorkflow is set to false", hardware.Name) } - hw.Hardware.Id = string(uid) - _, err := d.hardwareClient.Get(ctx, hw.Hardware.Id, "", "") + + // Create template if it doesn't exist + err = d.TemplateClient.CreateTemplate(ctx, d.HardwareRef.Namespace) if err != nil { - if resourceNotFoundErr(err) { - cfg, err := d.metadataClient.GetMachineMetadata() - if err != nil { - return nil, fmt.Errorf("failed to get metadata configs: %w", err) - } - - hw.Hardware.Network.Interfaces[0].Dhcp.Mac = cfg.MACAddress - - ip, netmask, _, err := util.CIDRToIPAndNetMask(cfg.CIDR) - if err != nil { - return nil, fmt.Errorf("failed to parse CIDR: %w", err) - } - dhcpIP := &hardware.Hardware_DHCP_IP{ - Address: ip, - Netmask: netmask, - Gateway: cfg.Gateway, - } - hw.Hardware.Network.Interfaces[0].Dhcp.Ip = dhcpIP - - if err := d.hardwareClient.Create(ctx, hw.Hardware.Hardware); err != nil { - return nil, fmt.Errorf("failed to register hardware to tink-server: %w", err) - } - } + return nil, err } - 
// cfg.SecretName has the same name as the machine name - workflowTemplate, err := d.templateClient.Get(ctx, "", cfg.SecretName) - if err != nil { - if resourceNotFoundErr(err) { - tmpl := createTemplate(d.TinkServerAddress, d.ImageRepoAddress, cfg) - payload, err := yaml.Marshal(tmpl) - if err != nil { - return nil, fmt.Errorf("failed marshalling workflow template: %w", err) - } - - workflowTemplate = &tinktmpl.WorkflowTemplate{ - Name: tmpl.Name, - Id: tmpl.ID, - Data: string(payload), - } - - if err := d.templateClient.Create(ctx, workflowTemplate); err != nil { - return nil, fmt.Errorf("failed to create workflow template: %w", err) - } - } + // Create Workflow to match the template and server + server := tinkerbelltypes.Hardware{Hardware: hardware} + if err = d.WorkflowClient.CreateWorkflow(ctx, userdata, client.ProvisionWorkerNodeTemplate, d.OSImageURL, server); err != nil { + return nil, err } - if _, err := d.workflowClient.Create(ctx, workflowTemplate.Id, hw.GetID()); err != nil { - return nil, fmt.Errorf("failed to provision server id %s running template id %s: %w", workflowTemplate.Id, hw.GetID(), err) + // Set the HardwareID with machine UID. The hardware object is claimed by the machine. 
+ if err = d.HardwareClient.SetHardwareID(ctx, hardware, string(meta.UID)); err != nil { + return nil, err } - return &hw, nil + return &server, nil } -func (d *driver) Validate(hwSpec runtime.RawExtension) error { - hw := HardwareSpec{} - if err := json.Unmarshal(hwSpec.Raw, &hw); err != nil { - return fmt.Errorf("failed to unmarshal tinkerbell hardware spec: %w", err) - } +func (d *driver) Validate(_ runtime.RawExtension) error { + return nil +} - if hw.Hardware.Hardware == nil { - return fmt.Errorf("tinkerbell hardware data can not be empty") +func (d *driver) DeprovisionServer(ctx context.Context) error { + // Get the hardware object from tinkerbell cluster + targetHardware, err := d.HardwareClient.GetHardware(ctx, d.HardwareRef) + if err != nil { + return err } - if hw.Hardware.Network == nil { - return fmt.Errorf("tinkerbell hardware network configs can not be empty") + if err := d.WorkflowClient.CleanupWorkflows(ctx, targetHardware.Name, targetHardware.Namespace); err != nil { + return fmt.Errorf("failed to cleanup workflows for hardware %s: %w", targetHardware.Name, err) } - if hw.Hardware.Metadata == "" { - return fmt.Errorf("tinkerbell hardware metadata can not be empty") + // Reset the hardware ID and state in the tinkerbell cluster. + if err := d.HardwareClient.SetHardwareID(ctx, targetHardware, ""); err != nil { + return fmt.Errorf("failed to reset hardware ID for %s: %w", targetHardware.Name, err) } return nil } -func (d *driver) DeprovisionServer(ctx context.Context, uid types.UID) error { - if err := d.hardwareClient.Delete(ctx, string(uid)); err != nil { - if resourceNotFoundErr(err) { - return nil +func GetConfig(driverConfig tinktypes.TinkerbellPluginSpec, valueFromStringOrEnvVar func(configVar providerconfigtypes.ConfigVarString, envVarName string) (string, error)) (*tinktypes.Config, error) { + config := tinktypes.Config{} + var err error + // Kubeconfig was specified directly in the Machine/MachineDeployment CR. 
In this case we need to ensure that the value is base64 encoded. + if driverConfig.Auth.Kubeconfig.Value != "" { + val, err := base64.StdEncoding.DecodeString(driverConfig.Auth.Kubeconfig.Value) + if err != nil { + // An error here means that this is not a valid base64 string + // We can be more explicit here with the error for visibility. Webhook will return this error if we hit this scenario. + return nil, fmt.Errorf("failed to decode base64 encoded kubeconfig. Expected value is a base64 encoded Kubeconfig in JSON or YAML format: %w", err) + } + config.Kubeconfig = string(val) + } else { + // Environment variable or secret reference was used for providing the value of kubeconfig + // We have to be lenient in this case and allow unencoded values as well. + // TODO(mq): Replace this field with a reference to a secret instead of having it inlined. + config.Kubeconfig, err = valueFromStringOrEnvVar(driverConfig.Auth.Kubeconfig, "TINK_KUBECONFIG") + if err != nil { + return nil, fmt.Errorf(`failed to get value of "kubeconfig" field: %w`, err) + } + val, err := base64.StdEncoding.DecodeString(config.Kubeconfig) + // We intentionally ignore errors here with an assumption that an unencoded YAML or JSON must have been passed on + // in this case. 
+ if err == nil { + config.Kubeconfig = string(val) } - return fmt.Errorf("failed to delete tinkerbell hardware data: %w", err) } - return nil -} + config.ClusterName, err = valueFromStringOrEnvVar(driverConfig.ClusterName, "CLUSTER_NAME") + if err != nil { + return nil, fmt.Errorf(`failed to get value of "clusterName" field: %w`, err) + } -func resourceNotFoundErr(err error) bool { - switch err.Error() { - case fmt.Sprintf("hardware %s", tinkerbellclient.ErrNotFound.Error()): - return true - case fmt.Sprintf("template %s", tinkerbellclient.ErrNotFound.Error()): - return true + config.OSImageURL, err = valueFromStringOrEnvVar(driverConfig.OSImageURL, "OS_IMAGE_URL") + if err != nil { + return nil, fmt.Errorf(`failed to get value of "OSImageURL" field: %w`, err) } - return false + config.RestConfig, err = clientcmd.RESTConfigFromKubeConfig([]byte(config.Kubeconfig)) + if err != nil { + return nil, fmt.Errorf("failed to decode kubeconfig: %w", err) + } + return &config, nil } diff --git a/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/driver_test.go b/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/driver_test.go deleted file mode 100644 index 229dd6817..000000000 --- a/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/driver_test.go +++ /dev/null @@ -1,346 +0,0 @@ -/* -Copyright 2021 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package tinkerbell - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "reflect" - "testing" - - "github.com/tinkerbell/tink/protos/hardware" - "github.com/tinkerbell/tink/protos/template" - "github.com/tinkerbell/tink/workflow" - - cloudprovidererrors "github.com/kubermatic/machine-controller/pkg/cloudprovider/errors" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/baremetal/plugins" - tinkerbellclient "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/client" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/metadata" - - "k8s.io/apimachinery/pkg/runtime" -) - -func TestNewTinkerbellDriver(t *testing.T) { - var testCases = []struct { - name string - tinkServer string - imageRepoServer string - clientFactor ClientFactory - errorIsExpected bool - }{ - { - name: "create new tinkerbell driver failure, missing image repo server", - tinkServer: "10.129.8.102", - imageRepoServer: "", - errorIsExpected: true, - }, - { - name: "create new tinkerbell driver failure, missing tink server", - tinkServer: "", - imageRepoServer: "10.129.8.102:8080", - errorIsExpected: true, - }, - { - name: "create new tinkerbell driver success", - tinkServer: "10.129.8.102", - imageRepoServer: "10.129.8.102:8080", - clientFactor: func() (metadata.Client, tinkerbellclient.HardwareClient, tinkerbellclient.TemplateClient, tinkerbellclient.WorkflowClient) { - return &fakeMetadataClient{}, &fakeHardwareClient{}, &fakeTemplateClient{}, &fakeWorkflowClient{} - }, - errorIsExpected: false, - }, - } - - for _, test := range testCases { - t.Run(test.name, func(t *testing.T) { - _, err := NewTinkerbellDriver(nil, test.clientFactor, test.tinkServer, test.imageRepoServer) - if err != nil { - if test.errorIsExpected { - return - } - - t.Fatalf("failed to create tinkerbell client: %v", err) - } - }) - } -} - -func TestDriver_GetServer(t *testing.T) { - var testCases = 
[]struct { - name string - tinkServer string - imageRepoServer string - hardwareSpec runtime.RawExtension - clientFactor ClientFactory - expectedHardwareSpec string - errorIsExpected bool - expectedError error - }{ - { - name: "failed to get server", - tinkServer: "10.129.8.102", - imageRepoServer: "10.129.8.102:8080", - hardwareSpec: runtime.RawExtension{Raw: []byte("{\n \"hardware\": {\n \"network\": {\n \"interfaces\": [\n {\n \"dhcp\": {\n \"ip\": {\n \"address\": \"10.129.8.90\"\n },\n \"mac\": \"18:C0:4D:B1:18:E3\"\n }\n }\n ]\n }\n }\n}")}, - clientFactor: func() (metadata.Client, tinkerbellclient.HardwareClient, tinkerbellclient.TemplateClient, tinkerbellclient.WorkflowClient) { - return &fakeMetadataClient{}, &fakeHardwareClient{ - err: &resourceError{ - resource: "hardware", - }, - }, &fakeTemplateClient{}, &fakeWorkflowClient{} - }, - errorIsExpected: true, - expectedError: cloudprovidererrors.ErrInstanceNotFound, - }, - { - name: "get server success", - tinkServer: "10.129.8.102", - imageRepoServer: "10.129.8.102:8080", - hardwareSpec: runtime.RawExtension{Raw: []byte("{\n \"hardware\": {\n \"network\": {\n \"interfaces\": [\n {\n \"dhcp\": {\n \"ip\": {\n \"address\": \"10.129.8.90\"\n },\n \"mac\": \"18:C0:4D:B1:18:E3\"\n }\n }\n ]\n }\n }\n}")}, - clientFactor: func() (metadata.Client, tinkerbellclient.HardwareClient, tinkerbellclient.TemplateClient, tinkerbellclient.WorkflowClient) { - return &fakeMetadataClient{}, &fakeHardwareClient{}, &fakeTemplateClient{}, &fakeWorkflowClient{} - }, - errorIsExpected: false, - expectedHardwareSpec: "{\n \"hardware\": {\n \"metadata\": {\n \"facility\": {\n \"facility_code\": \"ewr1\",\n \"plan_slug\": \"c2.medium.x86\",\n \"plan_version_slug\": \"\"\n },\n \"instance\": {\n \"operating_system_version\": {\n \"distro\": \"ubuntu\",\n \"os_slug\": \"ubuntu_18_04\",\n \"version\": \"18.04\"\n }\n },\n \"state\": \"\"\n },\n \"network\": {\n \"interfaces\": [\n {\n \"dhcp\": {\n \"arch\": \"x86_64\",\n \"ip\": {\n 
\"address\": \"10.129.8.90\",\n \"gateway\": \"10.129.8.89\",\n \"netmask\": \"255.255.255.252\"\n },\n \"mac\": \"18:C0:4D:B1:18:E3\",\n \"uefi\": false\n },\n \"netboot\": {\n \"allow_pxe\": true,\n \"allow_workflow\": true\n }\n }\n ]\n }\n }\n}", - }, - } - - for _, test := range testCases { - t.Run(test.name, func(t *testing.T) { - d, err := NewTinkerbellDriver(nil, test.clientFactor, test.tinkServer, test.imageRepoServer) - if err != nil { - t.Fatalf("failed to create tinkerbell driver: %v", err) - } - - ctx := context.Background() - s, err := d.GetServer(ctx, "0eba0bf8-3772-4b4a-ab9f-6ebe93b90a94", test.hardwareSpec) - if err != nil { - if test.errorIsExpected && errors.Is(err, test.expectedError) { - return - } - - t.Fatalf("failed to execute get server: %v", err) - } - - hw := &HardwareSpec{} - if err := json.Unmarshal([]byte(test.expectedHardwareSpec), hw); err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(hw, s) { - t.Fatal("server spec and hardware spec mismatched") - } - }) - } -} - -func TestDriver_ProvisionServer(t *testing.T) { - var testCases = []struct { - name string - tinkServer string - imageRepoServer string - hardwareSpec runtime.RawExtension - clientFactory ClientFactory - cloudConfig *plugins.CloudConfigSettings - expectedHardwareSpec string - errorIsExpected bool - expectedError error - }{ - { - name: "provision server success", - tinkServer: "10.129.8.102", - imageRepoServer: "10.129.8.102:8080", - hardwareSpec: runtime.RawExtension{Raw: []byte("{\n \"hardware\": {\n \"metadata\": {\n \"facility\": {\n \"facility_code\": \"ewr1\",\n \"plan_slug\": \"c2.medium.x86\",\n \"plan_version_slug\": \"\"\n },\n \"instance\": {\n \"operating_system_version\": {\n \"distro\": \"ubuntu\",\n \"os_slug\": \"ubuntu_18_04\",\n \"version\": \"18.04\"\n }\n },\n \"state\": \"\"\n },\n \"network\": {\n \"interfaces\": [\n {\n \"dhcp\": {\n \"arch\": \"x86_64\",\n \"ip\": {\n \"address\": \"10.129.8.90\",\n \"gateway\": \"10.129.8.89\",\n 
\"netmask\": \"255.255.255.252\"\n },\n \"mac\": \"18:C0:4D:B1:18:E3\",\n \"uefi\": false\n },\n \"netboot\": {\n \"allow_pxe\": true,\n \"allow_workflow\": true\n }\n }\n ]\n }\n }\n}")}, - clientFactory: func() (metadata.Client, tinkerbellclient.HardwareClient, tinkerbellclient.TemplateClient, tinkerbellclient.WorkflowClient) { - return &fakeMetadataClient{}, &fakeHardwareClient{ - err: &resourceError{ - resource: "hardware", - }, - }, &fakeTemplateClient{}, &fakeWorkflowClient{} - }, - cloudConfig: &plugins.CloudConfigSettings{ - Token: "test-token", - Namespace: "kube-system", - SecretName: "test-secret", - ClusterHost: "10.10.10.10", - }, - expectedHardwareSpec: "{\n \"hardware\": {\n \"id\": \"0eba0bf8-3772-4b4a-ab9f-6ebe93b90a94\",\n \"metadata\": {\n \"facility\": {\n \"facility_code\": \"ewr1\",\n \"plan_slug\": \"c2.medium.x86\",\n \"plan_version_slug\": \"\"\n },\n \"instance\": {\n \"operating_system_version\": {\n \"distro\": \"ubuntu\",\n \"os_slug\": \"ubuntu_18_04\",\n \"version\": \"18.04\"\n }\n },\n \"state\": \"\"\n },\n \"network\": {\n \"interfaces\": [\n {\n \"dhcp\": {\n \"arch\": \"x86_64\",\n \"ip\": {\n \"address\": \"10.129.8.90\",\n \"gateway\": \"10.129.8.89\",\n \"netmask\": \"255.255.255.252\"\n },\n \"mac\": \"18:C0:4D:B1:18:E3\",\n \"uefi\": false\n },\n \"netboot\": {\n \"allow_pxe\": true,\n \"allow_workflow\": true\n }\n }\n ]\n }\n }\n}", - errorIsExpected: false, - }, - } - - for _, test := range testCases { - t.Run(test.name, func(t *testing.T) { - d, err := NewTinkerbellDriver(nil, test.clientFactory, test.tinkServer, test.imageRepoServer) - if err != nil { - t.Fatalf("failed to create tinkerbell driver: %v", err) - } - - ctx := context.Background() - s, err := d.ProvisionServer(ctx, "0eba0bf8-3772-4b4a-ab9f-6ebe93b90a94", test.cloudConfig, test.hardwareSpec) - if err != nil { - t.Fatalf("failed to execute provision server: %v", err) - } - - hw := &HardwareSpec{} - if err := json.Unmarshal([]byte(test.expectedHardwareSpec), 
hw); err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(hw, s) { - t.Fatal("server spec and hardware spec mismatched") - } - }) - } -} - -type fakeMetadataClient struct{} - -func (f *fakeMetadataClient) GetMachineMetadata() (*metadata.MachineMetadata, error) { - return &metadata.MachineMetadata{ - CIDR: "10.129.8.90/30", - MACAddress: "18:C0:4D:B1:18:E3", - Gateway: "10.129.8.89", - }, nil -} - -type fakeHardwareClient struct { - err *resourceError -} - -func (f *fakeHardwareClient) Get(_ context.Context, _ string, _ string, _ string) (*hardware.Hardware, error) { - if f.err != nil { - return nil, f.err - } - - return &hardware.Hardware{ - Metadata: "{\"facility\":{\"facility_code\":\"ewr1\",\"plan_slug\":\"c2.medium.x86\",\"plan_version_slug\":\"\"},\"instance\":{\"operating_system_version\":{\"distro\":\"ubuntu\",\"os_slug\":\"ubuntu_18_04\",\"version\":\"18.04\"}},\"state\":\"\"}", - Network: &hardware.Hardware_Network{ - Interfaces: []*hardware.Hardware_Network_Interface{ - { - Dhcp: &hardware.Hardware_DHCP{ - Arch: "x86_64", - Uefi: false, - Mac: "18:C0:4D:B1:18:E3", - Ip: &hardware.Hardware_DHCP_IP{ - Address: "10.129.8.90", - Netmask: "255.255.255.252", - Gateway: "10.129.8.89", - }, - }, - Netboot: &hardware.Hardware_Netboot{ - AllowPxe: true, - AllowWorkflow: true, - }, - }, - }, - }, - }, nil -} - -func (f *fakeHardwareClient) Delete(_ context.Context, _ string) error { - return nil -} - -func (f *fakeHardwareClient) Create(_ context.Context, hw *hardware.Hardware) error { - expectedHW := &hardware.Hardware{ - Id: "0eba0bf8-3772-4b4a-ab9f-6ebe93b90a94", - Metadata: "{\"facility\":{\"facility_code\":\"ewr1\",\"plan_slug\":\"c2.medium.x86\",\"plan_version_slug\":\"\"},\"instance\":{\"operating_system_version\":{\"distro\":\"ubuntu\",\"os_slug\":\"ubuntu_18_04\",\"version\":\"18.04\"}},\"state\":\"\"}", - Network: &hardware.Hardware_Network{ - Interfaces: []*hardware.Hardware_Network_Interface{ - { - Dhcp: &hardware.Hardware_DHCP{ - Arch: "x86_64", - 
Uefi: false, - Mac: "18:C0:4D:B1:18:E3", - Ip: &hardware.Hardware_DHCP_IP{ - Address: "10.129.8.90", - Netmask: "255.255.255.252", - Gateway: "10.129.8.89", - }, - }, - Netboot: &hardware.Hardware_Netboot{ - AllowPxe: true, - AllowWorkflow: true, - }, - }, - }, - }, - } - - if !reflect.DeepEqual(hw, expectedHW) { - return errors.New("unexpected hardware data") - } - - return nil -} - -type fakeTemplateClient struct{} - -func (f *fakeTemplateClient) Get(_ context.Context, _ string, _ string) (*template.WorkflowTemplate, error) { - wfl := &workflow.Workflow{ - Version: "0.1", - Name: "fake_template", - GlobalTimeout: 6000, - Tasks: []workflow.Task{ - { - Name: "disk-wipe", - WorkerAddr: "{{.device_1}}", - Volumes: []string{ - "/dev:/dev", - "/dev/console:/dev/console", - "/lib/firmware:/lib/firmware:ro", - }, - Actions: []workflow.Action{ - { - Name: "disk-wipe", - Image: "disk-wipe:v1", - Timeout: 90, - }, - }, - }, - }, - } - - payload, err := json.Marshal(wfl) - if err != nil { - return nil, err - } - - return &template.WorkflowTemplate{ - Data: string(payload), - }, nil -} - -func (f *fakeTemplateClient) Create(_ context.Context, _ *template.WorkflowTemplate) error { - return nil -} - -type fakeWorkflowClient struct{} - -func (f *fakeWorkflowClient) Create(_ context.Context, _ string, _ string) (string, error) { - return "", nil -} - -type resourceError struct { - resource string -} - -func (re *resourceError) Error() string { - return fmt.Sprintf("%s %s", re.resource, tinkerbellclient.ErrNotFound.Error()) -} diff --git a/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/hardware.go b/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/hardware.go deleted file mode 100644 index dd8a006ab..000000000 --- a/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/hardware.go +++ /dev/null @@ -1,67 +0,0 @@ -/* -Copyright 2021 The Machine Controller Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package tinkerbell - -import ( - "encoding/json" - - "github.com/tinkerbell/tink/pkg" - - "k8s.io/klog" -) - -type HardwareSpec struct { - Hardware pkg.HardwareWrapper `json:"hardware"` -} - -func (h *HardwareSpec) GetName() string { - return "" -} - -func (h *HardwareSpec) GetID() string { - return h.Hardware.Id -} - -func (h *HardwareSpec) GetIPAddress() string { - interfaces := h.Hardware.Network.Interfaces - if len(interfaces) > 0 && interfaces[0].Dhcp.Ip != nil { - return h.Hardware.Network.Interfaces[0].Dhcp.Ip.Address - } - - return "" -} - -func (h *HardwareSpec) GetMACAddress() string { - if len(h.Hardware.Network.Interfaces) > 0 { - return h.Hardware.Network.Interfaces[0].Dhcp.Mac - } - - return "" -} - -func (h *HardwareSpec) GetStatus() string { - metadata := struct { - State string `json:"state"` - }{} - - if err := json.Unmarshal([]byte(h.Hardware.Metadata), &metadata); err != nil { - klog.Errorf("failed to unmarshal hardware metadata: %v", err) - return "" - } - - return metadata.State -} diff --git a/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/metadata/client.go b/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/metadata/client.go deleted file mode 100644 index 29ce3d9c8..000000000 --- a/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/metadata/client.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Copyright 2021 The Machine Controller Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package metadata - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "time" -) - -type MachineMetadata struct { - CIDR string `json:"cidr,omitempty"` - MACAddress string `json:"mac_address,omitempty"` - Gateway string `json:"gateway,omitempty"` - Status string `json:"status,omitempty"` -} - -type Config struct { - Endpoint string `json:"endpoint,omitempty"` - AuthConfig *AuthConfig `json:"authConfig,omitempty"` -} - -type AuthMethod string - -const ( - BasicAuth AuthMethod = "BasicAuth" - BearerToken AuthMethod = "BearerToken" - - defaultTimeout = 30 * time.Second -) - -type AuthConfig struct { - AuthMethod AuthMethod `json:"authMethod"` - Username string `json:"username"` - Password string `json:"password"` - Token string `json:"token"` -} - -type Client interface { - GetMachineMetadata() (*MachineMetadata, error) -} - -type defaultClient struct { - metadataEndpoint string - authConfig *AuthConfig - client *http.Client -} - -func NewMetadataClient(cfg *Config) (Client, error) { - if cfg.Endpoint == "" { - return nil, errors.New("machine metadata endpoint cannot be empty") - } - - client := http.DefaultClient - client.Timeout = defaultTimeout - - return &defaultClient{ - metadataEndpoint: cfg.Endpoint, - authConfig: cfg.AuthConfig, - client: client, - }, nil -} - -func (d *defaultClient) GetMachineMetadata() (*MachineMetadata, error) { - req, err := http.NewRequest(http.MethodGet, d.metadataEndpoint, 
nil) - // TODO: Fix this - req = req.WithContext(context.TODO()) - if err != nil { - return nil, fmt.Errorf("failed to create a get metadata request: %w", err) - } - - req.Header.Set("Content-Type", "application/json") - d.getAuthMethod(req) - - res, err := d.client.Do(req) - if err != nil { - return nil, fmt.Errorf("failed to execute get metadata request: %w", err) - } - - defer res.Body.Close() - - if res.StatusCode != http.StatusOK { - return nil, fmt.Errorf("failed to execute get metadata request with status code: %v", res.StatusCode) - } - data, err := io.ReadAll(res.Body) - if err != nil { - return nil, fmt.Errorf("failed to read response body: %w", err) - } - - mdConfig := &MachineMetadata{} - if err := json.Unmarshal(data, mdConfig); err != nil { - return nil, fmt.Errorf("failed to unmarshal metadata config: %w", err) - } - - return mdConfig, nil -} - -func (d *defaultClient) getAuthMethod(req *http.Request) { - switch d.authConfig.AuthMethod { - case BasicAuth: - req.SetBasicAuth(d.authConfig.Username, d.authConfig.Password) - case BearerToken: - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", d.authConfig.Token)) - } -} diff --git a/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/template.go b/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/template.go deleted file mode 100644 index 5d0902d26..000000000 --- a/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/template.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright 2021 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package tinkerbell - -import ( - "github.com/tinkerbell/tink/workflow" - - "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/baremetal/plugins" -) - -func createTemplate(tinkServerAddress, imageRepoAddress string, cfg *plugins.CloudConfigSettings) *workflow.Workflow { - return &workflow.Workflow{ - Version: "0.1", - Name: cfg.SecretName, - ID: "", - GlobalTimeout: 6000, - Tasks: []workflow.Task{ - { - Name: "os-installation", - WorkerAddr: "{{.device_1}}", - Volumes: []string{ - "/dev:/dev", - "/dev/console:/dev/console", - "/lib/firmware:/lib/firmware:ro", - }, - Actions: []workflow.Action{ - { - Name: "disk-wipe", - Image: "disk-wipe:v1", - Timeout: 90, - }, - { - Name: "disk-partition", - Image: "disk-partition:v1", - Timeout: 180, - Environment: map[string]string{ - "MIRROR_HOST": tinkServerAddress, - }, - Volumes: []string{ - "/statedir:/statedir", - }, - }, - { - Name: "install-root-fs", - Image: "install-root-fs:v1", - Timeout: 600, - Environment: map[string]string{ - "MIRROR_HOST": imageRepoAddress, - }, - Volumes: nil, - }, - { - Name: "install-grub", - Image: "install-grub:v1", - Timeout: 600, - Environment: map[string]string{ - "MIRROR_HOST": imageRepoAddress, - }, - Volumes: []string{ - "/statedir:/statedir", - }, - }, - { - Name: "cloud-init", - Image: "cloud-init:v1", - Timeout: 600, - Environment: map[string]string{ - "MIRROR_HOST": imageRepoAddress, - "CLOUD_INIT_TOKEN": cfg.Token, - "CLOUD_INIT_SETTINGS_NAMESPACE": cfg.Namespace, - "SECRET_NAME": cfg.SecretName, - "CLUSTER_HOST": cfg.ClusterHost, - }, - }, - }, - }, - }, - } -} diff --git a/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/types/hardware.go b/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/types/hardware.go new file mode 100644 index 000000000..6263dd675 --- /dev/null +++ 
b/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/types/hardware.go @@ -0,0 +1,78 @@ +/* +Copyright 2024 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import ( + tinkv1alpha1 "github.com/tinkerbell/tink/api/v1alpha1" + + "k8c.io/machine-controller/pkg/cloudprovider/provider/baremetal/plugins" +) + +const ( + Unknown string = "Unknown" + Staged string = "Staged" + Provisioned string = "Provisioned" + Decommissioned string = "Decommissioned" + + HardwareRefLabel = "app.kubernetes.io/hardware-reference" +) + +type Hardware struct { + *tinkv1alpha1.Hardware `json:"hardware"` +} + +var _ plugins.Server = &Hardware{} + +func (h *Hardware) GetName() string { + return h.Name +} + +func (h *Hardware) GetID() string { + if h.Spec.Metadata != nil && + h.Spec.Metadata.Instance != nil { + return h.Spec.Metadata.Instance.ID + } + + return "" +} + +func (h *Hardware) GetIPAddress() string { + if h.Spec.Metadata != nil && h.Spec.Metadata.State == Staged { + interfaces := h.Spec.Interfaces + if len(interfaces) > 0 && interfaces[0].DHCP.IP != nil { + return interfaces[0].DHCP.IP.Address + } + } + + return "" +} + +func (h *Hardware) GetMACAddress() string { + if len(h.Spec.Interfaces) > 0 { + return h.Spec.Interfaces[0].DHCP.MAC + } + + return "" +} + +func (h *Hardware) GetStatus() string { + if h.Status.State != "" { + return string(h.Status.State) + } + + return Unknown +} diff --git a/pkg/cloudprovider/provider/baremetal/provider.go 
b/pkg/cloudprovider/provider/baremetal/provider.go index 1203cb786..0815e374a 100644 --- a/pkg/cloudprovider/provider/baremetal/provider.go +++ b/pkg/cloudprovider/provider/baremetal/provider.go @@ -22,21 +22,23 @@ import ( "errors" "fmt" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - cloudprovidererrors "github.com/kubermatic/machine-controller/pkg/cloudprovider/errors" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/instance" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/baremetal/plugins" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell/metadata" - baremetaltypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/baremetal/types" - cloudprovidertypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/types" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/util" - "github.com/kubermatic/machine-controller/pkg/providerconfig" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + "go.uber.org/zap" + + cloudprovidererrors "k8c.io/machine-controller/pkg/cloudprovider/errors" + "k8c.io/machine-controller/pkg/cloudprovider/instance" + "k8c.io/machine-controller/pkg/cloudprovider/provider/baremetal/plugins" + tink "k8c.io/machine-controller/pkg/cloudprovider/provider/baremetal/plugins/tinkerbell" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + "k8c.io/machine-controller/pkg/cloudprovider/util" + "k8c.io/machine-controller/sdk/apis/cluster/common" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + baremetaltypes "k8c.io/machine-controller/sdk/cloudprovider/baremetal" + plugintypes "k8c.io/machine-controller/sdk/cloudprovider/baremetal/plugins" + 
tinktypes "k8c.io/machine-controller/sdk/cloudprovider/baremetal/plugins/tinkerbell" + "k8c.io/machine-controller/sdk/providerconfig" corev1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" ) @@ -69,11 +71,11 @@ func (b bareMetalServer) Status() instance.Status { } type provider struct { - configVarResolver *providerconfig.ConfigVarResolver + configVarResolver providerconfig.ConfigVarResolver } // New returns a new BareMetal provider. -func New(configVarResolver *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { +func New(configVarResolver providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return &provider{ configVarResolver: configVarResolver, } @@ -81,16 +83,16 @@ func New(configVarResolver *providerconfig.ConfigVarResolver) cloudprovidertypes type Config struct { driver plugins.PluginDriver - driverName plugins.Driver + driverName plugintypes.Driver driverSpec runtime.RawExtension } -func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *providerconfigtypes.Config, error) { +func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *providerconfig.Config, error) { if provSpec.Value == nil { return nil, nil, fmt.Errorf("machine.spec.providerconfig.value is nil") } - pconfig, err := providerconfigtypes.GetConfig(provSpec) + pconfig, err := providerconfig.GetConfig(provSpec) if err != nil { return nil, nil, err } @@ -105,57 +107,30 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p } c := Config{} - endpoint, err := p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.MetadataClient.Endpoint, "METADATA_SERVER_ENDPOINT") - if err != nil { - return nil, nil, fmt.Errorf(`failed to get value of \"endpoint\" field: %w`, err) - } - authMethod, err := 
p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.MetadataClient.AuthMethod, "METADATA_SERVER_AUTH_METHOD") - if err != nil { - return nil, nil, fmt.Errorf(`failed to get value of \"authMethod\" field: %w`, err) - } - username, err := p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.MetadataClient.Username, "METADATA_SERVER_USERNAME") - if err != nil { - return nil, nil, fmt.Errorf(`failed to get value of \"username\" field: %w`, err) - } - password, err := p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.MetadataClient.Password, "METADATA_SERVER_PASSWORD") - if err != nil { - return nil, nil, fmt.Errorf(`failed to get value of \"password\" field: %w`, err) - } - token, err := p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.MetadataClient.Token, "METADATA_SERVER_TOKEN") - if err != nil { - return nil, nil, fmt.Errorf(`failed to get value of \"token\" field: %w`, err) - } - - mdCfg := &metadata.Config{ - Endpoint: endpoint, - AuthConfig: &metadata.AuthConfig{ - AuthMethod: metadata.AuthMethod(authMethod), - Username: username, - Password: password, - Token: token, - }, - } - driverName, err := p.configVarResolver.GetConfigVarStringValue(rawConfig.Driver) + driverName, err := p.configVarResolver.GetStringValue(rawConfig.Driver) if err != nil { return nil, nil, fmt.Errorf("failed to get baremetal provider's driver name: %w", err) } - c.driverName = plugins.Driver(driverName) + c.driverName = plugintypes.Driver(driverName) c.driverSpec = rawConfig.DriverSpec switch c.driverName { - case plugins.Tinkerbell: - driverConfig := struct { - ProvisionerIPAddress string `json:"provisionerIPAddress"` - MirrorHost string `json:"mirrorHost"` - }{} + case plugintypes.Tinkerbell: + driverConfig := &tinktypes.TinkerbellPluginSpec{} if err := json.Unmarshal(c.driverSpec.Raw, &driverConfig); err != nil { return nil, nil, fmt.Errorf("failed to unmarshal tinkerbell driver spec: %w", err) } - c.driver, err = tinkerbell.NewTinkerbellDriver(mdCfg, 
nil, driverConfig.ProvisionerIPAddress, driverConfig.MirrorHost) + tinkConfig, err := tink.GetConfig(*driverConfig, p.configVarResolver.GetStringValueOrEnv) + + if err != nil { + return nil, nil, err + } + + c.driver, err = tink.NewTinkerbellDriver(*tinkConfig, driverConfig) if err != nil { return nil, nil, fmt.Errorf("failed to create a tinkerbell driver: %w", err) } @@ -166,12 +141,12 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p return &c, pconfig, err } -func (p provider) AddDefaults(spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { +func (p provider) AddDefaults(_ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { _, _, err := p.getConfig(spec.ProviderSpec) return spec, err } -func (p provider) Validate(_ context.Context, spec clusterv1alpha1.MachineSpec) error { +func (p provider) Validate(_ context.Context, _ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) error { c, _, err := p.getConfig(spec.ProviderSpec) if err != nil { return fmt.Errorf("failed to parse config: %w", err) @@ -188,7 +163,7 @@ func (p provider) Validate(_ context.Context, spec clusterv1alpha1.MachineSpec) return nil } -func (p provider) Get(ctx context.Context, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { +func (p provider) Get(ctx context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { c, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return nil, cloudprovidererrors.TerminalError{ @@ -197,7 +172,7 @@ func (p provider) Get(ctx context.Context, machine *clusterv1alpha1.Machine, _ * } } - server, err := c.driver.GetServer(ctx, machine.UID, c.driverSpec) + server, err := c.driver.GetServer(ctx) if err != nil { if errors.Is(err, cloudprovidererrors.ErrInstanceNotFound) { return nil, cloudprovidererrors.ErrInstanceNotFound @@ -215,7 
+190,7 @@ func (p provider) GetCloudConfig(_ clusterv1alpha1.MachineSpec) (config string, return "", "", nil } -func (p provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { +func (p provider) Create(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { c, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return nil, cloudprovidererrors.TerminalError{ @@ -224,23 +199,7 @@ func (p provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, } } - if err := util.CreateMachineCloudInitSecret(ctx, userdata, machine.Name, data.Client); err != nil { - return nil, fmt.Errorf("failed to create cloud-init secret for machine %s: %w", machine.Name, err) - } - - token, apiServer, err := util.ExtractTokenAndAPIServer(ctx, userdata, data.Client) - if err != nil { - return nil, fmt.Errorf("failed to extarct token and api server address: %w", err) - } - - cfg := &plugins.CloudConfigSettings{ - Token: token, - Namespace: util.CloudInitNamespace, - SecretName: machine.Name, - ClusterHost: apiServer, - } - - server, err := c.driver.ProvisionServer(ctx, machine.UID, cfg, c.driverSpec) + server, err := c.driver.ProvisionServer(ctx, log, machine.ObjectMeta, c.driverSpec, userdata) if err != nil { return nil, fmt.Errorf("failed to provision server: %w", err) } @@ -250,7 +209,7 @@ func (p provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, }, nil } -func (p provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (bool, error) { +func (p provider) Cleanup(ctx context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (bool, error) { c, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return false, 
cloudprovidererrors.TerminalError{ @@ -259,13 +218,13 @@ func (p provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine, } } - if err := c.driver.DeprovisionServer(ctx, machine.UID); err != nil { + if err := c.driver.DeprovisionServer(ctx); err != nil { return false, fmt.Errorf("failed to de-provision server: %w", err) } secret := &corev1.Secret{} if err := data.Client.Get(ctx, types.NamespacedName{Namespace: util.CloudInitNamespace, Name: machine.Name}, secret); err != nil { - if !kerrors.IsNotFound(err) { + if !apierrors.IsNotFound(err) { return false, fmt.Errorf("failed to fetching secret for userdata: %w", err) } @@ -283,7 +242,7 @@ func (p provider) MachineMetricsLabels(_ *clusterv1alpha1.Machine) (map[string]s return nil, nil } -func (p provider) MigrateUID(_ context.Context, _ *clusterv1alpha1.Machine, _ types.UID) error { +func (p provider) MigrateUID(_ context.Context, _ *zap.SugaredLogger, _ *clusterv1alpha1.Machine, _ types.UID) error { return nil } diff --git a/pkg/cloudprovider/provider/baremetal/types/types.go b/pkg/cloudprovider/provider/baremetal/types/types.go deleted file mode 100644 index 374cc2fb2..000000000 --- a/pkg/cloudprovider/provider/baremetal/types/types.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2021 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package types - -import ( - "github.com/kubermatic/machine-controller/pkg/jsonutil" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" - - "k8s.io/apimachinery/pkg/runtime" -) - -type RawConfig struct { - MetadataClient *MetadataClientConfig `json:"metadataClientConfig"` - Driver providerconfigtypes.ConfigVarString `json:"driver"` - DriverSpec runtime.RawExtension `json:"driverSpec"` -} - -type MetadataClientConfig struct { - Endpoint providerconfigtypes.ConfigVarString `json:"endpoint,omitempty"` - AuthMethod providerconfigtypes.ConfigVarString `json:"authMethod,omitempty"` - Username providerconfigtypes.ConfigVarString `json:"username,omitempty"` - Password providerconfigtypes.ConfigVarString `json:"password,omitempty"` - Token providerconfigtypes.ConfigVarString `json:"token,omitempty"` -} - -func GetConfig(pconfig providerconfigtypes.Config) (*RawConfig, error) { - rawConfig := &RawConfig{} - - return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) -} diff --git a/pkg/cloudprovider/provider/digitalocean/provider.go b/pkg/cloudprovider/provider/digitalocean/provider.go index 63b5d65ae..da65bc846 100644 --- a/pkg/cloudprovider/provider/digitalocean/provider.go +++ b/pkg/cloudprovider/provider/digitalocean/provider.go @@ -25,32 +25,31 @@ import ( "time" "github.com/digitalocean/godo" + "go.uber.org/zap" "golang.org/x/oauth2" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/common/ssh" - cloudprovidererrors "github.com/kubermatic/machine-controller/pkg/cloudprovider/errors" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/instance" - digitaloceantypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/digitalocean/types" - cloudprovidertypes 
"github.com/kubermatic/machine-controller/pkg/cloudprovider/types" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/util" - "github.com/kubermatic/machine-controller/pkg/providerconfig" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" - - v1 "k8s.io/api/core/v1" + "k8c.io/machine-controller/pkg/cloudprovider/common/ssh" + cloudprovidererrors "k8c.io/machine-controller/pkg/cloudprovider/errors" + "k8c.io/machine-controller/pkg/cloudprovider/instance" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + "k8c.io/machine-controller/sdk/apis/cluster/common" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + digitaloceantypes "k8c.io/machine-controller/sdk/cloudprovider/digitalocean" + "k8c.io/machine-controller/sdk/net" + "k8c.io/machine-controller/sdk/providerconfig" + + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/klog" ) type provider struct { - configVarResolver *providerconfig.ConfigVarResolver + configVarResolver providerconfig.ConfigVarResolver } // New returns a digitalocean provider. 
-func New(configVarResolver *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { +func New(configVarResolver providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return &provider{configVarResolver: configVarResolver} } @@ -82,16 +81,14 @@ func (t *TokenSource) Token() (*oauth2.Token, error) { return token, nil } -func getSlugForOS(os providerconfigtypes.OperatingSystem) (string, error) { +func getSlugForOS(os providerconfig.OperatingSystem) (string, error) { switch os { - case providerconfigtypes.OperatingSystemUbuntu: - return "ubuntu-22-04-x64", nil - case providerconfigtypes.OperatingSystemCentOS: - return "centos-7-x64", nil - case providerconfigtypes.OperatingSystemRockyLinux: - return "rockylinux-8-x64", nil - } - return "", providerconfigtypes.ErrOSNotSupported + case providerconfig.OperatingSystemUbuntu: + return "ubuntu-24-04-x64", nil + case providerconfig.OperatingSystemRockyLinux: + return "rockylinux-9-x64", nil + } + return "", providerconfig.ErrOSNotSupported } func getClient(ctx context.Context, token string) *godo.Client { @@ -103,12 +100,8 @@ func getClient(ctx context.Context, token string) *godo.Client { return godo.NewClient(oauthClient) } -func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *providerconfigtypes.Config, error) { - if provSpec.Value == nil { - return nil, nil, fmt.Errorf("machine.spec.providerconfig.value is nil") - } - - pconfig, err := providerconfigtypes.GetConfig(provSpec) +func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *providerconfig.Config, error) { + pconfig, err := providerconfig.GetConfig(provSpec) if err != nil { return nil, nil, err } @@ -123,36 +116,36 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p } c := Config{} - c.Token, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.Token, "DO_TOKEN") + c.Token, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.Token, "DO_TOKEN") if err != 
nil { return nil, nil, fmt.Errorf("failed to get the value of \"token\" field, error = %w", err) } - c.Region, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.Region) + c.Region, err = p.configVarResolver.GetStringValue(rawConfig.Region) if err != nil { return nil, nil, err } - c.Size, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.Size) + c.Size, err = p.configVarResolver.GetStringValue(rawConfig.Size) if err != nil { return nil, nil, err } - c.Backups, _, err = p.configVarResolver.GetConfigVarBoolValue(rawConfig.Backups) + c.Backups, _, err = p.configVarResolver.GetBoolValue(rawConfig.Backups) if err != nil { return nil, nil, err } - c.IPv6, _, err = p.configVarResolver.GetConfigVarBoolValue(rawConfig.IPv6) + c.IPv6, _, err = p.configVarResolver.GetBoolValue(rawConfig.IPv6) if err != nil { return nil, nil, err } - c.PrivateNetworking, _, err = p.configVarResolver.GetConfigVarBoolValue(rawConfig.PrivateNetworking) + c.PrivateNetworking, _, err = p.configVarResolver.GetBoolValue(rawConfig.PrivateNetworking) if err != nil { return nil, nil, err } - c.Monitoring, _, err = p.configVarResolver.GetConfigVarBoolValue(rawConfig.Monitoring) + c.Monitoring, _, err = p.configVarResolver.GetBoolValue(rawConfig.Monitoring) if err != nil { return nil, nil, err } for _, tag := range rawConfig.Tags { - tagVal, err := p.configVarResolver.GetConfigVarStringValue(tag) + tagVal, err := p.configVarResolver.GetStringValue(tag) if err != nil { return nil, nil, err } @@ -162,11 +155,11 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p return &c, pconfig, err } -func (p *provider) AddDefaults(spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { +func (p *provider) AddDefaults(_ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { return spec, nil } -func (p *provider) Validate(ctx context.Context, spec clusterv1alpha1.MachineSpec) error { +func (p *provider) Validate(ctx 
context.Context, _ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) error { c, pc, err := p.getConfig(spec.ProviderSpec) if err != nil { return fmt.Errorf("failed to parse config: %w", err) @@ -190,14 +183,14 @@ func (p *provider) Validate(ctx context.Context, spec clusterv1alpha1.MachineSpe } switch f := pc.Network.GetIPFamily(); f { - case util.IPFamilyUnspecified, util.IPFamilyIPv4: + case net.IPFamilyUnspecified, net.IPFamilyIPv4: // noop - case util.IPFamilyIPv6: - return fmt.Errorf(util.ErrIPv6OnlyUnsupported) - case util.IPFamilyIPv4IPv6, util.IPFamilyIPv6IPv4: + case net.IPFamilyIPv6: + return fmt.Errorf(net.ErrIPv6OnlyUnsupported) + case net.IPFamilyIPv4IPv6, net.IPFamilyIPv6IPv4: // noop default: - return fmt.Errorf(util.ErrUnknownNetworkFamily, f) + return fmt.Errorf(net.ErrUnknownNetworkFamily, f) } client := getClient(ctx, c.Token) @@ -276,7 +269,7 @@ func uploadRandomSSHPublicKey(ctx context.Context, service godo.KeysService) (st return newDoKey.Fingerprint, nil } -func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { +func (p *provider) Create(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { c, pc, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return nil, cloudprovidererrors.TerminalError{ @@ -294,7 +287,7 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, defer func() { _, err := client.Keys.DeleteByFingerprint(ctx, fingerprint) if err != nil { - klog.Errorf("failed to remove a temporary ssh key with fingerprint = %v, due to = %v", fingerprint, err) + log.Errorw("Failed to remove a temporary ssh key", "fingerprint", fingerprint, zap.Error(err)) } }() @@ -324,8 +317,10 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, return nil, 
doStatusAndErrToTerminalError(rsp.StatusCode, err) } + dropletLog := log.With("droplet", droplet.ID) + //We need to wait until the droplet really got created as tags will be only applied when the droplet is running - err = wait.Poll(createCheckPeriod, createCheckTimeout, func() (done bool, err error) { + err = wait.PollUntilContextTimeout(ctx, createCheckPeriod, createCheckTimeout, false, func(ctx context.Context) (bool, error) { newDroplet, rsp, err := client.Droplets.Get(ctx, droplet.ID) if err != nil { tErr := doStatusAndErrToTerminalError(rsp.StatusCode, err) @@ -334,20 +329,20 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, } //Well just wait 10 sec and hope the droplet got started by then... time.Sleep(createCheckFailedWaitPeriod) - return false, fmt.Errorf("droplet (id='%d') got created but we failed to fetch its status", droplet.ID) + return false, fmt.Errorf("droplet %q got created but we failed to fetch its status", droplet.ID) } if sets.NewString(newDroplet.Tags...).Has(string(machine.UID)) { - klog.V(6).Infof("droplet (id='%d') got fully created", droplet.ID) + dropletLog.Debug("Droplet got fully created") return true, nil } - klog.V(6).Infof("waiting until droplet (id='%d') got fully created...", droplet.ID) + dropletLog.Debug("Waiting until droplet got fully created...") return false, nil }) return &doInstance{droplet: droplet}, err } -func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (bool, error) { +func (p *provider) Cleanup(ctx context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (bool, error) { instance, err := p.get(ctx, machine) if err != nil { if errors.Is(err, cloudprovidererrors.ErrInstanceNotFound) { @@ -378,7 +373,7 @@ func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine return false, nil } -func (p *provider) Get(ctx context.Context, machine 
*clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { +func (p *provider) Get(ctx context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { return p.get(ctx, machine) } @@ -436,7 +431,7 @@ func (p *provider) listDroplets(ctx context.Context, token string) ([]godo.Dropl return result, nil } -func (p *provider) MigrateUID(ctx context.Context, machine *clusterv1alpha1.Machine, newUID types.UID) error { +func (p *provider) MigrateUID(ctx context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, newUID types.UID) error { c, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return fmt.Errorf("failed to decode providerconfig: %w", err) @@ -475,10 +470,6 @@ func (p *provider) MigrateUID(ctx context.Context, machine *clusterv1alpha1.Mach return nil } -func (p *provider) GetCloudConfig(spec clusterv1alpha1.MachineSpec) (config string, name string, err error) { - return "", "", nil -} - func (p *provider) MachineMetricsLabels(machine *clusterv1alpha1.Machine) (map[string]string, error) { labels := make(map[string]string) @@ -504,23 +495,26 @@ func (d *doInstance) ID() string { } func (d *doInstance) ProviderID() string { + if d.droplet == nil || d.droplet.Name == "" { + return "" + } return fmt.Sprintf("digitalocean://%d", d.droplet.ID) } -func (d *doInstance) Addresses() map[string]v1.NodeAddressType { - addresses := map[string]v1.NodeAddressType{} +func (d *doInstance) Addresses() map[string]corev1.NodeAddressType { + addresses := map[string]corev1.NodeAddressType{} for _, n := range d.droplet.Networks.V4 { if n.Type == "public" { - addresses[n.IPAddress] = v1.NodeExternalIP + addresses[n.IPAddress] = corev1.NodeExternalIP } else { - addresses[n.IPAddress] = v1.NodeInternalIP + addresses[n.IPAddress] = corev1.NodeInternalIP } } for _, n := range d.droplet.Networks.V6 { if n.Type == "public" { - addresses[n.IPAddress] = 
v1.NodeExternalIP + addresses[n.IPAddress] = corev1.NodeExternalIP } else { - addresses[n.IPAddress] = v1.NodeInternalIP + addresses[n.IPAddress] = corev1.NodeInternalIP } } return addresses @@ -556,6 +550,6 @@ func doStatusAndErrToTerminalError(status int, err error) error { } } -func (p *provider) SetMetricsForMachines(machines clusterv1alpha1.MachineList) error { +func (p *provider) SetMetricsForMachines(_ clusterv1alpha1.MachineList) error { return nil } diff --git a/pkg/cloudprovider/provider/digitalocean/types/types.go b/pkg/cloudprovider/provider/digitalocean/types/types.go deleted file mode 100644 index a0fdb6830..000000000 --- a/pkg/cloudprovider/provider/digitalocean/types/types.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package types - -import ( - "github.com/kubermatic/machine-controller/pkg/jsonutil" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" -) - -type RawConfig struct { - Token providerconfigtypes.ConfigVarString `json:"token,omitempty"` - Region providerconfigtypes.ConfigVarString `json:"region"` - Size providerconfigtypes.ConfigVarString `json:"size"` - Backups providerconfigtypes.ConfigVarBool `json:"backups"` - IPv6 providerconfigtypes.ConfigVarBool `json:"ipv6"` - PrivateNetworking providerconfigtypes.ConfigVarBool `json:"private_networking"` - Monitoring providerconfigtypes.ConfigVarBool `json:"monitoring"` - Tags []providerconfigtypes.ConfigVarString `json:"tags,omitempty"` -} - -func GetConfig(pconfig providerconfigtypes.Config) (*RawConfig, error) { - rawConfig := &RawConfig{} - - return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) -} diff --git a/pkg/cloudprovider/provider/edge/provider.go b/pkg/cloudprovider/provider/edge/provider.go new file mode 100644 index 000000000..b311146b0 --- /dev/null +++ b/pkg/cloudprovider/provider/edge/provider.go @@ -0,0 +1,96 @@ +/* +Copyright 2024 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package edge + +import ( + "context" + + "go.uber.org/zap" + + "k8c.io/machine-controller/pkg/cloudprovider/instance" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + "k8c.io/machine-controller/sdk/providerconfig" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" +) + +type provider struct{} + +type CloudProviderSpec struct{} + +type CloudProviderInstance struct{} + +func (f CloudProviderInstance) Name() string { + return "" +} + +func (f CloudProviderInstance) ID() string { + return "" +} + +func (f CloudProviderInstance) ProviderID() string { + return "" +} + +func (f CloudProviderInstance) Addresses() map[string]corev1.NodeAddressType { + return nil +} + +func (f CloudProviderInstance) Status() instance.Status { + return instance.StatusUnknown +} + +// New returns a edge cloud provider. +func New(_ providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { + return &provider{} +} + +func (p *provider) AddDefaults(_ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { + return spec, nil +} + +// Validate returns success or failure based according to its EdgeCloudProviderSpec. +func (p *provider) Validate(_ context.Context, _ *zap.SugaredLogger, _ clusterv1alpha1.MachineSpec) error { + return nil +} + +func (p *provider) Get(_ context.Context, _ *zap.SugaredLogger, _ *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { + return CloudProviderInstance{}, nil +} + +// Create creates a cloud instance according to the given machine. 
+func (p *provider) Create(_ context.Context, _ *zap.SugaredLogger, _ *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData, _ string) (instance.Instance, error) { + return CloudProviderInstance{}, nil +} + +func (p *provider) Cleanup(_ context.Context, _ *zap.SugaredLogger, _ *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (bool, error) { + return true, nil +} + +func (p *provider) MigrateUID(_ context.Context, _ *zap.SugaredLogger, _ *clusterv1alpha1.Machine, _ types.UID) error { + return nil +} + +func (p *provider) MachineMetricsLabels(_ *clusterv1alpha1.Machine) (map[string]string, error) { + return map[string]string{}, nil +} + +func (p *provider) SetMetricsForMachines(_ clusterv1alpha1.MachineList) error { + return nil +} diff --git a/pkg/cloudprovider/provider/equinixmetal/provider.go b/pkg/cloudprovider/provider/equinixmetal/provider.go index 2c290450e..871351b9c 100644 --- a/pkg/cloudprovider/provider/equinixmetal/provider.go +++ b/pkg/cloudprovider/provider/equinixmetal/provider.go @@ -21,24 +21,25 @@ import ( "encoding/json" "errors" "fmt" - "reflect" + "net/http" + "slices" "strings" - "github.com/packethost/packngo" + "github.com/equinix/equinix-sdk-go/services/metalv1" + "go.uber.org/zap" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - cloudprovidererrors "github.com/kubermatic/machine-controller/pkg/cloudprovider/errors" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/instance" - equinixmetaltypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/equinixmetal/types" - cloudprovidertypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/types" - "github.com/kubermatic/machine-controller/pkg/providerconfig" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + cloudprovidererrors "k8c.io/machine-controller/pkg/cloudprovider/errors" + 
"k8c.io/machine-controller/pkg/cloudprovider/instance" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + "k8c.io/machine-controller/sdk/apis/cluster/common" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + equinixmetaltypes "k8c.io/machine-controller/sdk/cloudprovider/equinixmetal" + "k8c.io/machine-controller/sdk/providerconfig" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" + "k8s.io/apimachinery/pkg/util/sets" ) const ( @@ -47,7 +48,7 @@ const ( ) // New returns a Equinix Metal provider. -func New(configVarResolver *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { +func New(configVarResolver providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return &provider{configVarResolver: configVarResolver} } @@ -76,15 +77,11 @@ func populateDefaults(c *equinixmetaltypes.RawConfig) { } type provider struct { - configVarResolver *providerconfig.ConfigVarResolver + configVarResolver providerconfig.ConfigVarResolver } -func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *equinixmetaltypes.RawConfig, *providerconfigtypes.Config, error) { - if provSpec.Value == nil { - return nil, nil, nil, fmt.Errorf("machine.spec.providerconfig.value is nil") - } - - pconfig, err := providerconfigtypes.GetConfig(provSpec) +func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *equinixmetaltypes.RawConfig, *providerconfig.Config, error) { + pconfig, err := providerconfig.GetConfig(provSpec) if err != nil { return nil, nil, nil, err } @@ -99,49 +96,49 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *e } c := Config{} - c.Token, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.Token, "METAL_AUTH_TOKEN") + c.Token, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.Token, "METAL_AUTH_TOKEN") if err != nil || len(c.Token) == 0 { 
// This retry is temporary and is only required to facilitate migration from Packet to Equinix Metal // We look for env variable PACKET_API_KEY associated with Packet to ensure that nothing breaks during automated migration for the Machines // TODO(@ahmedwaleedmalik) Remove this after a release period - c.Token, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.Token, "PACKET_API_KEY") + c.Token, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.Token, "PACKET_API_KEY") if err != nil { return nil, nil, nil, fmt.Errorf("failed to get the value of \"apiKey\" field, error = %w", err) } } - c.ProjectID, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.ProjectID, "METAL_PROJECT_ID") + c.ProjectID, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.ProjectID, "METAL_PROJECT_ID") if err != nil || len(c.ProjectID) == 0 { // This retry is temporary and is only required to facilitate migration from Packet to Equinix Metal // We look for env variable PACKET_PROJECT_ID associated with Packet to ensure that nothing breaks during automated migration for the Machines // TODO(@ahmedwaleedmalik) Remove this after a release period - c.ProjectID, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.ProjectID, "PACKET_PROJECT_ID") + c.ProjectID, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.ProjectID, "PACKET_PROJECT_ID") if err != nil { return nil, nil, nil, fmt.Errorf("failed to get the value of \"apiKey\" field, error = %w", err) } } - c.InstanceType, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.InstanceType) + c.InstanceType, err = p.configVarResolver.GetStringValue(rawConfig.InstanceType) if err != nil { return nil, nil, nil, fmt.Errorf("failed to get the value of \"instanceType\" field, error = %w", err) } - c.BillingCycle, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.BillingCycle) + c.BillingCycle, err = p.configVarResolver.GetStringValue(rawConfig.BillingCycle) if err != nil { return 
nil, nil, nil, fmt.Errorf("failed to get the value of \"billingCycle\" field, error = %w", err) } for i, tag := range rawConfig.Tags { - tagValue, err := p.configVarResolver.GetConfigVarStringValue(tag) + tagValue, err := p.configVarResolver.GetStringValue(tag) if err != nil { return nil, nil, nil, fmt.Errorf("failed to read the value for the Tag at index %d of the \"tags\" field, error = %w", i, err) } c.Tags = append(c.Tags, tagValue) } for i, facility := range rawConfig.Facilities { - facilityValue, err := p.configVarResolver.GetConfigVarStringValue(facility) + facilityValue, err := p.configVarResolver.GetStringValue(facility) if err != nil { return nil, nil, nil, fmt.Errorf("failed to read the value for the Tag at index %d of the \"facilities\" field, error = %w", i, err) } c.Facilities = append(c.Facilities, facilityValue) } - c.Metro, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.Metro) + c.Metro, err = p.configVarResolver.GetStringValue(rawConfig.Metro) if err != nil { return nil, nil, nil, fmt.Errorf("failed to get the value of \"metro\" field, error = %w", err) } @@ -152,7 +149,7 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *e return &c, rawConfig, pconfig, err } -func (p *provider) getMetalDevice(machine *clusterv1alpha1.Machine) (*packngo.Device, *packngo.Client, error) { +func (p *provider) getMetalDevice(ctx context.Context, machine *clusterv1alpha1.Machine) (*metalv1.Device, *metalv1.APIClient, error) { c, _, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return nil, nil, cloudprovidererrors.TerminalError{ @@ -162,14 +159,14 @@ func (p *provider) getMetalDevice(machine *clusterv1alpha1.Machine) (*packngo.De } client := getClient(c.Token) - device, err := getDeviceByTag(client, c.ProjectID, generateTag(string(machine.UID))) + device, err := getDeviceByTag(ctx, client, c.ProjectID, generateTag(string(machine.UID))) if err != nil { return nil, nil, err } return device, client, nil } 
-func (p *provider) Validate(_ context.Context, spec clusterv1alpha1.MachineSpec) error { +func (p *provider) Validate(ctx context.Context, _ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) error { c, _, pc, err := p.getConfig(spec.ProviderSpec) if err != nil { return fmt.Errorf("failed to parse config: %w", err) @@ -198,49 +195,65 @@ func (p *provider) Validate(_ context.Context, spec clusterv1alpha1.MachineSpec) if c.Facilities != nil && (len(c.Facilities) > 0 || c.Facilities[0] != "") { // get all valid facilities - facilities, _, err := client.Facilities.List(nil) + request := client.FacilitiesApi.FindFacilitiesByProject(ctx, c.ProjectID) + facilities, resp, err := client.FacilitiesApi.FindFacilitiesByProjectExecute(request) if err != nil { return fmt.Errorf("failed to list facilities: %w", err) } + resp.Body.Close() + + expectedFacilities := sets.New(c.Facilities...) + availableFacilities := sets.New[string]() + for _, facility := range facilities.Facilities { + availableFacilities.Insert(*facility.Code) + } + // ensure our requested facilities are in those facilities - if missingFacilities := itemsNotInList(facilityProp(facilities, "Code"), c.Facilities); len(missingFacilities) > 0 { - return fmt.Errorf("unknown facilities: %s", strings.Join(missingFacilities, ",")) + if diff := expectedFacilities.Difference(availableFacilities); diff.Len() > 0 { + return fmt.Errorf("unknown facilities: %v", sets.List(diff)) } } if c.Metro != "" { - metros, _, err := client.Metros.List(nil) + request := client.MetrosApi.FindMetros(ctx) + metros, resp, err := client.MetrosApi.FindMetrosExecute(request) if err != nil { return fmt.Errorf("failed to list metros: %w", err) } + resp.Body.Close() - var metroExists bool - for _, metro := range metros { - if strings.EqualFold(metro.Code, c.Metro) { - metroExists = true - } - } + metroExists := slices.ContainsFunc(metros.Metros, func(m metalv1.Metro) bool { + return strings.EqualFold(*m.Code, c.Metro) + }) if !metroExists { - 
return fmt.Errorf("unknown metro: %s", c.Metro) + return fmt.Errorf("unknown metro %q", c.Metro) } } // get all valid plans a.k.a. instance types - plans, _, err := client.Plans.List(nil) + request := client.PlansApi.FindPlansByProject(ctx, c.ProjectID) + plans, resp, err := client.PlansApi.FindPlansByProjectExecute(request) if err != nil { return fmt.Errorf("failed to list instance types / plans: %w", err) } + resp.Body.Close() + // ensure our requested plan is in those plans - validPlanNames := planProp(plans, "Name") - if missingPlans := itemsNotInList(validPlanNames, []string{c.InstanceType}); len(missingPlans) > 0 { - return fmt.Errorf("unknown instance type / plan: %s, acceptable plans: %s", strings.Join(missingPlans, ","), strings.Join(validPlanNames, ",")) + expectedPlans := sets.New(c.InstanceType) + availablePlans := sets.New[string]() + for _, plan := range plans.Plans { + availablePlans.Insert(*plan.Name) + } + + if diff := expectedPlans.Difference(availablePlans); diff.Len() > 0 { + return fmt.Errorf("unknown instance type / plan: %s, acceptable plans: %v", c.InstanceType, sets.List(availablePlans)) } return nil } -func (p *provider) Create(_ context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { +func (p *provider) Create(ctx context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { c, _, pc, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return nil, cloudprovidererrors.TerminalError{ @@ -250,6 +263,7 @@ func (p *provider) Create(_ context.Context, machine *clusterv1alpha1.Machine, d } client := getClient(c.Token) + request := client.DevicesApi.CreateDevice(ctx, c.ProjectID) imageName, err := getNameForOS(pc.OperatingSystem) if err != nil { @@ -259,30 +273,49 @@ func (p *provider) Create(_ context.Context, machine *clusterv1alpha1.Machine, d } } - 
serverCreateOpts := &packngo.DeviceCreateRequest{ - Hostname: machine.Spec.Name, - UserData: userdata, - ProjectID: c.ProjectID, - Facility: c.Facilities, - Metro: c.Metro, - BillingCycle: c.BillingCycle, - Plan: c.InstanceType, - OS: imageName, - Tags: []string{ - generateTag(string(machine.UID)), - }, - } + billingCycle := metalv1.DeviceCreateInputBillingCycle(c.BillingCycle) - device, res, err := client.Devices.Create(serverCreateOpts) + if c.Metro != "" { + request = request.CreateDeviceRequest(metalv1.CreateDeviceRequest{ + DeviceCreateInMetroInput: &metalv1.DeviceCreateInMetroInput{ + Hostname: &machine.Spec.Name, + Userdata: &userdata, + Metro: c.Metro, + BillingCycle: &billingCycle, + Plan: c.InstanceType, + OperatingSystem: imageName, + Tags: []string{ + generateTag(string(machine.UID)), + }, + }, + }) + } else { + request = request.CreateDeviceRequest(metalv1.CreateDeviceRequest{ + DeviceCreateInFacilityInput: &metalv1.DeviceCreateInFacilityInput{ + Hostname: &machine.Spec.Name, + Userdata: &userdata, + Facility: c.Facilities, + BillingCycle: &billingCycle, + Plan: c.InstanceType, + OperatingSystem: imageName, + Tags: []string{ + generateTag(string(machine.UID)), + }, + }, + }) + } + + device, resp, err := client.DevicesApi.CreateDeviceExecute(request) if err != nil { - return nil, metalErrorToTerminalError(err, res, "failed to create server") + return nil, metalErrorToTerminalError(err, resp, "failed to create server") } + resp.Body.Close() return &metalDevice{device: device}, nil } -func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (bool, error) { - instance, err := p.Get(ctx, machine, data) +func (p *provider) Cleanup(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (bool, error) { + instance, err := p.Get(ctx, log, machine, data) if err != nil { if errors.Is(err, cloudprovidererrors.ErrInstanceNotFound) { return 
true, nil @@ -299,15 +332,18 @@ func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine } client := getClient(c.Token) - res, err := client.Devices.Delete(instance.(*metalDevice).device.ID, false) + request := client.DevicesApi.DeleteDevice(ctx, *instance.(*metalDevice).device.Id) + + resp, err := client.DevicesApi.DeleteDeviceExecute(request) if err != nil { - return false, metalErrorToTerminalError(err, res, "failed to delete the server") + return false, metalErrorToTerminalError(err, resp, "failed to delete the server") } + resp.Body.Close() return false, nil } -func (p *provider) AddDefaults(spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { +func (p *provider) AddDefaults(_ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { _, rawConfig, _, err := p.getConfig(spec.ProviderSpec) if err != nil { return spec, err @@ -320,8 +356,8 @@ func (p *provider) AddDefaults(spec clusterv1alpha1.MachineSpec) (clusterv1alpha return spec, nil } -func (p *provider) Get(_ context.Context, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { - device, _, err := p.getMetalDevice(machine) +func (p *provider) Get(ctx context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { + device, _, err := p.getMetalDevice(ctx, machine) if err != nil { return nil, err } @@ -332,13 +368,13 @@ func (p *provider) Get(_ context.Context, machine *clusterv1alpha1.Machine, _ *c return nil, cloudprovidererrors.ErrInstanceNotFound } -func (p *provider) MigrateUID(_ context.Context, machine *clusterv1alpha1.Machine, newID types.UID) error { - device, client, err := p.getMetalDevice(machine) +func (p *provider) MigrateUID(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, newID types.UID) error { + device, client, err := p.getMetalDevice(ctx, machine) if err != nil { 
return err } if device == nil { - klog.Infof("No instance exists for machine %s", machine.Name) + log.Info("No instance exists for machine") return nil } @@ -354,21 +390,23 @@ func (p *provider) MigrateUID(_ context.Context, machine *clusterv1alpha1.Machin // create a new UID label tags = append(tags, generateTag(string(newID))) - klog.Infof("Setting UID label for machine %s", machine.Name) - dur := &packngo.DeviceUpdateRequest{ - Tags: &tags, - } - _, response, err := client.Devices.Update(device.ID, dur) + log.Info("Setting UID label for machine") + + dur := client.DevicesApi. + UpdateDevice(ctx, *device.Id). + DeviceUpdateInput(metalv1.DeviceUpdateInput{ + Tags: tags, + }) + + _, response, err := client.DevicesApi.UpdateDeviceExecute(dur) if err != nil { return metalErrorToTerminalError(err, response, "failed to update UID label") } - klog.Infof("Successfully set UID label for machine %s", machine.Name) + response.Body.Close() - return nil -} + log.Info("Successfully set UID label for machine") -func (p *provider) GetCloudConfig(spec clusterv1alpha1.MachineSpec) (config string, name string, err error) { - return "", "", nil + return nil } func (p *provider) MachineMetricsLabels(machine *clusterv1alpha1.Machine) (map[string]string, error) { @@ -383,45 +421,53 @@ func (p *provider) MachineMetricsLabels(machine *clusterv1alpha1.Machine) (map[s return labels, err } -func (p *provider) SetMetricsForMachines(machines clusterv1alpha1.MachineList) error { +func (p *provider) SetMetricsForMachines(_ clusterv1alpha1.MachineList) error { return nil } type metalDevice struct { - device *packngo.Device + device *metalv1.Device } func (s *metalDevice) Name() string { - return s.device.Hostname + return *s.device.Hostname } func (s *metalDevice) ID() string { - return s.device.ID + return *s.device.Id } func (s *metalDevice) ProviderID() string { - return "equinixmetal://" + s.device.ID + if s.device == nil || *s.device.Id == "" { + return "" + } + return "equinixmetal://" + 
*s.device.Id } -func (s *metalDevice) Addresses() map[string]v1.NodeAddressType { - // returns addresses in CIDR format - addresses := map[string]v1.NodeAddressType{} - for _, ip := range s.device.Network { - if ip.Public { - addresses[ip.Address] = v1.NodeExternalIP - continue +// Addresses returns addresses in CIDR format. +func (s *metalDevice) Addresses() map[string]corev1.NodeAddressType { + addresses := map[string]corev1.NodeAddressType{} + for _, ip := range s.device.IpAddresses { + kind := corev1.NodeInternalIP + if *ip.Public { + kind = corev1.NodeExternalIP } - addresses[ip.Address] = v1.NodeInternalIP + + addresses[*ip.Address] = kind } return addresses } func (s *metalDevice) Status() instance.Status { - switch s.device.State { - case "provisioning": + if s.device.State == nil { + return instance.StatusUnknown + } + + switch *s.device.State { + case metalv1.DEVICESTATE_PROVISIONING: return instance.StatusCreating - case "active": + case metalv1.DEVICESTATE_ACTIVE: return instance.StatusRunning default: return instance.StatusUnknown @@ -434,7 +480,7 @@ func setProviderSpec(rawConfig equinixmetaltypes.RawConfig, s clusterv1alpha1.Pr return nil, fmt.Errorf("machine.spec.providerconfig.value is nil") } - pconfig, err := providerconfigtypes.GetConfig(s) + pconfig, err := providerconfig.GetConfig(s) if err != nil { return nil, err } @@ -453,37 +499,45 @@ func setProviderSpec(rawConfig equinixmetaltypes.RawConfig, s clusterv1alpha1.Pr return &runtime.RawExtension{Raw: rawPconfig}, nil } -func getDeviceByTag(client *packngo.Client, projectID, tag string) (*packngo.Device, error) { - devices, response, err := client.Devices.List(projectID, nil) +func getDeviceByTag(ctx context.Context, client *metalv1.APIClient, projectID, tag string) (*metalv1.Device, error) { + request := client.DevicesApi. + FindProjectDevices(ctx, projectID). 
+ Tag(tag) + + devices, response, err := client.DevicesApi.FindProjectDevicesExecute(request) if err != nil { return nil, metalErrorToTerminalError(err, response, "failed to list devices") } + response.Body.Close() - for _, device := range devices { - if itemInList(device.Tags, tag) { + for _, device := range devices.Devices { + if slices.Contains(device.Tags, tag) { return &device, nil } } + return nil, nil } // given a defined Kubermatic constant for an operating system, return the canonical slug for Equinix Metal. -func getNameForOS(os providerconfigtypes.OperatingSystem) (string, error) { +func getNameForOS(os providerconfig.OperatingSystem) (string, error) { switch os { - case providerconfigtypes.OperatingSystemUbuntu: - return "ubuntu_22_04", nil - case providerconfigtypes.OperatingSystemCentOS: - return "centos_7", nil - case providerconfigtypes.OperatingSystemFlatcar: + case providerconfig.OperatingSystemUbuntu: + return "ubuntu_24_04", nil + case providerconfig.OperatingSystemFlatcar: return "flatcar_stable", nil - case providerconfigtypes.OperatingSystemRockyLinux: + case providerconfig.OperatingSystemRockyLinux: return "rocky_8", nil } - return "", providerconfigtypes.ErrOSNotSupported + return "", providerconfig.ErrOSNotSupported } -func getClient(apiKey string) *packngo.Client { - return packngo.NewClientWithAuth("kubermatic", apiKey, nil) +func getClient(apiKey string) *metalv1.APIClient { + configuration := metalv1.NewConfiguration() + configuration.UserAgent = fmt.Sprintf("kubermatic/machine-controller %s", configuration.UserAgent) + configuration.AddDefaultHeader("X-Auth-Token", apiKey) + + return metalv1.NewAPIClient(configuration) } func generateTag(ID string) string { @@ -502,13 +556,13 @@ func getTagUID(tag string) (string, error) { // can be qualified as a "terminal" error, for more info see v1alpha1.MachineStatus // // if the given error doesn't qualify the error passed as an argument will be returned. 
-func metalErrorToTerminalError(err error, response *packngo.Response, msg string) error { +func metalErrorToTerminalError(err error, response *http.Response, msg string) error { prepareAndReturnError := func() error { - return fmt.Errorf("%s, due to %w", msg, err) + return fmt.Errorf("%s: %w", msg, err) } if err != nil { - if response != nil && response.Response != nil && response.Response.StatusCode == 403 { + if response != nil && response.StatusCode == http.StatusForbidden { // authorization primitives come from MachineSpec // thus we are setting InvalidConfigurationMachineError return cloudprovidererrors.TerminalError{ @@ -522,44 +576,3 @@ func metalErrorToTerminalError(err error, response *packngo.Response, msg string return err } - -func itemInList(list []string, item string) bool { - for _, elm := range list { - if elm == item { - return true - } - } - return false -} - -func itemsNotInList(list, items []string) []string { - listMap := make(map[string]bool) - missing := make([]string, 0) - for _, item := range list { - listMap[item] = true - } - for _, item := range items { - if _, ok := listMap[item]; !ok { - missing = append(missing, item) - } - } - return missing -} - -func facilityProp(vs []packngo.Facility, field string) []string { - vsm := make([]string, len(vs)) - for i, v := range vs { - val := reflect.ValueOf(v) - vsm[i] = val.FieldByName(field).String() - } - return vsm -} - -func planProp(vs []packngo.Plan, field string) []string { - vsm := make([]string, len(vs)) - for i, v := range vs { - val := reflect.ValueOf(v) - vsm[i] = val.FieldByName(field).String() - } - return vsm -} diff --git a/pkg/cloudprovider/provider/external/provider.go b/pkg/cloudprovider/provider/external/provider.go new file mode 100644 index 000000000..9ce1e4cd8 --- /dev/null +++ b/pkg/cloudprovider/provider/external/provider.go @@ -0,0 +1,97 @@ +/* +Copyright 2025 The Machine Controller Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package external + +import ( + "context" + + "go.uber.org/zap" + + "k8c.io/machine-controller/pkg/cloudprovider/instance" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + "k8c.io/machine-controller/sdk/providerconfig" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" +) + +type provider struct{} + +type CloudProviderSpec struct { +} + +type CloudProviderInstance struct{} + +func (f CloudProviderInstance) Name() string { + return "" +} + +func (f CloudProviderInstance) ID() string { + return "" +} + +func (f CloudProviderInstance) ProviderID() string { + return "" +} + +func (f CloudProviderInstance) Addresses() map[string]corev1.NodeAddressType { + return nil +} + +func (f CloudProviderInstance) Status() instance.Status { + return instance.StatusUnknown +} + +// New returns an external cloud provider. +func New(_ providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { + return &provider{} +} + +func (p *provider) AddDefaults(_ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { + return spec, nil +} + +// Validate returns success or failure according to its ExternalCloudProviderSpec.
+func (p *provider) Validate(_ context.Context, _ *zap.SugaredLogger, _ clusterv1alpha1.MachineSpec) error { + return nil +} + +func (p *provider) Get(_ context.Context, _ *zap.SugaredLogger, _ *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { + return CloudProviderInstance{}, nil +} + +// Create creates a cloud instance according to the given machine. +func (p *provider) Create(_ context.Context, _ *zap.SugaredLogger, _ *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData, _ string) (instance.Instance, error) { + return CloudProviderInstance{}, nil +} + +func (p *provider) Cleanup(_ context.Context, _ *zap.SugaredLogger, _ *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (bool, error) { + return true, nil +} + +func (p *provider) MigrateUID(_ context.Context, _ *zap.SugaredLogger, _ *clusterv1alpha1.Machine, _ types.UID) error { + return nil +} + +func (p *provider) MachineMetricsLabels(_ *clusterv1alpha1.Machine) (map[string]string, error) { + return map[string]string{}, nil +} + +func (p *provider) SetMetricsForMachines(_ clusterv1alpha1.MachineList) error { + return nil +} diff --git a/pkg/cloudprovider/provider/fake/provider.go b/pkg/cloudprovider/provider/fake/provider.go index c1d0c7c56..b07091a13 100644 --- a/pkg/cloudprovider/provider/fake/provider.go +++ b/pkg/cloudprovider/provider/fake/provider.go @@ -21,15 +21,15 @@ import ( "encoding/json" "fmt" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/instance" - cloudprovidertypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/types" - "github.com/kubermatic/machine-controller/pkg/providerconfig" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + "go.uber.org/zap" + + "k8c.io/machine-controller/pkg/cloudprovider/instance" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + 
clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + "k8c.io/machine-controller/sdk/providerconfig" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" ) type provider struct{} @@ -61,17 +61,17 @@ func (f CloudProviderInstance) Status() instance.Status { } // New returns a fake cloud provider. -func New(_ *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { +func New(_ providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return &provider{} } -func (p *provider) AddDefaults(spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { +func (p *provider) AddDefaults(_ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { return spec, nil } // Validate returns success or failure based according to its FakeCloudProviderSpec. -func (p *provider) Validate(_ context.Context, machinespec clusterv1alpha1.MachineSpec) error { - pconfig, err := providerconfigtypes.GetConfig(machinespec.ProviderSpec) +func (p *provider) Validate(_ context.Context, log *zap.SugaredLogger, machinespec clusterv1alpha1.MachineSpec) error { + pconfig, err := providerconfig.GetConfig(machinespec.ProviderSpec) if err != nil { return err } @@ -82,32 +82,28 @@ func (p *provider) Validate(_ context.Context, machinespec clusterv1alpha1.Machi } if fakeCloudProviderSpec.PassValidation { - klog.V(3).Infof("succeeding validation as requested") + log.Debug("Succeeding validation as requested") return nil } - klog.V(3).Infof("failing validation as requested") + log.Debug("Failing validation as requested") return fmt.Errorf("failing validation as requested") } -func (p *provider) Get(_ context.Context, _ *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { +func (p *provider) Get(_ context.Context, _ *zap.SugaredLogger, _ *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { return CloudProviderInstance{}, nil } -func (p 
*provider) GetCloudConfig(_ clusterv1alpha1.MachineSpec) (string, string, error) { - return "", "", nil -} - // Create creates a cloud instance according to the given machine. -func (p *provider) Create(_ context.Context, _ *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData, _ string) (instance.Instance, error) { +func (p *provider) Create(_ context.Context, _ *zap.SugaredLogger, _ *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData, _ string) (instance.Instance, error) { return CloudProviderInstance{}, nil } -func (p *provider) Cleanup(_ context.Context, _ *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (bool, error) { +func (p *provider) Cleanup(_ context.Context, _ *zap.SugaredLogger, _ *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (bool, error) { return true, nil } -func (p *provider) MigrateUID(_ context.Context, _ *clusterv1alpha1.Machine, _ types.UID) error { +func (p *provider) MigrateUID(_ context.Context, _ *zap.SugaredLogger, _ *clusterv1alpha1.Machine, _ types.UID) error { return nil } diff --git a/pkg/cloudprovider/provider/gce/config.go b/pkg/cloudprovider/provider/gce/config.go index e292157a7..c78903569 100644 --- a/pkg/cloudprovider/provider/gce/config.go +++ b/pkg/cloudprovider/provider/gce/config.go @@ -21,20 +21,20 @@ limitations under the License. 
package gce import ( + "context" "encoding/base64" "encoding/json" "errors" "fmt" "strings" - "golang.org/x/oauth2/google" - "golang.org/x/oauth2/jwt" + "golang.org/x/oauth2" + googleoauth "golang.org/x/oauth2/google" "google.golang.org/api/compute/v1" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - gcetypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/gce/types" - "github.com/kubermatic/machine-controller/pkg/providerconfig" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + gcetypes "k8c.io/machine-controller/sdk/cloudprovider/gce" + "k8c.io/machine-controller/sdk/providerconfig" ) // Environment variables for the configuration of the Google Cloud project access. @@ -43,13 +43,15 @@ const ( ) // imageProjects maps the OS to the Google Cloud image projects. -var imageProjects = map[providerconfigtypes.OperatingSystem]string{ - providerconfigtypes.OperatingSystemUbuntu: "ubuntu-os-cloud", +var imageProjects = map[providerconfig.OperatingSystem]string{ + providerconfig.OperatingSystemUbuntu: "ubuntu-os-cloud", + providerconfig.OperatingSystemFlatcar: "kinvolk-public", } // imageFamilies maps the OS to the Google Cloud image projects. -var imageFamilies = map[providerconfigtypes.OperatingSystem]string{ - providerconfigtypes.OperatingSystemUbuntu: "ubuntu-2204-lts", +var imageFamilies = map[providerconfig.OperatingSystem]string{ + providerconfig.OperatingSystemUbuntu: "ubuntu-2404-lts-amd64", + providerconfig.OperatingSystemFlatcar: "flatcar-stable", } // diskTypes are the disk types of the Google Cloud. Map is used for @@ -67,13 +69,9 @@ const ( // newCloudProviderSpec creates a cloud provider specification out of the // given ProviderSpec. 
-func newCloudProviderSpec(provSpec v1alpha1.ProviderSpec) (*gcetypes.CloudProviderSpec, *providerconfigtypes.Config, error) { +func newCloudProviderSpec(provSpec clusterv1alpha1.ProviderSpec) (*gcetypes.CloudProviderSpec, *providerconfig.Config, error) { // Retrieve provider configuration from machine specification. - if provSpec.Value == nil { - return nil, nil, fmt.Errorf("machine.spec.providerconfig.value is nil") - } - - pconfig, err := providerconfigtypes.GetConfig(provSpec) + pconfig, err := providerconfig.GetConfig(provSpec) if err != nil { return nil, nil, fmt.Errorf("cannot unmarshal machine.spec.providerconfig.value: %w", err) } @@ -106,8 +104,7 @@ type config struct { provisioningModel *string labels map[string]string tags []string - jwtConfig *jwt.Config - providerConfig *providerconfigtypes.Config + providerConfig *providerconfig.Config assignPublicIPAddress bool multizone bool regional bool @@ -116,10 +113,16 @@ type config struct { enableNestedVirtualization bool minCPUPlatform string guestOSFeatures []string + clientConfig *clientConfig +} + +type clientConfig struct { + ClientEmail string + TokenSource oauth2.TokenSource } // newConfig creates a Provider configuration out of the passed resolver and spec. -func newConfig(resolver *providerconfig.ConfigVarResolver, spec v1alpha1.ProviderSpec) (*config, error) { +func newConfig(resolver providerconfig.ConfigVarResolver, spec clusterv1alpha1.ProviderSpec) (*config, error) { // Create cloud provider spec. 
cpSpec, providerConfig, err := newCloudProviderSpec(spec) if err != nil { @@ -135,48 +138,53 @@ func newConfig(resolver *providerconfig.ConfigVarResolver, spec v1alpha1.Provide guestOSFeatures: cpSpec.GuestOSFeatures, } - cfg.serviceAccount, err = resolver.GetConfigVarStringValueOrEnv(cpSpec.ServiceAccount, envGoogleServiceAccount) + cfg.serviceAccount, err = resolver.GetStringValueOrEnv(cpSpec.ServiceAccount, envGoogleServiceAccount) if err != nil { return nil, fmt.Errorf("cannot retrieve service account: %w", err) } + cfg.projectID, err = resolver.GetStringValue(cpSpec.ProjectID) + if err != nil { + return nil, fmt.Errorf("failed to retrieve project id: %w", err) + } + err = cfg.postprocessServiceAccount() if err != nil { return nil, fmt.Errorf("cannot prepare JWT: %w", err) } - cfg.zone, err = resolver.GetConfigVarStringValue(cpSpec.Zone) + cfg.zone, err = resolver.GetStringValue(cpSpec.Zone) if err != nil { return nil, fmt.Errorf("cannot retrieve zone: %w", err) } - cfg.machineType, err = resolver.GetConfigVarStringValue(cpSpec.MachineType) + cfg.machineType, err = resolver.GetStringValue(cpSpec.MachineType) if err != nil { return nil, fmt.Errorf("cannot retrieve machine type: %w", err) } - cfg.diskType, err = resolver.GetConfigVarStringValue(cpSpec.DiskType) + cfg.diskType, err = resolver.GetStringValue(cpSpec.DiskType) if err != nil { return nil, fmt.Errorf("cannot retrieve disk type: %w", err) } - cfg.network, err = resolver.GetConfigVarStringValue(cpSpec.Network) + cfg.network, err = resolver.GetStringValue(cpSpec.Network) if err != nil { return nil, fmt.Errorf("cannot retrieve network: %w", err) } - cfg.subnetwork, err = resolver.GetConfigVarStringValue(cpSpec.Subnetwork) + cfg.subnetwork, err = resolver.GetStringValue(cpSpec.Subnetwork) if err != nil { return nil, fmt.Errorf("cannot retrieve subnetwork: %w", err) } - cfg.preemptible, _, err = resolver.GetConfigVarBoolValue(cpSpec.Preemptible) + cfg.preemptible, _, err = 
resolver.GetBoolValue(cpSpec.Preemptible) if err != nil { return nil, fmt.Errorf("cannot retrieve preemptible: %w", err) } if cpSpec.AutomaticRestart != nil { - automaticRestart, _, err := resolver.GetConfigVarBoolValue(*cpSpec.AutomaticRestart) + automaticRestart, _, err := resolver.GetBoolValue(*cpSpec.AutomaticRestart) if err != nil { return nil, fmt.Errorf("cannot retrieve automaticRestart: %w", err) } @@ -188,7 +196,7 @@ func newConfig(resolver *providerconfig.ConfigVarResolver, spec v1alpha1.Provide } if cpSpec.ProvisioningModel != nil { - provisioningModel, err := resolver.GetConfigVarStringValue(*cpSpec.ProvisioningModel) + provisioningModel, err := resolver.GetStringValue(*cpSpec.ProvisioningModel) if err != nil { return nil, fmt.Errorf("cannot retrieve provisioningModel: %w", err) } @@ -199,38 +207,38 @@ func newConfig(resolver *providerconfig.ConfigVarResolver, spec v1alpha1.Provide cfg.assignPublicIPAddress = true if cpSpec.AssignPublicIPAddress != nil { - cfg.assignPublicIPAddress, _, err = resolver.GetConfigVarBoolValue(*cpSpec.AssignPublicIPAddress) + cfg.assignPublicIPAddress, _, err = resolver.GetBoolValue(*cpSpec.AssignPublicIPAddress) if err != nil { return nil, fmt.Errorf("failed to retrieve assignPublicIPAddress: %w", err) } } - cfg.multizone, _, err = resolver.GetConfigVarBoolValue(cpSpec.MultiZone) + cfg.multizone, _, err = resolver.GetBoolValue(cpSpec.MultiZone) if err != nil { return nil, fmt.Errorf("failed to retrieve multizone: %w", err) } - cfg.regional, _, err = resolver.GetConfigVarBoolValue(cpSpec.Regional) + cfg.regional, _, err = resolver.GetBoolValue(cpSpec.Regional) if err != nil { return nil, fmt.Errorf("failed to retrieve regional: %w", err) } - cfg.customImage, err = resolver.GetConfigVarStringValue(cpSpec.CustomImage) + cfg.customImage, err = resolver.GetStringValue(cpSpec.CustomImage) if err != nil { return nil, fmt.Errorf("failed to retrieve gce custom image: %w", err) } - cfg.disableMachineServiceAccount, _, err = 
resolver.GetConfigVarBoolValue(cpSpec.DisableMachineServiceAccount) + cfg.disableMachineServiceAccount, _, err = resolver.GetBoolValue(cpSpec.DisableMachineServiceAccount) if err != nil { return nil, fmt.Errorf("failed to retrieve disable machine service account: %w", err) } - cfg.enableNestedVirtualization, _, err = resolver.GetConfigVarBoolValue(cpSpec.EnableNestedVirtualization) + cfg.enableNestedVirtualization, _, err = resolver.GetBoolValue(cpSpec.EnableNestedVirtualization) if err != nil { return nil, fmt.Errorf("failed to retrieve enable nested virtualization: %w", err) } - cfg.minCPUPlatform, err = resolver.GetConfigVarStringValue(cpSpec.MinCPUPlatform) + cfg.minCPUPlatform, err = resolver.GetStringValue(cpSpec.MinCPUPlatform) if err != nil { return nil, fmt.Errorf("failed to retrieve min cpu platform: %w", err) } @@ -251,16 +259,28 @@ func (cfg *config) postprocessServiceAccount() error { sa = string(decoded) } + creds, err := googleoauth.CredentialsFromJSON(context.TODO(), []byte(sa), compute.ComputeScope) + if err != nil { + return fmt.Errorf("failed to parse credentials from google service account: %w", err) + } + + if cfg.projectID == "" { + cfg.projectID = creds.ProjectID + } + sam := map[string]string{} err = json.Unmarshal([]byte(sa), &sam) if err != nil { return fmt.Errorf("failed unmarshalling service account: %w", err) } - cfg.projectID = sam["project_id"] - cfg.jwtConfig, err = google.JWTConfigFromJSON([]byte(sa), compute.ComputeScope) - if err != nil { - return fmt.Errorf("failed preparing JWT: %w", err) + + // if the project id is not set in the machine deployment, we fallback to the project id that is embedded in the + // google service account json object. 
+ cfg.clientConfig = &clientConfig{ + ClientEmail: sam["client_email"], + TokenSource: creds.TokenSource, } + return nil } @@ -290,11 +310,11 @@ func (cfg *config) sourceImageDescriptor() (string, error) { } project, ok := imageProjects[cfg.providerConfig.OperatingSystem] if !ok { - return "", providerconfigtypes.ErrOSNotSupported + return "", providerconfig.ErrOSNotSupported } family, ok := imageFamilies[cfg.providerConfig.OperatingSystem] if !ok { - return "", providerconfigtypes.ErrOSNotSupported + return "", providerconfig.ErrOSNotSupported } return fmt.Sprintf("projects/%s/global/images/family/%s", project, family), nil } diff --git a/pkg/cloudprovider/provider/gce/instance.go b/pkg/cloudprovider/provider/gce/instance.go index 1d61d4bae..5259ce8b2 100644 --- a/pkg/cloudprovider/provider/gce/instance.go +++ b/pkg/cloudprovider/provider/gce/instance.go @@ -26,9 +26,9 @@ import ( "google.golang.org/api/compute/v1" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/instance" + "k8c.io/machine-controller/pkg/cloudprovider/instance" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" ) // Possible instance statuses. @@ -61,19 +61,22 @@ func (gi *googleInstance) ID() string { } func (gi *googleInstance) ProviderID() string { + if gi.ci == nil || gi.ci.Name == "" { + return "" + } return fmt.Sprintf("gce://%s/%s/%s", gi.projectID, gi.zone, gi.ci.Name) } // Addresses implements instance.Instance. 
-func (gi *googleInstance) Addresses() map[string]v1.NodeAddressType { - addrs := map[string]v1.NodeAddressType{} +func (gi *googleInstance) Addresses() map[string]corev1.NodeAddressType { + addrs := map[string]corev1.NodeAddressType{} for _, ifc := range gi.ci.NetworkInterfaces { - addrs[ifc.NetworkIP] = v1.NodeInternalIP + addrs[ifc.NetworkIP] = corev1.NodeInternalIP for _, ac := range ifc.AccessConfigs { - addrs[ac.NatIP] = v1.NodeExternalIP + addrs[ac.NatIP] = corev1.NodeExternalIP } for _, ac := range ifc.Ipv6AccessConfigs { - addrs[ac.ExternalIpv6] = v1.NodeExternalIP + addrs[ac.ExternalIpv6] = corev1.NodeExternalIP } } @@ -83,17 +86,17 @@ func (gi *googleInstance) Addresses() map[string]v1.NodeAddressType { // Zonal DNS is present for newer projects and has the following FQDN format: // [INSTANCE_NAME].[ZONE].c.[PROJECT_ID].internal zonalDNS := fmt.Sprintf("%s.%s.c.%s.internal", gi.ci.Name, gi.zone, gi.projectID) - addrs[zonalDNS] = v1.NodeInternalDNS + addrs[zonalDNS] = corev1.NodeInternalDNS // Global DNS is present for older projects and has the following FQDN format: // [INSTANCE_NAME].c.[PROJECT_ID].internal globalDNS := fmt.Sprintf("%s.c.%s.internal", gi.ci.Name, gi.projectID) - addrs[globalDNS] = v1.NodeInternalDNS + addrs[globalDNS] = corev1.NodeInternalDNS // GCP provides the search paths to resolve the machine's name, // so we add is as a DNS name // https://cloud.google.com/compute/docs/internal-dns#resolv.conf - addrs[gi.ci.Name] = v1.NodeInternalDNS + addrs[gi.ci.Name] = corev1.NodeInternalDNS return addrs } diff --git a/pkg/cloudprovider/provider/gce/provider.go b/pkg/cloudprovider/provider/gce/provider.go index 72f13602a..93eabd389 100644 --- a/pkg/cloudprovider/provider/gce/provider.go +++ b/pkg/cloudprovider/provider/gce/provider.go @@ -29,17 +29,17 @@ import ( "cloud.google.com/go/logging" monitoring "cloud.google.com/go/monitoring/apiv3/v2" + "go.uber.org/zap" compute "google.golang.org/api/compute/v1" "google.golang.org/api/googleapi" - 
"github.com/kubermatic/machine-controller/pkg/apis/cluster/common" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - cloudprovidererrors "github.com/kubermatic/machine-controller/pkg/cloudprovider/errors" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/instance" - gcetypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/gce/types" - cloudprovidertypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/types" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/util" - "github.com/kubermatic/machine-controller/pkg/providerconfig" + cloudprovidererrors "k8c.io/machine-controller/pkg/cloudprovider/errors" + "k8c.io/machine-controller/pkg/cloudprovider/instance" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + "k8c.io/machine-controller/sdk/apis/cluster/common" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + "k8c.io/machine-controller/sdk/net" + "k8c.io/machine-controller/sdk/providerconfig" "k8s.io/apimachinery/pkg/types" ) @@ -56,7 +56,6 @@ const ( errInvalidDiskType = "Disk type is missing or has wrong type, allowed are 'pd-standard' and 'pd-ssd'" errRetrieveInstance = "Failed to retrieve instance: %v" errGotTooManyInstances = "Got more than 1 instance matching the machine UID label" - errCloudConfig = "Failed to convert cloud-config to string: %v" errInsertInstance = "Failed to insert instance: %v" errDeleteInstance = "Failed to delete instance: %v" errSetLabels = "Failed to set the labels for the new machine UID: %v" @@ -73,18 +72,18 @@ var _ cloudprovidertypes.Provider = New(nil) // Provider implements the cloud.Provider interface for the Google Cloud Platform. type Provider struct { - resolver *providerconfig.ConfigVarResolver + resolver providerconfig.ConfigVarResolver } // New creates a cloud provider instance for the Google Cloud Platform. 
-func New(configVarResolver *providerconfig.ConfigVarResolver) *Provider { +func New(configVarResolver providerconfig.ConfigVarResolver) *Provider { return &Provider{ resolver: configVarResolver, } } // AddDefaults reads the MachineSpec and applies defaults for provider specific fields. -func (p *Provider) AddDefaults(spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { +func (p *Provider) AddDefaults(_ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { // Read cloud provider spec. cpSpec, _, err := newCloudProviderSpec(spec.ProviderSpec) if err != nil { @@ -102,7 +101,7 @@ func (p *Provider) AddDefaults(spec clusterv1alpha1.MachineSpec) (clusterv1alpha } // Validate checks the given machine's specification. -func (p *Provider) Validate(_ context.Context, spec clusterv1alpha1.MachineSpec) error { +func (p *Provider) Validate(_ context.Context, _ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) error { // Read configuration. 
cfg, err := newConfig(p.resolver, spec.ProviderSpec) if err != nil { @@ -117,13 +116,13 @@ func (p *Provider) Validate(_ context.Context, spec clusterv1alpha1.MachineSpec) } switch cfg.providerConfig.Network.GetIPFamily() { - case util.IPFamilyUnspecified, util.IPFamilyIPv4: + case net.IPFamilyUnspecified, net.IPFamilyIPv4: // noop - case util.IPFamilyIPv6: - return newError(common.InvalidConfigurationMachineError, util.ErrIPv6OnlyUnsupported) - case util.IPFamilyIPv4IPv6, util.IPFamilyIPv6IPv4: + case net.IPFamilyIPv6: + return newError(common.InvalidConfigurationMachineError, net.ErrIPv6OnlyUnsupported) + case net.IPFamilyIPv4IPv6, net.IPFamilyIPv6IPv4: default: - return newError(common.InvalidConfigurationMachineError, util.ErrUnknownNetworkFamily, cfg.providerConfig.Network.GetIPFamily()) + return newError(common.InvalidConfigurationMachineError, net.ErrUnknownNetworkFamily, cfg.providerConfig.Network.GetIPFamily()) } if cfg.machineType == "" { @@ -143,18 +142,18 @@ func (p *Provider) Validate(_ context.Context, spec clusterv1alpha1.MachineSpec) } // Get retrieves a node instance that is associated with the given machine. -func (p *Provider) Get(_ context.Context, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { - return p.get(machine) +func (p *Provider) Get(ctx context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { + return p.get(ctx, machine) } -func (p *Provider) get(machine *clusterv1alpha1.Machine) (*googleInstance, error) { +func (p *Provider) get(ctx context.Context, machine *clusterv1alpha1.Machine) (*googleInstance, error) { // Read configuration. cfg, err := newConfig(p.resolver, machine.Spec.ProviderSpec) if err != nil { return nil, newError(common.InvalidConfigurationMachineError, errMachineSpec, err) } // Connect to Google compute. 
- svc, err := connectComputeService(cfg) + svc, err := connectComputeService(ctx, cfg) if err != nil { return nil, newError(common.InvalidConfigurationMachineError, errConnect, err) } @@ -183,46 +182,20 @@ func (p *Provider) get(machine *clusterv1alpha1.Machine) (*googleInstance, error }, nil } -// GetCloudConfig returns the cloud provider specific cloud-config for the kubelet. -func (p *Provider) GetCloudConfig(spec clusterv1alpha1.MachineSpec) (config string, name string, err error) { - // Read configuration. - cfg, err := newConfig(p.resolver, spec.ProviderSpec) - if err != nil { - return "", "", newError(common.InvalidConfigurationMachineError, errMachineSpec, err) - } - // Init cloud configuration. - cc := &gcetypes.CloudConfig{ - Global: gcetypes.GlobalOpts{ - ProjectID: cfg.projectID, - LocalZone: cfg.zone, - MultiZone: cfg.multizone, - Regional: cfg.regional, - NetworkName: cfg.network, - SubnetworkName: cfg.subnetwork, - NodeTags: cfg.tags, - }, - } - config, err = cc.AsString() - if err != nil { - return "", "", newError(common.InvalidConfigurationMachineError, errCloudConfig, err) - } - return config, "gce", nil -} - // Create inserts a cloud instance according to the given machine. -func (p *Provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { +func (p *Provider) Create(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { // Read configuration. cfg, err := newConfig(p.resolver, machine.Spec.ProviderSpec) if err != nil { return nil, newError(common.InvalidConfigurationMachineError, errMachineSpec, err) } // Connect to Google compute. 
- svc, err := connectComputeService(cfg) + svc, err := connectComputeService(ctx, cfg) if err != nil { return nil, newError(common.InvalidConfigurationMachineError, errConnect, err) } // Create Google compute instance spec and insert it. - networkInterfaces, err := svc.networkInterfaces(cfg) + networkInterfaces, err := svc.networkInterfaces(log, cfg) if err != nil { return nil, newError(common.InvalidConfigurationMachineError, errMachineSpec, err) } @@ -261,7 +234,7 @@ func (p *Provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, if !cfg.disableMachineServiceAccount { inst.ServiceAccounts = []*compute.ServiceAccount{ { - Email: cfg.jwtConfig.Email, + Email: cfg.clientConfig.ClientEmail, Scopes: append( monitoring.DefaultAuthScopes(), compute.ComputeScope, @@ -294,23 +267,23 @@ func (p *Provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, if err != nil { return nil, newError(common.InvalidConfigurationMachineError, errInsertInstance, err) } - err = svc.waitZoneOperation(cfg, op.Name) + err = svc.waitZoneOperation(ctx, cfg, op.Name) if err != nil { return nil, newError(common.InvalidConfigurationMachineError, errInsertInstance, err) } // Retrieve it to get a full qualified instance. - return p.Get(ctx, machine, data) + return p.Get(ctx, log, machine, data) } // Cleanup deletes the instance associated with the machine and all associated resources. -func (p *Provider) Cleanup(_ context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (bool, error) { +func (p *Provider) Cleanup(ctx context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (bool, error) { // Read configuration. cfg, err := newConfig(p.resolver, machine.Spec.ProviderSpec) if err != nil { return false, newError(common.InvalidConfigurationMachineError, errMachineSpec, err) } // Connect to Google compute. 
- svc, err := connectComputeService(cfg) + svc, err := connectComputeService(ctx, cfg) if err != nil { return false, newError(common.InvalidConfigurationMachineError, errConnect, err) } @@ -325,7 +298,7 @@ func (p *Provider) Cleanup(_ context.Context, machine *clusterv1alpha1.Machine, } return false, newError(common.InvalidConfigurationMachineError, errDeleteInstance, err) } - err = svc.waitZoneOperation(cfg, op.Name) + err = svc.waitZoneOperation(ctx, cfg, op.Name) if err != nil { return false, newError(common.InvalidConfigurationMachineError, errDeleteInstance, err) } @@ -354,19 +327,19 @@ func (p *Provider) MachineMetricsLabels(machine *clusterv1alpha1.Machine) (map[s // MigrateUID updates the UID of an instance after the controller migrates types // and the UID of the machine object changed. -func (p *Provider) MigrateUID(_ context.Context, machine *clusterv1alpha1.Machine, newUID types.UID) error { +func (p *Provider) MigrateUID(ctx context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, newUID types.UID) error { // Read configuration. cfg, err := newConfig(p.resolver, machine.Spec.ProviderSpec) if err != nil { return newError(common.InvalidConfigurationMachineError, errMachineSpec, err) } // Connect to Google compute. - svc, err := connectComputeService(cfg) + svc, err := connectComputeService(ctx, cfg) if err != nil { return newError(common.InvalidConfigurationMachineError, errConnect, err) } // Retrieve instance. 
- inst, err := p.get(machine) + inst, err := p.get(ctx, machine) if err != nil { if errors.Is(err, cloudprovidererrors.ErrInstanceNotFound) { return nil @@ -388,7 +361,7 @@ func (p *Provider) MigrateUID(_ context.Context, machine *clusterv1alpha1.Machin if err != nil { return newError(common.InvalidConfigurationMachineError, errSetLabels, err) } - err = svc.waitZoneOperation(cfg, op.Name) + err = svc.waitZoneOperation(ctx, cfg, op.Name) if err != nil { return newError(common.InvalidConfigurationMachineError, errSetLabels, err) } @@ -396,7 +369,7 @@ func (p *Provider) MigrateUID(_ context.Context, machine *clusterv1alpha1.Machin } // SetMetricsForMachines allows providers to provide provider-specific metrics. -func (p *Provider) SetMetricsForMachines(machines clusterv1alpha1.MachineList) error { +func (p *Provider) SetMetricsForMachines(_ clusterv1alpha1.MachineList) error { return nil } diff --git a/pkg/cloudprovider/provider/gce/provider_test.go b/pkg/cloudprovider/provider/gce/provider_test.go index d1217a2f8..47a6807d8 100644 --- a/pkg/cloudprovider/provider/gce/provider_test.go +++ b/pkg/cloudprovider/provider/gce/provider_test.go @@ -24,8 +24,10 @@ import ( "strings" "testing" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/providerconfig" + "go.uber.org/zap" + + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + "k8c.io/machine-controller/sdk/providerconfig/configvar" "k8s.io/apimachinery/pkg/runtime" fake2 "sigs.k8s.io/controller-runtime/pkg/client/fake" @@ -121,16 +123,16 @@ func TestValidate(t *testing.T) { return data } - p := New(providerconfig.NewConfigVarResolver(context.Background(), fake2.NewClientBuilder().Build())) + p := New(configvar.NewResolver(context.Background(), fake2.NewClientBuilder().Build())) tests := []struct { name string - mspec v1alpha1.MachineSpec + mspec clusterv1alpha1.MachineSpec expectErr bool }{ { "without IP family", - 
v1alpha1.MachineSpec{ - ProviderSpec: v1alpha1.ProviderSpec{ + clusterv1alpha1.MachineSpec{ + ProviderSpec: clusterv1alpha1.ProviderSpec{ Value: &runtime.RawExtension{ Raw: rawBytes(testProviderSpec()), }, @@ -140,8 +142,8 @@ func TestValidate(t *testing.T) { }, { "empty IP family", - v1alpha1.MachineSpec{ - ProviderSpec: v1alpha1.ProviderSpec{ + clusterv1alpha1.MachineSpec{ + ProviderSpec: clusterv1alpha1.ProviderSpec{ Value: &runtime.RawExtension{ Raw: rawBytes(testMap(testProviderSpec()). with("network.ipFamily", ""), @@ -153,8 +155,8 @@ func TestValidate(t *testing.T) { }, { "with IP family", - v1alpha1.MachineSpec{ - ProviderSpec: v1alpha1.ProviderSpec{ + clusterv1alpha1.MachineSpec{ + ProviderSpec: clusterv1alpha1.ProviderSpec{ Value: &runtime.RawExtension{ Raw: rawBytes(testMap(testProviderSpec()). with("network.ipFamily", "IPv4+IPv6"), @@ -168,7 +170,7 @@ func TestValidate(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - err := p.Validate(context.Background(), test.mspec) + err := p.Validate(context.Background(), zap.NewNop().Sugar(), test.mspec) if (err != nil) != test.expectErr { t.Fatalf("expectedErr: %t, got: %v", test.expectErr, err) } diff --git a/pkg/cloudprovider/provider/gce/service.go b/pkg/cloudprovider/provider/gce/service.go index d58a9268d..fb15961c7 100644 --- a/pkg/cloudprovider/provider/gce/service.go +++ b/pkg/cloudprovider/provider/gce/service.go @@ -22,14 +22,16 @@ package gce import ( "context" + "errors" "fmt" "time" + "go.uber.org/zap" + "golang.org/x/oauth2" "google.golang.org/api/compute/v1" "google.golang.org/api/option" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/klog" ) const ( @@ -53,17 +55,22 @@ type service struct { } // connectComputeService establishes a service connection to the Compute Engine. 
-func connectComputeService(cfg *config) (*service, error) { - client := cfg.jwtConfig.Client(context.Background()) - svc, err := compute.NewService(context.Background(), option.WithHTTPClient(client)) - if err != nil { - return nil, fmt.Errorf("cannot connect to Google Cloud: %w", err) +func connectComputeService(ctx context.Context, cfg *config) (*service, error) { + if cfg.clientConfig != nil && + cfg.clientConfig.TokenSource != nil { + client := oauth2.NewClient(ctx, cfg.clientConfig.TokenSource) + svc, err := compute.NewService(ctx, option.WithHTTPClient(client)) + if err != nil { + return nil, fmt.Errorf("cannot connect to Google Cloud: %w", err) + } + return &service{svc}, nil } - return &service{svc}, nil + + return nil, errors.New("gcp token source was not found") } // networkInterfaces returns the configured network interfaces for an instance creation. -func (svc *service) networkInterfaces(cfg *config) ([]*compute.NetworkInterface, error) { +func (svc *service) networkInterfaces(log *zap.SugaredLogger, cfg *config) ([]*compute.NetworkInterface, error) { network := cfg.network if cfg.network == "" && cfg.subnetwork == "" { @@ -75,7 +82,7 @@ func (svc *service) networkInterfaces(cfg *config) ([]*compute.NetworkInterface, Subnetwork: cfg.subnetwork, } - klog.Infof("using network:%s subnetwork: %s", cfg.network, cfg.subnetwork) + log.Infow("Network configuration", "network", cfg.network, "subnetwork", cfg.subnetwork) if cfg.assignPublicIPAddress { ifc.AccessConfigs = []*compute.AccessConfig{ @@ -102,7 +109,7 @@ func (svc *service) networkInterfaces(cfg *config) ([]*compute.NetworkInterface, }, } } else { - klog.Infof("IP family doesn't specify dual stack: %s", cfg.providerConfig.Network.GetIPFamily()) + log.Infow("IP family doesn't specify dual stack", "family", cfg.providerConfig.Network.GetIPFamily()) } } return []*compute.NetworkInterface{ifc}, nil @@ -132,18 +139,18 @@ func (svc *service) attachedDisks(cfg *config) ([]*compute.AttachedDisk, error) } // 
waitZoneOperation waits for a GCE operation in a zone to be completed or timed out. -func (svc *service) waitZoneOperation(cfg *config, opName string) error { - return svc.waitOperation(func() (*compute.Operation, error) { +func (svc *service) waitZoneOperation(ctx context.Context, cfg *config, opName string) error { + return svc.waitOperation(ctx, func() (*compute.Operation, error) { return svc.ZoneOperations.Get(cfg.projectID, cfg.zone, opName).Do() }) } // waitOperation waits for a GCE operation to be completed or timed out. -func (svc *service) waitOperation(refreshOperation func() (*compute.Operation, error)) error { +func (svc *service) waitOperation(ctx context.Context, refreshOperation func() (*compute.Operation, error)) error { var op *compute.Operation var err error - return wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) { + return wait.PollUntilContextTimeout(ctx, pollInterval, pollTimeout, false, func(_ context.Context) (bool, error) { op, err = refreshOperation() if err != nil { return false, err diff --git a/pkg/cloudprovider/provider/gce/types/cloudconfig.go b/pkg/cloudprovider/provider/gce/types/cloudconfig.go deleted file mode 100644 index 9c4201d4b..000000000 --- a/pkg/cloudprovider/provider/gce/types/cloudconfig.go +++ /dev/null @@ -1,79 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// -// Google Cloud Provider for the Machine Controller -// - -package types - -import ( - "bytes" - "fmt" - "text/template" - - "github.com/Masterminds/sprig/v3" - - "github.com/kubermatic/machine-controller/pkg/ini" -) - -// cloudConfigTemplate renders the cloud-config in gcfg format. All -// fields are optional, that's why containing the ifs and the explicit newlines. -const cloudConfigTemplate = "[global]\n" + - "project-id = {{ .Global.ProjectID | iniEscape }}\n" + - "local-zone = {{ .Global.LocalZone | iniEscape }}\n" + - "network-name = {{ .Global.NetworkName | iniEscape }}\n" + - "subnetwork-name = {{ .Global.SubnetworkName | iniEscape }}\n" + - "token-url = {{ .Global.TokenURL | iniEscape }}\n" + - "multizone = {{ .Global.MultiZone }}\n" + - "regional = {{ .Global.Regional }}\n" + - "{{ range .Global.NodeTags }}node-tags = {{ . | iniEscape }}\n{{end}}" - -// GlobalOpts contains the values of the global section of the cloud configuration. -type GlobalOpts struct { - ProjectID string - LocalZone string - NetworkName string - SubnetworkName string - TokenURL string - MultiZone bool - Regional bool - NodeTags []string - RHSMOfflineToken string -} - -// CloudConfig contains only the section global. -type CloudConfig struct { - Global GlobalOpts -} - -// AsString renders the cloud configuration as string. 
-func (cc *CloudConfig) AsString() (string, error) { - funcMap := sprig.TxtFuncMap() - funcMap["iniEscape"] = ini.Escape - - tmpl, err := template.New("cloud-config").Funcs(funcMap).Parse(cloudConfigTemplate) - if err != nil { - return "", fmt.Errorf("failed to parse the cloud config template: %w", err) - } - - buf := &bytes.Buffer{} - if err := tmpl.Execute(buf, cc); err != nil { - return "", fmt.Errorf("failed to execute cloud config template: %w", err) - } - - return buf.String(), nil -} diff --git a/pkg/cloudprovider/provider/gce/types/cloudconfig_test.go b/pkg/cloudprovider/provider/gce/types/cloudconfig_test.go deleted file mode 100644 index 6b91cefd5..000000000 --- a/pkg/cloudprovider/provider/gce/types/cloudconfig_test.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// -// Google Cloud Provider for the Machine Controller -// -// Unit Tests -// - -package types - -import ( - "testing" -) - -func TestCloudConfigAsString(t *testing.T) { - tests := []struct { - name string - config *CloudConfig - contents string - }{ - { - name: "minimum test", - config: &CloudConfig{ - Global: GlobalOpts{ - ProjectID: "my-project-id", - LocalZone: "my-zone", - NetworkName: "my-cool-network", - SubnetworkName: "my-cool-subnetwork", - TokenURL: "nil", - MultiZone: true, - Regional: true, - NodeTags: []string{"tag1", "tag2"}, - }, - }, - contents: "[global]\n" + - "project-id = \"my-project-id\"\n" + - "local-zone = \"my-zone\"\n" + - "network-name = \"my-cool-network\"\n" + - "subnetwork-name = \"my-cool-subnetwork\"\n" + - "token-url = \"nil\"\n" + - "multizone = true\n" + - "regional = true\n" + - "node-tags = \"tag1\"\n" + - "node-tags = \"tag2\"\n", - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - s, err := test.config.AsString() - if err != nil { - t.Fatalf("failed to convert to string: %v", err) - } - if s != test.contents { - t.Fatalf("output is not as expected: %s", s) - } - }) - } -} diff --git a/pkg/cloudprovider/provider/gce/types/types.go b/pkg/cloudprovider/provider/gce/types/types.go deleted file mode 100644 index 8b8736bbe..000000000 --- a/pkg/cloudprovider/provider/gce/types/types.go +++ /dev/null @@ -1,85 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package types - -import ( - "encoding/json" - "fmt" - - "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/jsonutil" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" - - "k8s.io/apimachinery/pkg/runtime" -) - -// CloudProviderSpec contains the specification of the cloud provider taken -// from the provider configuration. -type CloudProviderSpec struct { - // ServiceAccount must be base64-encoded. - ServiceAccount providerconfigtypes.ConfigVarString `json:"serviceAccount,omitempty"` - Zone providerconfigtypes.ConfigVarString `json:"zone"` - MachineType providerconfigtypes.ConfigVarString `json:"machineType"` - DiskSize int64 `json:"diskSize"` - DiskType providerconfigtypes.ConfigVarString `json:"diskType"` - Network providerconfigtypes.ConfigVarString `json:"network"` - Subnetwork providerconfigtypes.ConfigVarString `json:"subnetwork"` - Preemptible providerconfigtypes.ConfigVarBool `json:"preemptible"` - AutomaticRestart *providerconfigtypes.ConfigVarBool `json:"automaticRestart,omitempty"` - ProvisioningModel *providerconfigtypes.ConfigVarString `json:"provisioningModel,omitempty"` - Labels map[string]string `json:"labels,omitempty"` - Tags []string `json:"tags,omitempty"` - AssignPublicIPAddress *providerconfigtypes.ConfigVarBool `json:"assignPublicIPAddress,omitempty"` - MultiZone providerconfigtypes.ConfigVarBool `json:"multizone"` - Regional providerconfigtypes.ConfigVarBool `json:"regional"` - CustomImage providerconfigtypes.ConfigVarString `json:"customImage,omitempty"` - DisableMachineServiceAccount providerconfigtypes.ConfigVarBool `json:"disableMachineServiceAccount,omitempty"` - EnableNestedVirtualization providerconfigtypes.ConfigVarBool `json:"enableNestedVirtualization,omitempty"` - MinCPUPlatform providerconfigtypes.ConfigVarString `json:"minCPUPlatform,omitempty"` - GuestOSFeatures []string `json:"guestOSFeatures,omitempty"` -} - -// 
UpdateProviderSpec updates the given provider spec with changed -// configuration values. -func (cpSpec *CloudProviderSpec) UpdateProviderSpec(spec v1alpha1.ProviderSpec) (*runtime.RawExtension, error) { - if spec.Value == nil { - return nil, fmt.Errorf("machine.spec.providerconfig.value is nil") - } - providerConfig := providerconfigtypes.Config{} - err := json.Unmarshal(spec.Value.Raw, &providerConfig) - if err != nil { - return nil, err - } - rawCPSpec, err := json.Marshal(cpSpec) - if err != nil { - return nil, err - } - providerConfig.CloudProviderSpec = runtime.RawExtension{Raw: rawCPSpec} - rawProviderConfig, err := json.Marshal(providerConfig) - if err != nil { - return nil, err - } - return &runtime.RawExtension{Raw: rawProviderConfig}, nil -} - -type RawConfig = CloudProviderSpec - -func GetConfig(pconfig providerconfigtypes.Config) (*RawConfig, error) { - rawConfig := &RawConfig{} - - return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) -} diff --git a/pkg/cloudprovider/provider/hetzner/provider.go b/pkg/cloudprovider/provider/hetzner/provider.go index 9c385e809..1fb24afc1 100644 --- a/pkg/cloudprovider/provider/hetzner/provider.go +++ b/pkg/cloudprovider/provider/hetzner/provider.go @@ -24,22 +24,22 @@ import ( "strconv" "strings" - "github.com/hetznercloud/hcloud-go/hcloud" - - "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/common/ssh" - cloudprovidererrors "github.com/kubermatic/machine-controller/pkg/cloudprovider/errors" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/instance" - hetznertypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/hetzner/types" - cloudprovidertypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/types" - "github.com/kubermatic/machine-controller/pkg/providerconfig" - 
providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" - - v1 "k8s.io/api/core/v1" + "github.com/hetznercloud/hcloud-go/v2/hcloud" + "go.uber.org/zap" + + "k8c.io/machine-controller/pkg/cloudprovider/common/ssh" + cloudprovidererrors "k8c.io/machine-controller/pkg/cloudprovider/errors" + "k8c.io/machine-controller/pkg/cloudprovider/instance" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + "k8c.io/machine-controller/pkg/version" + "k8c.io/machine-controller/sdk/apis/cluster/common" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + hetznertypes "k8c.io/machine-controller/sdk/cloudprovider/hetzner" + "k8c.io/machine-controller/sdk/providerconfig" + + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/klog" ) const ( @@ -47,11 +47,11 @@ const ( ) type provider struct { - configVarResolver *providerconfig.ConfigVarResolver + configVarResolver providerconfig.ConfigVarResolver } // New returns a Hetzner provider. 
-func New(configVarResolver *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { +func New(configVarResolver providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return &provider{configVarResolver: configVarResolver} } @@ -65,30 +65,29 @@ type Config struct { Networks []string Firewalls []string Labels map[string]string + AssignIPv4 bool + AssignIPv6 bool } -func getNameForOS(os providerconfigtypes.OperatingSystem) (string, error) { +func getNameForOS(os providerconfig.OperatingSystem) (string, error) { switch os { - case providerconfigtypes.OperatingSystemUbuntu: - return "ubuntu-22.04", nil - case providerconfigtypes.OperatingSystemCentOS: - return "centos-7", nil - case providerconfigtypes.OperatingSystemRockyLinux: - return "rocky-8", nil - } - return "", providerconfigtypes.ErrOSNotSupported + case providerconfig.OperatingSystemUbuntu: + return "ubuntu-24.04", nil + case providerconfig.OperatingSystemRockyLinux: + return "rocky-9", nil + } + return "", providerconfig.ErrOSNotSupported } func getClient(token string) *hcloud.Client { - return hcloud.NewClient(hcloud.WithToken(token)) + return hcloud.NewClient( + hcloud.WithToken(token), + hcloud.WithApplication("kubermatic-machine-controller", version.Get().String()), + ) } -func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *providerconfigtypes.Config, error) { - if provSpec.Value == nil { - return nil, nil, fmt.Errorf("machine.spec.providerconfig.value is nil") - } - - pconfig, err := providerconfigtypes.GetConfig(provSpec) +func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *providerconfig.Config, error) { + pconfig, err := providerconfig.GetConfig(provSpec) if err != nil { return nil, nil, err } @@ -103,38 +102,38 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p } c := Config{} - c.Token, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.Token, "HZ_TOKEN") + c.Token, err = 
p.configVarResolver.GetStringValueOrEnv(rawConfig.Token, "HZ_TOKEN") if err != nil { return nil, nil, fmt.Errorf("failed to get the value of \"token\" field, error = %w", err) } - c.ServerType, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.ServerType) + c.ServerType, err = p.configVarResolver.GetStringValue(rawConfig.ServerType) if err != nil { return nil, nil, err } - c.Datacenter, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.Datacenter) + c.Datacenter, err = p.configVarResolver.GetStringValue(rawConfig.Datacenter) if err != nil { return nil, nil, err } - c.Image, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.Image) + c.Image, err = p.configVarResolver.GetStringValue(rawConfig.Image) if err != nil { return nil, nil, err } - c.Location, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.Location) + c.Location, err = p.configVarResolver.GetStringValue(rawConfig.Location) if err != nil { return nil, nil, err } - c.PlacementGroupPrefix, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.PlacementGroupPrefix) + c.PlacementGroupPrefix, err = p.configVarResolver.GetStringValue(rawConfig.PlacementGroupPrefix) if err != nil { return nil, nil, err } for _, network := range rawConfig.Networks { - networkValue, err := p.configVarResolver.GetConfigVarStringValue(network) + networkValue, err := p.configVarResolver.GetStringValue(network) if err != nil { return nil, nil, err } @@ -142,13 +141,21 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p } for _, firewall := range rawConfig.Firewalls { - firewallValue, err := p.configVarResolver.GetConfigVarStringValue(firewall) + firewallValue, err := p.configVarResolver.GetStringValue(firewall) if err != nil { return nil, nil, err } c.Firewalls = append(c.Firewalls, firewallValue) } + ipv4, ipv6, err := p.publicIPsAssignment(rawConfig) + if err != nil { + return nil, nil, err + } + + c.AssignIPv4 = ipv4 + c.AssignIPv6 = ipv6 + c.Labels = 
rawConfig.Labels return &c, pconfig, err @@ -184,7 +191,7 @@ func (p *provider) getServerPlacementGroup(ctx context.Context, client *hcloud.C return createdPg.PlacementGroup, nil } -func (p *provider) Validate(ctx context.Context, spec clusterv1alpha1.MachineSpec) error { +func (p *provider) Validate(ctx context.Context, _ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) error { c, pc, err := p.getConfig(spec.ProviderSpec) if err != nil { return fmt.Errorf("failed to parse config: %w", err) @@ -194,11 +201,6 @@ func (p *provider) Validate(ctx context.Context, spec clusterv1alpha1.MachineSpe return errors.New("token is missing") } - _, err = getNameForOS(pc.OperatingSystem) - if err != nil { - return fmt.Errorf("invalid/not supported operating system specified %q: %w", pc.OperatingSystem, err) - } - client := getClient(c.Token) if c.Location != "" && c.Datacenter != "" { @@ -217,12 +219,27 @@ func (p *provider) Validate(ctx context.Context, spec clusterv1alpha1.MachineSpe } } - if c.Image != "" { - if _, _, err = client.Image.Get(ctx, c.Image); err != nil { - return fmt.Errorf("failed to get image: %w", err) + serverType, _, err := client.ServerType.Get(ctx, c.ServerType) + if err != nil { + return fmt.Errorf("failed to get server type: %w", err) + } + + if serverType == nil { + return fmt.Errorf("server type %q not found", c.ServerType) + } + + image := c.Image + if image == "" { + image, err = getNameForOS(pc.OperatingSystem) + if err != nil { + return fmt.Errorf("invalid/not supported operating system specified %q: %w", pc.OperatingSystem, err) } } + if _, _, err = client.Image.GetForArchitecture(ctx, image, serverType.Architecture); err != nil { + return fmt.Errorf("failed to get image: %w", err) + } + for _, network := range c.Networks { if _, _, err = client.Network.Get(ctx, network); err != nil { return fmt.Errorf("failed to get network %q: %w", network, err) @@ -239,14 +256,14 @@ func (p *provider) Validate(ctx context.Context, spec 
clusterv1alpha1.MachineSpe } } - if _, _, err = client.ServerType.Get(ctx, c.ServerType); err != nil { - return fmt.Errorf("failed to get server type: %w", err) + if !c.AssignIPv4 && !c.AssignIPv6 && len(c.Networks) < 1 { + return errors.New("server should have either a public ipv4, ipv6 or dedicated network") } return nil } -func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { +func (p *provider) Create(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { c, pc, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return nil, cloudprovidererrors.TerminalError{ @@ -273,10 +290,15 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, } c.Labels[machineUIDLabelKey] = string(machine.UID) + serverCreateOpts := hcloud.ServerCreateOpts{ Name: machine.Spec.Name, UserData: userdata, Labels: c.Labels, + PublicNet: &hcloud.ServerCreatePublicNet{ + EnableIPv4: c.AssignIPv4, + EnableIPv6: c.AssignIPv6, + }, } if c.Datacenter != "" { @@ -331,15 +353,6 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, serverCreateOpts.Firewalls = append(serverCreateOpts.Firewalls, &hcloud.ServerCreateFirewall{Firewall: *n}) } - image, _, err := client.Image.Get(ctx, c.Image) - if err != nil { - return nil, hzErrorToTerminalError(err, "failed to get image") - } - if image == nil { - return nil, fmt.Errorf("image %q does not exist", c.Image) - } - serverCreateOpts.Image = image - serverType, _, err := client.ServerType.Get(ctx, c.ServerType) if err != nil { return nil, hzErrorToTerminalError(err, "failed to get server type") @@ -349,6 +362,15 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, } serverCreateOpts.ServerType = serverType + image, _, err := 
client.Image.GetForArchitecture(ctx, c.Image, serverType.Architecture) + if err != nil { + return nil, hzErrorToTerminalError(err, "failed to get image") + } + if image == nil { + return nil, fmt.Errorf("image %q does not exist", c.Image) + } + serverCreateOpts.Image = image + // We generate a temporary SSH key here, because otherwise Hetzner creates // a password and sends it via E-Mail to the account owner, which can be quite // spammy. No one will ever get access to the private key. @@ -370,7 +392,7 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, defer func() { _, err := client.SSHKey.Delete(ctx, hkey) if err != nil { - klog.Errorf("Failed to delete temporary ssh key: %v", err) + log.Errorw("Failed to delete temporary ssh key", zap.Error(err)) } }() serverCreateOpts.SSHKeys = []*hcloud.SSHKey{hkey} @@ -386,8 +408,8 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, return &hetznerServer{server: serverCreateRes.Server}, nil } -func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (bool, error) { - instance, err := p.Get(ctx, machine, data) +func (p *provider) Cleanup(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (bool, error) { + instance, err := p.Get(ctx, log, machine, data) if err != nil { if errors.Is(err, cloudprovidererrors.ErrInstanceNotFound) { return true, nil @@ -406,7 +428,7 @@ func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine client := getClient(c.Token) hzServer := instance.(*hetznerServer).server - res, err := client.Server.Delete(ctx, hzServer) + _, res, err := client.Server.DeleteWithResult(ctx, hzServer) if err != nil { return false, hzErrorToTerminalError(err, "failed to delete the server") } @@ -436,11 +458,11 @@ func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine return false, nil 
} -func (p *provider) AddDefaults(spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { +func (p *provider) AddDefaults(_ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { return spec, nil } -func (p *provider) Get(ctx context.Context, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { +func (p *provider) Get(ctx context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { c, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return nil, cloudprovidererrors.TerminalError{ @@ -467,7 +489,7 @@ func (p *provider) Get(ctx context.Context, machine *clusterv1alpha1.Machine, _ return nil, cloudprovidererrors.ErrInstanceNotFound } -func (p *provider) MigrateUID(ctx context.Context, machine *clusterv1alpha1.Machine, newUID types.UID) error { +func (p *provider) MigrateUID(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, newUID types.UID) error { c, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return cloudprovidererrors.TerminalError{ @@ -483,31 +505,27 @@ func (p *provider) MigrateUID(ctx context.Context, machine *clusterv1alpha1.Mach return fmt.Errorf("failed to get server: %w", err) } if server == nil { - klog.Infof("No instance exists for machine %s", machine.Name) + log.Info("No instance exists for machine") return nil } - klog.Infof("Setting UID label for machine %s", machine.Name) + log.Info("Setting UID label for machine") _, response, err := client.Server.Update(ctx, server, hcloud.ServerUpdateOpts{ Labels: map[string]string{machineUIDLabelKey: string(newUID)}, }) if err != nil { return fmt.Errorf("failed to update UID label: %w", err) } - if response.Response.StatusCode != http.StatusOK { - return fmt.Errorf("got unexpected response code %v, expected %v", response.Response.Status, http.StatusOK) + if 
response.StatusCode != http.StatusOK { + return fmt.Errorf("got unexpected response code %v, expected %v", response.Status, http.StatusOK) } // This succeeds, but does not result in a label on the server, seems to be a bug // on Hetzner side - klog.Infof("Successfully set UID label for machine %s", machine.Name) + log.Info("Successfully set UID label for machine") return nil } -func (p *provider) GetCloudConfig(spec clusterv1alpha1.MachineSpec) (config string, name string, err error) { - return "", "", nil -} - func (p *provider) MachineMetricsLabels(machine *clusterv1alpha1.Machine) (map[string]string, error) { labels := make(map[string]string) @@ -530,27 +548,30 @@ func (s *hetznerServer) Name() string { } func (s *hetznerServer) ID() string { - return strconv.Itoa(s.server.ID) + return strconv.FormatInt(s.server.ID, 10) } func (s *hetznerServer) ProviderID() string { + if s.server == nil || s.server.ID == 0 { + return "" + } return fmt.Sprintf("hcloud://%d", s.server.ID) } -func (s *hetznerServer) Addresses() map[string]v1.NodeAddressType { - addresses := map[string]v1.NodeAddressType{} +func (s *hetznerServer) Addresses() map[string]corev1.NodeAddressType { + addresses := map[string]corev1.NodeAddressType{} for _, fips := range s.server.PublicNet.FloatingIPs { - addresses[fips.IP.String()] = v1.NodeExternalIP + addresses[fips.IP.String()] = corev1.NodeExternalIP } for _, privateNetwork := range s.server.PrivateNet { - addresses[privateNetwork.IP.String()] = v1.NodeInternalIP + addresses[privateNetwork.IP.String()] = corev1.NodeInternalIP } - addresses[s.server.PublicNet.IPv4.IP.String()] = v1.NodeExternalIP + addresses[s.server.PublicNet.IPv4.IP.String()] = corev1.NodeExternalIP // For a given IPv6 network of 2001:db8:1234::/64, the instance address is 2001:db8:1234::1 // Reference: https://github.com/hetznercloud/hcloud-cloud-controller-manager/blob/v1.12.1/hcloud/instances.go#L165-167 - if !s.server.PublicNet.IPv6.IP.IsUnspecified() { + if 
s.server.PublicNet.IPv6.IP != nil && !s.server.PublicNet.IPv6.IP.IsUnspecified() { s.server.PublicNet.IPv6.IP[len(s.server.PublicNet.IPv6.IP)-1] |= 0x01 - addresses[s.server.PublicNet.IPv6.IP.String()] = v1.NodeExternalIP + addresses[s.server.PublicNet.IPv6.IP.String()] = corev1.NodeExternalIP } return addresses } @@ -591,6 +612,30 @@ func hzErrorToTerminalError(err error, msg string) error { return err } -func (p *provider) SetMetricsForMachines(machines clusterv1alpha1.MachineList) error { +func (p *provider) publicIPsAssignment(rawConfig *hetznertypes.RawConfig) (bool, bool, error) { + assignIPv4, ipv4Set, err := p.configVarResolver.GetBoolValue(rawConfig.AssignPublicIPv4) + if err != nil { + return false, false, err + } + + assignIPv6, ipv6Set, err := p.configVarResolver.GetBoolValue(rawConfig.AssignPublicIPv6) + if err != nil { + return false, false, err + } + + // hetzner default behaviour assigns public ips when users don't set them explicitly for the server. In order to + // retain this behaviour, if the field AssignPublicIPv4/AssignPublicIPv6 in MachineDeployment is not set, machine controller + // default them to true. + if !ipv4Set { + assignIPv4 = true + } + if !ipv6Set { + assignIPv6 = true + } + + return assignIPv4, assignIPv6, nil +} + +func (p *provider) SetMetricsForMachines(_ clusterv1alpha1.MachineList) error { return nil } diff --git a/pkg/cloudprovider/provider/hetzner/types/types.go b/pkg/cloudprovider/provider/hetzner/types/types.go deleted file mode 100644 index dd1b86471..000000000 --- a/pkg/cloudprovider/provider/hetzner/types/types.go +++ /dev/null @@ -1,40 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package types - -import ( - "github.com/kubermatic/machine-controller/pkg/jsonutil" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" -) - -type RawConfig struct { - Token providerconfigtypes.ConfigVarString `json:"token,omitempty"` - ServerType providerconfigtypes.ConfigVarString `json:"serverType"` - Datacenter providerconfigtypes.ConfigVarString `json:"datacenter"` - Image providerconfigtypes.ConfigVarString `json:"image"` - Location providerconfigtypes.ConfigVarString `json:"location"` - PlacementGroupPrefix providerconfigtypes.ConfigVarString `json:"placementGroupPrefix"` - Networks []providerconfigtypes.ConfigVarString `json:"networks"` - Firewalls []providerconfigtypes.ConfigVarString `json:"firewalls"` - Labels map[string]string `json:"labels,omitempty"` -} - -func GetConfig(pconfig providerconfigtypes.Config) (*RawConfig, error) { - rawConfig := &RawConfig{} - - return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) -} diff --git a/pkg/cloudprovider/provider/kubevirt/OWNERS b/pkg/cloudprovider/provider/kubevirt/OWNERS deleted file mode 100644 index ac28b34e2..000000000 --- a/pkg/cloudprovider/provider/kubevirt/OWNERS +++ /dev/null @@ -1,13 +0,0 @@ -# See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md - -approvers: - - sig-virtualization - -reviewers: - - sig-virtualization - -labels: - - sig/virtualization - -options: - no_parent_owners: true diff --git a/pkg/cloudprovider/provider/kubevirt/provider.go b/pkg/cloudprovider/provider/kubevirt/provider.go index 
044439b1b..40aa9b9e9 100644 --- a/pkg/cloudprovider/provider/kubevirt/provider.go +++ b/pkg/cloudprovider/provider/kubevirt/provider.go @@ -27,39 +27,38 @@ import ( "strings" "time" - kubevirtv1 "kubevirt.io/api/core/v1" - cdiv1beta1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" - - "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - cloudprovidererrors "github.com/kubermatic/machine-controller/pkg/cloudprovider/errors" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/instance" - kubevirttypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/kubevirt/types" - cloudprovidertypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/types" - netutil "github.com/kubermatic/machine-controller/pkg/cloudprovider/util" - controllerutil "github.com/kubermatic/machine-controller/pkg/controller/util" - "github.com/kubermatic/machine-controller/pkg/providerconfig" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + "go.uber.org/zap" + kubevirtcorev1 "kubevirt.io/api/core/v1" + cdicorev1beta1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" + + cloudprovidererrors "k8c.io/machine-controller/pkg/cloudprovider/errors" + "k8c.io/machine-controller/pkg/cloudprovider/instance" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + controllerutil "k8c.io/machine-controller/pkg/controller/util" + "k8c.io/machine-controller/sdk/apis/cluster/common" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + kubevirttypes "k8c.io/machine-controller/sdk/cloudprovider/kubevirt" + "k8c.io/machine-controller/sdk/providerconfig" corev1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" + storagev1 "k8s.io/api/storage/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" - "k8s.io/klog" - utilpointer "k8s.io/utils/pointer" - "sigs.k8s.io/controller-runtime/pkg/client" + "k8s.io/utils/ptr" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) func init() { - if err := kubevirtv1.AddToScheme(scheme.Scheme); err != nil { - klog.Fatalf("failed to add kubevirtv1 to scheme: %v", err) + if err := kubevirtcorev1.AddToScheme(scheme.Scheme); err != nil { + panic(fmt.Sprintf("failed to add kubevirtv1 to scheme: %v", err)) } - if err := cdiv1beta1.AddToScheme(scheme.Scheme); err != nil { - klog.Fatalf("failed to add cdiv1beta1 to scheme: %v", err) + if err := cdicorev1beta1.AddToScheme(scheme.Scheme); err != nil { + panic(fmt.Sprintf("failed to add cdiv1beta1 to scheme: %v", err)) } } @@ -73,38 +72,72 @@ const ( machineDeploymentLabelKey = "md" // httpSource defines the http source type for VM Disk Image. httpSource imageSource = "http" + // registrySource defines the OCI registry source type for VM Disk Image. + registrySource imageSource = "registry" // pvcSource defines the pvc source type for VM Disk Image. pvcSource imageSource = "pvc" + // topologyRegionKey and topologyZoneKey on PVC is a topology-aware volume provisioners will automatically set + // node affinity constraints on a PersistentVolume. + topologyRegionKey = "topology.kubernetes.io/region" + topologyZoneKey = "topology.kubernetes.io/zone" + // clusterNamespace represents the infra cluster namespace, where KubeVirt resources are created. + clusterNamespace = "cluster.x-k8s.io/cluster-namespace" + projectIDLabelName = "kubermatic.k8c.io/project-id" + clusterIDLabelName = "kubermatic.k8c.io/cluster-id" ) type provider struct { - configVarResolver *providerconfig.ConfigVarResolver + configVarResolver providerconfig.ConfigVarResolver } // New returns a Kubevirt provider. 
-func New(configVarResolver *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { +func New(configVarResolver providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return &provider{configVarResolver: configVarResolver} } type Config struct { Kubeconfig string ClusterName string + ProjectID string RestConfig *rest.Config DNSConfig *corev1.PodDNSConfig DNSPolicy corev1.DNSPolicy CPUs string + VCPUs *kubevirtcorev1.CPU + Resources *corev1.ResourceList Memory string Namespace string - OSImageSource *cdiv1beta1.DataVolumeSource + OSImageSource *cdicorev1beta1.DataVolumeSource + StorageTarget StorageTarget StorageClassName string + StorageAccessType corev1.PersistentVolumeAccessMode PVCSize resource.Quantity - Instancetype *kubevirtv1.InstancetypeMatcher - Preference *kubevirtv1.PreferenceMatcher + Instancetype *kubevirtcorev1.InstancetypeMatcher + Preference *kubevirtcorev1.PreferenceMatcher SecondaryDisks []SecondaryDisks NodeAffinityPreset NodeAffinityPreset TopologySpreadConstraints []corev1.TopologySpreadConstraint + Region string + Zone string + EnableNetworkMultiQueue bool + ExtraHeaders []string + ExtraHeadersSecretRef string + DataVolumeSecretRef string + EvictionStrategy kubevirtcorev1.EvictionStrategy + + ProviderNetworkName string + SubnetName string } +// StorageTarget represents targeted storage definition that will be used to provision VirtualMachine volumes. Currently, +// there are two definitions, PVC and Storage. Default value is PVC. 
+type StorageTarget string + +const ( + Storage StorageTarget = "storage" + PVC StorageTarget = "pvc" +) + type AffinityType string const ( @@ -117,8 +150,8 @@ const ( noAffinityType = "" ) -func (p *provider) affinityType(affinityType providerconfigtypes.ConfigVarString) (AffinityType, error) { - podAffinityPresetString, err := p.configVarResolver.GetConfigVarStringValue(affinityType) +func (p *provider) affinityType(affinityType providerconfig.ConfigVarString) (AffinityType, error) { + podAffinityPresetString, err := p.configVarResolver.GetStringValue(affinityType) if err != nil { return "", fmt.Errorf(`failed to parse "podAffinityPreset" field: %w`, err) } @@ -142,13 +175,14 @@ type NodeAffinityPreset struct { } type SecondaryDisks struct { - Name string - Size resource.Quantity - StorageClassName string + Name string + Size resource.Quantity + StorageClassName string + StorageAccessType corev1.PersistentVolumeAccessMode } type kubeVirtServer struct { - vmi kubevirtv1.VirtualMachineInstance + vmi kubevirtcorev1.VirtualMachineInstance } func (k *kubeVirtServer) Name() string { @@ -160,6 +194,9 @@ func (k *kubeVirtServer) ID() string { } func (k *kubeVirtServer) ProviderID() string { + if k.vmi.Name == "" { + return "" + } return "kubevirt://" + k.vmi.Name } @@ -174,7 +211,7 @@ func (k *kubeVirtServer) Addresses() map[string]corev1.NodeAddressType { } func (k *kubeVirtServer) Status() instance.Status { - if k.vmi.Status.Phase == kubevirtv1.Running { + if k.vmi.Status.Phase == kubevirtcorev1.Running { return instance.StatusRunning } return instance.StatusUnknown @@ -182,12 +219,8 @@ func (k *kubeVirtServer) Status() instance.Status { var _ instance.Instance = &kubeVirtServer{} -func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *providerconfigtypes.Config, error) { - if provSpec.Value == nil { - return nil, nil, fmt.Errorf("machine.spec.providerconfig.value is nil") - } - - pconfig, err := providerconfigtypes.GetConfig(provSpec) +func (p 
*provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *providerconfig.Config, error) { + pconfig, err := providerconfig.GetConfig(provSpec) if err != nil { return nil, nil, err } @@ -215,7 +248,7 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p } else { // Environment variable or secret reference was used for providing the value of kubeconfig // We have to be lenient in this case and allow unencoded values as well. - config.Kubeconfig, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.Auth.Kubeconfig, "KUBEVIRT_KUBECONFIG") + config.Kubeconfig, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.Auth.Kubeconfig, "KUBEVIRT_KUBECONFIG") if err != nil { return nil, nil, fmt.Errorf(`failed to get value of "kubeconfig" field: %w`, err) } @@ -227,9 +260,34 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p } } - config.ClusterName, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.ClusterName) + var enableNetworkMultiQueueSet bool + config.EnableNetworkMultiQueue, enableNetworkMultiQueueSet, err = p.configVarResolver.GetBoolValue(rawConfig.VirtualMachine.EnableNetworkMultiQueue) if err != nil { - return nil, nil, fmt.Errorf(`failed to get value of "clusterName" field: %w`, err) + return nil, nil, fmt.Errorf(`failed to get value of "enableNetworkMultiQueue" field: %w`, err) + } + + if !enableNetworkMultiQueueSet { + config.EnableNetworkMultiQueue = true + } + + clusterID, exists := os.LookupEnv("CLUSTER_ID") + if clusterID == "" || !exists { + config.ClusterName, err = p.configVarResolver.GetStringValue(rawConfig.ClusterName) + if err != nil { + return nil, nil, fmt.Errorf(`failed to get value of "clusterName" field: %w`, err) + } + } else { + config.ClusterName = clusterID + } + + projectID, exists := os.LookupEnv("PROJECT_ID") + if projectID == "" || !exists { + config.ProjectID, err = p.configVarResolver.GetStringValue(rawConfig.ProjectID) + if err != nil { 
+ return nil, nil, fmt.Errorf(`failed to get value of "projectID" field: %w`, err) + } + } else { + config.ProjectID = projectID } config.RestConfig, err = clientcmd.RESTConfigFromKubeConfig([]byte(config.Kubeconfig)) @@ -237,38 +295,67 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p return nil, nil, fmt.Errorf("failed to decode kubeconfig: %w", err) } - config.CPUs, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.VirtualMachine.Template.CPUs) + cpus, err := p.configVarResolver.GetStringValue(rawConfig.VirtualMachine.Template.CPUs) if err != nil { return nil, nil, fmt.Errorf(`failed to get value of "cpus" field: %w`, err) } - config.Memory, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.VirtualMachine.Template.Memory) + + memory, err := p.configVarResolver.GetStringValue(rawConfig.VirtualMachine.Template.Memory) if err != nil { return nil, nil, fmt.Errorf(`failed to get value of "memory" field: %w`, err) } - config.Namespace = getNamespace() - config.OSImageSource, err = p.parseOSImageSource(rawConfig.VirtualMachine.Template.PrimaryDisk, config.Namespace) + if rawConfig.VirtualMachine.Instancetype == nil { + config.Resources, config.VCPUs, err = parseResources(cpus, memory, rawConfig.VirtualMachine.Template.VCPUs) + if err != nil { + return nil, nil, fmt.Errorf(`failed to configure resource requests and limits and vcpus: %w`, err) + } + } + + config.Namespace = getNamespace() + if len(rawConfig.VirtualMachine.Template.PrimaryDisk.ExtraHeaders) > 0 { + config.ExtraHeaders = rawConfig.VirtualMachine.Template.PrimaryDisk.ExtraHeaders + } + dataVolumeSecretRef, err := p.configVarResolver.GetStringValue(rawConfig.VirtualMachine.Template.PrimaryDisk.DataVolumeSecretRef) + if err != nil { + return nil, nil, fmt.Errorf(`failed to get value of "dataVolumeSecretRef" field: %w`, err) + } + config.DataVolumeSecretRef = dataVolumeSecretRef + extraHeadersSecretRef, err := 
p.configVarResolver.GetStringValue(rawConfig.VirtualMachine.Template.PrimaryDisk.ExtraHeadersSecretRef) + if err != nil { + return nil, nil, fmt.Errorf(`failed to get value of "extraHeadersSecretRef" field: %w`, err) + } + config.ExtraHeadersSecretRef = extraHeadersSecretRef + if len(config.ExtraHeaders) > 0 && extraHeadersSecretRef != "" { + return nil, nil, errors.New(`field "extraHeaders" and "extraHeadersSecretRef" are mutually exclusive`) + } + config.OSImageSource, err = p.parseOSImageSource(rawConfig.VirtualMachine.Template.PrimaryDisk, &config) if err != nil { return nil, nil, fmt.Errorf(`failed to get value of "osImageSource" field: %w`, err) } - pvcSize, err := p.configVarResolver.GetConfigVarStringValue(rawConfig.VirtualMachine.Template.PrimaryDisk.Size) + storageTarget, err := p.configVarResolver.GetStringValue(rawConfig.VirtualMachine.Template.PrimaryDisk.StorageTarget) + if err != nil { + return nil, nil, fmt.Errorf(`failed to get value of "storageTarget" field: %w`, err) + } + config.StorageTarget = StorageTarget(storageTarget) + + pvcSize, err := p.configVarResolver.GetStringValue(rawConfig.VirtualMachine.Template.PrimaryDisk.Size) if err != nil { return nil, nil, fmt.Errorf(`failed to get value of "pvcSize" field: %w`, err) } if config.PVCSize, err = resource.ParseQuantity(pvcSize); err != nil { return nil, nil, fmt.Errorf(`failed to parse value of "pvcSize" field: %w`, err) } - config.StorageClassName, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.VirtualMachine.Template.PrimaryDisk.StorageClassName) + config.StorageClassName, err = p.configVarResolver.GetStringValue(rawConfig.VirtualMachine.Template.PrimaryDisk.StorageClassName) if err != nil { return nil, nil, fmt.Errorf(`failed to get value of "storageClassName" field: %w`, err) } - // Instancetype and Preference config.Instancetype = rawConfig.VirtualMachine.Instancetype config.Preference = rawConfig.VirtualMachine.Preference - dnsPolicyString, err := 
p.configVarResolver.GetConfigVarStringValue(rawConfig.VirtualMachine.DNSPolicy) + dnsPolicyString, err := p.configVarResolver.GetStringValue(rawConfig.VirtualMachine.DNSPolicy) if err != nil { return nil, nil, fmt.Errorf(`failed to parse "dnsPolicy" field: %w`, err) } @@ -281,28 +368,14 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p if rawConfig.VirtualMachine.DNSConfig != nil { config.DNSConfig = rawConfig.VirtualMachine.DNSConfig } - config.SecondaryDisks = make([]SecondaryDisks, 0, len(rawConfig.VirtualMachine.Template.SecondaryDisks)) - for i, sd := range rawConfig.VirtualMachine.Template.SecondaryDisks { - sdSizeString, err := p.configVarResolver.GetConfigVarStringValue(sd.Size) - if err != nil { - return nil, nil, fmt.Errorf(`failed to parse "secondaryDisks.size" field: %w`, err) - } - pvc, err := resource.ParseQuantity(sdSizeString) - if err != nil { - return nil, nil, fmt.Errorf(`failed to parse value of "secondaryDisks.size" field: %w`, err) - } - - scString, err := p.configVarResolver.GetConfigVarStringValue(sd.StorageClassName) - if err != nil { - return nil, nil, fmt.Errorf(`failed to parse value of "secondaryDisks.storageClass" field: %w`, err) - } - config.SecondaryDisks = append(config.SecondaryDisks, SecondaryDisks{ - Name: fmt.Sprintf("secondarydisk%d", i), - Size: pvc, - StorageClassName: scString, - }) + infraClient, err := ctrlruntimeclient.New(config.RestConfig, ctrlruntimeclient.Options{}) + if err != nil { + return nil, nil, fmt.Errorf("failed to get kubevirt client: %w", err) + } + config.StorageAccessType, config.SecondaryDisks, err = p.configureStorage(infraClient, rawConfig.VirtualMachine.Template) + if err != nil { + return nil, nil, fmt.Errorf(`failed to configure storage: %w`, err) } - config.NodeAffinityPreset, err = p.parseNodeAffinityPreset(rawConfig.Affinity.NodeAffinityPreset) if err != nil { return nil, nil, fmt.Errorf(`failed to parse "nodeAffinityPreset" field: %w`, err) @@ -312,9 +385,50 @@ 
func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p return nil, nil, fmt.Errorf(`failed to parse "topologySpreadConstraints" field: %w`, err) } + if rawConfig.VirtualMachine.Location != nil { + config.Zone = rawConfig.VirtualMachine.Location.Zone + config.Region = rawConfig.VirtualMachine.Location.Region + } + + if rawConfig.VirtualMachine.ProviderNetwork != nil { + config.ProviderNetworkName = rawConfig.VirtualMachine.ProviderNetwork.Name + if rawConfig.VirtualMachine.ProviderNetwork.VPC.Subnet != nil { + config.SubnetName = rawConfig.VirtualMachine.ProviderNetwork.VPC.Subnet.Name + } + } + + if rawConfig.VirtualMachine.EvictionStrategy != "" { + config.EvictionStrategy = kubevirtcorev1.EvictionStrategy(rawConfig.VirtualMachine.EvictionStrategy) + } + return &config, pconfig, nil } +func (p *provider) getStorageAccessType(ctx context.Context, accessType providerconfig.ConfigVarString, + infraClient ctrlruntimeclient.Client, storageClassName string) (corev1.PersistentVolumeAccessMode, error) { + at, _ := p.configVarResolver.GetStringValue(accessType) + if at == "" { + sp := &cdicorev1beta1.StorageProfile{} + if err := infraClient.Get(ctx, types.NamespacedName{Name: storageClassName}, sp); err != nil { + return "", fmt.Errorf(`failed to get cdi storageprofile: %w`, err) + } + + // choose RWO as a default access mode and if RWX is supported then choose it instead. 
+ accessMode := corev1.ReadWriteOnce + for _, claimProperty := range sp.Status.ClaimPropertySets { + for _, am := range claimProperty.AccessModes { + if am == corev1.ReadWriteMany { + accessMode = corev1.ReadWriteMany + } + } + } + + return accessMode, nil + } + + return corev1.PersistentVolumeAccessMode(at), nil +} + func (p *provider) parseNodeAffinityPreset(nodeAffinityPreset kubevirttypes.NodeAffinityPreset) (NodeAffinityPreset, error) { nodeAffinity := NodeAffinityPreset{} var err error @@ -322,13 +436,13 @@ func (p *provider) parseNodeAffinityPreset(nodeAffinityPreset kubevirttypes.Node if err != nil { return nodeAffinity, fmt.Errorf(`failed to parse "nodeAffinity.type" field: %w`, err) } - nodeAffinity.Key, err = p.configVarResolver.GetConfigVarStringValue(nodeAffinityPreset.Key) + nodeAffinity.Key, err = p.configVarResolver.GetStringValue(nodeAffinityPreset.Key) if err != nil { return nodeAffinity, fmt.Errorf(`failed to parse "nodeAffinity.key" field: %w`, err) } nodeAffinity.Values = make([]string, 0, len(nodeAffinityPreset.Values)) for _, v := range nodeAffinityPreset.Values { - valueString, err := p.configVarResolver.GetConfigVarStringValue(v) + valueString, err := p.configVarResolver.GetStringValue(v) if err != nil { return nodeAffinity, fmt.Errorf(`failed to parse "nodeAffinity.value" field: %w`, err) } @@ -340,7 +454,7 @@ func (p *provider) parseNodeAffinityPreset(nodeAffinityPreset kubevirttypes.Node func (p *provider) parseTopologySpreadConstraint(topologyConstraints []kubevirttypes.TopologySpreadConstraint) ([]corev1.TopologySpreadConstraint, error) { parsedTopologyConstraints := make([]corev1.TopologySpreadConstraint, 0, len(topologyConstraints)) for _, constraint := range topologyConstraints { - maxSkewString, err := p.configVarResolver.GetConfigVarStringValue(constraint.MaxSkew) + maxSkewString, err := p.configVarResolver.GetStringValue(constraint.MaxSkew) if err != nil { return nil, fmt.Errorf(`failed to parse "topologySpreadConstraint.maxSkew" 
field: %w`, err) } @@ -348,11 +462,11 @@ func (p *provider) parseTopologySpreadConstraint(topologyConstraints []kubevirtt if err != nil { return nil, fmt.Errorf(`failed to parse "topologySpreadConstraint.maxSkew" field: %w`, err) } - topologyKey, err := p.configVarResolver.GetConfigVarStringValue(constraint.TopologyKey) + topologyKey, err := p.configVarResolver.GetStringValue(constraint.TopologyKey) if err != nil { return nil, fmt.Errorf(`failed to parse "topologySpreadConstraint.topologyKey" field: %w`, err) } - whenUnsatisfiable, err := p.configVarResolver.GetConfigVarStringValue(constraint.WhenUnsatisfiable) + whenUnsatisfiable, err := p.configVarResolver.GetStringValue(constraint.WhenUnsatisfiable) if err != nil { return nil, fmt.Errorf(`failed to parse "topologySpreadConstraint.whenUnsatisfiable" field: %w`, err) } @@ -365,35 +479,81 @@ func (p *provider) parseTopologySpreadConstraint(topologyConstraints []kubevirtt return parsedTopologyConstraints, nil } -func (p *provider) parseOSImageSource(primaryDisk kubevirttypes.PrimaryDisk, namespace string) (*cdiv1beta1.DataVolumeSource, error) { - osImage, err := p.configVarResolver.GetConfigVarStringValue(primaryDisk.OsImage) +func (p *provider) parseOSImageSource(primaryDisk kubevirttypes.PrimaryDisk, config *Config) (*cdicorev1beta1.DataVolumeSource, error) { + osImage, err := p.configVarResolver.GetStringValue(primaryDisk.OsImage) if err != nil { return nil, fmt.Errorf(`failed to get value of "primaryDisk.osImage" field: %w`, err) } - osImageSource, err := p.configVarResolver.GetConfigVarStringValue(primaryDisk.Source) + osImageSource, err := p.configVarResolver.GetStringValue(primaryDisk.Source) if err != nil { return nil, fmt.Errorf(`failed to get value of "primaryDisk.source" field: %w`, err) } + pullMethod, err := p.getPullMethod(primaryDisk.PullMethod) + if err != nil { + return nil, fmt.Errorf(`failed to get value of "primaryDisk.pullMethod" field: %w`, err) + } switch imageSource(osImageSource) { case 
httpSource: - return &cdiv1beta1.DataVolumeSource{HTTP: &cdiv1beta1.DataVolumeSourceHTTP{URL: osImage}}, nil + extraHeaders, err := getHTTPExtraHeaders(config) + if err != nil { + return nil, fmt.Errorf(`failed to get value of "primaryDisk.extraHeaders" field: %w`, err) + } + return &cdicorev1beta1.DataVolumeSource{HTTP: &cdicorev1beta1.DataVolumeSourceHTTP{URL: osImage, ExtraHeaders: extraHeaders, SecretRef: config.DataVolumeSecretRef}}, nil + case registrySource: + return registryDataVolume(osImage, pullMethod), nil case pvcSource: if namespaceAndName := strings.Split(osImage, "/"); len(namespaceAndName) >= 2 { - return &cdiv1beta1.DataVolumeSource{PVC: &cdiv1beta1.DataVolumeSourcePVC{Name: namespaceAndName[1], Namespace: namespaceAndName[0]}}, nil + return &cdicorev1beta1.DataVolumeSource{PVC: &cdicorev1beta1.DataVolumeSourcePVC{Name: namespaceAndName[1], Namespace: namespaceAndName[0]}}, nil } - return &cdiv1beta1.DataVolumeSource{PVC: &cdiv1beta1.DataVolumeSourcePVC{Name: osImage, Namespace: namespace}}, nil + return &cdicorev1beta1.DataVolumeSource{PVC: &cdicorev1beta1.DataVolumeSourcePVC{Name: osImage, Namespace: config.Namespace}}, nil default: // handle old API for backward compatibility. 
- if _, err = url.ParseRequestURI(osImage); err == nil { - return &cdiv1beta1.DataVolumeSource{HTTP: &cdiv1beta1.DataVolumeSourceHTTP{URL: osImage}}, nil + if srcURL, err := url.ParseRequestURI(osImage); err == nil { + if srcURL.Scheme == cdicorev1beta1.RegistrySchemeDocker || srcURL.Scheme == cdicorev1beta1.RegistrySchemeOci { + return registryDataVolume(osImage, pullMethod), nil + } + + extraHeaders, err := getHTTPExtraHeaders(config) + if err != nil { + return nil, fmt.Errorf(`failed to get value of "primaryDisk.extraHeaders" field: %w`, err) + } + + return &cdicorev1beta1.DataVolumeSource{HTTP: &cdicorev1beta1.DataVolumeSourceHTTP{URL: osImage, ExtraHeaders: extraHeaders, SecretRef: config.DataVolumeSecretRef}}, nil } if namespaceAndName := strings.Split(osImage, "/"); len(namespaceAndName) >= 2 { - return &cdiv1beta1.DataVolumeSource{PVC: &cdiv1beta1.DataVolumeSourcePVC{Name: namespaceAndName[1], Namespace: namespaceAndName[0]}}, nil + return &cdicorev1beta1.DataVolumeSource{PVC: &cdicorev1beta1.DataVolumeSourcePVC{Name: namespaceAndName[1], Namespace: namespaceAndName[0]}}, nil } - return &cdiv1beta1.DataVolumeSource{PVC: &cdiv1beta1.DataVolumeSourcePVC{Name: osImage, Namespace: namespace}}, nil + return &cdicorev1beta1.DataVolumeSource{PVC: &cdicorev1beta1.DataVolumeSourcePVC{Name: osImage, Namespace: config.Namespace}}, nil } } +func getHTTPExtraHeaders(config *Config) ([]string, error) { + var extraHeaders []string + if config.ExtraHeadersSecretRef != "" { + sigClient, err := ctrlruntimeclient.New(config.RestConfig, ctrlruntimeclient.Options{}) + if err != nil { + return nil, fmt.Errorf("failed to get kubevirt client: %w", err) + } + + extraHeadersSecretRef := &corev1.Secret{} + if err := sigClient.Get(context.TODO(), types.NamespacedName{Namespace: config.Namespace, Name: config.ExtraHeadersSecretRef}, + extraHeadersSecretRef); err != nil { + return nil, fmt.Errorf("failed to get extra headers secret: %w", err) + } + + for key, val := range 
extraHeadersSecretRef.Data { + trimmedVal := strings.TrimSuffix(string(val), "\n") + extraHeaders = append(extraHeaders, fmt.Sprintf("%v: %v", key, trimmedVal)) + } + } + + if len(config.ExtraHeaders) > 0 { + extraHeaders = config.ExtraHeaders + } + + return extraHeaders, nil +} + // getNamespace returns the namespace where the VM is created. // VM is created in a dedicated namespace // which is the namespace where the machine-controller pod is running. @@ -407,7 +567,31 @@ func getNamespace() string { return ns } -func (p *provider) Get(ctx context.Context, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { +func (p *provider) getPullMethod(pullMethod providerconfig.ConfigVarString) (cdicorev1beta1.RegistryPullMethod, error) { + resolvedPM, err := p.configVarResolver.GetStringValue(pullMethod) + if err != nil { + return "", err + } + switch pm := cdicorev1beta1.RegistryPullMethod(resolvedPM); pm { + case cdicorev1beta1.RegistryPullNode, cdicorev1beta1.RegistryPullPod: + return pm, nil + case "": + return cdicorev1beta1.RegistryPullNode, nil + default: + return "", fmt.Errorf("unsupported value: %v", resolvedPM) + } +} + +func registryDataVolume(imageURL string, pullMethod cdicorev1beta1.RegistryPullMethod) *cdicorev1beta1.DataVolumeSource { + return &cdicorev1beta1.DataVolumeSource{ + Registry: &cdicorev1beta1.DataVolumeSourceRegistry{ + URL: &imageURL, + PullMethod: &pullMethod, + }, + } +} + +func (p *provider) Get(ctx context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { c, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return nil, cloudprovidererrors.TerminalError{ @@ -415,22 +599,22 @@ func (p *provider) Get(ctx context.Context, machine *clusterv1alpha1.Machine, _ Message: fmt.Sprintf("Failed to parse MachineSpec, due to %v", err), } } - sigClient, err := client.New(c.RestConfig, client.Options{}) + sigClient, 
err := ctrlruntimeclient.New(c.RestConfig, ctrlruntimeclient.Options{}) if err != nil { return nil, fmt.Errorf("failed to get kubevirt client: %w", err) } - virtualMachine := &kubevirtv1.VirtualMachine{} + virtualMachine := &kubevirtcorev1.VirtualMachine{} if err := sigClient.Get(ctx, types.NamespacedName{Namespace: c.Namespace, Name: machine.Name}, virtualMachine); err != nil { - if !kerrors.IsNotFound(err) { + if !apierrors.IsNotFound(err) { return nil, fmt.Errorf("failed to get VirtualMachine %s: %w", machine.Name, err) } return nil, cloudprovidererrors.ErrInstanceNotFound } - virtualMachineInstance := &kubevirtv1.VirtualMachineInstance{} + virtualMachineInstance := &kubevirtcorev1.VirtualMachineInstance{} if err := sigClient.Get(ctx, types.NamespacedName{Namespace: c.Namespace, Name: machine.Name}, virtualMachineInstance); err != nil { - if kerrors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return &kubeVirtServer{}, nil } @@ -445,29 +629,17 @@ func (p *provider) Get(ctx context.Context, machine *clusterv1alpha1.Machine, _ return nil, cloudprovidererrors.ErrInstanceNotFound } - if virtualMachineInstance.Status.Phase == kubevirtv1.Failed || - // The VMI enters phase succeeded if someone issues a kubectl - // delete pod on the virt-launcher pod it runs in - virtualMachineInstance.Status.Phase == kubevirtv1.Succeeded { - // The pod got deleted, delete the VMI and return ErrNotFound so the VMI - // will get recreated - if err := sigClient.Delete(ctx, virtualMachineInstance); err != nil { - return nil, fmt.Errorf("failed to delete failed VMI %s: %w", machine.Name, err) - } - return nil, cloudprovidererrors.ErrInstanceNotFound - } - return &kubeVirtServer{vmi: *virtualMachineInstance}, nil } // We don't use the UID for kubevirt because the name of a VMI must stay stable // in order for the node name to stay stable. The operator is responsible for ensuring // there are no conflicts, e.G. by using one Namespace per Kubevirt user cluster. 
-func (p *provider) MigrateUID(_ context.Context, _ *clusterv1alpha1.Machine, _ types.UID) error { +func (p *provider) MigrateUID(_ context.Context, _ *zap.SugaredLogger, _ *clusterv1alpha1.Machine, _ types.UID) error { return nil } -func (p *provider) Validate(ctx context.Context, spec clusterv1alpha1.MachineSpec) error { +func (p *provider) Validate(ctx context.Context, _ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) error { c, pc, err := p.getConfig(spec.ProviderSpec) if err != nil { return fmt.Errorf("failed to parse config: %w", err) @@ -475,48 +647,65 @@ func (p *provider) Validate(ctx context.Context, spec clusterv1alpha1.MachineSpe // If instancetype is specified, skip CPU and Memory validation. // Values will come from instancetype. if c.Instancetype == nil { - if _, err := parseResources(c.CPUs, c.Memory); err != nil { - return err + if c.Resources == nil { + return errors.New("no resource requests set for the virtual machine") + } + + if c.VCPUs == nil && c.Resources.Cpu().IsZero() { + return errors.New("no CPUs configured. 
Either vCPUs or CPUs have to be set") + } + + if c.VCPUs != nil && !c.Resources.Cpu().IsZero() { + return errors.New("vCPUs and CPUs cannot be configured at the same time") } } - sigClient, err := client.New(c.RestConfig, client.Options{}) + sigClient, err := ctrlruntimeclient.New(c.RestConfig, ctrlruntimeclient.Options{}) if err != nil { return fmt.Errorf("failed to get kubevirt client: %w", err) } if _, ok := kubevirttypes.SupportedOS[pc.OperatingSystem]; !ok { - return fmt.Errorf("invalid/not supported operating system specified %q: %w", pc.OperatingSystem, providerconfigtypes.ErrOSNotSupported) + return fmt.Errorf("invalid/not supported operating system specified %q: %w", pc.OperatingSystem, providerconfig.ErrOSNotSupported) } if c.DNSPolicy == corev1.DNSNone { if c.DNSConfig == nil || len(c.DNSConfig.Nameservers) == 0 { - return fmt.Errorf("dns config must be specified when dns policy is None") + return errors.New("dns config must be specified when dns policy is None") } } // Check if we can reach the API of the target cluster. 
- vmi := &kubevirtv1.VirtualMachineInstance{} - if err := sigClient.Get(ctx, types.NamespacedName{Namespace: c.Namespace, Name: "not-expected-to-exist"}, vmi); err != nil && !kerrors.IsNotFound(err) { + vmi := &kubevirtcorev1.VirtualMachineInstance{} + if err := sigClient.Get(ctx, types.NamespacedName{Namespace: c.Namespace, Name: "not-expected-to-exist"}, vmi); err != nil && !apierrors.IsNotFound(err) { return fmt.Errorf("failed to request VirtualMachineInstances: %w", err) } - return nil -} + if c.EvictionStrategy != "" { + if c.EvictionStrategy != kubevirtcorev1.EvictionStrategyExternal && + c.EvictionStrategy != kubevirtcorev1.EvictionStrategyLiveMigrate { + return fmt.Errorf("unsupported vm eviction strategy: %s", c.EvictionStrategy) + } + } -func (p *provider) AddDefaults(spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { - return spec, nil + return nil } -func (p *provider) GetCloudConfig(spec clusterv1alpha1.MachineSpec) (config string, name string, err error) { +func (p *provider) AddDefaults(_ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { c, _, err := p.getConfig(spec.ProviderSpec) if err != nil { - return "", "", fmt.Errorf("failed to parse config: %w", err) + return spec, err } - cc := kubevirttypes.CloudConfig{ - Namespace: c.Namespace, + annotations := spec.Annotations + if annotations == nil { + annotations = make(map[string]string) } - ccs, err := cc.String() - return ccs, string(providerconfigtypes.CloudProviderExternal), err + annotations[clusterNamespace] = c.Namespace + spec.Annotations = annotations + if err := appendTopologiesLabels(context.TODO(), c, spec.Labels); err != nil { + return spec, err + } + + return spec, nil } func (p *provider) MachineMetricsLabels(machine *clusterv1alpha1.Machine) (map[string]string, error) { @@ -538,14 +727,14 @@ func (p *provider) MachineMetricsLabels(machine *clusterv1alpha1.Machine) (map[s type machineDeploymentNameGetter func() (string, 
error) -func machineDeploymentNameAndRevisionForMachineGetter(ctx context.Context, machine *clusterv1alpha1.Machine, c client.Client) machineDeploymentNameGetter { +func machineDeploymentNameAndRevisionForMachineGetter(ctx context.Context, machine *clusterv1alpha1.Machine, c ctrlruntimeclient.Client) machineDeploymentNameGetter { mdName, _, err := controllerutil.GetMachineDeploymentNameAndRevisionForMachine(ctx, machine, c) return func() (string, error) { return mdName, err } } -func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { +func (p *provider) Create(ctx context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { c, pc, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return nil, cloudprovidererrors.TerminalError{ @@ -554,15 +743,23 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, } } - sigClient, err := client.New(c.RestConfig, client.Options{}) + sigClient, err := ctrlruntimeclient.New(c.RestConfig, ctrlruntimeclient.Options{}) if err != nil { return nil, fmt.Errorf("failed to get kubevirt client: %w", err) } userDataSecretName := fmt.Sprintf("userdata-%s-%s", machine.Name, strconv.Itoa(int(time.Now().Unix()))) + labels := map[string]string{} + if err := appendTopologiesLabels(ctx, c, labels); err != nil { + return nil, fmt.Errorf("failed to append labels: %w", err) + } + + for key, val := range machine.Labels { + labels[key] = val + } - virtualMachine, err := p.newVirtualMachine(ctx, c, pc, machine, userDataSecretName, userdata, - machineDeploymentNameAndRevisionForMachineGetter(ctx, machine, data.Client), randomMacAddressGetter, sigClient) + virtualMachine, err := p.newVirtualMachine(c, pc, machine, labels, userDataSecretName, userdata, + machineDeploymentNameAndRevisionForMachineGetter(ctx, machine, 
data.Client)) if err != nil { return nil, fmt.Errorf("could not create a VirtualMachine manifest %w", err) } @@ -575,7 +772,7 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, ObjectMeta: metav1.ObjectMeta{ Name: userDataSecretName, Namespace: virtualMachine.Namespace, - OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(virtualMachine, kubevirtv1.VirtualMachineGroupVersionKind)}, + OwnerReferences: []metav1.OwnerReference{*metav1.NewControllerRef(virtualMachine, kubevirtcorev1.VirtualMachineGroupVersionKind)}, }, Data: map[string][]byte{"userdata": []byte(userdata)}, } @@ -585,17 +782,20 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, return &kubeVirtServer{}, nil } -func (p *provider) newVirtualMachine(ctx context.Context, c *Config, pc *providerconfigtypes.Config, machine *clusterv1alpha1.Machine, - userdataSecretName, userdata string, mdNameGetter machineDeploymentNameGetter, macAddressGetter macAddressGetter, sigClient client.Client) (*kubevirtv1.VirtualMachine, error) { +func (p *provider) newVirtualMachine(c *Config, pc *providerconfig.Config, machine *clusterv1alpha1.Machine, + labels map[string]string, userdataSecretName, userdata string, mdNameGetter machineDeploymentNameGetter) (*kubevirtcorev1.VirtualMachine, error) { // We add the timestamp because the secret name must be different when we recreate the VMI // because its pod got deleted // The secret has an ownerRef on the VMI so garbace collection will take care of cleaning up. 
terminationGracePeriodSeconds := int64(30) - evictionStrategy := kubevirtv1.EvictionStrategyExternal + evictionStrategy := kubevirtcorev1.EvictionStrategyExternal + if c.EvictionStrategy != "" { + evictionStrategy = c.EvictionStrategy + } - resourceRequirements := kubevirtv1.ResourceRequirements{} - labels := map[string]string{"kubevirt.io/vm": machine.Name} + resourceRequirements := kubevirtcorev1.ResourceRequirements{} + labels["kubevirt.io/vm"] = machine.Name //Add a common label to all VirtualMachines spawned by the same MachineDeployment (= MachineDeployment name). if mdName, err := mdNameGetter(); err == nil { labels[machineDeploymentLabelKey] = mdName @@ -603,64 +803,81 @@ func (p *provider) newVirtualMachine(ctx context.Context, c *Config, pc *provide // if no instancetype, resources are from config. if c.Instancetype == nil { - requestsAndLimits, err := parseResources(c.CPUs, c.Memory) - if err != nil { - return nil, err - } - resourceRequirements.Requests = *requestsAndLimits - resourceRequirements.Limits = *requestsAndLimits + resourceRequirements.Requests = *c.Resources + resourceRequirements.Limits = *c.Resources } // Add cluster labels labels["cluster.x-k8s.io/cluster-name"] = c.ClusterName labels["cluster.x-k8s.io/role"] = "worker" + labels[projectIDLabelName] = c.ProjectID + labels[clusterIDLabelName] = c.ClusterName var ( dataVolumeName = machine.Name - annotations map[string]string + annotations = map[string]string{} + dvAnnotations = map[string]string{} ) // Add machineName as prefix to secondaryDisks. 
addPrefixToSecondaryDisk(c.SecondaryDisks, dataVolumeName) - if pc.OperatingSystem == providerconfigtypes.OperatingSystemFlatcar { - annotations = map[string]string{ - "kubevirt.io/ignitiondata": userdata, + if pc.OperatingSystem == providerconfig.OperatingSystemFlatcar { + annotations["kubevirt.io/ignitiondata"] = userdata + } + + annotations["kubevirt.io/allow-pod-bridge-network-live-migration"] = "true" + + if err := setOVNAnnotations(c, annotations); err != nil { + return nil, fmt.Errorf("failed to set OVN annotations: %w", err) + } + + for k, v := range machine.Annotations { + if strings.HasPrefix(k, "cdi.kubevirt.io") { + dvAnnotations[k] = v + continue } + + annotations[k] = v } - defaultBridgeNetwork, err := defaultBridgeNetwork(macAddressGetter) - if err != nil { - return nil, fmt.Errorf("could not compute a random MAC address") + defaultBridgeNetwork := defaultBridgeNetwork() + runStrategy := kubevirtcorev1.RunStrategyOnce + // Currently we only support KubeOvn as a ProviderNetwork; KubeOvn has the ability to pin the IP of the VM (static IP) + // even if the VMI was stopped or deleted, thus we can have the VM always running, and in the event of VM restarts the + // IP address of the VMI will not change.
+ if c.SubnetName != "" { + runStrategy = kubevirtcorev1.RunStrategyAlways } - virtualMachine := &kubevirtv1.VirtualMachine{ + virtualMachine := &kubevirtcorev1.VirtualMachine{ ObjectMeta: metav1.ObjectMeta{ Name: machine.Name, Namespace: c.Namespace, Labels: labels, }, - Spec: kubevirtv1.VirtualMachineSpec{ - Running: utilpointer.Bool(true), + Spec: kubevirtcorev1.VirtualMachineSpec{ + RunStrategy: &runStrategy, Instancetype: c.Instancetype, Preference: c.Preference, - Template: &kubevirtv1.VirtualMachineInstanceTemplateSpec{ + Template: &kubevirtcorev1.VirtualMachineInstanceTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Annotations: annotations, Labels: labels, }, - Spec: kubevirtv1.VirtualMachineInstanceSpec{ + Spec: kubevirtcorev1.VirtualMachineInstanceSpec{ EvictionStrategy: &evictionStrategy, - Networks: []kubevirtv1.Network{ - *kubevirtv1.DefaultPodNetwork(), + Networks: []kubevirtcorev1.Network{ + *kubevirtcorev1.DefaultPodNetwork(), }, - Domain: kubevirtv1.DomainSpec{ - Devices: kubevirtv1.Devices{ - Disks: getVMDisks(c), - Interfaces: []kubevirtv1.Interface{*defaultBridgeNetwork}, + Domain: kubevirtcorev1.DomainSpec{ + Devices: kubevirtcorev1.Devices{ + Interfaces: []kubevirtcorev1.Interface{*defaultBridgeNetwork}, + Disks: getVMDisks(c), + NetworkInterfaceMultiQueue: ptr.To(c.EnableNetworkMultiQueue), }, Resources: resourceRequirements, }, - Affinity: getAffinity(c, machineDeploymentLabelKey, labels[machineDeploymentLabelKey]), + Affinity: getAffinity(c), TerminationGracePeriodSeconds: &terminationGracePeriodSeconds, Volumes: getVMVolumes(c, dataVolumeName, userdataSecretName), DNSPolicy: c.DNSPolicy, @@ -668,13 +885,20 @@ func (p *provider) newVirtualMachine(ctx context.Context, c *Config, pc *provide TopologySpreadConstraints: getTopologySpreadConstraints(c, map[string]string{machineDeploymentLabelKey: labels[machineDeploymentLabelKey]}), }, }, - DataVolumeTemplates: getDataVolumeTemplates(c, dataVolumeName), + DataVolumeTemplates: 
getDataVolumeTemplates(c, dataVolumeName, dvAnnotations), }, } + + if c.VCPUs != nil { + virtualMachine.Spec.Template.Spec.Domain.CPU = &kubevirtcorev1.CPU{ + Cores: c.VCPUs.Cores, + } + } + return virtualMachine, nil } -func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (bool, error) { +func (p *provider) Cleanup(ctx context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (bool, error) { c, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return false, cloudprovidererrors.TerminalError{ @@ -682,39 +906,44 @@ func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine Message: fmt.Sprintf("Failed to parse MachineSpec, due to %v", err), } } - sigClient, err := client.New(c.RestConfig, client.Options{}) + sigClient, err := ctrlruntimeclient.New(c.RestConfig, ctrlruntimeclient.Options{}) if err != nil { return false, fmt.Errorf("failed to get kubevirt client: %w", err) } - vm := &kubevirtv1.VirtualMachine{} + vm := &kubevirtcorev1.VirtualMachine{} if err := sigClient.Get(ctx, types.NamespacedName{Namespace: c.Namespace, Name: machine.Name}, vm); err != nil { - if !kerrors.IsNotFound(err) { + if !apierrors.IsNotFound(err) { return false, fmt.Errorf("failed to get VirtualMachineInstance %s: %w", machine.Name, err) } - // VMI is gone return true, nil } return false, sigClient.Delete(ctx, vm) } -func parseResources(cpus, memory string) (*corev1.ResourceList, error) { +func parseResources(cpus, memory string, vpcus kubevirttypes.VCPUs) (*corev1.ResourceList, *kubevirtcorev1.CPU, error) { memoryResource, err := resource.ParseQuantity(memory) if err != nil { - return nil, fmt.Errorf("failed to parse memory requests: %w", err) + return nil, nil, fmt.Errorf("failed to parse memory requests: %w", err) + } + + if vpcus.Cores != 0 { + return &corev1.ResourceList{corev1.ResourceMemory: memoryResource}, 
&kubevirtcorev1.CPU{Cores: uint32(vpcus.Cores)}, nil } + cpuResource, err := resource.ParseQuantity(cpus) if err != nil { - return nil, fmt.Errorf("failed to parse cpu request: %w", err) + return nil, nil, fmt.Errorf("failed to parse cpu requests: %w", err) } + return &corev1.ResourceList{ corev1.ResourceMemory: memoryResource, corev1.ResourceCPU: cpuResource, - }, nil + }, nil, nil } -func (p *provider) SetMetricsForMachines(machines clusterv1alpha1.MachineList) error { +func (p *provider) SetMetricsForMachines(_ clusterv1alpha1.MachineList) error { return nil } @@ -733,60 +962,44 @@ func dnsPolicy(policy string) (corev1.DNSPolicy, error) { return "", fmt.Errorf("unknown dns policy: %s", policy) } -func getVMDisks(config *Config) []kubevirtv1.Disk { - disks := []kubevirtv1.Disk{ +func getVMDisks(config *Config) []kubevirtcorev1.Disk { + disks := []kubevirtcorev1.Disk{ { Name: "datavolumedisk", - DiskDevice: kubevirtv1.DiskDevice{Disk: &kubevirtv1.DiskTarget{Bus: "virtio"}}, + DiskDevice: kubevirtcorev1.DiskDevice{Disk: &kubevirtcorev1.DiskTarget{Bus: "virtio"}}, }, { Name: "cloudinitdisk", - DiskDevice: kubevirtv1.DiskDevice{Disk: &kubevirtv1.DiskTarget{Bus: "virtio"}}, + DiskDevice: kubevirtcorev1.DiskDevice{Disk: &kubevirtcorev1.DiskTarget{Bus: "virtio"}}, }, } for _, sd := range config.SecondaryDisks { - disks = append(disks, kubevirtv1.Disk{ + disks = append(disks, kubevirtcorev1.Disk{ Name: sd.Name, - DiskDevice: kubevirtv1.DiskDevice{Disk: &kubevirtv1.DiskTarget{Bus: "virtio"}}, + DiskDevice: kubevirtcorev1.DiskDevice{Disk: &kubevirtcorev1.DiskTarget{Bus: "virtio"}}, }) } return disks } -type macAddressGetter func() (string, error) - -func randomMacAddressGetter() (string, error) { - mac, err := netutil.GenerateRandMAC() - if err != nil { - return "", err - } - return mac.String(), nil +func defaultBridgeNetwork() *kubevirtcorev1.Interface { + return kubevirtcorev1.DefaultBridgeNetworkInterface() } -func defaultBridgeNetwork(macAddressGetter 
macAddressGetter) (*kubevirtv1.Interface, error) { - defaultBridgeNetwork := kubevirtv1.DefaultBridgeNetworkInterface() - mac, err := macAddressGetter() - if err != nil { - return nil, err - } - defaultBridgeNetwork.MacAddress = mac - return defaultBridgeNetwork, nil -} - -func getVMVolumes(config *Config, dataVolumeName string, userDataSecretName string) []kubevirtv1.Volume { - volumes := []kubevirtv1.Volume{ +func getVMVolumes(config *Config, dataVolumeName string, userDataSecretName string) []kubevirtcorev1.Volume { + volumes := []kubevirtcorev1.Volume{ { Name: "datavolumedisk", - VolumeSource: kubevirtv1.VolumeSource{ - DataVolume: &kubevirtv1.DataVolumeSource{ + VolumeSource: kubevirtcorev1.VolumeSource{ + DataVolume: &kubevirtcorev1.DataVolumeSource{ Name: dataVolumeName, }, }, }, { Name: "cloudinitdisk", - VolumeSource: kubevirtv1.VolumeSource{ - CloudInitNoCloud: &kubevirtv1.CloudInitNoCloudSource{ + VolumeSource: kubevirtcorev1.VolumeSource{ + CloudInitNoCloud: &kubevirtcorev1.CloudInitNoCloudSource{ UserDataSecretRef: &corev1.LocalObjectReference{ Name: userDataSecretName, }, @@ -795,10 +1008,10 @@ func getVMVolumes(config *Config, dataVolumeName string, userDataSecretName stri }, } for _, sd := range config.SecondaryDisks { - volumes = append(volumes, kubevirtv1.Volume{ + volumes = append(volumes, kubevirtcorev1.Volume{ Name: sd.Name, - VolumeSource: kubevirtv1.VolumeSource{ - DataVolume: &kubevirtv1.DataVolumeSource{ + VolumeSource: kubevirtcorev1.VolumeSource{ + DataVolume: &kubevirtcorev1.DataVolumeSource{ Name: sd.Name, }}, }) @@ -806,39 +1019,55 @@ func getVMVolumes(config *Config, dataVolumeName string, userDataSecretName stri return volumes } -func getDataVolumeTemplates(config *Config, dataVolumeName string) []kubevirtv1.DataVolumeTemplateSpec { +func getDataVolumeTemplates(config *Config, dataVolumeName string, annotations map[string]string) []kubevirtcorev1.DataVolumeTemplateSpec { pvcRequest := corev1.ResourceList{corev1.ResourceStorage: 
config.PVCSize} - dataVolumeTemplates := []kubevirtv1.DataVolumeTemplateSpec{ + dataVolumeTemplates := []kubevirtcorev1.DataVolumeTemplateSpec{ { ObjectMeta: metav1.ObjectMeta{ - Name: dataVolumeName, + Name: dataVolumeName, + Annotations: annotations, }, - Spec: cdiv1beta1.DataVolumeSpec{ - PVC: &corev1.PersistentVolumeClaimSpec{ - StorageClassName: utilpointer.String(config.StorageClassName), - AccessModes: []corev1.PersistentVolumeAccessMode{ - "ReadWriteOnce", - }, - Resources: corev1.ResourceRequirements{ - Requests: pvcRequest, - }, - }, + Spec: cdicorev1beta1.DataVolumeSpec{ Source: config.OSImageSource, }, }, } + + switch config.StorageTarget { + case PVC: + dataVolumeTemplates[0].Spec.PVC = &corev1.PersistentVolumeClaimSpec{ + StorageClassName: ptr.To(config.StorageClassName), + AccessModes: []corev1.PersistentVolumeAccessMode{ + config.StorageAccessType, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: pvcRequest, + }, + } + default: + dataVolumeTemplates[0].Spec.Storage = &cdicorev1beta1.StorageSpec{ + StorageClassName: ptr.To(config.StorageClassName), + AccessModes: []corev1.PersistentVolumeAccessMode{ + config.StorageAccessType, + }, + Resources: corev1.VolumeResourceRequirements{ + Requests: pvcRequest, + }, + } + } + for _, sd := range config.SecondaryDisks { - dataVolumeTemplates = append(dataVolumeTemplates, kubevirtv1.DataVolumeTemplateSpec{ + dataVolumeTemplates = append(dataVolumeTemplates, kubevirtcorev1.DataVolumeTemplateSpec{ ObjectMeta: metav1.ObjectMeta{ Name: sd.Name, }, - Spec: cdiv1beta1.DataVolumeSpec{ + Spec: cdicorev1beta1.DataVolumeSpec{ PVC: &corev1.PersistentVolumeClaimSpec{ - StorageClassName: utilpointer.String(sd.StorageClassName), + StorageClassName: ptr.To(sd.StorageClassName), AccessModes: []corev1.PersistentVolumeAccessMode{ - "ReadWriteOnce", + config.StorageAccessType, }, - Resources: corev1.ResourceRequirements{ + Resources: corev1.VolumeResourceRequirements{ Requests: 
corev1.ResourceList{corev1.ResourceStorage: sd.Size}, }, }, @@ -849,7 +1078,7 @@ func getDataVolumeTemplates(config *Config, dataVolumeName string) []kubevirtv1. return dataVolumeTemplates } -func getAffinity(config *Config, matchKey, matchValue string) *corev1.Affinity { +func getAffinity(config *Config) *corev1.Affinity { affinity := &corev1.Affinity{} expressions := []corev1.NodeSelectorRequirement{ @@ -916,3 +1145,112 @@ func getTopologySpreadConstraints(config *Config, matchLabels map[string]string) }, } } + +func appendTopologiesLabels(ctx context.Context, c *Config, labels map[string]string) error { + if labels == nil { + labels = map[string]string{} + } + // trying to get region and zone from the storage class + err := getStorageTopologies(ctx, c.StorageClassName, c, labels) + if err != nil { + return fmt.Errorf("failed to get storage topologies: %w", err) + } + + // if regions are explicitly set then we read them from the configs + if c.Region != "" { + labels[topologyRegionKey] = c.Region + } + + if c.Zone != "" { + labels[topologyZoneKey] = c.Zone + } + + return nil +} + +func getStorageTopologies(ctx context.Context, storageClassName string, c *Config, labels map[string]string) error { + kubeClient, err := ctrlruntimeclient.New(c.RestConfig, ctrlruntimeclient.Options{}) + if err != nil { + return fmt.Errorf("failed to get kubevirt client: %w", err) + } + + sc := &storagev1.StorageClass{} + if err := kubeClient.Get(ctx, types.NamespacedName{Name: storageClassName}, sc); err != nil { + return err + } + + for _, topology := range sc.AllowedTopologies { + for _, exp := range topology.MatchLabelExpressions { + if exp.Key == topologyRegionKey { + if exp.Values == nil || len(exp.Values) != 1 { + // found multiple or no regions available. 
One zone/region is allowed + return nil + } + + labels[topologyRegionKey] = exp.Values[0] + continue + } + + if exp.Key == topologyZoneKey { + if exp.Values == nil || len(exp.Values) != 1 { + // found multiple or no zones available. One zone/region is allowed + return nil + } + + labels[topologyZoneKey] = exp.Values[0] + } + } + } + + return nil +} + +func setOVNAnnotations(c *Config, annotations map[string]string) error { + annotations["ovn.kubernetes.io/allow_live_migration"] = "true" + if c.SubnetName != "" { + annotations["ovn.kubernetes.io/logical_switch"] = c.SubnetName + } + + return nil +} + +func (p *provider) configureStorage(infraClient ctrlruntimeclient.Client, template kubevirttypes.Template) (corev1.PersistentVolumeAccessMode, []SecondaryDisks, error) { + secondaryDisks := make([]SecondaryDisks, 0, len(template.SecondaryDisks)) + for i, sd := range template.SecondaryDisks { + sdSizeString, err := p.configVarResolver.GetStringValue(sd.Size) + if err != nil { + return "", nil, fmt.Errorf(`failed to parse "secondaryDisks.size" field: %w`, err) + } + pvc, err := resource.ParseQuantity(sdSizeString) + if err != nil { + return "", nil, fmt.Errorf(`failed to parse value of "secondaryDisks.size" field: %w`, err) + } + + scString, err := p.configVarResolver.GetStringValue(sd.StorageClassName) + if err != nil { + return "", nil, fmt.Errorf(`failed to parse value of "secondaryDisks.storageClass" field: %w`, err) + } + storageAccessMode, err := p.getStorageAccessType(context.TODO(), sd.StorageAccessType, infraClient, scString) + if err != nil { + return "", nil, fmt.Errorf(`failed to get value of storageAccessMode: %w`, err) + } + secondaryDisks = append(secondaryDisks, SecondaryDisks{ + Name: fmt.Sprintf("secondarydisk%d", i), + Size: pvc, + StorageClassName: scString, + StorageAccessType: storageAccessMode, + }) + } + scString, err := p.configVarResolver.GetStringValue(template.PrimaryDisk.StorageClassName) + if err != nil { + return "", nil, fmt.Errorf(`failed 
to parse value of "primaryDisk.storageClass" field: %w`, err) + } + + primaryDisk, err := p.getStorageAccessType(context.TODO(), template.PrimaryDisk.StorageAccessType, + infraClient, scString) + if err != nil { + return "", nil, fmt.Errorf(`failed to get value of primaryDiskstorageAccessType: %w`, err) + } + + return primaryDisk, secondaryDisks, nil +} diff --git a/pkg/cloudprovider/provider/kubevirt/provider_test.go b/pkg/cloudprovider/provider/kubevirt/provider_test.go index 16cb82573..c09796742 100644 --- a/pkg/cloudprovider/provider/kubevirt/provider_test.go +++ b/pkg/cloudprovider/provider/kubevirt/provider_test.go @@ -25,17 +25,19 @@ import ( "reflect" "testing" - kubevirtv1 "kubevirt.io/api/core/v1" + "github.com/google/go-cmp/cmp" + kubevirtcorev1 "kubevirt.io/api/core/v1" + cdicorev1beta1 "kubevirt.io/containerized-data-importer-api/pkg/apis/core/v1beta1" - cloudprovidertesting "github.com/kubermatic/machine-controller/pkg/cloudprovider/testing" - "github.com/kubermatic/machine-controller/pkg/providerconfig" + cloudprovidertesting "k8c.io/machine-controller/pkg/cloudprovider/testing" + "k8c.io/machine-controller/sdk/cloudprovider/kubevirt" + "k8c.io/machine-controller/sdk/providerconfig/configvar" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/apimachinery/pkg/util/diff" ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" fakectrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" ) @@ -45,7 +47,7 @@ var ( vmManifestsFS embed.FS vmDir = "testdata" fakeclient ctrlruntimeclient.WithWatch - expectedVms map[string]*kubevirtv1.VirtualMachine + expectedVms map[string]*kubevirtcorev1.VirtualMachine ) func init() { @@ -56,14 +58,21 @@ func init() { type kubevirtProviderSpecConf struct { OsImageDV string // if OsImage from DV and not from http source - Instancetype 
*kubevirtv1.InstancetypeMatcher - Preference *kubevirtv1.PreferenceMatcher + Instancetype *kubevirtcorev1.InstancetypeMatcher + Preference *kubevirtcorev1.PreferenceMatcher + StorageTarget StorageTarget OperatingSystem string TopologySpreadConstraint bool Affinity bool AffinityValues bool SecondaryDisks bool OsImageSource imageSource + OsImageSourceURL string + PullMethod cdicorev1beta1.RegistryPullMethod + ProviderNetwork *kubevirt.ProviderNetwork + ExtraHeadersSet bool + EvictStrategy string + VCPUs uint32 } func (k kubevirtProviderSpecConf) rawProviderSpec(t *testing.T) []byte { @@ -97,6 +106,20 @@ func (k kubevirtProviderSpecConf) rawProviderSpec(t *testing.T) []byte { }, {{- end }} "virtualMachine": { + {{- if .EvictStrategy }} + "evictionStrategy": "LiveMigrate", + {{- end }} + {{- if .ProviderNetwork }} + "providerNetwork": { + "name": "kubeovn", + "vpc": { + "name": "test-vpc", + "subnet": { + "name": "test-subnet" + } + } + }, + {{- end }} {{- if .Instancetype }} "instancetype": { "name": "{{ .Instancetype.Name }}", @@ -110,21 +133,39 @@ func (k kubevirtProviderSpecConf) rawProviderSpec(t *testing.T) []byte { }, {{- end }} "template": { + {{- if .VCPUs }} + "vcpus": { + "cores": {{ .VCPUs }} + }, + {{- else }} "cpus": "2", + {{- end }} "memory": "2Gi", {{- if .SecondaryDisks }} "secondaryDisks": [{ "size": "20Gi", + "storageAccessType": "ReadWriteMany", "storageClassName": "longhorn2"},{ "size": "30Gi", + "storageAccessType": "ReadWriteMany", "storageClassName": "longhorn3"}], {{- end }} "primaryDisk": { + {{- if .ExtraHeadersSet }} + "extraHeaders": ["authorization: Basic bXE6cGFzc3dvcmQ="], + {{- end }} + "storageAccessType": "ReadWriteMany", + {{- if .StorageTarget }} + "storageTarget": "{{ .StorageTarget }}", + {{- end }} {{- if .OsImageDV }} "osImage": "{{ .OsImageDV }}", {{- else }} - "osImage": "http://x.y.z.t/ubuntu.img", + "osImage": "{{ if .OsImageSourceURL }}{{ .OsImageSourceURL }}{{ else }}http://x.y.z.t/ubuntu.img{{ end }}", {{- end }} + {{- 
if .PullMethod }} + "pullMethod": "{{ .PullMethod }}", + {{- end}} "size": "10Gi", {{- if .OsImageSource }} "source": "{{ .OsImageSource }}", @@ -166,14 +207,20 @@ func TestNewVirtualMachine(t *testing.T) { name: "nominal-case", specConf: kubevirtProviderSpecConf{}, }, + { + name: "extra-headers-set", + specConf: kubevirtProviderSpecConf{ + ExtraHeadersSet: true, + }, + }, { name: "instancetype-preference-standard", specConf: kubevirtProviderSpecConf{ - Instancetype: &kubevirtv1.InstancetypeMatcher{ + Instancetype: &kubevirtcorev1.InstancetypeMatcher{ Name: "standard-it", Kind: "VirtualMachineInstancetype", }, - Preference: &kubevirtv1.PreferenceMatcher{ + Preference: &kubevirtcorev1.PreferenceMatcher{ Name: "standard-pref", Kind: "VirtualMachinePreference", }, @@ -182,16 +229,23 @@ func TestNewVirtualMachine(t *testing.T) { { name: "instancetype-preference-custom", specConf: kubevirtProviderSpecConf{ - Instancetype: &kubevirtv1.InstancetypeMatcher{ + Instancetype: &kubevirtcorev1.InstancetypeMatcher{ Name: "custom-it", Kind: "VirtualMachineClusterInstancetype", }, - Preference: &kubevirtv1.PreferenceMatcher{ + Preference: &kubevirtcorev1.PreferenceMatcher{ Name: "custom-pref", Kind: "VirtualMachineClusterPreference", }, }, }, + { + name: "kubeovn-provider-network", + specConf: kubevirtProviderSpecConf{ + ProviderNetwork: &kubevirt.ProviderNetwork{Name: "KubeOVN", VPC: kubevirt.VPC{Name: "test-vpc", Subnet: &kubevirt.Subnet{ + Name: "test-subnet", + }}}}, + }, { name: "topologyspreadconstraints", specConf: kubevirtProviderSpecConf{TopologySpreadConstraint: true}, @@ -212,20 +266,40 @@ func TestNewVirtualMachine(t *testing.T) { name: "custom-local-disk", specConf: kubevirtProviderSpecConf{OsImageDV: "ns/dvname"}, }, + { + name: "use-storage-as-storage-target", + specConf: kubevirtProviderSpecConf{StorageTarget: Storage}, + }, { name: "http-image-source", specConf: kubevirtProviderSpecConf{OsImageSource: httpSource}, }, + { + name: "registry-image-source", + 
specConf: kubevirtProviderSpecConf{OsImageSource: registrySource, OsImageSourceURL: "docker://x.y.z.t/ubuntu.img:latest"}, + }, + { + name: "registry-image-source-pod", + specConf: kubevirtProviderSpecConf{OsImageSource: registrySource, OsImageSourceURL: "docker://x.y.z.t/ubuntu.img:latest", PullMethod: cdicorev1beta1.RegistryPullPod}, + }, { name: "pvc-image-source", specConf: kubevirtProviderSpecConf{OsImageSource: pvcSource, OsImageDV: "ns/dvname"}, }, + { + name: "eviction-strategy-live-migrate", + specConf: kubevirtProviderSpecConf{EvictStrategy: "LiveMigrate"}, + }, + { + name: "dedicated-vcpus", + specConf: kubevirtProviderSpecConf{VCPUs: 2}, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { p := &provider{ // Note that configVarResolver is not used in this test as the getConfigFunc is mocked. - configVarResolver: providerconfig.NewConfigVarResolver(context.Background(), fakeclient), + configVarResolver: configvar.NewResolver(context.Background(), fakeclient), } machine := cloudprovidertesting.Creator{ @@ -240,13 +314,16 @@ func TestNewVirtualMachine(t *testing.T) { } // Do not rely on POD_NAMESPACE env variable, force to known value c.Namespace = testNamespace + labels := map[string]string{} // Check the created VirtualMachine - vm, _ := p.newVirtualMachine(context.TODO(), c, pc, machine, "udsn", userdata, fakeMachineDeploymentNameAndRevisionForMachineGetter(), fixedMacAddressGetter, fakeclient) - vm.TypeMeta.APIVersion, vm.TypeMeta.Kind = kubevirtv1.VirtualMachineGroupVersionKind.ToAPIVersionAndKind() + vm, _ := p.newVirtualMachine(c, pc, machine, labels, "udsn", userdata, fakeMachineDeploymentNameAndRevisionForMachineGetter()) + vm.APIVersion, vm.Kind = kubevirtcorev1.VirtualMachineGroupVersionKind.ToAPIVersionAndKind() if !equality.Semantic.DeepEqual(vm, expectedVms[tt.name]) { - t.Errorf("Diff %v", diff.ObjectGoPrintDiff(expectedVms[tt.name], vm)) + if diff := cmp.Diff(expectedVms[tt.name], vm); diff != "" { + t.Errorf("Diff:\n%s", 
diff) + } } }) } @@ -258,20 +335,16 @@ func fakeMachineDeploymentNameAndRevisionForMachineGetter() machineDeploymentNam } } -func toVirtualMachines(objects []runtime.Object) map[string]*kubevirtv1.VirtualMachine { - vms := make(map[string]*kubevirtv1.VirtualMachine) +func toVirtualMachines(objects []runtime.Object) map[string]*kubevirtcorev1.VirtualMachine { + vms := make(map[string]*kubevirtcorev1.VirtualMachine) for _, o := range objects { - if vm, ok := o.(*kubevirtv1.VirtualMachine); ok { + if vm, ok := o.(*kubevirtcorev1.VirtualMachine); ok { vms[vm.Name] = vm } } return vms } -func fixedMacAddressGetter() (string, error) { - return "b6:f5:b4:fe:45:1d", nil -} - // runtimeFromYaml returns a list of Kubernetes runtime objects from their yaml templates. // It returns the objects for all files included in the ManifestFS folder, skipping (with error log) the yaml files // that would not contain correct yaml files. diff --git a/pkg/cloudprovider/provider/kubevirt/testdata/affinity-no-values.yaml b/pkg/cloudprovider/provider/kubevirt/testdata/affinity-no-values.yaml index f304b2a6e..330e812ff 100644 --- a/pkg/cloudprovider/provider/kubevirt/testdata/affinity-no-values.yaml +++ b/pkg/cloudprovider/provider/kubevirt/testdata/affinity-no-values.yaml @@ -4,6 +4,8 @@ metadata: annotations: labels: cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" cluster.x-k8s.io/role: worker kubevirt.io/vm: affinity-no-values md: md-name @@ -15,9 +17,9 @@ spec: creationTimestamp: null name: affinity-no-values spec: - pvc: + storage: accessModes: - - ReadWriteOnce + - ReadWriteMany resources: requests: storage: 10Gi @@ -25,12 +27,17 @@ spec: source: http: url: http://x.y.z.t/ubuntu.img - running: true + runStrategy: Once template: metadata: + annotations: + "kubevirt.io/allow-pod-bridge-network-live-migration": "true" + "ovn.kubernetes.io/allow_live_migration": "true" labels: cluster.x-k8s.io/cluster-name: 
cluster-name cluster.x-k8s.io/role: worker + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" kubevirt.io/vm: affinity-no-values md: md-name spec: @@ -51,9 +58,9 @@ spec: bus: virtio name: cloudinitdisk interfaces: - - macAddress: b6:f5:b4:fe:45:1d - name: default + - name: default bridge: {} + networkInterfaceMultiqueue: true resources: limits: cpu: "2" diff --git a/pkg/cloudprovider/provider/kubevirt/testdata/affinity.yaml b/pkg/cloudprovider/provider/kubevirt/testdata/affinity.yaml index 309e30d7b..949999709 100644 --- a/pkg/cloudprovider/provider/kubevirt/testdata/affinity.yaml +++ b/pkg/cloudprovider/provider/kubevirt/testdata/affinity.yaml @@ -5,6 +5,8 @@ metadata: labels: cluster.x-k8s.io/cluster-name: cluster-name cluster.x-k8s.io/role: worker + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" kubevirt.io/vm: affinity md: md-name name: affinity @@ -15,9 +17,9 @@ spec: creationTimestamp: null name: affinity spec: - pvc: + storage: accessModes: - - ReadWriteOnce + - ReadWriteMany resources: requests: storage: 10Gi @@ -25,12 +27,17 @@ spec: source: http: url: http://x.y.z.t/ubuntu.img - running: true + runStrategy: Once template: metadata: + annotations: + "kubevirt.io/allow-pod-bridge-network-live-migration": "true" + "ovn.kubernetes.io/allow_live_migration": "true" labels: cluster.x-k8s.io/cluster-name: cluster-name cluster.x-k8s.io/role: worker + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" kubevirt.io/vm: affinity md: md-name spec: @@ -54,9 +61,9 @@ spec: bus: virtio name: cloudinitdisk interfaces: - - macAddress: b6:f5:b4:fe:45:1d - name: default + - name: default bridge: {} + networkInterfaceMultiqueue: true resources: limits: cpu: "2" diff --git a/pkg/cloudprovider/provider/kubevirt/testdata/custom-local-disk.yaml b/pkg/cloudprovider/provider/kubevirt/testdata/custom-local-disk.yaml index e10d07c62..277750e98 100644 --- 
a/pkg/cloudprovider/provider/kubevirt/testdata/custom-local-disk.yaml +++ b/pkg/cloudprovider/provider/kubevirt/testdata/custom-local-disk.yaml @@ -5,6 +5,8 @@ metadata: labels: cluster.x-k8s.io/cluster-name: cluster-name cluster.x-k8s.io/role: worker + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" kubevirt.io/vm: custom-local-disk md: md-name name: custom-local-disk @@ -14,9 +16,9 @@ spec: - metadata: name: custom-local-disk spec: - pvc: + storage: accessModes: - - ReadWriteOnce + - ReadWriteMany resources: requests: storage: 10Gi @@ -25,12 +27,17 @@ spec: pvc: namespace: ns name: dvname - running: true + runStrategy: Once template: metadata: creationTimestamp: null + annotations: + "kubevirt.io/allow-pod-bridge-network-live-migration": "true" + "ovn.kubernetes.io/allow_live_migration": "true" labels: cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" cluster.x-k8s.io/role: worker kubevirt.io/vm: custom-local-disk md: md-name @@ -46,9 +53,9 @@ spec: bus: virtio name: cloudinitdisk interfaces: - - macAddress: b6:f5:b4:fe:45:1d - name: default + - name: default bridge: {} + networkInterfaceMultiqueue: true resources: limits: cpu: "2" diff --git a/pkg/cloudprovider/provider/kubevirt/testdata/dedicated-vcpus.yaml b/pkg/cloudprovider/provider/kubevirt/testdata/dedicated-vcpus.yaml new file mode 100644 index 000000000..8077a010d --- /dev/null +++ b/pkg/cloudprovider/provider/kubevirt/testdata/dedicated-vcpus.yaml @@ -0,0 +1,84 @@ +apiVersion: kubevirt.io/v1 +kind: VirtualMachine +metadata: + annotations: + labels: + cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" + cluster.x-k8s.io/role: worker + kubevirt.io/vm: dedicated-vcpus + md: md-name + name: dedicated-vcpus + namespace: test-namespace +spec: + dataVolumeTemplates: + - metadata: + name: dedicated-vcpus + spec: + storage: + 
accessModes: + - ReadWriteMany + resources: + requests: + storage: 10Gi + storageClassName: longhorn + source: + http: + url: http://x.y.z.t/ubuntu.img + runStrategy: Once + template: + metadata: + creationTimestamp: null + annotations: + "kubevirt.io/allow-pod-bridge-network-live-migration": "true" + "ovn.kubernetes.io/allow_live_migration": "true" + labels: + cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" + cluster.x-k8s.io/role: worker + kubevirt.io/vm: dedicated-vcpus + md: md-name + spec: + affinity: {} + domain: + cpu: + cores: 2 + devices: + disks: + - disk: + bus: virtio + name: datavolumedisk + - disk: + bus: virtio + name: cloudinitdisk + interfaces: + - name: default + bridge: {} + networkInterfaceMultiqueue: true + resources: + limits: + memory: 2Gi + requests: + memory: 2Gi + networks: + - name: default + pod: {} + terminationGracePeriodSeconds: 30 + topologyspreadconstraints: + - maxskew: 1 + topologykey: kubernetes.io/hostname + whenunsatisfiable: ScheduleAnyway + labelselector: + matchlabels: + md: md-name + volumes: + - dataVolume: + name: dedicated-vcpus + name: datavolumedisk + - cloudInitNoCloud: + secretRef: + name: udsn + name: cloudinitdisk + evictionStrategy: External diff --git a/pkg/cloudprovider/provider/kubevirt/testdata/eviction-strategy-live-migrate.yaml b/pkg/cloudprovider/provider/kubevirt/testdata/eviction-strategy-live-migrate.yaml new file mode 100644 index 000000000..f2f9f5ff6 --- /dev/null +++ b/pkg/cloudprovider/provider/kubevirt/testdata/eviction-strategy-live-migrate.yaml @@ -0,0 +1,84 @@ +apiVersion: kubevirt.io/v1 +kind: VirtualMachine +metadata: + annotations: + labels: + cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" + cluster.x-k8s.io/role: worker + kubevirt.io/vm: eviction-strategy-live-migrate + md: md-name + name: eviction-strategy-live-migrate + namespace: 
test-namespace +spec: + dataVolumeTemplates: + - metadata: + name: eviction-strategy-live-migrate + spec: + storage: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 10Gi + storageClassName: longhorn + source: + http: + url: http://x.y.z.t/ubuntu.img + runStrategy: Once + template: + metadata: + creationTimestamp: null + annotations: + "kubevirt.io/allow-pod-bridge-network-live-migration": "true" + "ovn.kubernetes.io/allow_live_migration": "true" + labels: + cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" + cluster.x-k8s.io/role: worker + kubevirt.io/vm: eviction-strategy-live-migrate + md: md-name + spec: + affinity: {} + domain: + devices: + disks: + - disk: + bus: virtio + name: datavolumedisk + - disk: + bus: virtio + name: cloudinitdisk + interfaces: + - name: default + bridge: {} + networkInterfaceMultiqueue: true + resources: + limits: + cpu: "2" + memory: 2Gi + requests: + cpu: "2" + memory: 2Gi + networks: + - name: default + pod: {} + terminationGracePeriodSeconds: 30 + topologyspreadconstraints: + - maxskew: 1 + topologykey: kubernetes.io/hostname + whenunsatisfiable: ScheduleAnyway + labelselector: + matchlabels: + md: md-name + volumes: + - dataVolume: + name: eviction-strategy-live-migrate + name: datavolumedisk + - cloudInitNoCloud: + secretRef: + name: udsn + name: cloudinitdisk + evictionStrategy: LiveMigrate diff --git a/pkg/cloudprovider/provider/kubevirt/testdata/extra-headers-set.yaml b/pkg/cloudprovider/provider/kubevirt/testdata/extra-headers-set.yaml new file mode 100644 index 000000000..243b3ec54 --- /dev/null +++ b/pkg/cloudprovider/provider/kubevirt/testdata/extra-headers-set.yaml @@ -0,0 +1,86 @@ +apiVersion: kubevirt.io/v1 +kind: VirtualMachine +metadata: + annotations: + labels: + cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" + cluster.x-k8s.io/role: worker + 
kubevirt.io/vm: extra-headers-set + md: md-name + name: extra-headers-set + namespace: test-namespace +spec: + dataVolumeTemplates: + - metadata: + name: extra-headers-set + spec: + storage: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 10Gi + storageClassName: longhorn + source: + http: + url: http://x.y.z.t/ubuntu.img + extraHeaders: + - 'authorization: Basic bXE6cGFzc3dvcmQ=' + runStrategy: Once + template: + metadata: + creationTimestamp: null + annotations: + "kubevirt.io/allow-pod-bridge-network-live-migration": "true" + "ovn.kubernetes.io/allow_live_migration": "true" + labels: + cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" + cluster.x-k8s.io/role: worker + kubevirt.io/vm: extra-headers-set + md: md-name + spec: + affinity: {} + domain: + devices: + disks: + - disk: + bus: virtio + name: datavolumedisk + - disk: + bus: virtio + name: cloudinitdisk + interfaces: + - name: default + bridge: {} + networkInterfaceMultiqueue: true + resources: + limits: + cpu: "2" + memory: 2Gi + requests: + cpu: "2" + memory: 2Gi + networks: + - name: default + pod: {} + terminationGracePeriodSeconds: 30 + topologyspreadconstraints: + - maxskew: 1 + topologykey: kubernetes.io/hostname + whenunsatisfiable: ScheduleAnyway + labelselector: + matchlabels: + md: md-name + volumes: + - dataVolume: + name: extra-headers-set + name: datavolumedisk + - cloudInitNoCloud: + secretRef: + name: udsn + name: cloudinitdisk + evictionStrategy: External diff --git a/pkg/cloudprovider/provider/kubevirt/testdata/http-image-source.yaml b/pkg/cloudprovider/provider/kubevirt/testdata/http-image-source.yaml index 3bf7731e7..0b83b40ee 100644 --- a/pkg/cloudprovider/provider/kubevirt/testdata/http-image-source.yaml +++ b/pkg/cloudprovider/provider/kubevirt/testdata/http-image-source.yaml @@ -5,6 +5,8 @@ metadata: labels: kubevirt.io/vm: http-image-source cluster.x-k8s.io/cluster-name: cluster-name + 
kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" cluster.x-k8s.io/role: worker md: md-name name: http-image-source @@ -14,9 +16,9 @@ spec: - metadata: name: http-image-source spec: - pvc: + storage: accessModes: - - ReadWriteOnce + - ReadWriteMany resources: requests: storage: 10Gi @@ -24,13 +26,18 @@ spec: source: http: url: http://x.y.z.t/ubuntu.img - running: true + runStrategy: Once template: metadata: creationTimestamp: null + annotations: + "kubevirt.io/allow-pod-bridge-network-live-migration": "true" + "ovn.kubernetes.io/allow_live_migration": "true" labels: kubevirt.io/vm: http-image-source cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" cluster.x-k8s.io/role: worker md: md-name spec: @@ -45,9 +52,9 @@ spec: bus: virtio name: cloudinitdisk interfaces: - - macAddress: b6:f5:b4:fe:45:1d - name: default + - name: default bridge: {} + networkInterfaceMultiqueue: true resources: limits: cpu: "2" diff --git a/pkg/cloudprovider/provider/kubevirt/testdata/instancetype-preference-custom.yaml b/pkg/cloudprovider/provider/kubevirt/testdata/instancetype-preference-custom.yaml index 060171c4b..087f627cc 100644 --- a/pkg/cloudprovider/provider/kubevirt/testdata/instancetype-preference-custom.yaml +++ b/pkg/cloudprovider/provider/kubevirt/testdata/instancetype-preference-custom.yaml @@ -4,6 +4,8 @@ metadata: annotations: labels: cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" cluster.x-k8s.io/role: worker kubevirt.io/vm: instancetype-preference-custom md: md-name @@ -15,9 +17,9 @@ spec: creationTimestamp: null name: instancetype-preference-custom spec: - pvc: + storage: accessModes: - - ReadWriteOnce + - ReadWriteMany resources: requests: storage: 10Gi @@ -25,7 +27,7 @@ spec: source: http: url: http://x.y.z.t/ubuntu.img - running: true + runStrategy: Once instancetype: kind: 
VirtualMachineClusterInstancetype name: custom-it @@ -34,8 +36,13 @@ spec: name: custom-pref template: metadata: + annotations: + "kubevirt.io/allow-pod-bridge-network-live-migration": "true" + "ovn.kubernetes.io/allow_live_migration": "true" labels: cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" cluster.x-k8s.io/role: worker kubevirt.io/vm: instancetype-preference-custom md: md-name @@ -51,9 +58,9 @@ spec: bus: virtio name: cloudinitdisk interfaces: - - macAddress: b6:f5:b4:fe:45:1d - name: default + - name: default bridge: {} + networkInterfaceMultiqueue: true networks: - name: default pod: {} diff --git a/pkg/cloudprovider/provider/kubevirt/testdata/instancetype-preference-standard.yaml b/pkg/cloudprovider/provider/kubevirt/testdata/instancetype-preference-standard.yaml index b56229f69..90f5a95c8 100644 --- a/pkg/cloudprovider/provider/kubevirt/testdata/instancetype-preference-standard.yaml +++ b/pkg/cloudprovider/provider/kubevirt/testdata/instancetype-preference-standard.yaml @@ -4,6 +4,8 @@ metadata: annotations: labels: cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" cluster.x-k8s.io/role: worker kubevirt.io/vm: instancetype-preference-standard md: md-name @@ -14,9 +16,9 @@ spec: - metadata: name: instancetype-preference-standard spec: - pvc: + storage: accessModes: - - ReadWriteOnce + - ReadWriteMany resources: requests: storage: 10Gi @@ -24,7 +26,7 @@ spec: source: http: url: http://x.y.z.t/ubuntu.img - running: true + runStrategy: Once instancetype: kind: VirtualMachineInstancetype name: standard-it @@ -34,8 +36,13 @@ spec: template: metadata: creationTimestamp: null + annotations: + "kubevirt.io/allow-pod-bridge-network-live-migration": "true" + "ovn.kubernetes.io/allow_live_migration": "true" labels: cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + 
kubermatic.k8c.io/project-id: "" cluster.x-k8s.io/role: worker kubevirt.io/vm: instancetype-preference-standard md: md-name @@ -51,9 +58,9 @@ spec: bus: virtio name: cloudinitdisk interfaces: - - macAddress: b6:f5:b4:fe:45:1d - name: default + - name: default bridge: {} + networkInterfaceMultiqueue: true networks: - name: default pod: {} diff --git a/pkg/cloudprovider/provider/kubevirt/testdata/kubeovn-provider-network.yaml b/pkg/cloudprovider/provider/kubevirt/testdata/kubeovn-provider-network.yaml new file mode 100644 index 000000000..6da059eae --- /dev/null +++ b/pkg/cloudprovider/provider/kubevirt/testdata/kubeovn-provider-network.yaml @@ -0,0 +1,85 @@ +apiVersion: kubevirt.io/v1 +kind: VirtualMachine +metadata: + annotations: + labels: + cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" + cluster.x-k8s.io/role: worker + kubevirt.io/vm: kubeovn-provider-network + md: md-name + name: kubeovn-provider-network + namespace: test-namespace +spec: + dataVolumeTemplates: + - metadata: + name: kubeovn-provider-network + spec: + storage: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 10Gi + storageClassName: longhorn + source: + http: + url: http://x.y.z.t/ubuntu.img + runStrategy: Always + template: + metadata: + creationTimestamp: null + annotations: + ovn.kubernetes.io/allow_live_migration: "true" + ovn.kubernetes.io/logical_switch: test-subnet + "kubevirt.io/allow-pod-bridge-network-live-migration": "true" + labels: + cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" + cluster.x-k8s.io/role: worker + kubevirt.io/vm: kubeovn-provider-network + md: md-name + spec: + affinity: {} + domain: + devices: + disks: + - disk: + bus: virtio + name: datavolumedisk + - disk: + bus: virtio + name: cloudinitdisk + interfaces: + - name: default + bridge: {} + networkInterfaceMultiqueue: true + resources: + 
limits: + cpu: "2" + memory: 2Gi + requests: + cpu: "2" + memory: 2Gi + networks: + - name: default + pod: {} + terminationGracePeriodSeconds: 30 + topologyspreadconstraints: + - maxskew: 1 + topologykey: kubernetes.io/hostname + whenunsatisfiable: ScheduleAnyway + labelselector: + matchlabels: + md: md-name + volumes: + - dataVolume: + name: kubeovn-provider-network + name: datavolumedisk + - cloudInitNoCloud: + secretRef: + name: udsn + name: cloudinitdisk + evictionStrategy: External diff --git a/pkg/cloudprovider/provider/kubevirt/testdata/nominal-case.yaml b/pkg/cloudprovider/provider/kubevirt/testdata/nominal-case.yaml index 5dddb4b52..bacb6bca4 100644 --- a/pkg/cloudprovider/provider/kubevirt/testdata/nominal-case.yaml +++ b/pkg/cloudprovider/provider/kubevirt/testdata/nominal-case.yaml @@ -4,6 +4,8 @@ metadata: annotations: labels: cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" cluster.x-k8s.io/role: worker kubevirt.io/vm: nominal-case md: md-name @@ -14,9 +16,9 @@ spec: - metadata: name: nominal-case spec: - pvc: + storage: accessModes: - - ReadWriteOnce + - ReadWriteMany resources: requests: storage: 10Gi @@ -24,12 +26,17 @@ spec: source: http: url: http://x.y.z.t/ubuntu.img - running: true + runStrategy: Once template: metadata: creationTimestamp: null + annotations: + "kubevirt.io/allow-pod-bridge-network-live-migration": "true" + "ovn.kubernetes.io/allow_live_migration": "true" labels: cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" cluster.x-k8s.io/role: worker kubevirt.io/vm: nominal-case md: md-name @@ -45,9 +52,9 @@ spec: bus: virtio name: cloudinitdisk interfaces: - - macAddress: b6:f5:b4:fe:45:1d - name: default + - name: default bridge: {} + networkInterfaceMultiqueue: true resources: limits: cpu: "2" diff --git a/pkg/cloudprovider/provider/kubevirt/testdata/pvc-image-source.yaml 
b/pkg/cloudprovider/provider/kubevirt/testdata/pvc-image-source.yaml index 2cafabbd7..671e1aeb0 100644 --- a/pkg/cloudprovider/provider/kubevirt/testdata/pvc-image-source.yaml +++ b/pkg/cloudprovider/provider/kubevirt/testdata/pvc-image-source.yaml @@ -5,6 +5,8 @@ metadata: labels: kubevirt.io/vm: pvc-image-source cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" cluster.x-k8s.io/role: worker md: md-name name: pvc-image-source @@ -14,9 +16,9 @@ spec: - metadata: name: pvc-image-source spec: - pvc: + storage: accessModes: - - ReadWriteOnce + - ReadWriteMany resources: requests: storage: 10Gi @@ -25,13 +27,18 @@ spec: pvc: namespace: ns name: dvname - running: true + runStrategy: Once template: metadata: creationTimestamp: null + annotations: + "kubevirt.io/allow-pod-bridge-network-live-migration": "true" + "ovn.kubernetes.io/allow_live_migration": "true" labels: kubevirt.io/vm: pvc-image-source cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" cluster.x-k8s.io/role: worker md: md-name spec: @@ -46,9 +53,9 @@ spec: bus: virtio name: cloudinitdisk interfaces: - - macAddress: b6:f5:b4:fe:45:1d - name: default + - name: default bridge: {} + networkInterfaceMultiqueue: true resources: limits: cpu: "2" diff --git a/pkg/cloudprovider/provider/kubevirt/testdata/registry-image-source-pod.yaml b/pkg/cloudprovider/provider/kubevirt/testdata/registry-image-source-pod.yaml new file mode 100644 index 000000000..95c9653ec --- /dev/null +++ b/pkg/cloudprovider/provider/kubevirt/testdata/registry-image-source-pod.yaml @@ -0,0 +1,85 @@ +apiVersion: kubevirt.io/v1 +kind: VirtualMachine +metadata: + annotations: + labels: + kubevirt.io/vm: registry-image-source-pod + cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" + cluster.x-k8s.io/role: worker + md: md-name 
+ name: registry-image-source-pod + namespace: test-namespace +spec: + dataVolumeTemplates: + - metadata: + name: registry-image-source-pod + spec: + storage: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 10Gi + storageClassName: longhorn + source: + registry: + url: docker://x.y.z.t/ubuntu.img:latest + pullMethod: pod + runStrategy: Once + template: + metadata: + creationTimestamp: null + annotations: + "kubevirt.io/allow-pod-bridge-network-live-migration": "true" + "ovn.kubernetes.io/allow_live_migration": "true" + labels: + kubevirt.io/vm: registry-image-source-pod + cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" + cluster.x-k8s.io/role: worker + md: md-name + spec: + affinity: {} + domain: + devices: + disks: + - disk: + bus: virtio + name: datavolumedisk + - disk: + bus: virtio + name: cloudinitdisk + interfaces: + - name: default + bridge: {} + networkInterfaceMultiqueue: true + resources: + limits: + cpu: "2" + memory: 2Gi + requests: + cpu: "2" + memory: 2Gi + networks: + - name: default + pod: {} + terminationGracePeriodSeconds: 30 + topologyspreadconstraints: + - maxskew: 1 + topologykey: kubernetes.io/hostname + whenunsatisfiable: ScheduleAnyway + labelselector: + matchlabels: + md: md-name + volumes: + - dataVolume: + name: registry-image-source-pod + name: datavolumedisk + - cloudInitNoCloud: + secretRef: + name: udsn + name: cloudinitdisk + evictionStrategy: External diff --git a/pkg/cloudprovider/provider/kubevirt/testdata/registry-image-source.yaml b/pkg/cloudprovider/provider/kubevirt/testdata/registry-image-source.yaml new file mode 100644 index 000000000..772838704 --- /dev/null +++ b/pkg/cloudprovider/provider/kubevirt/testdata/registry-image-source.yaml @@ -0,0 +1,85 @@ +apiVersion: kubevirt.io/v1 +kind: VirtualMachine +metadata: + annotations: + labels: + kubevirt.io/vm: registry-image-source + cluster.x-k8s.io/cluster-name: cluster-name + 
kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" + cluster.x-k8s.io/role: worker + md: md-name + name: registry-image-source + namespace: test-namespace +spec: + dataVolumeTemplates: + - metadata: + name: registry-image-source + spec: + storage: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 10Gi + storageClassName: longhorn + source: + registry: + url: docker://x.y.z.t/ubuntu.img:latest + pullMethod: node + runStrategy: Once + template: + metadata: + creationTimestamp: null + annotations: + "kubevirt.io/allow-pod-bridge-network-live-migration": "true" + "ovn.kubernetes.io/allow_live_migration": "true" + labels: + kubevirt.io/vm: registry-image-source + cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" + cluster.x-k8s.io/role: worker + md: md-name + spec: + affinity: {} + domain: + devices: + disks: + - disk: + bus: virtio + name: datavolumedisk + - disk: + bus: virtio + name: cloudinitdisk + interfaces: + - name: default + bridge: {} + networkInterfaceMultiqueue: true + resources: + limits: + cpu: "2" + memory: 2Gi + requests: + cpu: "2" + memory: 2Gi + networks: + - name: default + pod: {} + terminationGracePeriodSeconds: 30 + topologyspreadconstraints: + - maxskew: 1 + topologykey: kubernetes.io/hostname + whenunsatisfiable: ScheduleAnyway + labelselector: + matchlabels: + md: md-name + volumes: + - dataVolume: + name: registry-image-source + name: datavolumedisk + - cloudInitNoCloud: + secretRef: + name: udsn + name: cloudinitdisk + evictionStrategy: External diff --git a/pkg/cloudprovider/provider/kubevirt/testdata/secondary-disks.yaml b/pkg/cloudprovider/provider/kubevirt/testdata/secondary-disks.yaml index 576172c93..8f032c439 100644 --- a/pkg/cloudprovider/provider/kubevirt/testdata/secondary-disks.yaml +++ b/pkg/cloudprovider/provider/kubevirt/testdata/secondary-disks.yaml @@ -4,6 +4,8 @@ metadata: annotations: labels: 
cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" cluster.x-k8s.io/role: worker kubevirt.io/vm: secondary-disks md: md-name @@ -14,9 +16,9 @@ spec: - metadata: name: secondary-disks spec: - pvc: + storage: accessModes: - - ReadWriteOnce + - ReadWriteMany resources: requests: storage: 10Gi @@ -29,7 +31,7 @@ spec: spec: pvc: accessModes: - - ReadWriteOnce + - ReadWriteMany resources: requests: storage: 20Gi @@ -42,7 +44,7 @@ spec: spec: pvc: accessModes: - - ReadWriteOnce + - ReadWriteMany resources: requests: storage: 30Gi @@ -50,12 +52,17 @@ spec: source: http: url: http://x.y.z.t/ubuntu.img - running: true + runStrategy: Once template: metadata: creationTimestamp: null + annotations: + "kubevirt.io/allow-pod-bridge-network-live-migration": "true" + "ovn.kubernetes.io/allow_live_migration": "true" labels: cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" cluster.x-k8s.io/role: worker kubevirt.io/vm: secondary-disks md: md-name @@ -77,9 +84,9 @@ spec: bus: virtio name: secondary-disks-secondarydisk1 interfaces: - - macAddress: b6:f5:b4:fe:45:1d - name: default + - name: default bridge: {} + networkInterfaceMultiqueue: true resources: limits: cpu: "2" diff --git a/pkg/cloudprovider/provider/kubevirt/testdata/topologyspreadconstraints.yaml b/pkg/cloudprovider/provider/kubevirt/testdata/topologyspreadconstraints.yaml index a36f6b84d..c28ad187b 100644 --- a/pkg/cloudprovider/provider/kubevirt/testdata/topologyspreadconstraints.yaml +++ b/pkg/cloudprovider/provider/kubevirt/testdata/topologyspreadconstraints.yaml @@ -4,6 +4,8 @@ metadata: annotations: labels: cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" cluster.x-k8s.io/role: worker kubevirt.io/vm: topologyspreadconstraints md: md-name @@ -14,9 +16,9 @@ spec: - metadata: name: 
topologyspreadconstraints spec: - pvc: + storage: accessModes: - - ReadWriteOnce + - ReadWriteMany resources: requests: storage: 10Gi @@ -24,12 +26,17 @@ spec: source: http: url: http://x.y.z.t/ubuntu.img - running: true + runStrategy: Once template: metadata: creationTimestamp: null + annotations: + "kubevirt.io/allow-pod-bridge-network-live-migration": "true" + "ovn.kubernetes.io/allow_live_migration": "true" labels: cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" cluster.x-k8s.io/role: worker kubevirt.io/vm: topologyspreadconstraints md: md-name @@ -45,9 +52,9 @@ spec: bus: virtio name: cloudinitdisk interfaces: - - macAddress: b6:f5:b4:fe:45:1d - name: default + - name: default bridge: {} + networkInterfaceMultiqueue: true resources: limits: cpu: "2" diff --git a/pkg/cloudprovider/provider/kubevirt/testdata/use-storage-as-storage-target.yaml b/pkg/cloudprovider/provider/kubevirt/testdata/use-storage-as-storage-target.yaml new file mode 100644 index 000000000..38879a01c --- /dev/null +++ b/pkg/cloudprovider/provider/kubevirt/testdata/use-storage-as-storage-target.yaml @@ -0,0 +1,85 @@ +apiVersion: kubevirt.io/v1 +kind: VirtualMachine +metadata: + annotations: + labels: + cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" + cluster.x-k8s.io/role: worker + kubevirt.io/vm: use-storage-as-storage-target + md: md-name + name: use-storage-as-storage-target + namespace: test-namespace +spec: + dataVolumeTemplates: + - metadata: + name: use-storage-as-storage-target + annotations: {} + spec: + source: + http: + url: "http://x.y.z.t/ubuntu.img" + storage: + accessModes: + - ReadWriteMany + resources: + requests: + storage: 10Gi + storageClassName: longhorn + runStrategy: Once + template: + metadata: + creationTimestamp: null + annotations: + "kubevirt.io/allow-pod-bridge-network-live-migration": "true" + 
"ovn.kubernetes.io/allow_live_migration": "true" + labels: + cluster.x-k8s.io/cluster-name: cluster-name + kubermatic.k8c.io/cluster-id: "cluster-name" + kubermatic.k8c.io/project-id: "" + cluster.x-k8s.io/role: worker + kubevirt.io/vm: use-storage-as-storage-target + md: md-name + spec: + affinity: {} + domain: + devices: + disks: + - disk: + bus: virtio + name: datavolumedisk + - disk: + bus: virtio + name: cloudinitdisk + interfaces: + - name: default + bridge: {} + networkInterfaceMultiqueue: true + resources: + limits: + cpu: "2" + memory: 2Gi + requests: + cpu: "2" + memory: 2Gi + networks: + - name: default + pod: {} + terminationGracePeriodSeconds: 30 + topologyspreadconstraints: + - maxskew: 1 + topologykey: kubernetes.io/hostname + whenunsatisfiable: ScheduleAnyway + labelselector: + matchlabels: + md: md-name + volumes: + - dataVolume: + name: use-storage-as-storage-target + name: datavolumedisk + - cloudInitNoCloud: + secretRef: + name: udsn + name: cloudinitdisk + evictionStrategy: External diff --git a/pkg/cloudprovider/provider/kubevirt/types/types.go b/pkg/cloudprovider/provider/kubevirt/types/types.go deleted file mode 100644 index 53f8e9dd8..000000000 --- a/pkg/cloudprovider/provider/kubevirt/types/types.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package types - -import ( - kubevirtv1 "kubevirt.io/api/core/v1" - - "github.com/kubermatic/machine-controller/pkg/jsonutil" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" - - corev1 "k8s.io/api/core/v1" -) - -var SupportedOS = map[providerconfigtypes.OperatingSystem]*struct{}{ - providerconfigtypes.OperatingSystemCentOS: nil, - providerconfigtypes.OperatingSystemUbuntu: nil, - providerconfigtypes.OperatingSystemRHEL: nil, - providerconfigtypes.OperatingSystemFlatcar: nil, - providerconfigtypes.OperatingSystemRockyLinux: nil, -} - -type RawConfig struct { - ClusterName providerconfigtypes.ConfigVarString `json:"clusterName"` - Auth Auth `json:"auth,omitempty"` - VirtualMachine VirtualMachine `json:"virtualMachine,omitempty"` - Affinity Affinity `json:"affinity,omitempty"` - TopologySpreadConstraints []TopologySpreadConstraint `json:"topologySpreadConstraints"` -} - -// Auth. -type Auth struct { - Kubeconfig providerconfigtypes.ConfigVarString `json:"kubeconfig,omitempty"` -} - -// VirtualMachine. -type VirtualMachine struct { - // Deprecated: use Instancetype/Preference instead. - Flavor Flavor `json:"flavor,omitempty"` - // Instancetype is optional. - Instancetype *kubevirtv1.InstancetypeMatcher `json:"instancetype,omitempty"` - // Preference is optional. - Preference *kubevirtv1.PreferenceMatcher `json:"preference,omitempty"` - Template Template `json:"template,omitempty"` - DNSPolicy providerconfigtypes.ConfigVarString `json:"dnsPolicy,omitempty"` - DNSConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty"` -} - -// Flavor. -type Flavor struct { - Name providerconfigtypes.ConfigVarString `json:"name,omitempty"` - Profile providerconfigtypes.ConfigVarString `json:"profile,omitempty"` -} - -// Template. 
-type Template struct { - CPUs providerconfigtypes.ConfigVarString `json:"cpus,omitempty"` - Memory providerconfigtypes.ConfigVarString `json:"memory,omitempty"` - PrimaryDisk PrimaryDisk `json:"primaryDisk,omitempty"` - SecondaryDisks []SecondaryDisks `json:"secondaryDisks,omitempty"` -} - -// PrimaryDisk. -type PrimaryDisk struct { - Disk - OsImage providerconfigtypes.ConfigVarString `json:"osImage,omitempty"` - // Source describes the VM Disk Image source. - Source providerconfigtypes.ConfigVarString `json:"source,omitempty"` -} - -// SecondaryDisks. -type SecondaryDisks struct { - Disk -} - -// Disk. -type Disk struct { - Size providerconfigtypes.ConfigVarString `json:"size,omitempty"` - StorageClassName providerconfigtypes.ConfigVarString `json:"storageClassName,omitempty"` -} - -// Affinity. -type Affinity struct { - // Deprecated: Use TopologySpreadConstraint instead. - PodAffinityPreset providerconfigtypes.ConfigVarString `json:"podAffinityPreset,omitempty"` - // Deprecated: Use TopologySpreadConstraint instead. - PodAntiAffinityPreset providerconfigtypes.ConfigVarString `json:"podAntiAffinityPreset,omitempty"` - NodeAffinityPreset NodeAffinityPreset `json:"nodeAffinityPreset,omitempty"` -} - -// NodeAffinityPreset. -type NodeAffinityPreset struct { - Type providerconfigtypes.ConfigVarString `json:"type,omitempty"` - Key providerconfigtypes.ConfigVarString `json:"key,omitempty"` - Values []providerconfigtypes.ConfigVarString `json:"values,omitempty"` -} - -// TopologySpreadConstraint describes topology spread constraints for VMs. -type TopologySpreadConstraint struct { - // MaxSkew describes the degree to which VMs may be unevenly distributed. - MaxSkew providerconfigtypes.ConfigVarString `json:"maxSkew,omitempty"` - // TopologyKey is the key of infra-node labels. - TopologyKey providerconfigtypes.ConfigVarString `json:"topologyKey,omitempty"` - // WhenUnsatisfiable indicates how to deal with a VM if it doesn't satisfy - // the spread constraint. 
- WhenUnsatisfiable providerconfigtypes.ConfigVarString `json:"whenUnsatisfiable,omitempty"` -} - -func GetConfig(pconfig providerconfigtypes.Config) (*RawConfig, error) { - rawConfig := &RawConfig{} - - return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) -} diff --git a/pkg/cloudprovider/provider/linode/provider.go b/pkg/cloudprovider/provider/linode/provider.go index 67b200588..dc0a9e39f 100644 --- a/pkg/cloudprovider/provider/linode/provider.go +++ b/pkg/cloudprovider/provider/linode/provider.go @@ -29,29 +29,29 @@ import ( "time" "github.com/linode/linodego" + "go.uber.org/zap" "golang.org/x/oauth2" - common "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/common/ssh" - cloudprovidererrors "github.com/kubermatic/machine-controller/pkg/cloudprovider/errors" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/instance" - linodetypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/linode/types" - cloudprovidertypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/types" - "github.com/kubermatic/machine-controller/pkg/providerconfig" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" - - v1 "k8s.io/api/core/v1" + "k8c.io/machine-controller/pkg/cloudprovider/common/ssh" + cloudprovidererrors "k8c.io/machine-controller/pkg/cloudprovider/errors" + "k8c.io/machine-controller/pkg/cloudprovider/instance" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + common "k8c.io/machine-controller/sdk/apis/cluster/common" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + linodetypes "k8c.io/machine-controller/sdk/cloudprovider/linode" + "k8c.io/machine-controller/sdk/providerconfig" + + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" 
"k8s.io/apimachinery/pkg/util/sets" ) type provider struct { - configVarResolver *providerconfig.ConfigVarResolver + configVarResolver providerconfig.ConfigVarResolver } // New returns a linode provider. -func New(configVarResolver *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { +func New(configVarResolver providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return &provider{configVarResolver: configVarResolver} } @@ -80,18 +80,12 @@ func (t *TokenSource) Token() (*oauth2.Token, error) { return token, nil } -func getSlugForOS(os providerconfigtypes.OperatingSystem) (string, error) { +func getSlugForOS(os providerconfig.OperatingSystem) (string, error) { switch os { - case providerconfigtypes.OperatingSystemUbuntu: + case providerconfig.OperatingSystemUbuntu: return "linode/ubuntu18.04", nil - - /** - // StackScript for CloudInit is not centos7 ready - case providerconfigtypes.OperatingSystemCentOS: - return "linode/centos7", nil - **/ } - return "", providerconfigtypes.ErrOSNotSupported + return "", providerconfig.ErrOSNotSupported } func getClient(ctx context.Context, token string) linodego.Client { @@ -107,12 +101,8 @@ func getClient(ctx context.Context, token string) linodego.Client { return client } -func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *providerconfigtypes.Config, error) { - if provSpec.Value == nil { - return nil, nil, fmt.Errorf("machine.spec.providerconfig.value is nil") - } - - pconfig, err := providerconfigtypes.GetConfig(provSpec) +func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *providerconfig.Config, error) { + pconfig, err := providerconfig.GetConfig(provSpec) if err != nil { return nil, nil, err } @@ -127,29 +117,29 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p } c := Config{} - c.Token, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.Token, "LINODE_TOKEN") + c.Token, err = 
p.configVarResolver.GetStringValueOrEnv(rawConfig.Token, "LINODE_TOKEN") if err != nil { return nil, nil, fmt.Errorf("failed to get the value of \"token\" field, error = %w", err) } - c.Region, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.Region) + c.Region, err = p.configVarResolver.GetStringValue(rawConfig.Region) if err != nil { return nil, nil, err } - c.Type, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.Type) + c.Type, err = p.configVarResolver.GetStringValue(rawConfig.Type) if err != nil { return nil, nil, err } - c.Backups, _, err = p.configVarResolver.GetConfigVarBoolValue(rawConfig.Backups) + c.Backups, _, err = p.configVarResolver.GetBoolValue(rawConfig.Backups) if err != nil { return nil, nil, err } - c.PrivateNetworking, _, err = p.configVarResolver.GetConfigVarBoolValue(rawConfig.PrivateNetworking) + c.PrivateNetworking, _, err = p.configVarResolver.GetBoolValue(rawConfig.PrivateNetworking) if err != nil { return nil, nil, err } for _, tag := range rawConfig.Tags { - tagVal, err := p.configVarResolver.GetConfigVarStringValue(tag) + tagVal, err := p.configVarResolver.GetStringValue(tag) if err != nil { return nil, nil, err } @@ -159,11 +149,11 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p return &c, pconfig, err } -func (p *provider) AddDefaults(spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { +func (p *provider) AddDefaults(_ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { return spec, nil } -func (p *provider) Validate(ctx context.Context, spec clusterv1alpha1.MachineSpec) error { +func (p *provider) Validate(ctx context.Context, _ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) error { c, pc, err := p.getConfig(spec.ProviderSpec) if err != nil { return fmt.Errorf("failed to parse config: %w", err) @@ -205,13 +195,13 @@ func createRandomPassword() (string, error) { rawRootPass := make([]byte, 50) _, err := 
rand.Read(rawRootPass) if err != nil { - return "", fmt.Errorf("Failed to generate random password") + return "", fmt.Errorf("failed to generate random password: %w", err) } rootPass := base64.StdEncoding.EncodeToString(rawRootPass) return rootPass, nil } -func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { +func (p *provider) Create(ctx context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { c, pc, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return nil, cloudprovidererrors.TerminalError{ @@ -272,8 +262,8 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, return &linodeInstance{linode: linode}, err } -func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (bool, error) { - instance, err := p.Get(ctx, machine, data) +func (p *provider) Cleanup(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (bool, error) { + instance, err := p.Get(ctx, log, machine, data) if err != nil { if errors.Is(err, cloudprovidererrors.ErrInstanceNotFound) { return true, nil @@ -312,7 +302,7 @@ func getListOptions(name string) *linodego.ListOptions { return listOptions } -func (p *provider) Get(ctx context.Context, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { +func (p *provider) Get(ctx context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { c, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return nil, cloudprovidererrors.TerminalError{ @@ -339,7 +329,7 @@ func (p *provider) Get(ctx context.Context, machine *clusterv1alpha1.Machine, _ return nil, 
cloudprovidererrors.ErrInstanceNotFound } -func (p *provider) MigrateUID(ctx context.Context, machine *clusterv1alpha1.Machine, newUID types.UID) error { +func (p *provider) MigrateUID(ctx context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, newUID types.UID) error { c, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return fmt.Errorf("failed to decode providerconfig: %w", err) @@ -375,10 +365,6 @@ func (p *provider) MigrateUID(ctx context.Context, machine *clusterv1alpha1.Mach return nil } -func (p *provider) GetCloudConfig(spec clusterv1alpha1.MachineSpec) (config string, name string, err error) { - return "", "", nil -} - func (p *provider) MachineMetricsLabels(machine *clusterv1alpha1.Machine) (map[string]string, error) { labels := make(map[string]string) @@ -404,15 +390,18 @@ func (d *linodeInstance) ID() string { } func (d *linodeInstance) ProviderID() string { + if d == nil || d.ID() == "" { + return "" + } return fmt.Sprintf("linode://%s", d.ID()) } -func (d *linodeInstance) Addresses() map[string]v1.NodeAddressType { - addresses := map[string]v1.NodeAddressType{} +func (d *linodeInstance) Addresses() map[string]corev1.NodeAddressType { + addresses := map[string]corev1.NodeAddressType{} for _, n := range d.linode.IPv4 { - addresses[n.String()] = v1.NodeInternalIP + addresses[n.String()] = corev1.NodeInternalIP } - addresses[d.linode.IPv6] = v1.NodeInternalIP + addresses[d.linode.IPv6] = corev1.NodeInternalIP return addresses } @@ -456,6 +445,6 @@ func linodeStatusAndErrToTerminalError(err error) error { } } -func (p *provider) SetMetricsForMachines(machines clusterv1alpha1.MachineList) error { +func (p *provider) SetMetricsForMachines(_ clusterv1alpha1.MachineList) error { return nil } diff --git a/pkg/cloudprovider/provider/nutanix/client.go b/pkg/cloudprovider/provider/nutanix/client.go index 332769823..dff3a545c 100644 --- a/pkg/cloudprovider/provider/nutanix/client.go +++ 
b/pkg/cloudprovider/provider/nutanix/client.go @@ -22,23 +22,29 @@ import ( "encoding/json" "errors" "fmt" + "net" + "net/http" + "net/url" + "strconv" "strings" "time" - nutanixclient "github.com/nutanix-cloud-native/prism-go-client" + "github.com/nutanix-cloud-native/prism-go-client/environment/types" nutanixv3 "github.com/nutanix-cloud-native/prism-go-client/v3" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" - cloudprovidererrors "github.com/kubermatic/machine-controller/pkg/cloudprovider/errors" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/instance" - nutanixtypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/nutanix/types" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + cloudprovidererrors "k8c.io/machine-controller/pkg/cloudprovider/errors" + "k8c.io/machine-controller/pkg/cloudprovider/instance" + "k8c.io/machine-controller/sdk/apis/cluster/common" + nutanixtypes "k8c.io/machine-controller/sdk/cloudprovider/nutanix" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) +// Shared client cache to persist between calls. +var clientCache = nutanixv3.NewClientCache(nutanixv3.WithSessionAuth(true)) + const ( invalidCredentials = "invalid Nutanix Credentials" ) @@ -47,6 +53,22 @@ type ClientSet struct { Prism *nutanixv3.Client } +// cachedClientParams implements the nutanixv3.CachedClientParams interface. +type cachedClientParams struct { + managementEndpoint types.ManagementEndpoint + clusterName string +} + +// ManagementEndpoint returns the management endpoint. +func (c *cachedClientParams) ManagementEndpoint() types.ManagementEndpoint { + return c.managementEndpoint +} + +// Key returns a unique key for the client. 
+func (c *cachedClientParams) Key() string { + return c.clusterName +} + func GetClientSet(config *Config) (*ClientSet, error) { if config == nil { return nil, errors.New("no configuration passed") @@ -64,26 +86,67 @@ func GetClientSet(config *Config) (*ClientSet, error) { return nil, errors.New("no endpoint specified") } + if config.ClusterName == "" { + return nil, errors.New("no clusterName specified") + } + // set up 9440 as default port if none is passed via config port := 9440 if config.Port != nil { port = *config.Port } - credentials := nutanixclient.Credentials{ - URL: fmt.Sprintf("%s:%d", config.Endpoint, port), - Endpoint: config.Endpoint, - Port: fmt.Sprint(port), - Username: config.Username, - Password: config.Password, + // Create the management endpoint URL + endpointURL, err := url.Parse(fmt.Sprintf("https://%s", net.JoinHostPort(config.Endpoint, strconv.Itoa(port)))) + if err != nil { + return nil, fmt.Errorf("failed to parse endpoint URL: %w", err) + } + + // Create the management endpoint + managementEndpoint := types.ManagementEndpoint{ + ApiCredentials: types.ApiCredentials{ + Username: config.Username, + Password: config.Password, + }, + Address: endpointURL, Insecure: config.AllowInsecure, } + // Create cached client parameters + cachedParams := &cachedClientParams{ + managementEndpoint: managementEndpoint, + clusterName: config.ClusterName, + } + + // Prepare client options + var clientOptions []nutanixv3.ClientOption + + // Add proxy configuration if provided if config.ProxyURL != "" { - credentials.ProxyURL = config.ProxyURL + proxyURL, err := url.Parse(config.ProxyURL) + if err != nil { + return nil, fmt.Errorf("failed to parse proxy URL: %w", err) + } + + // Create a custom transport with proxy + transport := &http.Transport{ + Proxy: http.ProxyURL(proxyURL), + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * 
time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } + + clientOptions = append(clientOptions, nutanixv3.WithRoundTripper(transport)) } - clientV3, err := nutanixv3.NewV3Client(credentials) + // Get or create the cached client + clientV3, err := clientCache.GetOrCreate(cachedParams, clientOptions...) if err != nil { return nil, err } @@ -93,7 +156,7 @@ func GetClientSet(config *Config) (*ClientSet, error) { }, nil } -func createVM(ctx context.Context, client *ClientSet, name string, conf Config, os providerconfigtypes.OperatingSystem, userdata string) (instance.Instance, error) { +func createVM(ctx context.Context, client *ClientSet, name string, conf Config, userdata string) (instance.Instance, error) { cluster, err := getClusterByName(ctx, client, conf.ClusterName) if err != nil { return nil, err @@ -107,7 +170,7 @@ func createVM(ctx context.Context, client *ClientSet, name string, conf Config, nicList := []*nutanixv3.VMNic{ { SubnetReference: &nutanixv3.Reference{ - Kind: pointer.String(nutanixtypes.SubnetKind), + Kind: ptr.To(nutanixtypes.SubnetKind), UUID: subnet.Metadata.UUID, }, }, @@ -120,7 +183,7 @@ func createVM(ctx context.Context, client *ClientSet, name string, conf Config, } additionalSubnetNic := &nutanixv3.VMNic{ SubnetReference: &nutanixv3.Reference{ - Kind: pointer.String(nutanixtypes.SubnetKind), + Kind: ptr.To(nutanixtypes.SubnetKind), UUID: additionalSubnet.Metadata.UUID, }, } @@ -134,41 +197,41 @@ func createVM(ctx context.Context, client *ClientSet, name string, conf Config, request := &nutanixv3.VMIntentInput{ Metadata: &nutanixv3.Metadata{ - Kind: pointer.String(nutanixtypes.VMKind), + Kind: ptr.To(nutanixtypes.VMKind), Categories: conf.Categories, }, Spec: &nutanixv3.VM{ - Name: pointer.String(name), + Name: ptr.To(name), ClusterReference: &nutanixv3.Reference{ - Kind: pointer.String(nutanixtypes.ClusterKind), + Kind: ptr.To(nutanixtypes.ClusterKind), UUID: cluster.Metadata.UUID, }, }, } 
resources := &nutanixv3.VMResources{ - PowerState: pointer.String("ON"), - NumSockets: pointer.Int64(conf.CPUs), - MemorySizeMib: pointer.Int64(conf.MemoryMB), + PowerState: ptr.To("ON"), + NumSockets: ptr.To(conf.CPUs), + MemorySizeMib: ptr.To(conf.MemoryMB), NicList: nicList, DiskList: []*nutanixv3.VMDisk{ { DeviceProperties: &nutanixv3.VMDiskDeviceProperties{ - DeviceType: pointer.String("DISK"), + DeviceType: ptr.To("DISK"), DiskAddress: &nutanixv3.DiskAddress{ - DeviceIndex: pointer.Int64(0), - AdapterType: pointer.String("SCSI"), + DeviceIndex: ptr.To(int64(0)), + AdapterType: ptr.To("SCSI"), }, }, DataSourceReference: &nutanixv3.Reference{ - Kind: pointer.String(nutanixtypes.ImageKind), + Kind: ptr.To(nutanixtypes.ImageKind), UUID: image.Metadata.UUID, }, }, }, GuestCustomization: &nutanixv3.GuestCustomization{ CloudInit: &nutanixv3.GuestCustomizationCloudInit{ - UserData: pointer.String(base64.StdEncoding.EncodeToString([]byte(userdata))), + UserData: ptr.To(base64.StdEncoding.EncodeToString([]byte(userdata))), }, }, } @@ -180,7 +243,7 @@ func createVM(ctx context.Context, client *ClientSet, name string, conf Config, } request.Metadata.ProjectReference = &nutanixv3.Reference{ - Kind: pointer.String(nutanixtypes.ProjectKind), + Kind: ptr.To(nutanixtypes.ProjectKind), UUID: project.Metadata.UUID, } } @@ -194,7 +257,7 @@ func createVM(ctx context.Context, client *ClientSet, name string, conf Config, } if conf.DiskSizeGB != nil { - resources.DiskList[0].DiskSizeMib = pointer.Int64(*conf.DiskSizeGB * 1024) + resources.DiskList[0].DiskSizeMib = ptr.To(*conf.DiskSizeGB * 1024) } request.Spec.Resources = resources @@ -368,7 +431,7 @@ func getVMByName(ctx context.Context, client *ClientSet, name string, projectID func getIPs(ctx context.Context, client *ClientSet, vmID string, interval time.Duration, timeout time.Duration) (map[string]corev1.NodeAddressType, error) { addresses := make(map[string]corev1.NodeAddressType) - if err := wait.Poll(interval, timeout, func() 
(bool, error) { + err := wait.PollUntilContextTimeout(ctx, interval, timeout, false, func(ctx context.Context) (bool, error) { vm, err := client.Prism.V3.GetVM(ctx, vmID) if err != nil { return false, wrapNutanixError(err) @@ -382,7 +445,8 @@ func getIPs(ctx context.Context, client *ClientSet, vmID string, interval time.D addresses[ip] = corev1.NodeInternalIP return true, nil - }); err != nil { + }) + if err != nil { return map[string]corev1.NodeAddressType{}, err } @@ -390,7 +454,7 @@ func getIPs(ctx context.Context, client *ClientSet, vmID string, interval time.D } func waitForCompletion(ctx context.Context, client *ClientSet, taskID string, interval time.Duration, timeout time.Duration) error { - return wait.Poll(interval, timeout, func() (bool, error) { + return wait.PollUntilContextTimeout(ctx, interval, timeout, false, func(ctx context.Context) (bool, error) { task, err := client.Prism.V3.GetTask(ctx, taskID) if err != nil { return false, wrapNutanixError(err) @@ -402,19 +466,19 @@ func waitForCompletion(ctx context.Context, client *ClientSet, taskID string, in switch *task.Status { case "INVALID_UUID", "FAILED": - return false, fmt.Errorf("bad status: %s", *task.Status) + return false, fmt.Errorf("bad status: %s, error detail: %s, progress message: %s", *task.Status, *task.ErrorDetail, *task.ProgressMessage) case "QUEUED", "RUNNING": return false, nil case "SUCCEEDED": return true, nil default: - return false, fmt.Errorf("unknown status: %s", *task.Status) + return false, fmt.Errorf("unknown status: %s, error detail: %s, progress message: %s", *task.Status, *task.ErrorDetail, *task.ProgressMessage) } }) } func waitForPowerState(ctx context.Context, client *ClientSet, vmID string, interval time.Duration, timeout time.Duration) error { - return wait.Poll(interval, timeout, func() (bool, error) { + return wait.PollUntilContextTimeout(ctx, interval, timeout, false, func(ctx context.Context) (bool, error) { vm, err := client.Prism.V3.GetVM(ctx, vmID) if err != 
nil { return false, wrapNutanixError(err) diff --git a/pkg/cloudprovider/provider/nutanix/provider.go b/pkg/cloudprovider/provider/nutanix/provider.go index f2dcbf884..e84cb5c6c 100644 --- a/pkg/cloudprovider/provider/nutanix/provider.go +++ b/pkg/cloudprovider/provider/nutanix/provider.go @@ -23,18 +23,19 @@ import ( "strconv" "time" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - cloudprovidererrors "github.com/kubermatic/machine-controller/pkg/cloudprovider/errors" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/instance" - nutanixtypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/nutanix/types" - cloudprovidertypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/types" - "github.com/kubermatic/machine-controller/pkg/providerconfig" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + "go.uber.org/zap" + + cloudprovidererrors "k8c.io/machine-controller/pkg/cloudprovider/errors" + "k8c.io/machine-controller/pkg/cloudprovider/instance" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + "k8c.io/machine-controller/sdk/apis/cluster/common" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + nutanixtypes "k8c.io/machine-controller/sdk/cloudprovider/nutanix" + "k8c.io/machine-controller/sdk/providerconfig" corev1 "k8s.io/api/core/v1" ktypes "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) type Config struct { @@ -61,7 +62,7 @@ type Config struct { } type provider struct { - configVarResolver *providerconfig.ConfigVarResolver + configVarResolver providerconfig.ConfigVarResolver } // Server holds Nutanix server information. 
@@ -87,6 +88,9 @@ func (nutanixServer Server) ID() string { } func (nutanixServer Server) ProviderID() string { + if nutanixServer.ID() == "" { + return "" + } return fmt.Sprintf("nutanix://%s", nutanixServer.ID()) } @@ -99,17 +103,13 @@ func (nutanixServer Server) Status() instance.Status { } // New returns a nutanix provider. -func New(configVarResolver *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { +func New(configVarResolver providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { provider := &provider{configVarResolver: configVarResolver} return provider } -func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *providerconfigtypes.Config, *nutanixtypes.RawConfig, error) { - if provSpec.Value == nil { - return nil, nil, nil, fmt.Errorf("machine.spec.providerconfig.value is nil") - } - - pconfig, err := providerconfigtypes.GetConfig(provSpec) +func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *providerconfig.Config, *nutanixtypes.RawConfig, error) { + pconfig, err := providerconfig.GetConfig(provSpec) if err != nil { return nil, nil, nil, err } @@ -125,12 +125,12 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p c := Config{} - c.Endpoint, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.Endpoint, "NUTANIX_ENDPOINT") + c.Endpoint, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.Endpoint, "NUTANIX_ENDPOINT") if err != nil { return nil, nil, nil, err } - port, err := p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.Port, "NUTANIX_PORT") + port, err := p.configVarResolver.GetStringValueOrEnv(rawConfig.Port, "NUTANIX_PORT") if err != nil { return nil, nil, nil, err } @@ -141,49 +141,49 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p if err != nil { return nil, nil, nil, err } - c.Port = pointer.Int(portInt) + c.Port = ptr.To(portInt) } - c.Username, err = 
p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.Username, "NUTANIX_USERNAME") + c.Username, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.Username, "NUTANIX_USERNAME") if err != nil { return nil, nil, nil, err } - c.Password, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.Password, "NUTANIX_PASSWORD") + c.Password, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.Password, "NUTANIX_PASSWORD") if err != nil { return nil, nil, nil, err } - c.AllowInsecure, err = p.configVarResolver.GetConfigVarBoolValueOrEnv(rawConfig.AllowInsecure, "NUTANIX_INSECURE") + c.AllowInsecure, err = p.configVarResolver.GetBoolValueOrEnv(rawConfig.AllowInsecure, "NUTANIX_INSECURE") if err != nil { return nil, nil, nil, err } - c.ProxyURL, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.ProxyURL, "NUTANIX_PROXY_URL") + c.ProxyURL, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.ProxyURL, "NUTANIX_PROXY_URL") if err != nil { return nil, nil, nil, err } - c.ClusterName, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.ClusterName, "NUTANIX_CLUSTER_NAME") + c.ClusterName, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.ClusterName, "NUTANIX_CLUSTER_NAME") if err != nil { return nil, nil, nil, err } if rawConfig.ProjectName != nil { - c.ProjectName, err = p.configVarResolver.GetConfigVarStringValue(*rawConfig.ProjectName) + c.ProjectName, err = p.configVarResolver.GetStringValue(*rawConfig.ProjectName) if err != nil { return nil, nil, nil, err } } - c.SubnetName, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.SubnetName) + c.SubnetName, err = p.configVarResolver.GetStringValue(rawConfig.SubnetName) if err != nil { return nil, nil, nil, err } c.AdditionalSubnetNames = append(c.AdditionalSubnetNames, rawConfig.AdditionalSubnetNames...) 
- c.ImageName, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.ImageName) + c.ImageName, err = p.configVarResolver.GetStringValue(rawConfig.ImageName) if err != nil { return nil, nil, nil, err } @@ -199,11 +199,11 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p return &c, pconfig, rawConfig, nil } -func (p *provider) AddDefaults(spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { +func (p *provider) AddDefaults(_ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { return spec, nil } -func (p *provider) Validate(ctx context.Context, spec clusterv1alpha1.MachineSpec) error { +func (p *provider) Validate(ctx context.Context, _ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) error { config, _, _, err := p.getConfig(spec.ProviderSpec) if err != nil { return fmt.Errorf("failed to parse machineSpec: %w", err) @@ -255,12 +255,12 @@ func (p *provider) Validate(ctx context.Context, spec clusterv1alpha1.MachineSpe return nil } -func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { +func (p *provider) Create(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { vm, err := p.create(ctx, machine, userdata) if err != nil { - _, cleanupErr := p.Cleanup(ctx, machine, data) + _, cleanupErr := p.Cleanup(ctx, log, machine, data) if cleanupErr != nil { - return nil, fmt.Errorf("cleaning up failed with err %v after creation failed with err %w", cleanupErr, err) + return nil, fmt.Errorf("cleaning up failed with err %w after creation failed with err %w", cleanupErr, err) } return nil, err } @@ -268,7 +268,7 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, } func (p *provider) create(ctx context.Context, machine 
*clusterv1alpha1.Machine, userdata string) (instance.Instance, error) { - config, pc, _, err := p.getConfig(machine.Spec.ProviderSpec) + config, _, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return nil, cloudprovidererrors.TerminalError{ Reason: common.InvalidConfigurationMachineError, @@ -284,14 +284,14 @@ func (p *provider) create(ctx context.Context, machine *clusterv1alpha1.Machine, } } - return createVM(ctx, client, machine.Name, *config, pc.OperatingSystem, userdata) + return createVM(ctx, client, machine.Name, *config, userdata) } -func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (bool, error) { +func (p *provider) Cleanup(ctx context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (bool, error) { return p.cleanup(ctx, machine, data) } -func (p *provider) cleanup(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (bool, error) { +func (p *provider) cleanup(ctx context.Context, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (bool, error) { config, _, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return false, cloudprovidererrors.TerminalError{ @@ -352,7 +352,7 @@ func (p *provider) cleanup(ctx context.Context, machine *clusterv1alpha1.Machine return true, nil } -func (p *provider) Get(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (instance.Instance, error) { +func (p *provider) Get(ctx context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { config, _, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return nil, cloudprovidererrors.TerminalError{ @@ -417,15 +417,10 @@ func (p *provider) Get(ctx context.Context, machine *clusterv1alpha1.Machine, da }, nil } -func (p *provider) 
MigrateUID(_ context.Context, _ *clusterv1alpha1.Machine, _ ktypes.UID) error { +func (p *provider) MigrateUID(_ context.Context, _ *zap.SugaredLogger, _ *clusterv1alpha1.Machine, _ ktypes.UID) error { return nil } -// GetCloudConfig returns an empty cloud configuration for Nutanix as no CCM exists. -func (p *provider) GetCloudConfig(spec clusterv1alpha1.MachineSpec) (config string, name string, err error) { - return "", "", nil -} - func (p *provider) MachineMetricsLabels(machine *clusterv1alpha1.Machine) (map[string]string, error) { labels := make(map[string]string) @@ -440,6 +435,6 @@ func (p *provider) MachineMetricsLabels(machine *clusterv1alpha1.Machine) (map[s return labels, nil } -func (p *provider) SetMetricsForMachines(machines clusterv1alpha1.MachineList) error { +func (p *provider) SetMetricsForMachines(_ clusterv1alpha1.MachineList) error { return nil } diff --git a/pkg/cloudprovider/provider/opennebula/provider.go b/pkg/cloudprovider/provider/opennebula/provider.go new file mode 100644 index 000000000..fc5a731b3 --- /dev/null +++ b/pkg/cloudprovider/provider/opennebula/provider.go @@ -0,0 +1,458 @@ +/* +Copyright 2022 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package opennebula + +import ( + "context" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "strconv" + "strings" + + "github.com/OpenNebula/one/src/oca/go/src/goca" + "github.com/OpenNebula/one/src/oca/go/src/goca/schemas/shared" + "github.com/OpenNebula/one/src/oca/go/src/goca/schemas/vm" + "github.com/OpenNebula/one/src/oca/go/src/goca/schemas/vm/keys" + "go.uber.org/zap" + + cloudprovidererrors "k8c.io/machine-controller/pkg/cloudprovider/errors" + "k8c.io/machine-controller/pkg/cloudprovider/instance" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + "k8c.io/machine-controller/sdk/apis/cluster/common" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + opennebulatypes "k8c.io/machine-controller/sdk/cloudprovider/opennebula" + "k8c.io/machine-controller/sdk/providerconfig" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" +) + +type provider struct { + configVarResolver providerconfig.ConfigVarResolver +} + +type CloudProviderSpec struct { + PassValidation bool `json:"passValidation"` +} + +const ( + machineUIDContextKey = "K8S_MACHINE_UID" +) + +// New returns a OpenNebula provider. 
+func New(configVarResolver providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { + return &provider{configVarResolver: configVarResolver} +} + +type Config struct { + // Auth details + Username string + Password string + Endpoint string + + // Machine details + CPU *float64 + VCPU *int + Memory *int + Image string + Datastore string + DiskSize *int + Network string + EnableVNC bool + VMTemplateExtra map[string]string +} + +func getClient(config *Config) *goca.Client { + return goca.NewDefaultClient(goca.NewConfig(config.Username, config.Password, config.Endpoint)) +} + +func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *providerconfig.Config, error) { + pconfig, err := providerconfig.GetConfig(provSpec) + if err != nil { + return nil, nil, err + } + + rawConfig, err := opennebulatypes.GetConfig(*pconfig) + if err != nil { + return nil, nil, err + } + + c := Config{} + c.Username, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.Username, "ONE_USERNAME") + if err != nil { + return nil, nil, fmt.Errorf("failed to get the value of \"username\" field, error = %w", err) + } + + c.Password, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.Password, "ONE_PASSWORD") + if err != nil { + return nil, nil, fmt.Errorf("failed to get the value of \"password\" field, error = %w", err) + } + + c.Endpoint, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.Endpoint, "ONE_ENDPOINT") + if err != nil { + return nil, nil, fmt.Errorf("failed to get the value of \"endpoint\" field, error = %w", err) + } + + c.CPU = rawConfig.CPU + + c.VCPU = rawConfig.VCPU + + c.Memory = rawConfig.Memory + + c.Image, err = p.configVarResolver.GetStringValue(rawConfig.Image) + if err != nil { + return nil, nil, err + } + + c.Datastore, err = p.configVarResolver.GetStringValue(rawConfig.Datastore) + if err != nil { + return nil, nil, err + } + + c.DiskSize = rawConfig.DiskSize + + c.Network, err = 
p.configVarResolver.GetStringValue(rawConfig.Network) + if err != nil { + return nil, nil, err + } + + c.EnableVNC, _, err = p.configVarResolver.GetBoolValue(rawConfig.EnableVNC) + if err != nil { + return nil, nil, err + } + + c.VMTemplateExtra = rawConfig.VMTemplateExtra + + return &c, pconfig, err +} + +func (p *provider) Validate(_ context.Context, _ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) error { + _, pc, err := p.getConfig(spec.ProviderSpec) + if err != nil { + return fmt.Errorf("failed to parse config: %w", err) + } + + opennebulaCloudProviderSpec := CloudProviderSpec{} + if err = json.Unmarshal(pc.CloudProviderSpec.Raw, &opennebulaCloudProviderSpec); err != nil { + return fmt.Errorf("failed to parse cloud provider spec: %w", err) + } + + return nil +} + +func (p *provider) Create(_ context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { + c, _, err := p.getConfig(machine.Spec.ProviderSpec) + if err != nil { + return nil, cloudprovidererrors.TerminalError{ + Reason: common.InvalidConfigurationMachineError, + Message: fmt.Sprintf("Failed to parse MachineSpec, due to %v", err), + } + } + + client := getClient(c) + + // build a template + tpl := vm.NewTemplate() + + // add extra template vars first + for key, value := range c.VMTemplateExtra { + tpl.Add(keys.Template(key), value) + } + + tpl.Add(keys.Name, machine.Spec.Name) + tpl.CPU(*c.CPU).Memory(*c.Memory).VCPU(*c.VCPU) + + disk := tpl.AddDisk() + disk.Add(shared.Image, c.Image) + disk.Add(shared.Datastore, c.Datastore) + disk.Add(shared.Size, *c.DiskSize) + + nic := tpl.AddNIC() + nic.Add(shared.Network, c.Network) + nic.Add(shared.Model, "virtio") + + if c.EnableVNC { + err = tpl.AddIOGraphic(keys.GraphicType, "VNC") + if err != nil { + return nil, fmt.Errorf("failed to add graphic type to iographic in template: %w", err) + } + err = tpl.AddIOGraphic(keys.Listen, "0.0.0.0") + if err != nil { + 
return nil, fmt.Errorf("failed to add listen address to iographic in template: %w", err) + } + } + + err = tpl.AddCtx(keys.NetworkCtx, "YES") + if err != nil { + return nil, fmt.Errorf("failed to add network to context in template: %w", err) + } + err = tpl.AddCtx(keys.SSHPubKey, "$USER[SSH_PUBLIC_KEY]") + if err != nil { + return nil, fmt.Errorf("failed to add SSH public key to context in template: %w", err) + } + + err = tpl.AddCtx(machineUIDContextKey, string(machine.UID)) + if err != nil { + return nil, fmt.Errorf("failed to add machine UID to context in template: %w", err) + } + err = tpl.AddCtx("USER_DATA", base64.StdEncoding.EncodeToString([]byte(userdata))) + if err != nil { + return nil, fmt.Errorf("failed to add user data to context in template: %w", err) + } + err = tpl.AddCtx("USER_DATA_ENCODING", "base64") + if err != nil { + return nil, fmt.Errorf("failed to add user data encoding to context in template: %w", err) + } + err = tpl.AddCtx("SET_HOSTNAME", machine.Spec.Name) + if err != nil { + return nil, fmt.Errorf("failed to add desired hostname to context in template: %w", err) + } + + controller := goca.NewController(client) + + // create VM from the generated template above + vmID, err := controller.VMs().Create(tpl.String(), false) + if err != nil { + return nil, fmt.Errorf("failed to create VM: %w", err) + } + + vm, err := controller.VM(vmID).Info(false) + if err != nil { + return nil, fmt.Errorf("failed to fetch VM information: %w", err) + } + + return &openNebulaInstance{vm}, nil +} + +func (p *provider) Cleanup(_ context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (bool, error) { + instance, err := p.get(machine) + if err != nil { + if errors.Is(err, cloudprovidererrors.ErrInstanceNotFound) { + return true, nil + } + return false, err + } + + c, _, err := p.getConfig(machine.Spec.ProviderSpec) + if err != nil { + return false, cloudprovidererrors.TerminalError{ + Reason: 
common.InvalidConfigurationMachineError, + Message: fmt.Sprintf("Failed to parse MachineSpec, due to %v", err), + } + } + + client := getClient(c) + controller := goca.NewController(client) + + vmctrl := controller.VM(instance.vm.ID) + err = vmctrl.TerminateHard() + // ignore error of nonexistent machines by matching for "NO_EXISTS", the error string is something like "OpenNebula error [NO_EXISTS]: [one.vm.action] Error getting virtual machine [999914743]." + if err != nil && !strings.Contains(err.Error(), "NO_EXISTS") { + return false, cloudprovidererrors.TerminalError{ + Reason: common.InvalidConfigurationMachineError, + Message: fmt.Sprintf("failed to delete virtual machine, due to %v", err), + } + } + + return true, nil +} + +func (p *provider) Get(_ context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { + return p.get(machine) +} + +func (p *provider) get(machine *clusterv1alpha1.Machine) (*openNebulaInstance, error) { + c, _, err := p.getConfig(machine.Spec.ProviderSpec) + if err != nil { + return nil, cloudprovidererrors.TerminalError{ + Reason: common.InvalidConfigurationMachineError, + Message: fmt.Sprintf("Failed to parse MachineSpec, due to %v", err), + } + } + + client := getClient(c) + controller := goca.NewController(client) + + vmPool, err := controller.VMs().Info() + if err != nil { + return nil, cloudprovidererrors.TerminalError{ + Reason: common.InvalidConfigurationMachineError, + Message: fmt.Sprintf("failed to list virtual machines, due to %v", err), + } + } + + // first collect all IDs, the vm infos in the vmPool don't contain the context which has the uid + var vmIDs []int + for _, vm := range vmPool.VMs { + if vm.Name != machine.Spec.Name { + continue + } + + vmIDs = append(vmIDs, vm.ID) + } + + // go over each vm that matches the name and check if the uid is the same + for _, vmID := range vmIDs { + vm, err := controller.VM(vmID).Info(false) + if err != nil { 
+ return nil, cloudprovidererrors.TerminalError{ + Reason: common.InvalidConfigurationMachineError, + Message: fmt.Sprintf("failed to get info for VM %v, due to %v", vmID, err), + } + } + + uid, err := vm.Template.GetCtx(machineUIDContextKey) + if err != nil { + // ignore errors like "key blabla not found" + continue + } + + if uid == string(machine.UID) { + return &openNebulaInstance{vm}, nil + } + } + + return nil, cloudprovidererrors.ErrInstanceNotFound +} + +func (p *provider) AddDefaults(_ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { + return spec, nil +} + +func (p *provider) MigrateUID(_ context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, newUID types.UID) error { + c, _, err := p.getConfig(machine.Spec.ProviderSpec) + if err != nil { + return cloudprovidererrors.TerminalError{ + Reason: common.InvalidConfigurationMachineError, + Message: fmt.Sprintf("Failed to parse MachineSpec, due to %v", err), + } + } + + instance, err := p.get(machine) + if err != nil { + return cloudprovidererrors.TerminalError{ + Reason: common.InvalidConfigurationMachineError, + Message: fmt.Sprintf("Failed to get instance, due to %v", err), + } + } + + client := getClient(c) + + // get current template + tpl := &instance.vm.Template + contextVector, err := tpl.GetVector(keys.ContextVec) + if err != nil { + return cloudprovidererrors.TerminalError{ + Reason: common.InvalidConfigurationMachineError, + Message: fmt.Sprintf("Failed to get VM template context vector, due to %v", err), + } + } + + // replace the old uid in context with the new one + contextVector.Del(machineUIDContextKey) + err = contextVector.AddPair(machineUIDContextKey, string(newUID)) + if err != nil { + return fmt.Errorf("failed to add the new machine UID to context in template: %w", err) + } + + // create a new template that only has the context vector in it so it gets properly replaced + tpl = vm.NewTemplate() + for _, pair := range 
contextVector.Pairs { + key := pair.XMLName.Local + value := pair.Value + err = tpl.AddCtx(keys.Context(key), value) + if err != nil { + return fmt.Errorf("failed to add %s to context in template: %w", key, err) + } + } + + // finally, update the VM template + controller := goca.NewController(client) + vmCtrl := controller.VM(instance.vm.ID) + err = vmCtrl.UpdateConf(tpl.String()) + if err != nil { + return cloudprovidererrors.TerminalError{ + Reason: common.InvalidConfigurationMachineError, + Message: fmt.Sprintf("Failed to update VM template, due to %v", err), + } + } + + return nil +} + +func (p *provider) MachineMetricsLabels(_ *clusterv1alpha1.Machine) (map[string]string, error) { + return map[string]string{}, nil +} + +func (p *provider) SetMetricsForMachines(_ clusterv1alpha1.MachineList) error { + return nil +} + +type openNebulaInstance struct { + vm *vm.VM +} + +func (i *openNebulaInstance) Name() string { + return i.vm.Name +} + +func (i *openNebulaInstance) ID() string { + return strconv.Itoa(i.vm.ID) +} + +func (i *openNebulaInstance) ProviderID() string { + if i.vm == nil || i.vm.ID == 0 { + return "" + } + return "opennebula://" + strconv.Itoa(i.vm.ID) +} + +func (i *openNebulaInstance) Addresses() map[string]corev1.NodeAddressType { + addresses := map[string]corev1.NodeAddressType{} + + for _, nic := range i.vm.Template.GetNICs() { + ip, _ := nic.Get(shared.IP) + addresses[ip] = corev1.NodeInternalIP + } + + return addresses +} + +func (i *openNebulaInstance) Status() instance.Status { + // state is the general state of the VM, lcmState is the state of the life-cycle manager of the VM + // lcmState is anything else other than LcmInit when the VM's state is Active + state, lcmState, _ := i.vm.State() + switch state { + case vm.Init, vm.Pending, vm.Hold: + return instance.StatusCreating + case vm.Active: + switch lcmState { + case vm.LcmInit, vm.Prolog, vm.Boot: + return instance.StatusCreating + case vm.Epilog: + return instance.StatusDeleting + 
default: + return instance.StatusRunning + } + case vm.Done: + return instance.StatusDeleted + default: + return instance.StatusUnknown + } +} diff --git a/pkg/cloudprovider/provider/openstack/helper.go b/pkg/cloudprovider/provider/openstack/helper.go index b7475fd2d..2c2a5ec3b 100644 --- a/pkg/cloudprovider/provider/openstack/helper.go +++ b/pkg/cloudprovider/provider/openstack/helper.go @@ -36,6 +36,7 @@ import ( osports "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" ossubnets "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" "github.com/gophercloud/gophercloud/pagination" + "go.uber.org/zap" ) var ( @@ -97,7 +98,7 @@ func getNewComputeV2(client *gophercloud.ProviderClient, c *Config) (*gopherclou return computeClient, nil } -func getAvailabilityZones(computeClient *gophercloud.ServiceClient, c *Config) ([]osavailabilityzones.AvailabilityZone, error) { +func getAvailabilityZones(computeClient *gophercloud.ServiceClient) ([]osavailabilityzones.AvailabilityZone, error) { allPages, err := osavailabilityzones.List(computeClient).AllPages() if err != nil { return nil, err @@ -106,7 +107,7 @@ func getAvailabilityZones(computeClient *gophercloud.ServiceClient, c *Config) ( } func getAvailabilityZone(computeClient *gophercloud.ServiceClient, c *Config) (*osavailabilityzones.AvailabilityZone, error) { - zones, err := getAvailabilityZones(computeClient, c) + zones, err := getAvailabilityZones(computeClient) if err != nil { return nil, err } @@ -263,7 +264,7 @@ func getSubnet(netClient *gophercloud.ServiceClient, nameOrID string) (*ossubnet return nil, errNotFound } -func ensureKubernetesSecurityGroupExist(client *gophercloud.ProviderClient, region, name string) error { +func ensureKubernetesSecurityGroupExist(log *zap.SugaredLogger, client *gophercloud.ProviderClient, region, name string) error { // We need a mutex here because otherwise if more than one machine gets created at roughly the same time // we will create two security 
groups and subsequently not be able anymore to identify our security group // by name @@ -272,7 +273,7 @@ func ensureKubernetesSecurityGroupExist(client *gophercloud.ProviderClient, regi netClient, err := goopenstack.NewNetworkV2(client, gophercloud.EndpointOpts{Region: region}) if err != nil { - return osErrorToTerminalError(err, "failed to get network client") + return osErrorToTerminalError(log, err, "failed to get network client") } _, err = getSecurityGroup(client, region, name) @@ -280,7 +281,7 @@ func ensureKubernetesSecurityGroupExist(client *gophercloud.ProviderClient, regi if errors.Is(err, errNotFound) { sg, err := ossecuritygroups.Create(netClient, ossecuritygroups.CreateOpts{Name: name}).Extract() if err != nil { - return osErrorToTerminalError(err, fmt.Sprintf("failed to create security group %s", name)) + return osErrorToTerminalError(log, err, fmt.Sprintf("failed to create security group %s", name)) } rules := []osecruritygrouprules.CreateOpts{ @@ -302,7 +303,7 @@ func ensureKubernetesSecurityGroupExist(client *gophercloud.ProviderClient, regi for _, opts := range rules { if _, err := osecruritygrouprules.Create(netClient, opts).Extract(); err != nil { - return osErrorToTerminalError(err, "failed to create security group rule") + return osErrorToTerminalError(log, err, "failed to create security group rule") } } } diff --git a/pkg/cloudprovider/provider/openstack/provider.go b/pkg/cloudprovider/provider/openstack/provider.go index 938d74f02..a2ec0806f 100644 --- a/pkg/cloudprovider/provider/openstack/provider.go +++ b/pkg/cloudprovider/provider/openstack/provider.go @@ -34,16 +34,16 @@ import ( osfloatingips "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" osnetworks "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" "github.com/gophercloud/gophercloud/pagination" + "go.uber.org/zap" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" - clusterv1alpha1 
"github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - cloudprovidererrors "github.com/kubermatic/machine-controller/pkg/cloudprovider/errors" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/instance" - openstacktypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/openstack/types" - cloudprovidertypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/types" - cloudproviderutil "github.com/kubermatic/machine-controller/pkg/cloudprovider/util" - "github.com/kubermatic/machine-controller/pkg/providerconfig" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + cloudprovidererrors "k8c.io/machine-controller/pkg/cloudprovider/errors" + "k8c.io/machine-controller/pkg/cloudprovider/instance" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + cloudproviderutil "k8c.io/machine-controller/pkg/cloudprovider/util" + "k8c.io/machine-controller/sdk/apis/cluster/common" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + openstacktypes "k8c.io/machine-controller/sdk/cloudprovider/openstack" + "k8c.io/machine-controller/sdk/providerconfig" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -51,28 +51,28 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/klog" ) const ( floatingIPReleaseFinalizer = "kubermatic.io/release-openstack-floating-ip" floatingIPIDAnnotationKey = "kubermatic.io/release-openstack-floating-ip" + clientTimeout = 1 * time.Minute ) // clientGetterFunc returns an OpenStack client. type clientGetterFunc func(c *Config) (*gophercloud.ProviderClient, error) // portReadinessWaiterFunc waits for the port with the given ID to be available. 
-type portReadinessWaiterFunc func(netClient *gophercloud.ServiceClient, serverID string, networkID string, instanceReadyCheckPeriod time.Duration, instanceReadyCheckTimeout time.Duration) error +type portReadinessWaiterFunc func(ctx context.Context, instanceLog *zap.SugaredLogger, netClient *gophercloud.ServiceClient, serverID string, networkID string, instanceReadyCheckPeriod time.Duration, instanceReadyCheckTimeout time.Duration) error type provider struct { - configVarResolver *providerconfig.ConfigVarResolver + configVarResolver providerconfig.ConfigVarResolver clientGetter clientGetterFunc portReadinessWaiter portReadinessWaiterFunc } // New returns a openstack provider. -func New(configVarResolver *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { +func New(configVarResolver providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return &provider{ configVarResolver: configVarResolver, clientGetter: getClient, @@ -98,10 +98,12 @@ type Config struct { Flavor string SecurityGroups []string Network string + Networks []string Subnet string FloatingIPPool string AvailabilityZone string TrustDevicePath bool + ConfigDrive bool RootDiskSizeGB *int RootDiskVolumeType string NodeVolumeAttachLimit *uint @@ -124,45 +126,44 @@ var floatingIPAssignLock = &sync.Mutex{} // Get the Project name from config or env var. If not defined fallback to tenant name. func (p *provider) getProjectNameOrTenantName(rawConfig *openstacktypes.RawConfig) (string, error) { - projectName, err := p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.ProjectName, "OS_PROJECT_NAME") + projectName, err := p.configVarResolver.GetStringValueOrEnv(rawConfig.ProjectName, "OS_PROJECT_NAME") if err == nil && len(projectName) > 0 { return projectName, nil } //fallback to tenantName. 
- return p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.TenantName, "OS_TENANT_NAME") + return p.configVarResolver.GetStringValueOrEnv(rawConfig.TenantName, "OS_TENANT_NAME") } // Get the Project id from config or env var. If not defined fallback to tenant id. func (p *provider) getProjectIDOrTenantID(rawConfig *openstacktypes.RawConfig) (string, error) { - projectID, err := p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.ProjectID, "OS_PROJECT_ID") + projectID, err := p.configVarResolver.GetStringValueOrEnv(rawConfig.ProjectID, "OS_PROJECT_ID") if err == nil && len(projectID) > 0 { return projectID, nil } //fallback to tenantName. - return p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.TenantID, "OS_TENANT_ID") + return p.configVarResolver.GetStringValueOrEnv(rawConfig.TenantID, "OS_TENANT_ID") } func (p *provider) getConfigAuth(c *Config, rawConfig *openstacktypes.RawConfig) error { var err error - c.ApplicationCredentialID, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.ApplicationCredentialID, "OS_APPLICATION_CREDENTIAL_ID") + c.ApplicationCredentialID, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.ApplicationCredentialID, "OS_APPLICATION_CREDENTIAL_ID") if err != nil { return fmt.Errorf("failed to get the value of \"applicationCredentialID\" field, error = %w", err) } if c.ApplicationCredentialID != "" { - klog.V(6).Infof("applicationCredentialID from configuration or environment was found.") - c.ApplicationCredentialSecret, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.ApplicationCredentialSecret, "OS_APPLICATION_CREDENTIAL_SECRET") + c.ApplicationCredentialSecret, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.ApplicationCredentialSecret, "OS_APPLICATION_CREDENTIAL_SECRET") if err != nil { return fmt.Errorf("failed to get the value of \"applicationCredentialSecret\" field, error = %w", err) } return nil } - c.Username, err = 
p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.Username, "OS_USER_NAME") + c.Username, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.Username, "OS_USER_NAME") if err != nil { return fmt.Errorf("failed to get the value of \"username\" field, error = %w", err) } - c.Password, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.Password, "OS_PASSWORD") + c.Password, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.Password, "OS_PASSWORD") if err != nil { return fmt.Errorf("failed to get the value of \"password\" field, error = %w", err) } @@ -177,12 +178,33 @@ func (p *provider) getConfigAuth(c *Config, rawConfig *openstacktypes.RawConfig) return nil } -func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *providerconfigtypes.Config, *openstacktypes.RawConfig, error) { - if provSpec.Value == nil { - return nil, nil, nil, fmt.Errorf("machine.spec.providerconfig.value is nil") +func (p *provider) resolveNetworks(cfg *Config) ([]string, error) { + if len(cfg.Networks) > 0 { + networks := make([]string, 0, len(cfg.Networks)+1) + seen := make(map[string]struct{}) + if cfg.Network != "" { + networks = append(networks, cfg.Network) + seen[cfg.Network] = struct{}{} + } + for _, n := range cfg.Networks { + if _, exists := seen[n]; !exists { + networks = append(networks, n) + seen[n] = struct{}{} + } + } + if len(networks) == 0 { + return nil, fmt.Errorf("no networks specified") + } + return networks, nil } + if cfg.Network != "" { + return []string{cfg.Network}, nil + } + return nil, fmt.Errorf("no networks specified") +} - pconfig, err := providerconfigtypes.GetConfig(provSpec) +func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *providerconfig.Config, *openstacktypes.RawConfig, error) { + pconfig, err := providerconfig.GetConfig(provSpec) if err != nil { return nil, nil, nil, err } @@ -197,7 +219,7 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p } 
cfg := Config{} - cfg.IdentityEndpoint, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.IdentityEndpoint, "OS_AUTH_URL") + cfg.IdentityEndpoint, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.IdentityEndpoint, "OS_AUTH_URL") if err != nil { return nil, nil, nil, fmt.Errorf("failed to get the value of \"identityEndpoint\" field, error = %w", err) } @@ -209,78 +231,90 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p } // Ignore Region not found as Region might not be found and we can default it later. - cfg.Region, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.Region, "OS_REGION_NAME") - if err != nil { - klog.V(6).Infof("Region from configuration or environment variable not found") - } + cfg.Region, _ = p.configVarResolver.GetStringValueOrEnv(rawConfig.Region, "OS_REGION_NAME") - cfg.InstanceReadyCheckPeriod, err = p.configVarResolver.GetConfigVarDurationValueOrDefault(rawConfig.InstanceReadyCheckPeriod, 5*time.Second) + cfg.InstanceReadyCheckPeriod, err = p.configVarResolver.GetDurationValueOrDefault(rawConfig.InstanceReadyCheckPeriod, 5*time.Second) if err != nil { return nil, nil, nil, fmt.Errorf(`failed to get the value of "InstanceReadyCheckPeriod" field, error = %w`, err) } - cfg.InstanceReadyCheckTimeout, err = p.configVarResolver.GetConfigVarDurationValueOrDefault(rawConfig.InstanceReadyCheckTimeout, 10*time.Second) + cfg.InstanceReadyCheckTimeout, err = p.configVarResolver.GetDurationValueOrDefault(rawConfig.InstanceReadyCheckTimeout, 10*time.Second) if err != nil { return nil, nil, nil, fmt.Errorf(`failed to get the value of "InstanceReadyCheckTimeout" field, error = %w`, err) } // We ignore errors here because the OS domain is only required when using Identity API V3. 
- cfg.DomainName, _ = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.DomainName, "OS_DOMAIN_NAME") - cfg.TokenID, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.TokenID) + cfg.DomainName, _ = p.configVarResolver.GetStringValueOrEnv(rawConfig.DomainName, "OS_DOMAIN_NAME") + cfg.TokenID, err = p.configVarResolver.GetStringValue(rawConfig.TokenID) if err != nil { return nil, nil, nil, err } - cfg.Image, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.Image) + cfg.Image, err = p.configVarResolver.GetStringValue(rawConfig.Image) if err != nil { return nil, nil, nil, err } - cfg.Flavor, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.Flavor) + cfg.Flavor, err = p.configVarResolver.GetStringValue(rawConfig.Flavor) if err != nil { return nil, nil, nil, err } for _, securityGroup := range rawConfig.SecurityGroups { - securityGroupValue, err := p.configVarResolver.GetConfigVarStringValue(securityGroup) + securityGroupValue, err := p.configVarResolver.GetStringValue(securityGroup) if err != nil { return nil, nil, nil, err } cfg.SecurityGroups = append(cfg.SecurityGroups, securityGroupValue) } - cfg.Network, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.Network) + cfg.Network, err = p.configVarResolver.GetStringValue(rawConfig.Network) if err != nil { return nil, nil, nil, err } - cfg.Subnet, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.Subnet) + for _, network := range rawConfig.Networks { + networkValue, err := p.configVarResolver.GetStringValue(network) + if err != nil { + return nil, nil, nil, err + } + if networkValue != "" { + cfg.Networks = append(cfg.Networks, networkValue) + } + } + + cfg.Subnet, err = p.configVarResolver.GetStringValue(rawConfig.Subnet) if err != nil { return nil, nil, nil, err } - cfg.FloatingIPPool, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.FloatingIPPool) + cfg.FloatingIPPool, err = p.configVarResolver.GetStringValue(rawConfig.FloatingIPPool) if err != 
nil { return nil, nil, nil, err } - cfg.AvailabilityZone, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.AvailabilityZone) + cfg.AvailabilityZone, err = p.configVarResolver.GetStringValue(rawConfig.AvailabilityZone) if err != nil { return nil, nil, nil, err } - cfg.TrustDevicePath, _, err = p.configVarResolver.GetConfigVarBoolValue(rawConfig.TrustDevicePath) + cfg.TrustDevicePath, _, err = p.configVarResolver.GetBoolValue(rawConfig.TrustDevicePath) if err != nil { return nil, nil, nil, err } - cfg.ComputeAPIVersion, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.ComputeAPIVersion) + cfg.ConfigDrive, _, err = p.configVarResolver.GetBoolValue(rawConfig.ConfigDrive) + if err != nil { + return nil, nil, nil, err + } + + cfg.ComputeAPIVersion, err = p.configVarResolver.GetStringValue(rawConfig.ComputeAPIVersion) if err != nil { return nil, nil, nil, err } cfg.RootDiskSizeGB = rawConfig.RootDiskSizeGB - cfg.RootDiskVolumeType, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.RootDiskVolumeType) + cfg.RootDiskVolumeType, err = p.configVarResolver.GetStringValue(rawConfig.RootDiskVolumeType) if err != nil { return nil, nil, nil, err } @@ -291,7 +325,7 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p cfg.Tags = map[string]string{} } - cfg.ServerGroup, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.ServerGroup) + cfg.ServerGroup, err = p.configVarResolver.GetStringValue(rawConfig.ServerGroup) if err != nil { return nil, nil, nil, err } @@ -300,11 +334,7 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p } func setProviderSpec(rawConfig openstacktypes.RawConfig, provSpec clusterv1alpha1.ProviderSpec) (*runtime.RawExtension, error) { - if provSpec.Value == nil { - return nil, fmt.Errorf("machine.spec.providerconfig.value is nil") - } - - pconfig, err := providerconfigtypes.GetConfig(provSpec) + pconfig, err := providerconfig.GetConfig(provSpec) if err != nil { 
return nil, err } @@ -344,14 +374,17 @@ func getClient(c *Config) (*gophercloud.ProviderClient, error) { } if pc != nil { // use the util's HTTP client to benefit, among other things, from its CA bundle. - pc.HTTPClient = cloudproviderutil.HTTPClientConfig{LogPrefix: "[OpenStack API]"}.New() + pc.HTTPClient = cloudproviderutil.HTTPClientConfig{ + LogPrefix: "[OpenStack API]", + Timeout: clientTimeout, + }.New() } err = goopenstack.Authenticate(pc, opts) return pc, err } -func (p *provider) AddDefaults(spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { +func (p *provider) AddDefaults(log *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { c, _, rawConfig, err := p.getConfig(spec.ProviderSpec) if err != nil { return spec, cloudprovidererrors.TerminalError{ @@ -362,17 +395,17 @@ func (p *provider) AddDefaults(spec clusterv1alpha1.MachineSpec) (clusterv1alpha client, err := p.clientGetter(c) if err != nil { - return spec, osErrorToTerminalError(err, "failed to get a openstack client") + return spec, osErrorToTerminalError(log, err, "failed to get a openstack client") } if c.Region == "" { - klog.V(3).Infof("Trying to default region for machine '%s'...", spec.Name) + log.Debug("Trying to default region for machine...") regions, err := getRegions(client) if err != nil { - return spec, osErrorToTerminalError(err, "failed to get regions") + return spec, osErrorToTerminalError(log, err, "failed to get regions") } if len(regions) == 1 { - klog.V(3).Infof("Defaulted region for machine '%s' to '%s'", spec.Name, regions[0].ID) + log.Debugw("Defaulted region for machine", "region", regions[0].ID) rawConfig.Region.Value = regions[0].ID } else { return spec, fmt.Errorf("could not default region because got '%v' results", len(regions)) @@ -381,17 +414,17 @@ func (p *provider) AddDefaults(spec clusterv1alpha1.MachineSpec) (clusterv1alpha computeClient, err := getNewComputeV2(client, c) if err != nil { - return spec, 
osErrorToTerminalError(err, "failed to get computeClient") + return spec, osErrorToTerminalError(log, err, "failed to get computeClient") } if c.AvailabilityZone == "" { - klog.V(3).Infof("Trying to default availability zone for machine '%s'...", spec.Name) - availabilityZones, err := getAvailabilityZones(computeClient, c) + log.Debug("Trying to default availability zone for machine...") + availabilityZones, err := getAvailabilityZones(computeClient) if err != nil { - return spec, osErrorToTerminalError(err, "failed to get availability zones") + return spec, osErrorToTerminalError(log, err, "failed to get availability zones") } if len(availabilityZones) == 1 { - klog.V(3).Infof("Defaulted availability zone for machine '%s' to '%s'", spec.Name, availabilityZones[0].ZoneName) + log.Debugw("Defaulted availability zone for machine", "zone", availabilityZones[0].ZoneName) rawConfig.AvailabilityZone.Value = availabilityZones[0].ZoneName } } @@ -401,47 +434,55 @@ func (p *provider) AddDefaults(spec clusterv1alpha1.MachineSpec) (clusterv1alpha return spec, err } - if c.Network == "" { - klog.V(3).Infof("Trying to default network for machine '%s'...", spec.Name) + if c.Network == "" && len(c.Networks) == 0 { + log.Debug("Trying to default network for machine...") net, err := getDefaultNetwork(netClient) if err != nil { - return spec, osErrorToTerminalError(err, "failed to default network") + return spec, osErrorToTerminalError(log, err, "failed to default network") } if net != nil { - klog.V(3).Infof("Defaulted network for machine '%s' to '%s'", spec.Name, net.Name) - // Use the id as the name may not be unique + log.Debugw("Defaulted network for machine ", "network", net.Name) + // Use the single network field for backward compatibility when defaulting rawConfig.Network.Value = net.ID } } if c.Subnet == "" { - networkID := c.Network - if rawConfig.Network.Value != "" { - networkID = rawConfig.Network.Value + log.Debug("Trying to default subnet for machine...") + + var 
primaryNetworkID string + if len(c.Networks) > 0 { + primaryNetworkID = c.Networks[0] + } else if c.Network != "" { + primaryNetworkID = c.Network + } else if rawConfig.Network.Value != "" { + primaryNetworkID = rawConfig.Network.Value } - net, err := getNetwork(netClient, networkID) - if err != nil { - return spec, osErrorToTerminalError(err, fmt.Sprintf("failed to get network for subnet defaulting '%s", networkID)) - } - subnet, err := getDefaultSubnet(netClient, net) - if err != nil { - return spec, osErrorToTerminalError(err, "error defaulting subnet") - } - if subnet != nil { - klog.V(3).Infof("Defaulted subnet for machine '%s' to '%s'", spec.Name, *subnet) - rawConfig.Subnet.Value = *subnet + if primaryNetworkID != "" { + net, err := getNetwork(netClient, primaryNetworkID) + if err != nil { + return spec, osErrorToTerminalError(log, err, fmt.Sprintf("failed to get network for subnet defaulting '%s'", primaryNetworkID)) + } + subnet, err := getDefaultSubnet(netClient, net) + if err != nil { + return spec, osErrorToTerminalError(log, err, "error defaulting subnet") + } + if subnet != nil { + log.Debugw("Defaulted subnet for machine", "subnet", *subnet) + rawConfig.Subnet.Value = *subnet + } } } spec.ProviderSpec.Value, err = setProviderSpec(*rawConfig, spec.ProviderSpec) if err != nil { - return spec, osErrorToTerminalError(err, "error marshaling providerconfig") + return spec, osErrorToTerminalError(log, err, "error marshaling providerconfig") } return spec, nil } -func (p *provider) Validate(_ context.Context, spec clusterv1alpha1.MachineSpec) error { +func (p *provider) Validate(_ context.Context, _ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) error { c, _, _, err := p.getConfig(spec.ProviderSpec) if err != nil { return fmt.Errorf("failed to parse config: %w", err) @@ -520,8 +561,16 @@ func (p *provider) Validate(_ context.Context, spec clusterv1alpha1.MachineSpec) return err } - if _, err := getNetwork(netClient, c.Network); err != nil { - return 
fmt.Errorf("failed to get network %q: %w", c.Network, err) + networks, err := p.resolveNetworks(c) + if err != nil { + return err + } + + // Validate each network exists + for _, networkName := range networks { + if _, err := getNetwork(netClient, networkName); err != nil { + return fmt.Errorf("failed to get network %q: %w", networkName, err) + } } if _, err := getSubnet(netClient, c.Subnet); err != nil { @@ -537,6 +586,7 @@ func (p *provider) Validate(_ context.Context, spec clusterv1alpha1.MachineSpec) if _, err := getAvailabilityZone(computeClient, c); err != nil { return fmt.Errorf("failed to get availability zone %q: %w", c.AvailabilityZone, err) } + // Optional fields. if len(c.SecurityGroups) != 0 { for _, s := range c.SecurityGroups { @@ -554,7 +604,7 @@ func (p *provider) Validate(_ context.Context, spec clusterv1alpha1.MachineSpec) return nil } -func (p *provider) Create(_ context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { +func (p *provider) Create(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { cfg, _, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return nil, cloudprovidererrors.TerminalError{ @@ -565,28 +615,28 @@ func (p *provider) Create(_ context.Context, machine *clusterv1alpha1.Machine, d client, err := p.clientGetter(cfg) if err != nil { - return nil, osErrorToTerminalError(err, "failed to get a openstack client") + return nil, osErrorToTerminalError(log, err, "failed to get a openstack client") } computeClient, err := getNewComputeV2(client, cfg) if err != nil { - return nil, osErrorToTerminalError(err, "failed to get a openstack client") + return nil, osErrorToTerminalError(log, err, "failed to get a openstack client") } flavor, err := getFlavor(computeClient, cfg) if err != nil { - return nil, osErrorToTerminalError(err, 
fmt.Sprintf("failed to get flavor %s", cfg.Flavor)) + return nil, osErrorToTerminalError(log, err, fmt.Sprintf("failed to get flavor %s", cfg.Flavor)) } // Get OS Image Client. imageClient, err := goopenstack.NewImageServiceV2(client, gophercloud.EndpointOpts{Region: cfg.Region}) if err != nil { - return nil, osErrorToTerminalError(err, "failed to get a image client") + return nil, osErrorToTerminalError(log, err, "failed to get a image client") } image, err := getImageByName(imageClient, cfg) if err != nil { - return nil, osErrorToTerminalError(err, fmt.Sprintf("failed to get image %s", cfg.Image)) + return nil, osErrorToTerminalError(log, err, fmt.Sprintf("failed to get image %s", cfg.Image)) } netClient, err := goopenstack.NewNetworkV2(client, gophercloud.EndpointOpts{Region: cfg.Region}) @@ -594,17 +644,38 @@ func (p *provider) Create(_ context.Context, machine *clusterv1alpha1.Machine, d return nil, err } - network, err := getNetwork(netClient, cfg.Network) + networkNames, err := p.resolveNetworks(cfg) if err != nil { - return nil, osErrorToTerminalError(err, fmt.Sprintf("failed to get network %s", cfg.Network)) + return nil, cloudprovidererrors.TerminalError{ + Reason: common.InvalidConfigurationMachineError, + Message: fmt.Sprintf("Failed to resolve networks: %v", err), + } + } + + // Get network objects for all specified networks + var networks []osservers.Network + var primaryNetwork *osnetworks.Network // Keep track of first network for floating IP assignment + + for i, networkName := range networkNames { + network, err := getNetwork(netClient, networkName) + if err != nil { + return nil, osErrorToTerminalError(log, err, fmt.Sprintf("failed to get network %s", networkName)) + } + + networks = append(networks, osservers.Network{UUID: network.ID}) + + // Use first network as primary for floating IP assignment (backwards compatibility) + if i == 0 { + primaryNetwork = network + } } securityGroups := cfg.SecurityGroups if len(securityGroups) == 0 { - 
klog.V(2).Infof("creating security group %s for worker nodes", securityGroupName) - err = ensureKubernetesSecurityGroupExist(client, cfg.Region, securityGroupName) + log.Infow("Creating security group for worker nodes", "group", securityGroupName) + err = ensureKubernetesSecurityGroupExist(log, client, cfg.Region, securityGroupName) if err != nil { - return nil, fmt.Errorf("Error occurred creating security groups: %w", err) + return nil, fmt.Errorf("error occurred creating security groups: %w", err) } securityGroups = append(securityGroups, securityGroupName) } @@ -617,9 +688,10 @@ func (p *provider) Create(_ context.Context, machine *clusterv1alpha1.Machine, d Name: machine.Spec.Name, FlavorRef: flavor.ID, UserData: []byte(userdata), + ConfigDrive: &cfg.ConfigDrive, SecurityGroups: securityGroups, AvailabilityZone: cfg.AvailabilityZone, - Networks: []osservers.Network{{UUID: network.ID}}, + Networks: networks, Metadata: allTags, } @@ -654,7 +726,7 @@ func (p *provider) Create(_ context.Context, machine *clusterv1alpha1.Machine, d } if err := bootfromvolume.Create(computeClient, createOpts).ExtractInto(&server); err != nil { - return nil, osErrorToTerminalError(err, "failed to create server with volume") + return nil, osErrorToTerminalError(log, err, "failed to create server with volume") } } else { // Image ID should only be set in server options when block device @@ -663,18 +735,20 @@ func (p *provider) Create(_ context.Context, machine *clusterv1alpha1.Machine, d serverOpts.ImageRef = image.ID if err := osservers.Create(computeClient, createOpts).ExtractInto(&server); err != nil { - return nil, osErrorToTerminalError(err, "failed to create server") + return nil, osErrorToTerminalError(log, err, "failed to create server") } } if cfg.FloatingIPPool != "" { - if err := p.portReadinessWaiter(netClient, server.ID, network.ID, cfg.InstanceReadyCheckPeriod, cfg.InstanceReadyCheckTimeout); err != nil { - klog.V(2).Infof("port for instance %q did not became active due 
to: %v", server.ID, err) + instanceLog := log.With("instance", server.ID) + + if err := p.portReadinessWaiter(ctx, instanceLog, netClient, server.ID, primaryNetwork.ID, cfg.InstanceReadyCheckPeriod, cfg.InstanceReadyCheckTimeout); err != nil { + instanceLog.Infow("Port for instance did not become active", zap.Error(err)) } // Find a free FloatingIP or allocate a new one. - if err := assignFloatingIPToInstance(data.Update, machine, netClient, server.ID, cfg.FloatingIPPool, cfg.Region, network); err != nil { - defer deleteInstanceDueToFatalLogged(computeClient, server.ID) + if err := assignFloatingIPToInstance(instanceLog, data.Update, machine, netClient, server.ID, cfg.FloatingIPPool, cfg.Region, primaryNetwork); err != nil { + defer deleteInstanceDueToFatalLogged(instanceLog, computeClient, server.ID) return nil, fmt.Errorf("failed to assign a floating ip to instance %s: %w", server.ID, err) } } @@ -682,27 +756,27 @@ func (p *provider) Create(_ context.Context, machine *clusterv1alpha1.Machine, d return &osInstance{server: &server}, nil } -func waitForPort(netClient *gophercloud.ServiceClient, serverID string, networkID string, checkPeriod time.Duration, checkTimeout time.Duration) error { +func waitForPort(ctx context.Context, instanceLog *zap.SugaredLogger, netClient *gophercloud.ServiceClient, serverID string, networkID string, checkPeriod time.Duration, checkTimeout time.Duration) error { started := time.Now() - klog.V(2).Infof("Waiting for the port of instance %s to become active...", serverID) + instanceLog.Info("Waiting for the port to become active...") - portIsReady := func() (bool, error) { + portIsReady := func(context.Context) (bool, error) { port, err := getInstancePort(netClient, serverID, networkID) if err != nil { - tErr := osErrorToTerminalError(err, fmt.Sprintf("failed to get current instance port %s", serverID)) + tErr := osErrorToTerminalError(instanceLog, err, fmt.Sprintf("failed to get current instance port %s", serverID)) if isTerminalErr, _, 
_ := cloudprovidererrors.IsTerminalError(tErr); isTerminalErr { return true, tErr } // Only log the error but don't exit. in case of a network failure we want to retry. - klog.V(2).Infof("failed to get current instance port %s: %v", serverID, err) + instanceLog.Errorw("Failed to get current instance port", zap.Error(err)) return false, nil } return port.Status == "ACTIVE", nil } - if err := wait.Poll(checkPeriod, checkTimeout, portIsReady); err != nil { - if errors.Is(err, wait.ErrWaitTimeout) { + if err := wait.PollUntilContextTimeout(ctx, checkPeriod, checkTimeout, false, portIsReady); err != nil { + if wait.Interrupted(err) { // In case we have a timeout, include the timeout details return fmt.Errorf("instance port became not active after %f seconds", checkTimeout.Seconds()) } @@ -710,30 +784,30 @@ func waitForPort(netClient *gophercloud.ServiceClient, serverID string, networkI return fmt.Errorf("failed to wait for instance port to become active: %w", err) } - klog.V(2).Infof("Instance %q port became active after %f seconds", serverID, time.Since(started).Seconds()) + instanceLog.Infow("Instance port became active", "elapsed", time.Since(started).Round(time.Second)) return nil } -func deleteInstanceDueToFatalLogged(computeClient *gophercloud.ServiceClient, serverID string) { - klog.V(0).Infof("Deleting instance %s due to fatal error during machine creation...", serverID) +func deleteInstanceDueToFatalLogged(instanceLog *zap.SugaredLogger, computeClient *gophercloud.ServiceClient, serverID string) { + instanceLog.Info("Deleting instance due to fatal error during machine creation...") if err := osservers.Delete(computeClient, serverID).ExtractErr(); err != nil { utilruntime.HandleError(fmt.Errorf("failed to delete the instance %s. 
Please take care of manually deleting the instance: %w", serverID, err)) return } - klog.V(0).Infof("Instance %s got deleted", serverID) + instanceLog.Info("Instance got deleted") } -func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (bool, error) { +func (p *provider) Cleanup(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (bool, error) { var hasFloatingIPReleaseFinalizer bool if finalizers := sets.NewString(machine.Finalizers...); finalizers.Has(floatingIPReleaseFinalizer) { hasFloatingIPReleaseFinalizer = true } - instance, err := p.Get(ctx, machine, data) + instance, err := p.Get(ctx, log, machine, data) if err != nil { if errors.Is(err, cloudprovidererrors.ErrInstanceNotFound) { if hasFloatingIPReleaseFinalizer { - if err := p.cleanupFloatingIP(machine, data.Update); err != nil { + if err := p.cleanupFloatingIP(log, machine, data.Update); err != nil { return false, fmt.Errorf("failed to clean up floating ip: %w", err) } } @@ -752,26 +826,26 @@ func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine client, err := p.clientGetter(c) if err != nil { - return false, osErrorToTerminalError(err, "failed to get a openstack client") + return false, osErrorToTerminalError(log, err, "failed to get a openstack client") } computeClient, err := getNewComputeV2(client, c) if err != nil { - return false, osErrorToTerminalError(err, "failed to get compute client") + return false, osErrorToTerminalError(log, err, "failed to get compute client") } - if err := osservers.Delete(computeClient, instance.ID()).ExtractErr(); err != nil && !errors.Is(err, &gophercloud.ErrDefault404{}) { - return false, osErrorToTerminalError(err, "failed to delete instance") + if err := osservers.Delete(computeClient, instance.ID()).ExtractErr(); err != nil && !errors.As(err, &gophercloud.ErrDefault404{}) { + return false, 
osErrorToTerminalError(log, err, "failed to delete instance") } if hasFloatingIPReleaseFinalizer { - return false, p.cleanupFloatingIP(machine, data.Update) + return false, p.cleanupFloatingIP(log, machine, data.Update) } return false, nil } -func (p *provider) Get(_ context.Context, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { +func (p *provider) Get(_ context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { c, _, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return nil, cloudprovidererrors.TerminalError{ @@ -782,12 +856,12 @@ func (p *provider) Get(_ context.Context, machine *clusterv1alpha1.Machine, _ *c client, err := p.clientGetter(c) if err != nil { - return nil, osErrorToTerminalError(err, "failed to get a openstack client") + return nil, osErrorToTerminalError(log, err, "failed to get a openstack client") } computeClient, err := getNewComputeV2(client, c) if err != nil { - return nil, osErrorToTerminalError(err, "failed to get compute client") + return nil, osErrorToTerminalError(log, err, "failed to get compute client") } var allServers []serverWithExt @@ -796,13 +870,13 @@ func (p *provider) Get(_ context.Context, machine *clusterv1alpha1.Machine, _ *c var servers []serverWithExt err = osservers.ExtractServersInto(page, &servers) if err != nil { - return false, osErrorToTerminalError(err, "failed to extract instance info") + return false, osErrorToTerminalError(log, err, "failed to extract instance info") } allServers = append(allServers, servers...) 
return true, nil }) if err != nil { - return nil, osErrorToTerminalError(err, "failed to list instances") + return nil, osErrorToTerminalError(log, err, "failed to list instances") } for i, s := range allServers { @@ -814,7 +888,7 @@ func (p *provider) Get(_ context.Context, machine *clusterv1alpha1.Machine, _ *c return nil, cloudprovidererrors.ErrInstanceNotFound } -func (p *provider) MigrateUID(_ context.Context, machine *clusterv1alpha1.Machine, newUID types.UID) error { +func (p *provider) MigrateUID(_ context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, newUID types.UID) error { c, _, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return cloudprovidererrors.TerminalError{ @@ -825,12 +899,12 @@ func (p *provider) MigrateUID(_ context.Context, machine *clusterv1alpha1.Machin client, err := p.clientGetter(c) if err != nil { - return osErrorToTerminalError(err, "failed to get a openstack client") + return osErrorToTerminalError(log, err, "failed to get a openstack client") } computeClient, err := getNewComputeV2(client, c) if err != nil { - return osErrorToTerminalError(err, "failed to get compute client") + return osErrorToTerminalError(log, err, "failed to get compute client") } var allServers []serverWithExt @@ -839,13 +913,13 @@ func (p *provider) MigrateUID(_ context.Context, machine *clusterv1alpha1.Machin var servers []serverWithExt err = osservers.ExtractServersInto(page, &servers) if err != nil { - return false, osErrorToTerminalError(err, "failed to extract instance info") + return false, osErrorToTerminalError(log, err, "failed to extract instance info") } allServers = append(allServers, servers...) 
return true, nil }) if err != nil { - return osErrorToTerminalError(err, "failed to list instances") + return osErrorToTerminalError(log, err, "failed to list instances") } for _, s := range allServers { @@ -862,45 +936,6 @@ func (p *provider) MigrateUID(_ context.Context, machine *clusterv1alpha1.Machin return nil } -func (p *provider) GetCloudConfig(spec clusterv1alpha1.MachineSpec) (config string, name string, err error) { - c, _, _, err := p.getConfig(spec.ProviderSpec) - if err != nil { - return "", "", fmt.Errorf("failed to parse config: %w", err) - } - - cc := &openstacktypes.CloudConfig{ - Global: openstacktypes.GlobalOpts{ - AuthURL: c.IdentityEndpoint, - Username: c.Username, - Password: c.Password, - DomainName: c.DomainName, - ProjectName: c.ProjectName, - ProjectID: c.ProjectID, - Region: c.Region, - ApplicationCredentialSecret: c.ApplicationCredentialSecret, - ApplicationCredentialID: c.ApplicationCredentialID, - }, - LoadBalancer: openstacktypes.LoadBalancerOpts{ - ManageSecurityGroups: true, - }, - BlockStorage: openstacktypes.BlockStorageOpts{ - BSVersion: "auto", - TrustDevicePath: c.TrustDevicePath, - IgnoreVolumeAZ: true, - }, - Version: spec.Versions.Kubelet, - } - if c.NodeVolumeAttachLimit != nil { - cc.BlockStorage.NodeVolumeAttachLimit = *c.NodeVolumeAttachLimit - } - - s, err := openstacktypes.CloudConfigToString(cc) - if err != nil { - return "", "", fmt.Errorf("failed to convert the cloud-config to string: %w", err) - } - return s, "openstack", nil -} - func (p *provider) MachineMetricsLabels(machine *clusterv1alpha1.Machine) (map[string]string, error) { labels := make(map[string]string) @@ -932,6 +967,9 @@ func (d *osInstance) ID() string { } func (d *osInstance) ProviderID() string { + if d.server == nil || d.server.ID == "" { + return "" + } return "openstack:///" + d.server.ID } @@ -962,7 +1000,7 @@ func (d *osInstance) Status() instance.Status { // can be qualified as a "terminal" error, for more info see v1alpha1.MachineStatus // 
// if the given error doesn't qualify the error passed as an argument will be returned. -func osErrorToTerminalError(err error, msg string) error { +func osErrorToTerminalError(log *zap.SugaredLogger, err error, msg string) error { var errUnauthorized gophercloud.ErrDefault401 if errors.As(err, &errUnauthorized) { return cloudprovidererrors.TerminalError{ @@ -982,7 +1020,7 @@ func osErrorToTerminalError(err error, msg string) error { info := &forbiddenResponse{} if err := json.Unmarshal(errForbidden.Body, info); err != nil { // We just log here as we just do this to make the response more pretty - klog.V(0).Infof("failed to unmarshal response body from 403 response from OpenStack API: %v\n%s", err, errForbidden.Body) + log.Errorw("Failed to unmarshal response body from 403 response from OpenStack API", "body", errForbidden.Body, zap.Error(err)) return terr } @@ -997,7 +1035,7 @@ func osErrorToTerminalError(err error, msg string) error { return terr } - return fmt.Errorf("%s, due to %w", msg, err) + return fmt.Errorf("%s: %w", msg, err) } // forbiddenResponse is a potential response body from the OpenStack API when the request is forbidden (code: 403). 
@@ -1008,10 +1046,10 @@ type forbiddenResponse struct { } `json:"forbidden"` } -func (p *provider) cleanupFloatingIP(machine *clusterv1alpha1.Machine, updater cloudprovidertypes.MachineUpdater) error { +func (p *provider) cleanupFloatingIP(log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, updater cloudprovidertypes.MachineUpdater) error { floatingIPID, exists := machine.Annotations[floatingIPIDAnnotationKey] if !exists { - return osErrorToTerminalError(fmt.Errorf("failed to release floating ip"), + return osErrorToTerminalError(log, fmt.Errorf("failed to release floating ip"), fmt.Sprintf("%s finalizer exists but %s annotation does not", floatingIPReleaseFinalizer, floatingIPIDAnnotationKey)) } @@ -1025,13 +1063,13 @@ func (p *provider) cleanupFloatingIP(machine *clusterv1alpha1.Machine, updater c client, err := p.clientGetter(c) if err != nil { - return osErrorToTerminalError(err, "failed to get a openstack client") + return osErrorToTerminalError(log, err, "failed to get a openstack client") } netClient, err := goopenstack.NewNetworkV2(client, gophercloud.EndpointOpts{Region: c.Region}) if err != nil { return fmt.Errorf("failed to create the networkv2 client for region %s: %w", c.Region, err) } - if err := osfloatingips.Delete(netClient, floatingIPID).ExtractErr(); err != nil && !errors.Is(err, &gophercloud.ErrDefault404{}) { + if err := osfloatingips.Delete(netClient, floatingIPID).ExtractErr(); err != nil && !errors.As(err, &gophercloud.ErrDefault404{}) { return fmt.Errorf("failed to delete floating ip %s: %w", floatingIPID, err) } if err := updater(machine, func(m *clusterv1alpha1.Machine) { @@ -1045,7 +1083,7 @@ func (p *provider) cleanupFloatingIP(machine *clusterv1alpha1.Machine, updater c return nil } -func assignFloatingIPToInstance(machineUpdater cloudprovidertypes.MachineUpdater, machine *clusterv1alpha1.Machine, netClient *gophercloud.ServiceClient, instanceID, floatingIPPoolName, region string, network *osnetworks.Network) error { +func 
assignFloatingIPToInstance(instanceLog *zap.SugaredLogger, machineUpdater cloudprovidertypes.MachineUpdater, machine *clusterv1alpha1.Machine, netClient *gophercloud.ServiceClient, instanceID, floatingIPPoolName, region string, network *osnetworks.Network) error { port, err := getInstancePort(netClient, instanceID, network.ID) if err != nil { return fmt.Errorf("failed to get instance port for network %s in region %s: %w", network.ID, region, err) @@ -1053,25 +1091,23 @@ func assignFloatingIPToInstance(machineUpdater cloudprovidertypes.MachineUpdater floatingIPPool, err := getNetwork(netClient, floatingIPPoolName) if err != nil { - return osErrorToTerminalError(err, fmt.Sprintf("failed to get floating ip pool %q", floatingIPPoolName)) + return osErrorToTerminalError(instanceLog, err, fmt.Sprintf("failed to get floating IP pool %q", floatingIPPoolName)) } - // We're only interested in the part which is vulnerable to concurrent access - started := time.Now() - klog.V(2).Infof("Assigning a floating IP to instance %s", instanceID) + instanceLog.Info("Assigning a floating IP to instance") floatingIPAssignLock.Lock() defer floatingIPAssignLock.Unlock() freeFloatingIps, err := getFreeFloatingIPs(netClient, floatingIPPool) if err != nil { - return osErrorToTerminalError(err, "failed to get free floating ips") + return osErrorToTerminalError(instanceLog, err, "failed to get free floating ips") } var ip *osfloatingips.FloatingIP if len(freeFloatingIps) < 1 { if ip, err = createFloatingIP(netClient, port.ID, floatingIPPool); err != nil { - return osErrorToTerminalError(err, "failed to allocate a floating ip") + return osErrorToTerminalError(instanceLog, err, "failed to allocate a floating ip") } if err := machineUpdater(machine, func(m *clusterv1alpha1.Machine) { m.Finalizers = append(m.Finalizers, floatingIPReleaseFinalizer) @@ -1088,7 +1124,7 @@ func assignFloatingIPToInstance(machineUpdater cloudprovidertypes.MachineUpdater PortID: &port.ID, }).Extract() if err != nil { - 
return fmt.Errorf("failed to update FloatingIP %s(%s): %w", freeIP.ID, freeIP.FloatingIP, err) + return fmt.Errorf("failed to update floating IP %s(%s): %w", freeIP.ID, freeIP.FloatingIP, err) } // We're now going to wait 3 seconds and check if the IP is still ours. If not, we're going to fail @@ -1096,19 +1132,18 @@ func assignFloatingIPToInstance(machineUpdater cloudprovidertypes.MachineUpdater time.Sleep(floatingReassignIPCheckPeriod) currentIP, err := osfloatingips.Get(netClient, ip.ID).Extract() if err != nil { - return fmt.Errorf("failed to load FloatingIP %s after assignment has been done: %w", ip.FloatingIP, err) + return fmt.Errorf("failed to load floating IP %s after assignment has been done: %w", ip.FloatingIP, err) } // Verify if the port is still the one we set it to if currentIP.PortID != port.ID { return fmt.Errorf("floatingIP %s got reassigned", currentIP.FloatingIP) } } - secondsTook := time.Since(started).Seconds() - klog.V(2).Infof("Successfully assigned the FloatingIP %s to instance %s. Took %f seconds(without the recheck wait period %f seconds). 
", ip.FloatingIP, instanceID, secondsTook, floatingReassignIPCheckPeriod.Seconds()) + instanceLog.Infow("Successfully assigned the floating IP to instance", "ip", ip.FloatingIP) return nil } -func (p *provider) SetMetricsForMachines(machines clusterv1alpha1.MachineList) error { +func (p *provider) SetMetricsForMachines(_ clusterv1alpha1.MachineList) error { return nil } diff --git a/pkg/cloudprovider/provider/openstack/provider_test.go b/pkg/cloudprovider/provider/openstack/provider_test.go index 5b4583daa..ec130a6e4 100644 --- a/pkg/cloudprovider/provider/openstack/provider_test.go +++ b/pkg/cloudprovider/provider/openstack/provider_test.go @@ -30,20 +30,22 @@ import ( "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" th "github.com/gophercloud/gophercloud/testhelper" "github.com/gophercloud/gophercloud/testhelper/client" + "go.uber.org/zap" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - cloudprovidertesting "github.com/kubermatic/machine-controller/pkg/cloudprovider/testing" - cloudprovidertypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/types" - "github.com/kubermatic/machine-controller/pkg/providerconfig" + cloudprovidertesting "k8c.io/machine-controller/pkg/cloudprovider/testing" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + "k8c.io/machine-controller/sdk/providerconfig/configvar" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" fakectrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" ) const expectedServerRequest = `{ "server": { "availability_zone": "eu-de-01", + "config_drive": false, "flavorRef": "1", "imageRef": "1bea47ed-f6a9-463b-b423-14b9cca9ad27", "metadata": { @@ -65,12 +67,12 @@ const expectedServerRequest = `{ ], "user_data": "ZmFrZS11c2VyZGF0YQ==" } -} -` +}` const expectedBlockDeviceBootRequest = `{ "server": { "availability_zone": "eu-de-01", + 
"config_drive": false, "block_device_mapping_v2": [ { "boot_index": 0, @@ -107,6 +109,7 @@ const expectedBlockDeviceBootRequest = `{ const expectedBlockDeviceBootVolumeTypeRequest = `{ "server": { "availability_zone": "eu-de-01", + "config_drive": false, "block_device_mapping_v2": [ { "boot_index": 0, @@ -141,6 +144,36 @@ const expectedBlockDeviceBootVolumeTypeRequest = `{ } }` +const expectedMultipleNetworksRequest = `{ + "server": { + "availability_zone": "eu-de-01", + "config_drive": false, + "flavorRef": "1", + "imageRef": "1bea47ed-f6a9-463b-b423-14b9cca9ad27", + "metadata": { + "kubernetes-cluster": "xyz", + "machine-uid": "", + "system-cluster": "zyx", + "system-project": "xxx" + }, + "name": "test", + "networks": [ + { + "uuid": "d32019d3-bc6e-4319-9c1d-6722fc136a22" + }, + { + "uuid": "1df1458e-bd0c-423d-b201-2e5f56c94714" + } + ], + "security_groups": [ + { + "name": "kubernetes-xyz" + } + ], + "user_data": "ZmFrZS11c2VyZGF0YQ==" + } +}` + type openstackProviderSpecConf struct { IdentityEndpointURL string RootDiskSizeGB *int32 @@ -151,70 +184,97 @@ type openstackProviderSpecConf struct { ProjectID string TenantID string TenantName string + ConfigDrive bool ComputeAPIVersion string + Network string + Networks []string + Subnet string } func (o openstackProviderSpecConf) rawProviderSpec(t *testing.T) []byte { var out bytes.Buffer - tmpl, err := template.New("test").Parse(`{ - "cloudProvider": "openstack", - "cloudProviderSpec": { - "availabilityZone": "eu-de-01", - "domainName": "openstack_domain_name", - "flavor": "m1.tiny", - "identityEndpoint": "{{ .IdentityEndpointURL }}", - "image": "Standard_Ubuntu_18.04_latest", - "network": "public", - "nodeVolumeAttachLimit": null, - "region": "eu-de", - "instanceReadyCheckPeriod": "2m", - "instanceReadyCheckTimeout": "2m", - {{- if .ComputeAPIVersion }} - "computeAPIVersion": {{ .ComputeAPIVersion }}, - {{- end }} - {{- if .RootDiskSizeGB }} - "rootDiskSizeGB": {{ .RootDiskSizeGB }}, - {{- end }} - {{- if 
.RootDiskVolumeType }} - "rootDiskVolumeType": "{{ .RootDiskVolumeType }}", - {{- end }} - "securityGroups": [ - "kubernetes-xyz" - ], - "subnet": "subnetid", - "tags": { - "kubernetes-cluster": "xyz", - "system-cluster": "zyx", - "system-project": "xxx" + tmplStr := `{ + "cloudProvider": "openstack", + "cloudProviderSpec": { + "availabilityZone": "eu-de-01", + "domainName": "openstack_domain_name", + "flavor": "m1.tiny", + "identityEndpoint": "{{ .IdentityEndpointURL }}", + "image": "Standard_Ubuntu_18.04_latest", + {{- if .Network }} + "network": "{{ .Network }}", + {{- end }} + {{- if .Subnet }} + "subnet": "{{ .Subnet }}", + {{- end }} + {{- if .Networks }} + "networks": [ + {{- range $i, $e := .Networks }} + {{- if $i }},{{- end }} + "{{ $e }}" + {{- end }} + ], + {{- end }} + "nodeVolumeAttachLimit": null, + "region": "eu-de", + "instanceReadyCheckPeriod": "2m", + "instanceReadyCheckTimeout": "2m", + {{- if .ComputeAPIVersion }} + "computeAPIVersion": "{{ .ComputeAPIVersion }}", + {{- end }} + {{- if .RootDiskSizeGB }} + "rootDiskSizeGB": {{ .RootDiskSizeGB }}, + {{- end }} + {{- if .RootDiskVolumeType }} + "rootDiskVolumeType": "{{ .RootDiskVolumeType }}", + {{- end }} + "securityGroups": [ + "kubernetes-xyz" + ], + "tags": { + "kubernetes-cluster": "xyz", + "system-cluster": "zyx", + "system-project": "xxx" + }, + {{- if .ApplicationCredentialID }} + "applicationCredentialID": "{{ .ApplicationCredentialID }}", + "applicationCredentialSecret": "{{ .ApplicationCredentialSecret }}", + {{- else }} + {{- if .ProjectID }} + "projectID": "{{ .ProjectID }}", + "projectName": "{{ .ProjectName }}", + {{- end }} + {{- if .TenantID }} + "tenantID": "{{ .TenantID }}", + "tenantName": "{{ .TenantName }}", + {{- end }} + "username": "dummy", + "password": "this_is_a_password", + {{- end }} + "tokenId": "", + "trustDevicePath": false }, - {{- if .ApplicationCredentialID }} - "applicationCredentialID": "{{ .ApplicationCredentialID }}", - "applicationCredentialSecret": "{{ 
.ApplicationCredentialSecret }}", - {{- else }} - {{ if .ProjectID }} - "projectID": "{{ .ProjectID }}", - "projectName": "{{ .ProjectName }}", - {{- end }} - {{- if .TenantID }} - "tenantID": "{{ .TenantID }}", - "tenantName": "{{ .TenantName }}", - {{- end }} - "username": "dummy", - "password": "this_is_a_password", - {{- end }} - "tokenId": "", - "trustDevicePath": false - }, - "operatingSystem": "flatcar", - "operatingSystemSpec": { - "disableAutoUpdate": false, - "disableLocksmithD": true, - "disableUpdateEngine": false - } -}`) + "operatingSystem": "flatcar", + "operatingSystemSpec": { + "disableAutoUpdate": false, + "disableLocksmithD": true, + "disableUpdateEngine": false + } + }` + + tmpl, err := template.New("test").Parse(tmplStr) if err != nil { t.Fatalf("Error occurred while parsing openstack provider spec template: %v", err) } + + if o.Networks == nil && o.Network == "" { + o.Network = "public" + } + + if o.Subnet == "" { + o.Subnet = "subnetid" + } + err = tmpl.Execute(&out, o) if err != nil { t.Fatalf("Error occurred while executing openstack provider spec template: %v", err) @@ -240,13 +300,13 @@ func TestCreateServer(t *testing.T) { }, { name: "Custom disk size", - specConf: openstackProviderSpecConf{RootDiskSizeGB: pointer.Int32(10)}, + specConf: openstackProviderSpecConf{RootDiskSizeGB: ptr.To(int32(10))}, userdata: "fake-userdata", wantServerReq: expectedBlockDeviceBootRequest, }, { name: "Custom disk type", - specConf: openstackProviderSpecConf{RootDiskSizeGB: pointer.Int32(10), RootDiskVolumeType: "ssd"}, + specConf: openstackProviderSpecConf{RootDiskSizeGB: ptr.To(int32(10)), RootDiskVolumeType: "ssd"}, userdata: "fake-userdata", wantServerReq: expectedBlockDeviceBootVolumeTypeRequest, }, @@ -262,6 +322,36 @@ func TestCreateServer(t *testing.T) { userdata: "fake-userdata", wantServerReq: expectedServerRequest, }, + { + name: "Backward compatibility with single network", + specConf: openstackProviderSpecConf{Network: "public", Subnet: 
"subnetid"}, + userdata: "fake-userdata", + wantServerReq: expectedServerRequest, + }, + { + name: "Networks key used with single network", + specConf: openstackProviderSpecConf{Networks: []string{"public"}}, + userdata: "fake-userdata", + wantServerReq: expectedServerRequest, + }, + { + name: "Duplicate networks provided", + specConf: openstackProviderSpecConf{Networks: []string{"public", "public"}}, + userdata: "fake-userdata", + wantServerReq: expectedServerRequest, + }, + { + name: "Multiple networks provided", + specConf: openstackProviderSpecConf{Networks: []string{"public", "private"}}, + userdata: "fake-userdata", + wantServerReq: expectedMultipleNetworksRequest, + }, + { + name: "Both network and networks specified (network becomes primary)", + specConf: openstackProviderSpecConf{Network: "public", Networks: []string{"private"}}, + userdata: "fake-userdata", + wantServerReq: expectedMultipleNetworksRequest, + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { @@ -270,18 +360,18 @@ func TestCreateServer(t *testing.T) { ExpectServerCreated(t, tt.wantServerReq) p := &provider{ // Note that configVarResolver is not used in this test as the getConfigFunc is mocked. 
- configVarResolver: providerconfig.NewConfigVarResolver(context.Background(), fakectrlruntimeclient.NewClientBuilder().Build()), + configVarResolver: configvar.NewResolver(context.Background(), fakectrlruntimeclient.NewClientBuilder().Build()), // mock client config getter - clientGetter: func(c *Config) (*gophercloud.ProviderClient, error) { + clientGetter: func(*Config) (*gophercloud.ProviderClient, error) { pc := client.ServiceClient() // endpoint locator used to redirect to local test endpoint - pc.ProviderClient.EndpointLocator = func(_ gophercloud.EndpointOpts) (string, error) { + pc.EndpointLocator = func(_ gophercloud.EndpointOpts) (string, error) { return pc.Endpoint, nil } return pc.ProviderClient, nil }, // mock server readiness checker - portReadinessWaiter: func(*gophercloud.ServiceClient, string, string, time.Duration, time.Duration) error { + portReadinessWaiter: func(context.Context, *zap.SugaredLogger, *gophercloud.ServiceClient, string, string, time.Duration, time.Duration) error { return nil }, } @@ -295,7 +385,7 @@ func TestCreateServer(t *testing.T) { // It only verifies that the content of the create request matches // the expectation // TODO(irozzo) check the returned instance too - _, err := p.Create(context.Background(), m, tt.data, tt.userdata) + _, err := p.Create(context.Background(), zap.NewNop().Sugar(), m, tt.data, tt.userdata) if (err != nil) != tt.wantErr { t.Errorf("provider.Create() or = %v, wantErr %v", err, tt.wantErr) return @@ -334,11 +424,11 @@ func TestProjectAuthVarsAreCorrectlyLoaded(t *testing.T) { t.Run(tt.name, func(t *testing.T) { p := &provider{ // Note that configVarResolver is not used in this test as the getConfigFunc is mocked. - configVarResolver: providerconfig.NewConfigVarResolver(context.Background(), fakectrlruntimeclient. + configVarResolver: configvar.NewResolver(context.Background(), fakectrlruntimeclient. NewClientBuilder(). 
Build()), } - conf, _, _, _ := p.getConfig(v1alpha1.ProviderSpec{ + conf, _, _, _ := p.getConfig(clusterv1alpha1.ProviderSpec{ Value: &runtime.RawExtension{ Raw: tt.specConf.rawProviderSpec(t), }, @@ -354,6 +444,78 @@ func TestProjectAuthVarsAreCorrectlyLoaded(t *testing.T) { } } +func TestResolveNetworks(t *testing.T) { + tests := []struct { + name string + cfg *Config + expected []string + wantErr bool + }{ + { + name: "Only networks specified", + cfg: &Config{Networks: []string{"public", "private"}}, + expected: []string{"public", "private"}, + wantErr: false, + }, + { + name: "Only network specified (backward compatibility)", + cfg: &Config{Network: "public"}, + expected: []string{"public"}, + wantErr: false, + }, + { + name: "Both network and networks specified (network becomes primary)", + cfg: &Config{Network: "public", Networks: []string{"private"}}, + expected: []string{"public", "private"}, + wantErr: false, + }, + { + name: "Handle duplicated networks", + cfg: &Config{Network: "public", Networks: []string{"public"}}, + expected: []string{"public"}, + wantErr: false, + }, + { + name: "Neither specified", + cfg: &Config{}, + expected: nil, + wantErr: true, + }, + { + name: "Empty networks array", + cfg: &Config{Networks: []string{}}, + expected: nil, + wantErr: true, + }, + } + + p := &provider{} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := p.resolveNetworks(tt.cfg) + + if (err != nil) != tt.wantErr { + t.Errorf("resolveNetworks() error = %v, wantErr %v", err, tt.wantErr) + return + } + + if !tt.wantErr { + if len(result) != len(tt.expected) { + t.Errorf("result = %v, expected %v", result, tt.expected) + t.Errorf("resolveNetworks() result length = %v, expected length %v", len(result), len(tt.expected)) + return + } + + for i, network := range result { + if network != tt.expected[i] { + t.Errorf("resolveNetworks() result[%d] = %v, expected %v", i, network, tt.expected[i]) + } + } + } + }) + } +} + type ServerResponse 
struct { Server servers.Server `json:"server"` } @@ -369,7 +531,7 @@ func ExpectServerCreated(t *testing.T, expectedServer string) { // expectedServer copied into the response (e.g. name). err := json.Unmarshal([]byte(expectedServer), &res) if err != nil { - t.Fatalf("Error occurred while unmarshaling the expected server manifest.") + t.Fatalf("Error occurred while unmarshaling the expected server manifest: %v", err) } res.Server.ID = "1bea47ed-f6a9-463b-b423-14b9cca9ad27" srvRes, err := json.Marshal(res) @@ -535,6 +697,24 @@ func ExpectServerCreated(t *testing.T, expectedServer string) { "port_security_enabled": true, "dns_domain": "local.", "mtu": 1500 + }, + { + "status": "ACTIVE", + "subnets": [ + "55b45ada-e384-4130-a70b-17df1c3e1d3d" + ], + "name": "private", + "admin_state_up": true, + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "shared": false, + "id": "1df1458e-bd0c-423d-b201-2e5f56c94714", + "provider:segmentation_id": 9876543211, + "provider:physical_network": null, + "provider:network_type": "local", + "router:external": false, + "port_security_enabled": true, + "dns_domain": "local.", + "mtu": 1500 } ] }`) diff --git a/pkg/cloudprovider/provider/openstack/types/cloudconfig.go b/pkg/cloudprovider/provider/openstack/types/cloudconfig.go deleted file mode 100644 index 8015fb2fa..000000000 --- a/pkg/cloudprovider/provider/openstack/types/cloudconfig.go +++ /dev/null @@ -1,144 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package types - -import ( - "bytes" - "fmt" - "strconv" - "text/template" - - "github.com/Masterminds/sprig/v3" - - "github.com/kubermatic/machine-controller/pkg/ini" -) - -// use-octavia is enabled by default in CCM since v1.17.0, and disabled by -// default with the in-tree cloud provider. -// https://v1-18.docs.kubernetes.io/docs/concepts/cluster-administration/cloud-providers/#load-balancer -const ( - cloudConfigTpl = `[Global] -auth-url = {{ .Global.AuthURL | iniEscape }} -{{- if .Global.ApplicationCredentialID }} -application-credential-id = {{ .Global.ApplicationCredentialID | iniEscape }} -application-credential-secret = {{ .Global.ApplicationCredentialSecret | iniEscape }} -{{- else }} -username = {{ .Global.Username | iniEscape }} -password = {{ .Global.Password | iniEscape }} -tenant-name = {{ .Global.ProjectName | iniEscape }} -tenant-id = {{ .Global.ProjectID | iniEscape }} -domain-name = {{ .Global.DomainName | iniEscape }} -{{- end }} -region = {{ .Global.Region | iniEscape }} - -[LoadBalancer] -lb-version = {{ default "v2" .LoadBalancer.LBVersion | iniEscape }} -subnet-id = {{ .LoadBalancer.SubnetID | iniEscape }} -floating-network-id = {{ .LoadBalancer.FloatingNetworkID | iniEscape }} -lb-method = {{ default "ROUND_ROBIN" .LoadBalancer.LBMethod | iniEscape }} -lb-provider = {{ .LoadBalancer.LBProvider | iniEscape }} -{{- if .LoadBalancer.UseOctavia }} -use-octavia = {{ .LoadBalancer.UseOctavia | boolPtr }} -{{- end }} - -{{- if .LoadBalancer.CreateMonitor }} -create-monitor = {{ .LoadBalancer.CreateMonitor }} -monitor-delay = {{ .LoadBalancer.MonitorDelay }} -monitor-timeout = {{ .LoadBalancer.MonitorTimeout }} -monitor-max-retries = {{ .LoadBalancer.MonitorMaxRetries }} -{{- end}} -{{- if semverCompare "~1.9.10 || ~1.10.6 || ~1.11.1 || >=1.12.*" .Version }} -manage-security-groups = {{ .LoadBalancer.ManageSecurityGroups }} -{{- end }} - 
-[BlockStorage] -{{- if semverCompare ">=1.9" .Version }} -ignore-volume-az = {{ .BlockStorage.IgnoreVolumeAZ }} -{{- end }} -trust-device-path = {{ .BlockStorage.TrustDevicePath }} -bs-version = {{ default "auto" .BlockStorage.BSVersion | iniEscape }} -{{- if .BlockStorage.NodeVolumeAttachLimit }} -node-volume-attach-limit = {{ .BlockStorage.NodeVolumeAttachLimit }} -{{- end }} -` -) - -type LoadBalancerOpts struct { - LBVersion string `gcfg:"lb-version"` - SubnetID string `gcfg:"subnet-id"` - FloatingNetworkID string `gcfg:"floating-network-id"` - LBMethod string `gcfg:"lb-method"` - LBProvider string `gcfg:"lb-provider"` - CreateMonitor bool `gcfg:"create-monitor"` - MonitorDelay ini.Duration `gcfg:"monitor-delay"` - MonitorTimeout ini.Duration `gcfg:"monitor-timeout"` - MonitorMaxRetries uint `gcfg:"monitor-max-retries"` - ManageSecurityGroups bool `gcfg:"manage-security-groups"` - UseOctavia *bool `gcfg:"use-octavia"` -} - -type BlockStorageOpts struct { - BSVersion string `gcfg:"bs-version"` - TrustDevicePath bool `gcfg:"trust-device-path"` - IgnoreVolumeAZ bool `gcfg:"ignore-volume-az"` - NodeVolumeAttachLimit uint `gcfg:"node-volume-attach-limit"` -} - -type GlobalOpts struct { - AuthURL string `gcfg:"auth-url"` - Username string - Password string - ApplicationCredentialID string `gcfg:"application-credential-id"` - ApplicationCredentialSecret string `gcfg:"application-credential-secret"` - - // project name formerly known as tenant name. - // it serialized as tenant-name because openstack CCM reads only tenant-name. In CCM, internally project and tenant - // are stored into tenant-name. - ProjectName string `gcfg:"tenant-name"` - - // project id formerly known as tenant id. - // serialized as tenant-id for same reason as ProjectName - ProjectID string `gcfg:"tenant-id"` - DomainName string `gcfg:"domain-name"` - Region string -} - -// CloudConfig is used to read and store information from the cloud configuration file. 
-type CloudConfig struct { - Global GlobalOpts - LoadBalancer LoadBalancerOpts - BlockStorage BlockStorageOpts - Version string -} - -func CloudConfigToString(c *CloudConfig) (string, error) { - funcMap := sprig.TxtFuncMap() - funcMap["iniEscape"] = ini.Escape - funcMap["boolPtr"] = func(b *bool) string { return strconv.FormatBool(*b) } - - tpl, err := template.New("cloud-config").Funcs(funcMap).Parse(cloudConfigTpl) - if err != nil { - return "", fmt.Errorf("failed to parse the cloud config template: %w", err) - } - - buf := &bytes.Buffer{} - if err := tpl.Execute(buf, c); err != nil { - return "", fmt.Errorf("failed to execute cloud config template: %w", err) - } - - return buf.String(), nil -} diff --git a/pkg/cloudprovider/provider/openstack/types/cloudconfig_test.go b/pkg/cloudprovider/provider/openstack/types/cloudconfig_test.go deleted file mode 100644 index 86d505036..000000000 --- a/pkg/cloudprovider/provider/openstack/types/cloudconfig_test.go +++ /dev/null @@ -1,204 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package types - -import ( - "flag" - "testing" - "time" - - "gopkg.in/gcfg.v1" - - "github.com/kubermatic/machine-controller/pkg/ini" - testhelper "github.com/kubermatic/machine-controller/pkg/test" - - "k8s.io/utils/pointer" -) - -var update = flag.Bool("update", false, "update testdata files") - -func TestCloudConfigToString(t *testing.T) { - tests := []struct { - name string - config *CloudConfig - }{ - { - name: "simple-config", - config: &CloudConfig{ - Global: GlobalOpts{ - AuthURL: "https://127.0.0.1:8443", - Username: "admin", - Password: "password", - DomainName: "Default", - ProjectName: "Test", - Region: "eu-central1", - }, - BlockStorage: BlockStorageOpts{ - BSVersion: "v2", - IgnoreVolumeAZ: true, - TrustDevicePath: true, - NodeVolumeAttachLimit: 25, - }, - LoadBalancer: LoadBalancerOpts{ - ManageSecurityGroups: true, - }, - Version: "1.10.0", - }, - }, - { - name: "use-octavia-explicitly-enabled", - config: &CloudConfig{ - Global: GlobalOpts{ - AuthURL: "https://127.0.0.1:8443", - Username: "admin", - Password: "password", - DomainName: "Default", - ProjectName: "Test", - Region: "eu-central1", - }, - BlockStorage: BlockStorageOpts{ - BSVersion: "v2", - IgnoreVolumeAZ: true, - TrustDevicePath: true, - NodeVolumeAttachLimit: 25, - }, - LoadBalancer: LoadBalancerOpts{ - ManageSecurityGroups: true, - UseOctavia: pointer.Bool(true), - }, - Version: "1.10.0", - }, - }, - { - name: "use-octavia-explicitly-disabled", - config: &CloudConfig{ - Global: GlobalOpts{ - AuthURL: "https://127.0.0.1:8443", - Username: "admin", - Password: "password", - DomainName: "Default", - ProjectName: "Test", - Region: "eu-central1", - }, - BlockStorage: BlockStorageOpts{ - BSVersion: "v2", - IgnoreVolumeAZ: true, - TrustDevicePath: true, - NodeVolumeAttachLimit: 25, - }, - LoadBalancer: LoadBalancerOpts{ - ManageSecurityGroups: true, - UseOctavia: pointer.Bool(false), - }, - Version: "1.10.0", - }, - }, - { - name: "config-with-special-chars", - config: &CloudConfig{ - 
Global: GlobalOpts{ - AuthURL: "https://127.0.0.1:8443", - Username: "admin", - Password: `.)\^x[tt0L@};p 0 { - vmSpecSection.DiskSection.DiskSettings[i].Iops = pointer.Int64(*config.DiskIOPS) + vmSpecSection.DiskSection.DiskSettings[i].IopsAllocation = &vcdapitypes.IopsResource{ + Reservation: *config.DiskIOPS, + } needsDiskRecomposition = true } if config.DiskBusType != nil && *config.DiskBusType != "" { diff --git a/pkg/cloudprovider/provider/vmwareclouddirector/provider.go b/pkg/cloudprovider/provider/vmwareclouddirector/provider.go index 4c3cf16ce..bc92e48e2 100644 --- a/pkg/cloudprovider/provider/vmwareclouddirector/provider.go +++ b/pkg/cloudprovider/provider/vmwareclouddirector/provider.go @@ -24,19 +24,19 @@ import ( "net/url" "github.com/vmware/go-vcloud-director/v2/govcd" + "go.uber.org/zap" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - cloudprovidererrors "github.com/kubermatic/machine-controller/pkg/cloudprovider/errors" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/instance" - vcdtypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/vmwareclouddirector/types" - cloudprovidertypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/types" - "github.com/kubermatic/machine-controller/pkg/providerconfig" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + cloudprovidererrors "k8c.io/machine-controller/pkg/cloudprovider/errors" + "k8c.io/machine-controller/pkg/cloudprovider/instance" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + vcdtypes "k8c.io/machine-controller/sdk/cloudprovider/vmwareclouddirector" + "k8c.io/machine-controller/sdk/providerconfig" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" ) const ( @@ -55,12 +55,13 @@ const ( ) type 
provider struct { - configVarResolver *providerconfig.ConfigVarResolver + configVarResolver providerconfig.ConfigVarResolver } type Auth struct { Username string Password string + APIToken string Organization string URL string VDC string @@ -78,7 +79,7 @@ type Config struct { SizingPolicy *string // Network configuration. - Network string + Networks []string IPAllocationMode vcdtypes.IPAllocationMode // Compute configuration. @@ -97,7 +98,7 @@ type Config struct { } // New returns a VMware Cloud Director provider. -func New(configVarResolver *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { +func New(configVarResolver providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return &provider{configVarResolver: configVarResolver} } @@ -121,6 +122,9 @@ func (s Server) ID() string { } func (s Server) ProviderID() string { + if s.ID() == "" { + return "" + } return fmt.Sprintf("vmware-cloud-director://%s", s.ID()) } @@ -132,7 +136,7 @@ func (s Server) Status() instance.Status { return s.status } -func (p *provider) AddDefaults(spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { +func (p *provider) AddDefaults(_ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { _, _, rawConfig, err := p.getConfig(spec.ProviderSpec) if err != nil { return spec, err @@ -145,22 +149,22 @@ func (p *provider) AddDefaults(spec clusterv1alpha1.MachineSpec) (clusterv1alpha // These defaults will have no effect if DiskSizeGB is not specified if rawConfig.DiskBusType == nil { - rawConfig.DiskBusType = pointer.String(defaultDiskType) + rawConfig.DiskBusType = ptr.To(defaultDiskType) } if rawConfig.DiskIOPS == nil { - rawConfig.DiskIOPS = pointer.Int64(defaultDiskIOPS) + rawConfig.DiskIOPS = ptr.To(int64(defaultDiskIOPS)) } spec.ProviderSpec.Value, err = setProviderSpec(*rawConfig, spec.ProviderSpec) return spec, err } -func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine, data 
*cloudprovidertypes.ProviderData) (bool, error) { +func (p *provider) Cleanup(_ context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (bool, error) { c, _, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return false, fmt.Errorf("failed to parse config: %w", err) } - client, err := NewClient(c.Username, c.Password, c.Organization, c.URL, c.VDC, c.AllowInsecure) + client, err := NewClient(c.Username, c.Password, c.APIToken, c.Organization, c.URL, c.VDC, c.AllowInsecure) if err != nil { return false, fmt.Errorf("failed to create VMware Cloud Director client: %w", err) } @@ -195,25 +199,25 @@ func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine return true, nil } -func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { +func (p *provider) Create(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { vm, err := p.create(ctx, machine, userdata) if err != nil { - _, cleanupErr := p.Cleanup(ctx, machine, data) + _, cleanupErr := p.Cleanup(ctx, log, machine, data) if cleanupErr != nil { - return nil, fmt.Errorf("cleaning up failed with err %v after creation failed with err %w", cleanupErr, err) + return nil, fmt.Errorf("cleaning up failed with err %w after creation failed with err %w", cleanupErr, err) } return nil, err } return vm, nil } -func (p *provider) create(ctx context.Context, machine *clusterv1alpha1.Machine, userdata string) (instance.Instance, error) { +func (p *provider) create(_ context.Context, machine *clusterv1alpha1.Machine, userdata string) (instance.Instance, error) { c, providerConfig, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return nil, fmt.Errorf("failed to parse config: %w", err) } - client, err := 
NewClient(c.Username, c.Password, c.Organization, c.URL, c.VDC, c.AllowInsecure) + client, err := NewClient(c.Username, c.Password, c.APIToken, c.Organization, c.URL, c.VDC, c.AllowInsecure) if err != nil { return nil, fmt.Errorf("failed to create VMware Cloud Director client: %w", err) } @@ -250,7 +254,7 @@ func (p *provider) create(ctx context.Context, machine *clusterv1alpha1.Machine, // 5. Before powering on the VM, configure customization to attach userdata with the VM // update guest properties. - err = setUserData(userdata, vm, providerConfig.OperatingSystem == providerconfigtypes.OperatingSystemFlatcar) + err = setUserData(userdata, vm, providerConfig.OperatingSystem == providerconfig.OperatingSystemFlatcar) if err != nil { return nil, err } @@ -285,13 +289,13 @@ func (p *provider) create(ctx context.Context, machine *clusterv1alpha1.Machine, return p.getInstance(vm) } -func (p *provider) Get(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (instance.Instance, error) { +func (p *provider) Get(_ context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { c, _, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return nil, fmt.Errorf("failed to parse config: %w", err) } - client, err := NewClient(c.Username, c.Password, c.Organization, c.URL, c.VDC, c.AllowInsecure) + client, err := NewClient(c.Username, c.Password, c.APIToken, c.Organization, c.URL, c.VDC, c.AllowInsecure) if err != nil { return nil, fmt.Errorf("failed to create VMware Cloud Director client: %w", err) } @@ -304,16 +308,8 @@ func (p *provider) Get(ctx context.Context, machine *clusterv1alpha1.Machine, da return p.getInstance(vm) } -func (p *provider) GetCloudConfig(spec clusterv1alpha1.MachineSpec) (config string, name string, err error) { - return "", "", nil -} - -func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, 
*providerconfigtypes.Config, *vcdtypes.RawConfig, error) { - if provSpec.Value == nil { - return nil, nil, nil, fmt.Errorf("machine.spec.providerconfig.value is nil") - } - - pconfig, err := providerconfigtypes.GetConfig(provSpec) +func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *providerconfig.Config, *vcdtypes.RawConfig, error) { + pconfig, err := providerconfig.GetConfig(provSpec) if err != nil { return nil, nil, nil, err } @@ -328,89 +324,109 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p } c := Config{} - c.Username, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.Username, "VCD_USER") + + c.APIToken, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.APIToken, "VCD_API_TOKEN") if err != nil { - return nil, nil, nil, fmt.Errorf("failed to get the value of \"username\" field, error = %w", err) + return nil, nil, nil, fmt.Errorf(`failed to get the value of "apiToken" field, error = %w`, err) } - c.Password, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.Password, "VCD_PASSWORD") + c.Username, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.Username, "VCD_USER") if err != nil { - return nil, nil, nil, fmt.Errorf("failed to get the value of \"password\" field, error = %w", err) + return nil, nil, nil, fmt.Errorf(`failed to get the value of "username" field, error = %w`, err) } - c.Organization, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.Organization, "VCD_ORG") + c.Password, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.Password, "VCD_PASSWORD") if err != nil { - return nil, nil, nil, fmt.Errorf("failed to get the value of \"organization\" field, error = %w", err) + return nil, nil, nil, fmt.Errorf(`failed to get the value of "password" field, error = %w`, err) } - c.URL, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.URL, "VCD_URL") + c.Organization, err = 
p.configVarResolver.GetStringValueOrEnv(rawConfig.Organization, "VCD_ORG") if err != nil { - return nil, nil, nil, fmt.Errorf("failed to get the value of \"url\" field, error = %w", err) + return nil, nil, nil, fmt.Errorf(`failed to get the value of "organization" field, error = %w`, err) } - c.VDC, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.VDC, "VCD_VDC") + c.URL, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.URL, "VCD_URL") if err != nil { - return nil, nil, nil, fmt.Errorf("failed to get the value of \"vdc\" field, error = %w", err) + return nil, nil, nil, fmt.Errorf(`failed to get the value of "url" field, error = %w`, err) } - c.AllowInsecure, err = p.configVarResolver.GetConfigVarBoolValueOrEnv(rawConfig.AllowInsecure, "VCD_ALLOW_UNVERIFIED_SSL") + c.VDC, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.VDC, "VCD_VDC") if err != nil { - return nil, nil, nil, fmt.Errorf("failed to get the value of \"allowInsecure\" field, error = %w", err) + return nil, nil, nil, fmt.Errorf(`failed to get the value of "vdc" field, error = %w`, err) } - c.VApp, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.VApp) + c.AllowInsecure, err = p.configVarResolver.GetBoolValueOrEnv(rawConfig.AllowInsecure, "VCD_ALLOW_UNVERIFIED_SSL") + if err != nil { + return nil, nil, nil, fmt.Errorf(`failed to get the value of "allowInsecure" field, error = %w`, err) + } + + c.VApp, err = p.configVarResolver.GetStringValue(rawConfig.VApp) if err != nil { return nil, nil, nil, err } - c.Template, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.Template) + c.Template, err = p.configVarResolver.GetStringValue(rawConfig.Template) if err != nil { return nil, nil, nil, err } - c.Catalog, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.Catalog) + c.Catalog, err = p.configVarResolver.GetStringValue(rawConfig.Catalog) if err != nil { return nil, nil, nil, err } - c.Network, err = 
p.configVarResolver.GetConfigVarStringValue(rawConfig.Network) + singleNetwork, err := p.configVarResolver.GetStringValue(rawConfig.Network) if err != nil { return nil, nil, nil, err } + if singleNetwork != "" { + c.Networks = append([]string{singleNetwork}, c.Networks...) + } + + for _, network := range rawConfig.Networks { + networkValue, err := p.configVarResolver.GetStringValue(network) + if err != nil { + return nil, nil, nil, err + } + c.Networks = append(c.Networks, networkValue) + } + c.IPAllocationMode = rawConfig.IPAllocationMode if rawConfig.DiskSizeGB != nil && *rawConfig.DiskSizeGB < 0 { - return nil, nil, nil, fmt.Errorf("value for \"diskSizeGB\" should either be nil or greater than or equal to 0") + return nil, nil, nil, fmt.Errorf(`value for "diskSizeGB" should either be nil or greater than or equal to 0`) } c.DiskSizeGB = rawConfig.DiskSizeGB if rawConfig.DiskIOPS != nil && *rawConfig.DiskIOPS < 0 { - return nil, nil, nil, fmt.Errorf("value for \"diskIOPS\" should either be nil or greater than or equal to 0") + return nil, nil, nil, fmt.Errorf(`value for "diskIOPS" should either be nil or greater than or equal to 0`) } c.DiskIOPS = rawConfig.DiskIOPS if rawConfig.CPUs <= 0 { - return nil, nil, nil, fmt.Errorf("value for \"cpus\" should be greater than 0") + return nil, nil, nil, fmt.Errorf(`value for "cpus" should be greater than 0`) } c.CPUs = rawConfig.CPUs if rawConfig.CPUCores <= 0 { - return nil, nil, nil, fmt.Errorf("value for \"cpuCores\" should be greater than 0") + return nil, nil, nil, fmt.Errorf(`value for "cpuCores" should be greater than 0`) } c.CPUCores = rawConfig.CPUCores if rawConfig.MemoryMB <= 4 { - return nil, nil, nil, fmt.Errorf("value for \"memoryMB\" should be greater than 0") + return nil, nil, nil, fmt.Errorf(`value for "memoryMB" should be greater than 0`) } if rawConfig.MemoryMB%4 != 0 { - return nil, nil, nil, fmt.Errorf("value for \"memoryMB\" should be a multiple of 4") + return nil, nil, nil, fmt.Errorf(`value for 
"memoryMB" should be a multiple of 4`) } c.MemoryMB = rawConfig.MemoryMB c.DiskBusType = rawConfig.DiskBusType c.StorageProfile = rawConfig.StorageProfile c.Metadata = rawConfig.Metadata + c.SizingPolicy = rawConfig.SizingPolicy + c.PlacementPolicy = rawConfig.PlacementPolicy return &c, pconfig, rawConfig, err } @@ -460,21 +476,25 @@ func (p *provider) MachineMetricsLabels(machine *clusterv1alpha1.Machine) (map[s return labels, err } -func (p *provider) MigrateUID(_ context.Context, _ *clusterv1alpha1.Machine, _ types.UID) error { +func (p *provider) MigrateUID(_ context.Context, _ *zap.SugaredLogger, _ *clusterv1alpha1.Machine, _ types.UID) error { return nil } -func (p *provider) SetMetricsForMachines(machines clusterv1alpha1.MachineList) error { +func (p *provider) SetMetricsForMachines(_ clusterv1alpha1.MachineList) error { return nil } -func (p *provider) Validate(_ context.Context, spec clusterv1alpha1.MachineSpec) error { +func (p *provider) Validate(_ context.Context, _ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) error { c, _, _, err := p.getConfig(spec.ProviderSpec) if err != nil { return fmt.Errorf("failed to parse config: %w", err) } - client, err := NewClient(c.Username, c.Password, c.Organization, c.URL, c.VDC, c.AllowInsecure) + if c.APIToken != "" && (c.Password != "" || c.Username != "") { + return fmt.Errorf(`either "apiToken" or "username" and "password" must be specified`) + } + + client, err := NewClient(c.Username, c.Password, c.APIToken, c.Organization, c.URL, c.VDC, c.AllowInsecure) if err != nil { return fmt.Errorf("failed to create VMware Cloud Director client: %w", err) } @@ -501,11 +521,18 @@ func (p *provider) Validate(_ context.Context, spec clusterv1alpha1.MachineSpec) return fmt.Errorf("diskSizeGB '%v' cannot be less than the template size '%v': %w", *c.DiskSizeGB, catalogItem.CatalogItem.Size, err) } - // Ensure that the network exists + // Ensure that the networks exists // It can either be a vApp network or a vApp Org 
network. - _, err = GetVappNetworkType(c.Network, *vapp) - if err != nil { - return fmt.Errorf("failed to get network '%s' for vapp '%s': %w", c.Network, c.VApp, err) + + if len(c.Networks) == 0 { + return fmt.Errorf("at least one network must be specified") + } + + for _, network := range c.Networks { + _, err = GetVappNetworkType(network, *vapp) + if err != nil { + return fmt.Errorf("failed to get network '%s' for vapp '%s': %w", network, c.VApp, err) + } } if c.SizingPolicy != nil || c.PlacementPolicy != nil { @@ -540,11 +567,7 @@ func (p *provider) Validate(_ context.Context, spec clusterv1alpha1.MachineSpec) } func setProviderSpec(rawConfig vcdtypes.RawConfig, provSpec clusterv1alpha1.ProviderSpec) (*runtime.RawExtension, error) { - if provSpec.Value == nil { - return nil, fmt.Errorf("machine.spec.providerconfig.value is nil") - } - - pconfig, err := providerconfigtypes.GetConfig(provSpec) + pconfig, err := providerconfig.GetConfig(provSpec) if err != nil { return nil, err } diff --git a/pkg/cloudprovider/provider/vsphere/client.go b/pkg/cloudprovider/provider/vsphere/client.go index f89f9c0e0..b706cef05 100644 --- a/pkg/cloudprovider/provider/vsphere/client.go +++ b/pkg/cloudprovider/provider/vsphere/client.go @@ -29,7 +29,7 @@ import ( "github.com/vmware/govmomi/vim25" "github.com/vmware/govmomi/vim25/soap" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/util" + "k8c.io/machine-controller/pkg/cloudprovider/util" utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) @@ -50,9 +50,6 @@ func NewSession(ctx context.Context, config *Config) (*Session, error) { if err != nil { return nil, err } - if err != nil { - return nil, err - } client := &govmomi.Client{ Client: vim25Client, @@ -109,11 +106,13 @@ func (s *RESTSession) Logout(ctx context.Context) { } func createVim25Client(ctx context.Context, config *Config) (*vim25.Client, error) { - clientURL, err := url.Parse(fmt.Sprintf("%s/sdk", config.VSphereURL)) + endpointURL, err := 
url.Parse(config.VSphereURL) if err != nil { return nil, err } + clientURL := endpointURL.JoinPath("/sdk") + // creating the govmoni Client in roundabout way because we need to set the proper CA bundle: reference https://github.com/vmware/govmomi/issues/1200 soapClient := soap.NewClient(clientURL, config.AllowInsecure) // set our CA bundle diff --git a/pkg/cloudprovider/provider/vsphere/helper.go b/pkg/cloudprovider/provider/vsphere/helper.go index db08f9dbc..899c3d631 100644 --- a/pkg/cloudprovider/provider/vsphere/helper.go +++ b/pkg/cloudprovider/provider/vsphere/helper.go @@ -22,7 +22,6 @@ import ( "encoding/base64" "errors" "fmt" - "math" "os" "os/exec" "text/template" @@ -32,20 +31,20 @@ import ( "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/soap" "github.com/vmware/govmomi/vim25/types" - - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + "go.uber.org/zap" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/klog" ) const ( localTempDir = "/tmp" metaDataTemplate = `instance-id: {{ .InstanceID}} local-hostname: {{ .Hostname }}` + + gigaByte = (1024 * 1024 * 1024) ) -func createClonedVM(ctx context.Context, vmName string, config *Config, session *Session, os providerconfigtypes.OperatingSystem, containerLinuxUserdata string) (*object.VirtualMachine, error) { +func createClonedVM(ctx context.Context, log *zap.SugaredLogger, vmName string, config *Config, session *Session, containerLinuxUserdata string) (*object.VirtualMachine, error) { tpl, err := session.Finder.VirtualMachine(ctx, config.TemplateVMName) if err != nil { return nil, fmt.Errorf("failed to get template vm: %w", err) @@ -82,12 +81,12 @@ func createClonedVM(ctx context.Context, vmName string, config *Config, session Template: false, Location: relocateSpec, } - datastoreref, err := resolveDatastoreRef(ctx, config, session, tpl, targetVMFolder, &cloneSpec) + datastoreref, err := resolveDatastoreRef(ctx, log, config, session, tpl, 
targetVMFolder, &cloneSpec) if err != nil { return nil, fmt.Errorf("failed to resolve datastore: %w", err) } - resourcepoolref, err := resolveResourcePoolRef(ctx, config, session, tpl) + resourcepoolref, err := resolveResourcePoolRef(ctx, config, session) if err != nil { return nil, fmt.Errorf("failed to resolve resourcePool: %w", err) } @@ -102,15 +101,13 @@ func createClonedVM(ctx context.Context, vmName string, config *Config, session return nil, fmt.Errorf("failed to clone template vm: %w", err) } - if err := clonedVMTask.Wait(ctx); err != nil { + if err := clonedVMTask.WaitEx(ctx); err != nil { return nil, fmt.Errorf("error when waiting for result of clone task: %w", err) } - virtualMachine, err := session.Finder.VirtualMachine(ctx, vmName) if err != nil { return nil, fmt.Errorf("failed to get virtual machine object after cloning: %w", err) } - vmDevices, err := virtualMachine.Device(ctx) if err != nil { return nil, fmt.Errorf("failed to list devices of template VM: %w", err) @@ -140,7 +137,6 @@ func createClonedVM(ctx context.Context, vmName string, config *Config, session guestInfoUserData = "guestinfo.ignition.config.data" guestInfoUserDataEncoding = "guestinfo.ignition.config.data.encoding" - for _, item := range mvm.Config.VAppConfig.GetVmConfigInfo().Property { switch item.Id { case guestInfoUserData: @@ -172,7 +168,6 @@ func createClonedVM(ctx context.Context, vmName string, config *Config, session } diskUUIDEnabled := true - var deviceSpecs []types.BaseVirtualDeviceConfigSpec if config.DiskSizeGB != nil { disks, err := getDisksFromVM(ctx, virtualMachine) @@ -185,15 +180,15 @@ func createClonedVM(ctx context.Context, vmName string, config *Config, session return nil, err } - klog.V(4).Infof("Increasing disk size to %d GB", *config.DiskSizeGB) + log.Debugw("Increasing disk size", "targetgb", *config.DiskSizeGB) disk := disks[0] - disk.CapacityInBytes = *config.DiskSizeGB * int64(math.Pow(1024, 3)) + disk.CapacityInBytes = *config.DiskSizeGB * gigaByte 
diskspec := &types.VirtualDeviceConfigSpec{Operation: types.VirtualDeviceConfigSpecOperationEdit, Device: disk} deviceSpecs = append(deviceSpecs, diskspec) } - if config.VMNetName != "" { - networkSpecs, err := GetNetworkSpecs(ctx, session, vmDevices, config.VMNetName) + if config.VMNetName != "" || len(config.Networks) > 0 { + networkSpecs, err := GetNetworkSpecs(ctx, session, vmDevices, config.VMNetName, config.Networks) if err != nil { return nil, fmt.Errorf("failed to get network specifications: %w", err) } @@ -213,7 +208,7 @@ func createClonedVM(ctx context.Context, vmName string, config *Config, session if err != nil { return nil, fmt.Errorf("failed to reconfigure the VM: %w", err) } - if err := reconfigureTask.Wait(ctx); err != nil { + if err := reconfigureTask.WaitEx(ctx); err != nil { return nil, fmt.Errorf("error when waiting for result of the reconfigure task: %w", err) } @@ -223,14 +218,15 @@ func createClonedVM(ctx context.Context, vmName string, config *Config, session if err := removeFloppyDevice(ctx, virtualMachine); err != nil { return nil, fmt.Errorf("failed to remove floppy device: %w", err) } - return virtualMachine, nil } -func resolveDatastoreRef(ctx context.Context, config *Config, session *Session, vm *object.VirtualMachine, folder *object.Folder, cloneSpec *types.VirtualMachineCloneSpec) (*types.ManagedObjectReference, error) { +func resolveDatastoreRef(ctx context.Context, log *zap.SugaredLogger, config *Config, session *Session, vm *object.VirtualMachine, folder *object.Folder, cloneSpec *types.VirtualMachineCloneSpec) (*types.ManagedObjectReference, error) { // Based on https://github.com/vmware/govmomi/blob/v0.22.1/govc/vm/clone.go#L358 if config.DatastoreCluster != "" && config.Datastore == "" { - klog.Infof("Choosing initial datastore placement for vm %s from datastore cluster %s", vm.Name(), config.DatastoreCluster) + vmLog := log.With("vm", vm.Name(), "datastorecluster", config.DatastoreCluster) + vmLog.Infow("Choosing initial 
datastore placement for vm from datastore cluster") + storagePod, err := session.Finder.DatastoreCluster(ctx, config.DatastoreCluster) if err != nil { return nil, fmt.Errorf("failed to get datastore cluster: %w", err) @@ -273,7 +269,8 @@ func resolveDatastoreRef(ctx context.Context, config *Config, session *Session, // Get the first recommendation ds := recommendations[0].Action[0].(*types.StoragePlacementAction).Destination.Reference() - klog.Infof("The selected datastore from datastore cluster %s is: %v", config.DatastoreCluster, ds) + vmLog.Infow("Selected datastore from datastore cluster", "datastore", ds) + return &ds, nil } else if config.DatastoreCluster == "" && config.Datastore != "" { datastore, err := session.Finder.Datastore(ctx, config.Datastore) @@ -281,12 +278,11 @@ func resolveDatastoreRef(ctx context.Context, config *Config, session *Session, return nil, fmt.Errorf("failed to get datastore: %w", err) } return types.NewReference(datastore.Reference()), nil - } else { - return nil, fmt.Errorf("please provide either a datastore or a datastore cluster") } + return nil, fmt.Errorf("please provide either a datastore or a datastore cluster") } -func uploadAndAttachISO(ctx context.Context, session *Session, vmRef *object.VirtualMachine, localIsoFilePath string) error { +func uploadAndAttachISO(ctx context.Context, log *zap.SugaredLogger, session *Session, vmRef *object.VirtualMachine, localIsoFilePath string) error { p := soap.DefaultUpload remoteIsoFilePath := fmt.Sprintf("%s/%s", vmRef.Name(), "cloud-init.iso") // Get the datastore where VM files are located @@ -294,11 +290,12 @@ func uploadAndAttachISO(ctx context.Context, session *Session, vmRef *object.Vir if err != nil { return fmt.Errorf("error getting datastore from VM %s: %w", vmRef.Name(), err) } - klog.V(3).Infof("Uploading userdata ISO to datastore %+v, destination iso is %s\n", datastore, remoteIsoFilePath) + uploadLog := log.With("datastore", datastore, "source", localIsoFilePath, 
"destination", remoteIsoFilePath) + uploadLog.Debug("Uploading userdata ISO to datastore") if err := datastore.UploadFile(ctx, localIsoFilePath, remoteIsoFilePath, &p); err != nil { return fmt.Errorf("failed to upload iso: %w", err) } - klog.V(3).Infof("Uploaded ISO file %s", localIsoFilePath) + uploadLog.Debug("Uploaded ISO file") // Find the cd-rom device and insert the cloud init iso file into it. devices, err := vmRef.Device(ctx) @@ -316,7 +313,7 @@ func uploadAndAttachISO(ctx context.Context, session *Session, vmRef *object.Vir return vmRef.EditDevice(ctx, devices.InsertIso(cdrom, iso)) } -func generateLocalUserdataISO(userdata, name string) (string, error) { +func generateLocalUserdataISO(ctx context.Context, userdata, name string) (string, error) { // We must create a directory, because the iso-generation commands // take a directory as input userdataDir, err := os.MkdirTemp(localTempDir, name) @@ -370,7 +367,7 @@ func generateLocalUserdataISO(userdata, name string) (string, error) { return "", errors.New("system is missing genisoimage or mkisofs, can't generate userdata iso without it") } - cmd := exec.Command(command, args...) + cmd := exec.CommandContext(ctx, command, args...) 
if output, err := cmd.CombinedOutput(); err != nil { return "", fmt.Errorf("error executing command `%s %s`: output: `%s`, error: `%w`", command, args, string(output), err) } @@ -422,9 +419,9 @@ func validateDiskResizing(disks []*types.VirtualDisk, requestedSize int64) error if diskLen := len(disks); diskLen != 1 { return fmt.Errorf("expected vm to have exactly one disk, got %d", diskLen) } - requestedCapacityInBytes := requestedSize * int64(math.Pow(1024, 3)) + requestedCapacityInBytes := requestedSize * gigaByte if requestedCapacityInBytes < disks[0].CapacityInBytes { - attachedDiskSizeInGiB := disks[0].CapacityInBytes / int64(math.Pow(1024, 3)) + attachedDiskSizeInGiB := disks[0].CapacityInBytes / gigaByte return fmt.Errorf("requested diskSizeGB %d is smaller than size of attached disk(%dGiB)", requestedSize, attachedDiskSizeInGiB) } return nil @@ -435,17 +432,17 @@ func getDatastoreFromVM(ctx context.Context, session *Session, vmRef *object.Vir var props mo.VirtualMachine // Obtain VM properties if err := vmRef.Properties(ctx, vmRef.Reference(), nil, &props); err != nil { - return nil, fmt.Errorf("error getting VM properties: %w", err) + return nil, fmt.Errorf("failed to get VM properties: %w", err) } datastorePathObj := new(object.DatastorePath) isSuccess := datastorePathObj.FromString(props.Summary.Config.VmPathName) if !isSuccess { - return nil, fmt.Errorf("Failed to parse volPath: %s", props.Summary.Config.VmPathName) + return nil, fmt.Errorf("failed to parse volPath: %s", props.Summary.Config.VmPathName) } return session.Finder.Datastore(ctx, datastorePathObj.Datastore) } -func resolveResourcePoolRef(ctx context.Context, config *Config, session *Session, vm *object.VirtualMachine) (*types.ManagedObjectReference, error) { +func resolveResourcePoolRef(ctx context.Context, config *Config, session *Session) (*types.ManagedObjectReference, error) { if config.ResourcePool != "" { targetResourcePool, err := session.Finder.ResourcePool(ctx, config.ResourcePool) if 
err != nil { @@ -456,14 +453,14 @@ func resolveResourcePoolRef(ctx context.Context, config *Config, session *Sessio return nil, nil } -func attachTags(ctx context.Context, config *Config, vm *object.VirtualMachine) error { +func attachTags(ctx context.Context, log *zap.SugaredLogger, config *Config, vm *object.VirtualMachine) error { restAPISession, err := NewRESTSession(ctx, config) if err != nil { return fmt.Errorf("failed to create REST API session: %w", err) } defer restAPISession.Logout(ctx) tagManager := tags.NewManager(restAPISession.Client) - klog.V(3).Info("Attaching tags") + log.Debug("Attaching tags") for _, tag := range config.Tags { tagID, err := determineTagID(ctx, tagManager, tag) if err != nil { @@ -471,14 +468,14 @@ func attachTags(ctx context.Context, config *Config, vm *object.VirtualMachine) } if err := tagManager.AttachTag(ctx, tagID, vm.Reference()); err != nil { - klog.V(3).Infof("Failed to attach tag %v. The tag was successfully deleted", tag) + log.Debugw("Failed to attach tag; it was successfully deleted", "tag", tag) return fmt.Errorf("failed to attach tag to VM: %v %w", tag.Name, err) } } return nil } -func detachTags(ctx context.Context, config *Config, vm *object.VirtualMachine) error { +func detachTags(ctx context.Context, log *zap.SugaredLogger, config *Config, vm *object.VirtualMachine) error { restAPISession, err := NewRESTSession(ctx, config) if err != nil { return fmt.Errorf("failed to create REST API session: %w", err) @@ -490,7 +487,7 @@ func detachTags(ctx context.Context, config *Config, vm *object.VirtualMachine) if err != nil { return fmt.Errorf("failed to get attached tags for the VM: %s, %w", vm.Name(), err) } - klog.V(3).Info("Deleting tags") + log.Debug("Deleting tags") for _, tag := range attachedTags { tagID, err := determineTagID(ctx, tagManager, tag) if err != nil { diff --git a/pkg/cloudprovider/provider/vsphere/helper_test.go b/pkg/cloudprovider/provider/vsphere/helper_test.go index 359d867e7..257339db4 100644 --- 
a/pkg/cloudprovider/provider/vsphere/helper_test.go +++ b/pkg/cloudprovider/provider/vsphere/helper_test.go @@ -28,6 +28,7 @@ import ( "github.com/vmware/govmomi/vim25/methods" "github.com/vmware/govmomi/vim25/soap" "github.com/vmware/govmomi/vim25/types" + "go.uber.org/zap" ) func TestResolveDatastoreRef(t *testing.T) { @@ -112,7 +113,7 @@ func TestResolveDatastoreRef(t *testing.T) { t.Fatalf("error getting virtual machines: %v", err) } - got, err := resolveDatastoreRef(ctx, tt.config, session, vms[2], vmFolder, &types.VirtualMachineCloneSpec{}) + got, err := resolveDatastoreRef(ctx, zap.NewNop().Sugar(), tt.config, session, vms[2], vmFolder, &types.VirtualMachineCloneSpec{}) if (err != nil) != tt.wantErr { t.Errorf("resolveDatastoreRef() error = %v, wantErr %v", err, tt.wantErr) return @@ -130,7 +131,7 @@ type CustomStorageResourceManager struct { } // RecommendDatastores always return a recommendation for the purposes of the test. -func (c *CustomStorageResourceManager) RecommendDatastores(req *types.RecommendDatastores) soap.HasFault { +func (c *CustomStorageResourceManager) RecommendDatastores(_ *types.RecommendDatastores) soap.HasFault { body := &methods.RecommendDatastoresBody{} res := &types.RecommendDatastoresResponse{} ds := c.ds.Reference() @@ -221,11 +222,7 @@ func TestResolveResourcePoolRef(t *testing.T) { t.Fatalf("error creating session: %v", err) } - // Obtain a VM from the simulator - obj := simulator.Map.Any("VirtualMachine").(*simulator.VirtualMachine) - vm := object.NewVirtualMachine(session.Client.Client, obj.Reference()) - - got, err := resolveResourcePoolRef(ctx, tt.config, session, vm) + got, err := resolveResourcePoolRef(ctx, tt.config, session) if (err != nil) != tt.wantErr { t.Errorf("error = %v, wantErr %v", err, tt.wantErr) return diff --git a/pkg/cloudprovider/provider/vsphere/network.go b/pkg/cloudprovider/provider/vsphere/network.go index e38d11135..2f3b6dc3d 100644 --- a/pkg/cloudprovider/provider/vsphere/network.go +++ 
b/pkg/cloudprovider/provider/vsphere/network.go @@ -28,9 +28,9 @@ const ( ethCardType = "vmxnet3" ) -// Based on https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/blob/main/pkg/cloud/vsphere/services/govmomi/vcenter/clone.go#L158 -func GetNetworkSpecs(ctx context.Context, session *Session, devices object.VirtualDeviceList, network string) ([]types.BaseVirtualDeviceConfigSpec, error) { - var deviceSpecs []types.BaseVirtualDeviceConfigSpec +// Based on https://github.com/kubernetes-sigs/cluster-api-provider-vsphere/blob/v1.7.0/pkg/services/govmomi/vcenter/clone.go#L372 +func GetNetworkSpecs(ctx context.Context, session *Session, devices object.VirtualDeviceList, network string, networks []string) ([]types.BaseVirtualDeviceConfigSpec, error) { + deviceSpecs := []types.BaseVirtualDeviceConfigSpec{} // Remove any existing NICs. for _, dev := range devices.SelectByType((*types.VirtualEthernetCard)(nil)) { @@ -40,33 +40,42 @@ func GetNetworkSpecs(ctx context.Context, session *Session, devices object.Virtu }) } - // Add new NICs based on the machine config. - ref, err := session.Finder.Network(ctx, network) - if err != nil { - return nil, fmt.Errorf("failed to find network %q: %w", network, err) - } - backing, err := ref.EthernetCardBackingInfo(ctx) - if err != nil { - return nil, fmt.Errorf("failed to create new ethernet card backing info for network %q: %w", network, err) - } - dev, err := object.EthernetCardTypes().CreateEthernetCard(ethCardType, backing) - if err != nil { - return nil, fmt.Errorf("failed to create new ethernet card %q for network %q: %v", ethCardType, network, ctx) + // Add the default network if no networks are specified. + if network != "" { + networks = append(networks, network) } - // Get the actual NIC object. This is safe to assert without a check - // because "object.EthernetCardTypes().CreateEthernetCard" returns a - // "types.BaseVirtualEthernetCard" as a "types.BaseVirtualDevice". 
- nic := dev.(types.BaseVirtualEthernetCard).GetVirtualEthernetCard() + // Add NICs for each network. + deviceKey := int32(-100) + for _, net := range networks { + // Add new NICs based on the machine config. + ref, err := session.Finder.Network(ctx, net) + if err != nil { + return nil, fmt.Errorf("failed to find network %q: %w", net, err) + } + backing, err := ref.EthernetCardBackingInfo(ctx) + if err != nil { + return nil, fmt.Errorf("failed to create new ethernet card backing info for network %q: %w", net, err) + } + dev, err := object.EthernetCardTypes().CreateEthernetCard(ethCardType, backing) + if err != nil { + return nil, fmt.Errorf("failed to create new ethernet card %q for network %q: %w", ethCardType, net, err) + } - // Assign a temporary device key to ensure that a unique one will be - // generated when the device is created. - nic.Key = devices.NewKey() + // Get the actual NIC object. This is safe to assert without a check + // because "object.EthernetCardTypes().CreateEthernetCard" returns a + // "types.BaseVirtualEthernetCard" as a "types.BaseVirtualDevice". + nic := dev.(types.BaseVirtualEthernetCard).GetVirtualEthernetCard() - deviceSpecs = append(deviceSpecs, &types.VirtualDeviceConfigSpec{ - Device: dev, - Operation: types.VirtualDeviceConfigSpecOperationAdd, - }) + // Assign a temporary device key to ensure that a unique one will be 
+ nic.Key = deviceKey + deviceSpecs = append(deviceSpecs, &types.VirtualDeviceConfigSpec{ + Device: dev, + Operation: types.VirtualDeviceConfigSpecOperationAdd, + }) + deviceKey-- + } return deviceSpecs, nil } diff --git a/pkg/cloudprovider/provider/vsphere/provider.go b/pkg/cloudprovider/provider/vsphere/provider.go index 481a2e519..61587db9e 100644 --- a/pkg/cloudprovider/provider/vsphere/provider.go +++ b/pkg/cloudprovider/provider/vsphere/provider.go @@ -20,7 +20,6 @@ import ( "context" "errors" "fmt" - "net/url" "os" "strings" @@ -30,28 +29,27 @@ import ( "github.com/vmware/govmomi/vapi/tags" "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/types" + "go.uber.org/zap" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - cloudprovidererrors "github.com/kubermatic/machine-controller/pkg/cloudprovider/errors" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/instance" - vspheretypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/vsphere/types" - cloudprovidertypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/types" - "github.com/kubermatic/machine-controller/pkg/providerconfig" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + cloudprovidererrors "k8c.io/machine-controller/pkg/cloudprovider/errors" + "k8c.io/machine-controller/pkg/cloudprovider/instance" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + "k8c.io/machine-controller/sdk/apis/cluster/common" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + vspheretypes "k8c.io/machine-controller/sdk/cloudprovider/vsphere" + "k8c.io/machine-controller/sdk/providerconfig" corev1 "k8s.io/api/core/v1" ktypes "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/klog" ) type provider struct { - configVarResolver 
*providerconfig.ConfigVarResolver + configVarResolver providerconfig.ConfigVarResolver } // New returns a VSphere provider. -func New(configVarResolver *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { +func New(configVarResolver providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { provider := &provider{configVarResolver: configVarResolver} return provider } @@ -60,19 +58,23 @@ func New(configVarResolver *providerconfig.ConfigVarResolver) cloudprovidertypes type Config struct { TemplateVMName string VMNetName string + Networks []string Username string Password string VSphereURL string Datacenter string + Cluster string Folder string ResourcePool string Datastore string DatastoreCluster string AllowInsecure bool + VMAntiAffinity bool CPUs int32 MemoryMB int64 DiskSizeGB *int64 Tags []tags.Tag + VMGroup string } // Ensures that Server implements Instance interface. @@ -96,6 +98,9 @@ func (vsphereServer Server) ID() string { } func (vsphereServer Server) ProviderID() string { + if vsphereServer.uuid == "" { + return "" + } return "vsphere://" + vsphereServer.uuid } @@ -110,16 +115,12 @@ func (vsphereServer Server) Status() instance.Status { // Ensures that provider implements Provider interface. 
var _ cloudprovidertypes.Provider = &provider{} -func (p *provider) AddDefaults(spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { +func (p *provider) AddDefaults(_ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { return spec, nil } -func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *providerconfigtypes.Config, *vspheretypes.RawConfig, error) { - if provSpec.Value == nil { - return nil, nil, nil, fmt.Errorf("machine.spec.providerconfig.value is nil") - } - - pconfig, err := providerconfigtypes.GetConfig(provSpec) +func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *providerconfig.Config, *vspheretypes.RawConfig, error) { + pconfig, err := providerconfig.GetConfig(provSpec) if err != nil { return nil, nil, nil, err } @@ -134,57 +135,82 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p } c := Config{} - c.TemplateVMName, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.TemplateVMName) + c.TemplateVMName, err = p.configVarResolver.GetStringValue(rawConfig.TemplateVMName) + if err != nil { + return nil, nil, nil, err + } + + //nolint:staticcheck + //lint:ignore SA1019: rawConfig.VMNetName is deprecated: use networks instead. 
+ c.VMNetName, err = p.configVarResolver.GetStringValue(rawConfig.VMNetName) if err != nil { return nil, nil, nil, err } - c.VMNetName, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.VMNetName) + for _, network := range rawConfig.Networks { + networkValue, err := p.configVarResolver.GetStringValue(network) + if err != nil { + return nil, nil, rawConfig, err + } + c.Networks = append(c.Networks, networkValue) + } + + c.Username, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.Username, "VSPHERE_USERNAME") if err != nil { return nil, nil, nil, err } - c.Username, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.Username, "VSPHERE_USERNAME") + c.Password, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.Password, "VSPHERE_PASSWORD") if err != nil { return nil, nil, nil, err } - c.Password, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.Password, "VSPHERE_PASSWORD") + c.VSphereURL, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.VSphereURL, "VSPHERE_ADDRESS") if err != nil { return nil, nil, nil, err } - c.VSphereURL, err = p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.VSphereURL, "VSPHERE_ADDRESS") + c.Datacenter, err = p.configVarResolver.GetStringValue(rawConfig.Datacenter) if err != nil { return nil, nil, nil, err } - c.Datacenter, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.Datacenter) + c.Cluster, err = p.configVarResolver.GetStringValue(rawConfig.Cluster) if err != nil { return nil, nil, nil, err } - c.Folder, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.Folder) + c.Folder, err = p.configVarResolver.GetStringValue(rawConfig.Folder) if err != nil { return nil, nil, nil, err } - c.ResourcePool, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.ResourcePool) + c.ResourcePool, err = p.configVarResolver.GetStringValue(rawConfig.ResourcePool) if err != nil { return nil, nil, nil, err } - c.Datastore, err = 
p.configVarResolver.GetConfigVarStringValue(rawConfig.Datastore) + c.Datastore, err = p.configVarResolver.GetStringValue(rawConfig.Datastore) if err != nil { return nil, nil, nil, err } - c.DatastoreCluster, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.DatastoreCluster) + c.DatastoreCluster, err = p.configVarResolver.GetStringValue(rawConfig.DatastoreCluster) if err != nil { return nil, nil, nil, err } - c.AllowInsecure, err = p.configVarResolver.GetConfigVarBoolValueOrEnv(rawConfig.AllowInsecure, "VSPHERE_ALLOW_INSECURE") + c.AllowInsecure, err = p.configVarResolver.GetBoolValueOrEnv(rawConfig.AllowInsecure, "VSPHERE_ALLOW_INSECURE") + if err != nil { + return nil, nil, nil, err + } + + c.VMAntiAffinity, _, err = p.configVarResolver.GetBoolValue(rawConfig.VMAntiAffinity) + if err != nil { + return nil, nil, nil, err + } + + c.VMGroup, err = p.configVarResolver.GetStringValue(rawConfig.VMGroup) if err != nil { return nil, nil, nil, err } @@ -205,7 +231,7 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p return &c, pconfig, rawConfig, nil } -func (p *provider) Validate(ctx context.Context, spec clusterv1alpha1.MachineSpec) error { +func (p *provider) Validate(ctx context.Context, log *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) error { config, _, _, err := p.getConfig(spec.ProviderSpec) if err != nil { return fmt.Errorf("failed to get config: %w", err) @@ -217,6 +243,10 @@ func (p *provider) Validate(ctx context.Context, spec clusterv1alpha1.MachineSpe } defer session.Logout(ctx) + if len(config.Networks) > 0 && config.VMNetName != "" { + return fmt.Errorf("both networks and vmNetName are specified, only one of them can be used") + } + if config.Tags != nil { restAPISession, err := NewRESTSession(ctx, config) if err != nil { @@ -224,7 +254,7 @@ func (p *provider) Validate(ctx context.Context, spec clusterv1alpha1.MachineSpe } defer restAPISession.Logout(ctx) tagManager := 
tags.NewManager(restAPISession.Client) - klog.V(3).Info("Found tags") + log.Debug("Found tags") for _, tag := range config.Tags { if tag.ID == "" && tag.Name == "" { return fmt.Errorf("either tag id or name must be specified") @@ -236,7 +266,7 @@ func (p *provider) Validate(ctx context.Context, spec clusterv1alpha1.MachineSpe return fmt.Errorf("can't get the category with ID %s, %w", tag.CategoryID, err) } } - klog.V(3).Info("Tag validation passed") + log.Debug("Tag validation passed") } // Only and only one between datastore and datastre cluster should be @@ -285,6 +315,20 @@ func (p *provider) Validate(ctx context.Context, spec clusterv1alpha1.MachineSpe return err } } + + if config.VMAntiAffinity && config.Cluster == "" { + return fmt.Errorf("cluster is required for vm anti affinity") + } else if config.VMGroup != "" && config.Cluster == "" { + return fmt.Errorf("cluster is required for vm group") + } + + if config.Cluster != "" { + _, err = session.Finder.ClusterComputeResource(ctx, config.Cluster) + if err != nil { + return fmt.Errorf("failed to get cluster %q, %w", config.Cluster, err) + } + } + return nil } @@ -295,19 +339,19 @@ func machineInvalidConfigurationTerminalError(err error) error { } } -func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { - vm, err := p.create(ctx, machine, userdata) +func (p *provider) Create(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { + vm, err := p.create(ctx, log, machine, userdata) if err != nil { - _, cleanupErr := p.Cleanup(ctx, machine, data) + _, cleanupErr := p.Cleanup(ctx, log, machine, data) if cleanupErr != nil { - return nil, fmt.Errorf("cleaning up failed with err %v after creation failed with err %w", cleanupErr, err) + return nil, fmt.Errorf("cleaning up failed with err %w after 
creation failed with err %w", cleanupErr, err) } return nil, err } return vm, nil } -func (p *provider) create(ctx context.Context, machine *clusterv1alpha1.Machine, userdata string) (instance.Instance, error) { +func (p *provider) create(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, userdata string) (instance.Instance, error) { config, pc, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return nil, fmt.Errorf("failed to parse config: %w", err) @@ -320,27 +364,40 @@ func (p *provider) create(ctx context.Context, machine *clusterv1alpha1.Machine, defer session.Logout(ctx) var containerLinuxUserdata string - if pc.OperatingSystem == providerconfigtypes.OperatingSystemFlatcar { + if pc.OperatingSystem == providerconfig.OperatingSystemFlatcar { containerLinuxUserdata = userdata } - virtualMachine, err := createClonedVM(ctx, + virtualMachine, err := createClonedVM( + ctx, + log, machine.Spec.Name, config, session, - pc.OperatingSystem, containerLinuxUserdata, ) if err != nil { return nil, machineInvalidConfigurationTerminalError(fmt.Errorf("failed to create cloned vm: '%w'", err)) } - if err := attachTags(ctx, config, virtualMachine); err != nil { + if err := attachTags(ctx, log, config, virtualMachine); err != nil { return nil, fmt.Errorf("failed to attach tags: %w", err) } - if pc.OperatingSystem != providerconfigtypes.OperatingSystemFlatcar { - localUserdataIsoFilePath, err := generateLocalUserdataISO(userdata, machine.Spec.Name) + if config.VMGroup != "" { + if err := p.addToVMGroup(ctx, log, session, machine, config); err != nil { + return nil, fmt.Errorf("failed to add VM to VM group: %w", err) + } + } + + if config.VMAntiAffinity { + if err := p.createOrUpdateVMAntiAffinityRule(ctx, log, session, machine, config); err != nil { + return nil, fmt.Errorf("failed to add VM to anti affinity rule: %w", err) + } + } + + if pc.OperatingSystem != providerconfig.OperatingSystemFlatcar { + localUserdataIsoFilePath, err := 
generateLocalUserdataISO(ctx, userdata, machine.Spec.Name) if err != nil { return nil, fmt.Errorf("failed to generate local userdadata iso: %w", err) } @@ -352,14 +409,14 @@ func (p *provider) create(ctx context.Context, machine *clusterv1alpha1.Machine, } }() - if err := uploadAndAttachISO(ctx, session, virtualMachine, localUserdataIsoFilePath); err != nil { + if err := uploadAndAttachISO(ctx, log, session, virtualMachine, localUserdataIsoFilePath); err != nil { // Destroy VM to avoid a leftover. destroyTask, vmErr := virtualMachine.Destroy(ctx) if vmErr != nil { - return nil, fmt.Errorf("failed to destroy vm %s after failing upload and attach userdata iso: %w / %v", virtualMachine.Name(), err, vmErr) + return nil, fmt.Errorf("failed to destroy vm %s after failing upload and attach userdata iso: %w / %w", virtualMachine.Name(), err, vmErr) } - if vmErr := destroyTask.Wait(ctx); vmErr != nil { - return nil, fmt.Errorf("failed to destroy vm %s after failing upload and attach userdata iso: %w / %v", virtualMachine.Name(), err, vmErr) + if vmErr := destroyTask.WaitEx(ctx); vmErr != nil { + return nil, fmt.Errorf("failed to destroy vm %s after failing upload and attach userdata iso: %w / %w", virtualMachine.Name(), err, vmErr) } return nil, machineInvalidConfigurationTerminalError(fmt.Errorf("failed to upload and attach userdata iso: %w", err)) } @@ -370,14 +427,14 @@ func (p *provider) create(ctx context.Context, machine *clusterv1alpha1.Machine, return nil, fmt.Errorf("failed to power on machine: %w", err) } - if err := powerOnTask.Wait(ctx); err != nil { + if err := powerOnTask.WaitEx(ctx); err != nil { return nil, fmt.Errorf("error when waiting for vm powerOn task: %w", err) } return Server{name: virtualMachine.Name(), status: instance.StatusRunning, id: virtualMachine.Reference().Value, uuid: virtualMachine.UUID(ctx)}, nil } -func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (bool, error) { 
+func (p *provider) Cleanup(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (bool, error) { config, pc, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return false, fmt.Errorf("failed to parse config: %w", err) @@ -397,10 +454,16 @@ func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine return false, fmt.Errorf("failed to get instance from vSphere: %w", err) } - if err := detachTags(ctx, config, virtualMachine); err != nil { + if err := detachTags(ctx, log, config, virtualMachine); err != nil { return false, fmt.Errorf("failed to delete tags: %w", err) } + if config.VMAntiAffinity { + if err := p.createOrUpdateVMAntiAffinityRule(ctx, log, session, machine, config); err != nil { + return false, fmt.Errorf("failed to update VMs in anti-affinity rule: %w", err) + } + } + powerState, err := virtualMachine.PowerState(ctx) if err != nil { return false, fmt.Errorf("failed to get virtual machine power state: %w", err) @@ -413,7 +476,7 @@ func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine if err != nil { return false, fmt.Errorf("failed to poweroff vm %s: %w", virtualMachine.Name(), err) } - if err = powerOffTask.Wait(ctx); err != nil { + if err = powerOffTask.WaitEx(ctx); err != nil { return false, fmt.Errorf("failed to poweroff vm %s: %w", virtualMachine.Name(), err) } } @@ -446,17 +509,17 @@ func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine datastore, err := getDatastoreFromVM(ctx, session, virtualMachine) if err != nil { - return false, fmt.Errorf("Error getting datastore from VM %s: %w", virtualMachine.Name(), err) + return false, fmt.Errorf("failed to get datastore from VM %s: %w", virtualMachine.Name(), err) } destroyTask, err := virtualMachine.Destroy(ctx) if err != nil { - return false, fmt.Errorf("failed to destroy vm %s: %w", virtualMachine.Name(), err) + return false, fmt.Errorf("failed to 
destroy VM %s: %w", virtualMachine.Name(), err) } - if err := destroyTask.Wait(ctx); err != nil { - return false, fmt.Errorf("failed to destroy vm %s: %w", virtualMachine.Name(), err) + if err := destroyTask.WaitEx(ctx); err != nil { + return false, fmt.Errorf("failed to destroy VM %s: %w", virtualMachine.Name(), err) } - if pc.OperatingSystem != providerconfigtypes.OperatingSystemFlatcar { + if pc.OperatingSystem != providerconfig.OperatingSystemFlatcar { filemanager := datastore.NewFileManager(session.Datacenter, false) if err := filemanager.Delete(ctx, virtualMachine.Name()); err != nil { @@ -467,11 +530,11 @@ func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine } } - klog.V(2).Infof("Successfully destroyed vm %s", virtualMachine.Name()) + log.Infow("Successfully destroyed VM", "vm", virtualMachine.Name()) return true, nil } -func (p *provider) Get(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (instance.Instance, error) { +func (p *provider) Get(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { config, _, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return nil, fmt.Errorf("failed to parse config: %w", err) @@ -499,7 +562,7 @@ func (p *provider) Get(ctx context.Context, machine *clusterv1alpha1.Machine, da if err != nil { return nil, fmt.Errorf("failed to power on instance that was in state %q: %w", powerState, err) } - if err := powerOnTask.Wait(ctx); err != nil { + if err := powerOnTask.WaitEx(ctx); err != nil { return nil, fmt.Errorf("failed waiting for instance to be powered on: %w", err) } // We must return here because the vendored code for determining if the guest @@ -529,73 +592,16 @@ func (p *provider) Get(ctx context.Context, machine *clusterv1alpha1.Machine, da } } } else { - klog.V(3).Infof("Can't fetch the IP addresses for machine %s, the VMware guest utils are 
not running yet. This might take a few minutes", machine.Spec.Name) + log.Debug("Can't fetch the IP addresses for machine, the VMware guest utils are not running yet. This might take a few minutes") } return Server{name: virtualMachine.Name(), status: instance.StatusRunning, addresses: addresses, id: virtualMachine.Reference().Value, uuid: virtualMachine.UUID(ctx)}, nil } -func (p *provider) MigrateUID(_ context.Context, _ *clusterv1alpha1.Machine, _ ktypes.UID) error { +func (p *provider) MigrateUID(_ context.Context, _ *zap.SugaredLogger, _ *clusterv1alpha1.Machine, _ ktypes.UID) error { return nil } -func (p *provider) GetCloudConfig(spec clusterv1alpha1.MachineSpec) (config string, name string, err error) { - c, _, _, err := p.getConfig(spec.ProviderSpec) - if err != nil { - return "", "", fmt.Errorf("failed to parse config: %w", err) - } - - passedURL := c.VSphereURL - // Required because url.Parse returns an empty string for the hostname if there was no schema - if !strings.HasPrefix(passedURL, "https://") { - passedURL = "https://" + passedURL - } - - u, err := url.Parse(passedURL) - if err != nil { - return "", "", fmt.Errorf("failed to parse '%s' as url: %w", passedURL, err) - } - - workingDir := c.Folder - // Default to basedir - if workingDir == "" { - workingDir = fmt.Sprintf("/%s/vm", c.Datacenter) - } - - cc := &vspheretypes.CloudConfig{ - Global: vspheretypes.GlobalOpts{ - User: c.Username, - Password: c.Password, - InsecureFlag: c.AllowInsecure, - VCenterPort: u.Port(), - }, - Disk: vspheretypes.DiskOpts{ - SCSIControllerType: "pvscsi", - }, - Workspace: vspheretypes.WorkspaceOpts{ - Datacenter: c.Datacenter, - VCenterIP: u.Hostname(), - DefaultDatastore: c.Datastore, - Folder: workingDir, - }, - VirtualCenter: map[string]*vspheretypes.VirtualCenterConfig{ - u.Hostname(): { - VCenterPort: u.Port(), - Datacenters: c.Datacenter, - User: c.Username, - Password: c.Password, - }, - }, - } - - s, err := cc.String() - if err != nil { - return "", "", 
fmt.Errorf("failed to convert the cloud-config to string: %w", err) - } - - return s, "vsphere", nil -} - func (p *provider) MachineMetricsLabels(machine *clusterv1alpha1.Machine) (map[string]string, error) { labels := make(map[string]string) @@ -608,7 +614,7 @@ func (p *provider) MachineMetricsLabels(machine *clusterv1alpha1.Machine) (map[s return labels, err } -func (p *provider) SetMetricsForMachines(machines clusterv1alpha1.MachineList) error { +func (p *provider) SetMetricsForMachines(_ clusterv1alpha1.MachineList) error { return nil } diff --git a/pkg/cloudprovider/provider/vsphere/provider_test.go b/pkg/cloudprovider/provider/vsphere/provider_test.go index 3c28e66bf..3bcdad6ed 100644 --- a/pkg/cloudprovider/provider/vsphere/provider_test.go +++ b/pkg/cloudprovider/provider/vsphere/provider_test.go @@ -25,11 +25,12 @@ import ( "text/template" "github.com/vmware/govmomi/simulator" + "go.uber.org/zap" - cloudprovidertesting "github.com/kubermatic/machine-controller/pkg/cloudprovider/testing" - "github.com/kubermatic/machine-controller/pkg/providerconfig" + cloudprovidertesting "k8c.io/machine-controller/pkg/cloudprovider/testing" + "k8c.io/machine-controller/sdk/providerconfig/configvar" - "k8s.io/utils/pointer" + "k8s.io/utils/ptr" fakectrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" ) @@ -47,6 +48,8 @@ func (v vsphereProviderSpecConf) rawProviderSpec(t *testing.T) []byte { "cloudProvider": "vsphere", "cloudProviderSpec": { "allowInsecure": false, + "vmAntiAffinity": true, + "cluster": "DC0_C0", "cpus": 1, "datacenter": "DC0", {{- if .Datastore }} @@ -61,7 +64,9 @@ func (v vsphereProviderSpecConf) rawProviderSpec(t *testing.T) []byte { "password": "{{ .Password }}", "templateVMName": "DC0_H0_VM0", "username": "{{ .User }}", - "vmNetName": "", + "networks": [ + "" + ], "vsphereURL": "{{ .URL }}" }, "operatingSystem": "flatcar", @@ -92,7 +97,7 @@ func TestValidate(t *testing.T) { { name: "Valid Datastore", args: vsphereProviderSpecConf{ - 
Datastore: pointer.String("LocalDS_0"), + Datastore: ptr.To("LocalDS_0"), }, getConfigErr: nil, wantErr: false, @@ -100,8 +105,8 @@ func TestValidate(t *testing.T) { { name: "Valid Datastore end empty DatastoreCluster", args: vsphereProviderSpecConf{ - Datastore: pointer.String("LocalDS_0"), - DatastoreCluster: pointer.String(""), + Datastore: ptr.To("LocalDS_0"), + DatastoreCluster: ptr.To(""), }, getConfigErr: nil, wantErr: false, @@ -109,7 +114,7 @@ func TestValidate(t *testing.T) { { name: "Valid DatastoreCluster", args: vsphereProviderSpecConf{ - DatastoreCluster: pointer.String("DC0_POD0"), + DatastoreCluster: ptr.To("DC0_POD0"), }, getConfigErr: nil, wantErr: false, @@ -117,7 +122,7 @@ func TestValidate(t *testing.T) { { name: "Invalid Datastore", args: vsphereProviderSpecConf{ - Datastore: pointer.String("LocalDS_10"), + Datastore: ptr.To("LocalDS_10"), }, getConfigErr: nil, wantErr: true, @@ -125,7 +130,7 @@ func TestValidate(t *testing.T) { { name: "Invalid DatastoreCluster", args: vsphereProviderSpecConf{ - Datastore: pointer.String("DC0_POD10"), + Datastore: ptr.To("DC0_POD10"), }, getConfigErr: nil, wantErr: true, @@ -133,8 +138,8 @@ func TestValidate(t *testing.T) { { name: "Both Datastore and DatastoreCluster specified", args: vsphereProviderSpecConf{ - Datastore: pointer.String("DC0_POD10"), - DatastoreCluster: pointer.String("DC0_POD0"), + Datastore: ptr.To("DC0_POD10"), + DatastoreCluster: ptr.To("DC0_POD0"), }, getConfigErr: nil, wantErr: true, @@ -169,7 +174,7 @@ func TestValidate(t *testing.T) { password, _ := simulator.DefaultLogin.Password() p := &provider{ // Note that configVarResolver is not used in this test as the getConfigFunc is mocked. - configVarResolver: providerconfig.NewConfigVarResolver(context.Background(), fakectrlruntimeclient. + configVarResolver: configvar.NewResolver(context.Background(), fakectrlruntimeclient. NewClientBuilder(). 
Build()), } @@ -178,7 +183,7 @@ func TestValidate(t *testing.T) { tt.args.URL = vSphereURL m := cloudprovidertesting.Creator{Name: "test", Namespace: "vsphere", ProviderSpecGetter: tt.args.rawProviderSpec}. CreateMachine(t) - if err := p.Validate(context.Background(), m.Spec); (err != nil) != tt.wantErr { + if err := p.Validate(context.Background(), zap.NewNop().Sugar(), m.Spec); (err != nil) != tt.wantErr { t.Errorf("provider.Validate() error = %v, wantErr %v", err, tt.wantErr) } }) diff --git a/pkg/cloudprovider/provider/vsphere/rule.go b/pkg/cloudprovider/provider/vsphere/rule.go new file mode 100644 index 000000000..9140c9807 --- /dev/null +++ b/pkg/cloudprovider/provider/vsphere/rule.go @@ -0,0 +1,188 @@ +/* +Copyright 2023 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vsphere + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + + "github.com/vmware/govmomi/find" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" + "go.uber.org/zap" + + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + + "k8s.io/utils/ptr" +) + +var lock sync.Mutex + +// createOrUpdateVMAntiAffinityRule creates or updates an anti affinity rule with the name in the given cluster. +// VMs are attached to the rule based on their folder path and name prefix in vsphere. +// A minimum of two VMs is required. 
+func (p *provider) createOrUpdateVMAntiAffinityRule(ctx context.Context, log *zap.SugaredLogger, session *Session, machine *clusterv1alpha1.Machine, config *Config) error { + lock.Lock() + defer lock.Unlock() + cluster, err := session.Finder.ClusterComputeResource(ctx, config.Cluster) + if err != nil { + return err + } + + machineSetName := machine.Name[:strings.LastIndex(machine.Name, "-")] + vmsInFolder, err := session.Finder.VirtualMachineList(ctx, strings.Join([]string{config.Folder, "*"}, "/")) + if err != nil { + if errors.Is(err, &find.NotFoundError{}) { + return removeVMAntiAffinityRule(ctx, session, config.Cluster, machineSetName) + } + return err + } + + var ruleVMRef []types.ManagedObjectReference + for _, vm := range vmsInFolder { + // Only add VMs with the same machineSetName to the rule and exclude the machine itself if it is being deleted + if strings.HasPrefix(vm.Name(), machineSetName) && (vm.Name() != machine.Name || machine.DeletionTimestamp == nil) { + ruleVMRef = append(ruleVMRef, vm.Reference()) + } + } + + if len(ruleVMRef) == 0 { + log.Debugf("No VMs in folder %s with name prefix %s found", config.Folder, machineSetName) + return removeVMAntiAffinityRule(ctx, session, config.Cluster, machineSetName) + } else if len(ruleVMRef) < 2 { + // DRS rule must have at least two virtual machine members + log.Debugf("Not enough VMs in folder %s to create anti-affinity rule", config.Folder) + return nil + } + + info, err := findClusterAntiAffinityRuleByName(ctx, cluster, machineSetName) + if err != nil { + return err + } + + log.Debugf("Creating or updating anti-affinity rule for VMs %v in cluster %s", ruleVMRef, config.Cluster) + operation := types.ArrayUpdateOperationEdit + + //create new rule + if info == nil { + info = &types.ClusterAntiAffinityRuleSpec{ + ClusterRuleInfo: types.ClusterRuleInfo{ + Enabled: ptr.To(true), + Mandatory: ptr.To(false), + Name: machineSetName, + UserCreated: ptr.To(true), + }, + } + operation = 
types.ArrayUpdateOperationAdd + } + + info.Vm = ruleVMRef + spec := &types.ClusterConfigSpecEx{ + RulesSpec: []types.ClusterRuleSpec{ + { + ArrayUpdateSpec: types.ArrayUpdateSpec{ + Operation: operation, + }, + Info: info, + }, + }, + } + + log.Debugf("Performing %q for anti-affinity rule for VMs %v in cluster %s", operation, ruleVMRef, config.Cluster) + task, err := cluster.Reconfigure(ctx, spec, true) + if err != nil { + return err + } + + taskResult, err := task.WaitForResultEx(ctx) + if err != nil { + return fmt.Errorf("error waiting for cluster %v reconfiguration to complete", cluster.Name()) + } + if taskResult.State != types.TaskInfoStateSuccess { + return fmt.Errorf("cluster %v reconfiguration task was not successful", cluster.Name()) + } + log.Debugf("Successfully created/updated anti-affinity rule for machineset %v against machine %v", machineSetName, machine.Name) + + return nil +} + +// removeVMAntiAffinityRule removes an anti affinity rule with the name in the given cluster. 
+func removeVMAntiAffinityRule(ctx context.Context, session *Session, clusterPath string, name string) error { + cluster, err := session.Finder.ClusterComputeResource(ctx, clusterPath) + if err != nil { + return err + } + + info, err := findClusterAntiAffinityRuleByName(ctx, cluster, name) + if err != nil { + return err + } + + // no rule found + if info == nil { + return nil + } + + spec := &types.ClusterConfigSpecEx{ + RulesSpec: []types.ClusterRuleSpec{ + { + ArrayUpdateSpec: types.ArrayUpdateSpec{ + Operation: types.ArrayUpdateOperationRemove, + RemoveKey: info.Key, + }, + }, + }, + } + + task, err := cluster.Reconfigure(ctx, spec, true) + if err != nil { + return err + } + + taskResult, err := task.WaitForResultEx(ctx) + if err != nil { + return fmt.Errorf("error waiting for cluster %v reconfiguration to complete", cluster.Name()) + } + if taskResult.State != types.TaskInfoStateSuccess { + return fmt.Errorf("cluster %v reconfiguration task was not successful", cluster.Name()) + } + return nil +} + +func findClusterAntiAffinityRuleByName(ctx context.Context, cluster *object.ClusterComputeResource, name string) (*types.ClusterAntiAffinityRuleSpec, error) { + var props mo.ClusterComputeResource + if err := cluster.Properties(ctx, cluster.Reference(), nil, &props); err != nil { + return nil, err + } + + var info *types.ClusterAntiAffinityRuleSpec + for _, clusterRuleInfo := range props.ConfigurationEx.(*types.ClusterConfigInfoEx).Rule { + if clusterRuleInfo.GetClusterRuleInfo().Name == name { + if vmAffinityRuleInfo, ok := clusterRuleInfo.(*types.ClusterAntiAffinityRuleSpec); ok { + info = vmAffinityRuleInfo + break + } + return nil, fmt.Errorf("rule name %s in cluster %q is not a VM anti-affinity rule", name, cluster.Name()) + } + } + + return info, nil +} diff --git a/pkg/cloudprovider/provider/vsphere/types/cloudconfig.go b/pkg/cloudprovider/provider/vsphere/types/cloudconfig.go deleted file mode 100644 index a20bcda8b..000000000 --- 
a/pkg/cloudprovider/provider/vsphere/types/cloudconfig.go +++ /dev/null @@ -1,143 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package types - -import ( - "bytes" - "fmt" - "text/template" - - "github.com/Masterminds/sprig/v3" - - "github.com/kubermatic/machine-controller/pkg/ini" -) - -const ( - cloudConfigTpl = `[Global] -user = {{ .Global.User | iniEscape }} -password = {{ .Global.Password | iniEscape }} -port = {{ .Global.VCenterPort | iniEscape }} -insecure-flag = {{ .Global.InsecureFlag }} -working-dir = {{ .Global.WorkingDir | iniEscape }} -datacenter = {{ .Global.Datacenter | iniEscape }} -datastore = {{ .Global.DefaultDatastore | iniEscape }} -server = {{ .Global.VCenterIP | iniEscape }} -{{- if .Global.IPFamily }} -ip-family = {{ .Global.IPFamily | iniEscape }} -{{- end }} - -[Disk] -scsicontrollertype = {{ .Disk.SCSIControllerType | iniEscape }} - -[Workspace] -server = {{ .Workspace.VCenterIP | iniEscape }} -datacenter = {{ .Workspace.Datacenter | iniEscape }} -folder = {{ .Workspace.Folder | iniEscape }} -default-datastore = {{ .Workspace.DefaultDatastore | iniEscape }} -resourcepool-path = {{ .Workspace.ResourcePoolPath | iniEscape }} - -{{ range $name, $vc := .VirtualCenter }} -[VirtualCenter {{ $name | iniEscape }}] -user = {{ $vc.User | iniEscape }} -password = {{ $vc.Password | iniEscape }} -port = {{ $vc.VCenterPort }} -datacenters = {{ $vc.Datacenters | iniEscape }} -{{- if $vc.IPFamily }} -ip-family 
= {{ $vc.IPFamily | iniEscape }} -{{- end }} -{{ end }} -` -) - -type WorkspaceOpts struct { - VCenterIP string `gcfg:"server"` - Datacenter string `gcfg:"datacenter"` - Folder string `gcfg:"folder"` - DefaultDatastore string `gcfg:"default-datastore"` - ResourcePoolPath string `gcfg:"resourcepool-path"` -} - -type DiskOpts struct { - SCSIControllerType string `dcfg:"scsicontrollertype"` -} - -type GlobalOpts struct { - User string `gcfg:"user"` - Password string `gcfg:"password"` - InsecureFlag bool `gcfg:"insecure-flag"` - VCenterPort string `gcfg:"port"` - WorkingDir string `gcfg:"working-dir"` - Datacenter string `gcfg:"datacenter"` - DefaultDatastore string `gcfg:"datastore"` - VCenterIP string `gcfg:"server"` - ClusterID string `gcfg:"cluster-id"` - IPFamily string `gcfg:"ip-family"` // NOTE: supported only in case of out-of-tree CCM -} - -type VirtualCenterConfig struct { - User string `gcfg:"user"` - Password string `gcfg:"password"` - VCenterPort string `gcfg:"port"` - Datacenters string `gcfg:"datacenters"` - IPFamily string `gcfg:"ip-family"` // NOTE: supported only in case of out-of-tree CCM -} - -// CloudConfig is used to read and store information from the cloud configuration file. -type CloudConfig struct { - Global GlobalOpts - Disk DiskOpts - Workspace WorkspaceOpts - - VirtualCenter map[string]*VirtualCenterConfig -} - -// String converts CloudConfig into its formatted string representation. 
-func (c *CloudConfig) String() (string, error) { - funcMap := sprig.TxtFuncMap() - funcMap["iniEscape"] = ini.Escape - - tpl, err := template.New("cloud-config").Funcs(funcMap).Parse(cloudConfigTpl) - if err != nil { - return "", fmt.Errorf("failed to parse the cloud config template: %w", err) - } - - buf := &bytes.Buffer{} - if err := tpl.Execute(buf, c); err != nil { - return "", fmt.Errorf("failed to execute cloud config template: %w", err) - } - - return buf.String(), nil -} - -// CloudConfigToString converts CloudConfig into its formatted string representation. -// Deprecated: use struct receiver function String() instead. -func CloudConfigToString(c *CloudConfig) (string, error) { - funcMap := sprig.TxtFuncMap() - funcMap["iniEscape"] = ini.Escape - - tpl, err := template.New("cloud-config").Funcs(funcMap).Parse(cloudConfigTpl) - if err != nil { - return "", fmt.Errorf("failed to parse the cloud config template: %w", err) - } - - buf := &bytes.Buffer{} - if err := tpl.Execute(buf, c); err != nil { - return "", fmt.Errorf("failed to execute cloud config template: %w", err) - } - - return buf.String(), nil -} diff --git a/pkg/cloudprovider/provider/vsphere/types/cloudconfig_test.go b/pkg/cloudprovider/provider/vsphere/types/cloudconfig_test.go deleted file mode 100644 index 0f69bf4db..000000000 --- a/pkg/cloudprovider/provider/vsphere/types/cloudconfig_test.go +++ /dev/null @@ -1,138 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package types - -import ( - "flag" - "testing" - - "gopkg.in/gcfg.v1" - - testhelper "github.com/kubermatic/machine-controller/pkg/test" -) - -var update = flag.Bool("update", false, "update testdata files") - -func TestCloudConfigToString(t *testing.T) { - tests := []struct { - name string - config *CloudConfig - }{ - { - name: "simple-config", - config: &CloudConfig{ - Global: GlobalOpts{ - User: "admin", - Password: "password", - InsecureFlag: true, - }, - Workspace: WorkspaceOpts{ - VCenterIP: "https://127.0.0.1:8443", - ResourcePoolPath: "/some-resource-pool", - DefaultDatastore: "Datastore", - Folder: "some-folder", - Datacenter: "Datacenter", - }, - Disk: DiskOpts{ - SCSIControllerType: "pvscsi", - }, - VirtualCenter: map[string]*VirtualCenterConfig{}, - }, - }, - { - name: "2-virtual-centers", - config: &CloudConfig{ - Global: GlobalOpts{ - User: "admin", - Password: "password", - InsecureFlag: true, - }, - Workspace: WorkspaceOpts{ - VCenterIP: "https://127.0.0.1:8443", - ResourcePoolPath: "/some-resource-pool", - DefaultDatastore: "Datastore", - Folder: "some-folder", - Datacenter: "Datacenter", - }, - Disk: DiskOpts{ - SCSIControllerType: "pvscsi", - }, - VirtualCenter: map[string]*VirtualCenterConfig{ - "vc1": { - User: "1-some-user", - Password: "1-some-password", - VCenterPort: "443", - Datacenters: "1-foo", - }, - "vc2": { - User: "2-some-user", - Password: "2-some-password", - VCenterPort: "443", - Datacenters: "2-foo", - }, - }, - }, - }, - { - name: "3-dual-stack", - config: &CloudConfig{ - Global: GlobalOpts{ - User: "admin", - Password: "password", - InsecureFlag: true, - IPFamily: "ipv4,ipv6", - }, - Workspace: WorkspaceOpts{ - VCenterIP: "https://127.0.0.1:8443", - ResourcePoolPath: "/some-resource-pool", - DefaultDatastore: "Datastore", - Folder: "some-folder", - Datacenter: "Datacenter", - }, - Disk: DiskOpts{ - SCSIControllerType: 
"pvscsi", - }, - VirtualCenter: map[string]*VirtualCenterConfig{ - "vc1": { - User: "1-some-user", - Password: "1-some-password", - VCenterPort: "443", - Datacenters: "1-foo", - IPFamily: "ipv4,ipv6", - }, - }, - }, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - s, err := test.config.String() - if err != nil { - t.Fatal(err) - } - t.Logf("\n%s", s) - - nc := &CloudConfig{} - if err := gcfg.ReadStringInto(nc, s); err != nil { - t.Fatalf("failed to load string into config object: %v", err) - } - goldenName := test.name + ".golden" - testhelper.CompareOutput(t, goldenName, s, *update) - }) - } -} diff --git a/pkg/cloudprovider/provider/vsphere/types/testdata/2-virtual-centers.golden b/pkg/cloudprovider/provider/vsphere/types/testdata/2-virtual-centers.golden deleted file mode 100644 index ccf5cb7d0..000000000 --- a/pkg/cloudprovider/provider/vsphere/types/testdata/2-virtual-centers.golden +++ /dev/null @@ -1,33 +0,0 @@ -[Global] -user = "admin" -password = "password" -port = "" -insecure-flag = true -working-dir = "" -datacenter = "" -datastore = "" -server = "" - -[Disk] -scsicontrollertype = "pvscsi" - -[Workspace] -server = "https://127.0.0.1:8443" -datacenter = "Datacenter" -folder = "some-folder" -default-datastore = "Datastore" -resourcepool-path = "/some-resource-pool" - - -[VirtualCenter "vc1"] -user = "1-some-user" -password = "1-some-password" -port = 443 -datacenters = "1-foo" - -[VirtualCenter "vc2"] -user = "2-some-user" -password = "2-some-password" -port = 443 -datacenters = "2-foo" - diff --git a/pkg/cloudprovider/provider/vsphere/types/testdata/3-dual-stack.golden b/pkg/cloudprovider/provider/vsphere/types/testdata/3-dual-stack.golden deleted file mode 100644 index 88343530b..000000000 --- a/pkg/cloudprovider/provider/vsphere/types/testdata/3-dual-stack.golden +++ /dev/null @@ -1,29 +0,0 @@ -[Global] -user = "admin" -password = "password" -port = "" -insecure-flag = true -working-dir = "" -datacenter = "" 
-datastore = "" -server = "" -ip-family = "ipv4,ipv6" - -[Disk] -scsicontrollertype = "pvscsi" - -[Workspace] -server = "https://127.0.0.1:8443" -datacenter = "Datacenter" -folder = "some-folder" -default-datastore = "Datastore" -resourcepool-path = "/some-resource-pool" - - -[VirtualCenter "vc1"] -user = "1-some-user" -password = "1-some-password" -port = 443 -datacenters = "1-foo" -ip-family = "ipv4,ipv6" - diff --git a/pkg/cloudprovider/provider/vsphere/types/testdata/simple-config.golden b/pkg/cloudprovider/provider/vsphere/types/testdata/simple-config.golden deleted file mode 100644 index 084d188ca..000000000 --- a/pkg/cloudprovider/provider/vsphere/types/testdata/simple-config.golden +++ /dev/null @@ -1,21 +0,0 @@ -[Global] -user = "admin" -password = "password" -port = "" -insecure-flag = true -working-dir = "" -datacenter = "" -datastore = "" -server = "" - -[Disk] -scsicontrollertype = "pvscsi" - -[Workspace] -server = "https://127.0.0.1:8443" -datacenter = "Datacenter" -folder = "some-folder" -default-datastore = "Datastore" -resourcepool-path = "/some-resource-pool" - - diff --git a/pkg/cloudprovider/provider/vsphere/types/types.go b/pkg/cloudprovider/provider/vsphere/types/types.go deleted file mode 100644 index 62331ee76..000000000 --- a/pkg/cloudprovider/provider/vsphere/types/types.go +++ /dev/null @@ -1,62 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package types - -import ( - "github.com/kubermatic/machine-controller/pkg/jsonutil" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" -) - -// RawConfig represents vsphere specific configuration. -type RawConfig struct { - TemplateVMName providerconfigtypes.ConfigVarString `json:"templateVMName"` - VMNetName providerconfigtypes.ConfigVarString `json:"vmNetName"` - Username providerconfigtypes.ConfigVarString `json:"username"` - Password providerconfigtypes.ConfigVarString `json:"password"` - VSphereURL providerconfigtypes.ConfigVarString `json:"vsphereURL"` - Datacenter providerconfigtypes.ConfigVarString `json:"datacenter"` - - // Cluster is a noop field, it's not used anywhere but left here intentionally for backward compatibility purposes - Cluster providerconfigtypes.ConfigVarString `json:"cluster"` - - Folder providerconfigtypes.ConfigVarString `json:"folder"` - ResourcePool providerconfigtypes.ConfigVarString `json:"resourcePool"` - - // Either Datastore or DatastoreCluster have to be provided. - DatastoreCluster providerconfigtypes.ConfigVarString `json:"datastoreCluster"` - Datastore providerconfigtypes.ConfigVarString `json:"datastore"` - - CPUs int32 `json:"cpus"` - MemoryMB int64 `json:"memoryMB"` - DiskSizeGB *int64 `json:"diskSizeGB,omitempty"` - Tags []Tag `json:"tags,omitempty"` - AllowInsecure providerconfigtypes.ConfigVarBool `json:"allowInsecure"` -} - -// Tag represents vsphere tag. 
-type Tag struct { - Description string `json:"description,omitempty"` - ID string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - CategoryID string `json:"categoryID"` -} - -func GetConfig(pconfig providerconfigtypes.Config) (*RawConfig, error) { - rawConfig := &RawConfig{} - - return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) -} diff --git a/pkg/cloudprovider/provider/vsphere/vmgroup.go b/pkg/cloudprovider/provider/vsphere/vmgroup.go new file mode 100644 index 000000000..ad23e14cc --- /dev/null +++ b/pkg/cloudprovider/provider/vsphere/vmgroup.go @@ -0,0 +1,125 @@ +/* +Copyright 2024 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vsphere + +import ( + "context" + "fmt" + "strings" + + "github.com/vmware/govmomi/vim25/types" + "go.uber.org/zap" + + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" +) + +func (p *provider) addToVMGroup(ctx context.Context, log *zap.SugaredLogger, session *Session, machine *clusterv1alpha1.Machine, config *Config) error { + lock.Lock() + defer lock.Unlock() + + // Check if the VM group exists + vmGroup, err := findVMGroup(ctx, session, config.Cluster, config.VMGroup) + if err != nil { + return err + } + + // We have to find all VMs in the folder and add them to the VM group. VMGroup only contains VM reference ID which is not enough to + // identify the VM by name. 
+ machineSetName := machine.Name[:strings.LastIndex(machine.Name, "-")] + vmsInFolder, err := session.Finder.VirtualMachineList(ctx, strings.Join([]string{config.Folder, "*"}, "/")) + if err != nil { + return fmt.Errorf("failed to find VMs in folder: %w", err) + } + + var vmRefs []types.ManagedObjectReference + for _, vm := range vmsInFolder { + // Only add VMs with the same machineSetName to the rule and exclude the machine itself if it is being deleted + if strings.HasPrefix(vm.Name(), machineSetName) && (vm.Name() != machine.Name || machine.DeletionTimestamp == nil) { + vmRefs = append(vmRefs, vm.Reference()) + } + } + + var vmRefsToAdd []types.ManagedObjectReference + for _, vm := range vmRefs { + found := false + for _, existingVM := range vmGroup.Vm { + if existingVM.Value == vm.Value { + log.Debugf("VM %s already in VM group %s", machine.Name, config.VMGroup) + found = true + break + } + } + if !found { + vmRefsToAdd = append(vmRefsToAdd, vm) + } + } + + // Add the VM to the VM group + vmGroup.Vm = append(vmGroup.Vm, vmRefsToAdd...) 
+ cluster, err := session.Finder.ClusterComputeResource(ctx, config.Cluster) + if err != nil { + return err + } + + spec := &types.ClusterConfigSpecEx{ + GroupSpec: []types.ClusterGroupSpec{ + { + ArrayUpdateSpec: types.ArrayUpdateSpec{ + Operation: types.ArrayUpdateOperationEdit, + }, + Info: vmGroup, + }, + }, + } + + log.Debugf("Adding VM %s in VM group %s", machine.Name, config.VMGroup) + task, err := cluster.Reconfigure(ctx, spec, true) + if err != nil { + return err + } + + taskResult, err := task.WaitForResultEx(ctx) + if err != nil { + return fmt.Errorf("error waiting for cluster %v reconfiguration to complete", cluster.Name()) + } + if taskResult.State != types.TaskInfoStateSuccess { + return fmt.Errorf("cluster %v reconfiguration task was not successful", cluster.Name()) + } + log.Debugf("Successfully added VM %s in VM group %s", machine.Name, config.VMGroup) + return nil +} + +func findVMGroup(ctx context.Context, session *Session, clusterName, vmGroup string) (*types.ClusterVmGroup, error) { + cluster, err := session.Finder.ClusterComputeResource(ctx, clusterName) + if err != nil { + return nil, err + } + + clusterConfigInfoEx, err := cluster.Configuration(ctx) + if err != nil { + return nil, err + } + + for _, group := range clusterConfigInfoEx.Group { + if clusterVMGroup, ok := group.(*types.ClusterVmGroup); ok { + if clusterVMGroup.Name == vmGroup { + return clusterVMGroup, nil + } + } + } + return nil, fmt.Errorf("cannot find VM group %s", vmGroup) +} diff --git a/pkg/cloudprovider/provider/vultr/provider.go b/pkg/cloudprovider/provider/vultr/provider.go index 739036c01..3a4c77e37 100644 --- a/pkg/cloudprovider/provider/vultr/provider.go +++ b/pkg/cloudprovider/provider/vultr/provider.go @@ -18,57 +18,74 @@ package vultr import ( "context" + "encoding/base64" "errors" "fmt" + "net/http" "strconv" + "time" - "github.com/vultr/govultr/v2" + "github.com/vultr/govultr/v3" + "go.uber.org/zap" "golang.org/x/oauth2" - cloudprovidererrors 
"github.com/kubermatic/machine-controller/pkg/cloudprovider/errors" + cloudprovidererrors "k8c.io/machine-controller/pkg/cloudprovider/errors" + "k8c.io/machine-controller/pkg/cloudprovider/instance" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + "k8c.io/machine-controller/sdk/apis/cluster/common" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + vultrtypes "k8c.io/machine-controller/sdk/cloudprovider/vultr" + "k8c.io/machine-controller/sdk/providerconfig" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/instance" - vultrtypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/provider/vultr/types" - cloudprovidertypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/types" - "github.com/kubermatic/machine-controller/pkg/providerconfig" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" - - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/wait" +) + +const ( + createCheckPeriod = 10 * time.Second + createCheckTimeout = 5 * time.Minute + createCheckFailedWaitPeriod = 10 * time.Second ) +type ValidVPC struct { + IsAllValid bool + InvalidVpcs []string +} + type provider struct { - configVarResolver *providerconfig.ConfigVarResolver + configVarResolver providerconfig.ConfigVarResolver } // New returns a new vultr provider. 
-func New(configVarResolver *providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { +func New(configVarResolver providerconfig.ConfigVarResolver) cloudprovidertypes.Provider { return &provider{configVarResolver: configVarResolver} } type Config struct { - APIKey string - Region string - Plan string - OsID string - Tags []string + PhysicalMachine bool + APIKey string + Region string + Plan string + OsID string + Tags []string + VpcID []string + EnableVPC bool + EnableIPv6 bool + EnableVPC2 bool + Vpc2ID []string } -func getIDForOS(os providerconfigtypes.OperatingSystem) (int, error) { +func getIDForOS(os providerconfig.OperatingSystem) (int, error) { switch os { - case providerconfigtypes.OperatingSystemUbuntu: + case providerconfig.OperatingSystemUbuntu: return 1743, nil - // name: CentOS 7 x64 - case providerconfigtypes.OperatingSystemCentOS: - return 167, nil // name: Rocky Linux 9 x64 - case providerconfigtypes.OperatingSystemRockyLinux: + case providerconfig.OperatingSystemRockyLinux: return 1869, nil } - return 0, providerconfigtypes.ErrOSNotSupported + return 0, providerconfig.ErrOSNotSupported } func getClient(ctx context.Context, apiKey string) *govultr.Client { @@ -77,12 +94,8 @@ func getClient(ctx context.Context, apiKey string) *govultr.Client { return govultr.NewClient(oauth2.NewClient(ctx, ts)) } -func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *providerconfigtypes.Config, error) { - if provSpec.Value == nil { - return nil, nil, fmt.Errorf("machine.spec.providerconfig.value is nil") - } - - pconfig, err := providerconfigtypes.GetConfig(provSpec) +func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *providerconfig.Config, error) { + pconfig, err := providerconfig.GetConfig(provSpec) if err != nil { return nil, nil, err } @@ -97,36 +110,105 @@ func (p *provider) getConfig(provSpec clusterv1alpha1.ProviderSpec) (*Config, *p } c := Config{} - c.APIKey, err = 
p.configVarResolver.GetConfigVarStringValueOrEnv(rawConfig.APIKey, "VULTR_API_KEY") + + c.APIKey, err = p.configVarResolver.GetStringValueOrEnv(rawConfig.APIKey, "VULTR_API_KEY") if err != nil { return nil, nil, fmt.Errorf("failed to get the value of \"apiKey\" field, error = %w", err) } - c.Plan, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.Plan) + c.Plan, err = p.configVarResolver.GetStringValue(rawConfig.Plan) if err != nil { return nil, nil, err } - c.Region, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.Region) + c.Region, err = p.configVarResolver.GetStringValue(rawConfig.Region) if err != nil { return nil, nil, err } - c.OsID, err = p.configVarResolver.GetConfigVarStringValue(rawConfig.OsID) + c.OsID, err = p.configVarResolver.GetStringValue(rawConfig.OsID) if err != nil { return nil, nil, err } c.Tags = rawConfig.Tags + c.PhysicalMachine = rawConfig.PhysicalMachine + c.EnableIPv6 = rawConfig.EnableIPv6 + c.VpcID = rawConfig.VpcID + c.EnableVPC = rawConfig.EnableVPC + c.EnableVPC2 = rawConfig.EnableVPC2 + c.Vpc2ID = rawConfig.Vpc2ID return &c, pconfig, err } -func (p *provider) AddDefaults(spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { +func (p *provider) AddDefaults(_ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { return spec, nil } -func (p *provider) Validate(ctx context.Context, spec clusterv1alpha1.MachineSpec) error { +func (p *provider) validateVpc(ctx context.Context, client *govultr.Client, c *Config, legacyVPC bool) (ValidVPC, error) { + validVpc := ValidVPC{IsAllValid: true} + accountvpcs := []string{} + var requestedvpcs []string + + if legacyVPC { + for { + vpcs, meta, err := func(ctx context.Context, client *govultr.Client) ([]govultr.VPC, *govultr.Meta, error) { + vpcs, meta, resp, err := client.VPC.List(ctx, &govultr.ListOptions{}) + if err != nil { + return nil, nil, vltErrorToTerminalError(resp.StatusCode, err) + } + defer resp.Body.Close() 
+ + return vpcs, meta, nil + }(ctx, client) + if err != nil { + return validVpc, err + } + for _, v := range vpcs { + accountvpcs = append(accountvpcs, v.ID) + } + if meta.Links.Next == "" { + break + } + } + requestedvpcs = c.VpcID + } else { + for { + vpcs, meta, err := func(ctx context.Context, client *govultr.Client) ([]govultr.VPC2, *govultr.Meta, error) { + vpcs, meta, resp, err := client.VPC2.List(ctx, &govultr.ListOptions{}) + if err != nil { + return nil, nil, vltErrorToTerminalError(resp.StatusCode, err) + } + defer resp.Body.Close() + + return vpcs, meta, nil + }(ctx, client) + if err != nil { + return validVpc, err + } + for _, v := range vpcs { + accountvpcs = append(accountvpcs, v.ID) + } + if meta.Links.Next == "" { + break + } + } + requestedvpcs = c.Vpc2ID + } + accountvpcsset := sets.New[string](accountvpcs...) + // Iterator to provide user the exact mismatches + for _, v := range requestedvpcs { + if !accountvpcsset.Has(v) { + validVpc.IsAllValid = false + validVpc.InvalidVpcs = append(validVpc.InvalidVpcs, v) + } + } + + return validVpc, nil +} + +func (p *provider) Validate(ctx context.Context, _ *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) error { c, pc, err := p.getConfig(spec.ProviderSpec) if err != nil { return fmt.Errorf("failed to parse config: %w", err) @@ -155,12 +237,13 @@ func (p *provider) Validate(ctx context.Context, spec clusterv1alpha1.MachineSpe client := getClient(ctx, c.APIKey) - plans, err := client.Region.Availability(ctx, c.Region, "") + plans, resp, err := client.Region.Availability(ctx, c.Region, "") // TODO: Validate region separately if err != nil { - return fmt.Errorf("invalid/not supported region specified %q: %w", c.Region, err) + return err } + resp.Body.Close() planFound := false @@ -168,52 +251,171 @@ func (p *provider) Validate(ctx context.Context, spec clusterv1alpha1.MachineSpe for _, plan := range plans.AvailablePlans { if plan == c.Plan { planFound = true + break } } if !planFound { - return 
fmt.Errorf("invalid/not supported plan specified %q: %w", c.Plan, err) + return fmt.Errorf("invalid/not supported plan specified %q, available plans are: %q, %w", c.Plan, plans.AvailablePlans, err) + } + + validvpc, err := p.validateVpc(ctx, client, c, false) + if err != nil { + return err + } + if !validvpc.IsAllValid { + return fmt.Errorf("invalid/not supported vpc id specified %v", validvpc.InvalidVpcs) } + + if c.PhysicalMachine { + // Don't check for validity of legacy VPC as BareMetal doesn't support VPC v1 + return nil + } + + // Verify legacy VPCs + validvpc, err = p.validateVpc(ctx, client, c, true) + if err != nil { + return err + } + + if !validvpc.IsAllValid { + return fmt.Errorf("invalid/not supported vpc id specified %v", validvpc.InvalidVpcs) + } + return nil } -func (p *provider) get(ctx context.Context, machine *clusterv1alpha1.Machine) (*vultrInstance, error) { - c, _, err := p.getConfig(machine.Spec.ProviderSpec) +func (p *provider) getPhysicalMachine(ctx context.Context, c *Config, machine *clusterv1alpha1.Machine) (*vultrPhysicalMachine, error) { + client := getClient(ctx, c.APIKey) + // Not looping on metadata assuming that tagged machines won;t cross + // pagination boundary + instances, _, resp, err := client.BareMetalServer.List(ctx, &govultr.ListOptions{ + Tag: string(machine.UID), + }) if err != nil { - return nil, cloudprovidererrors.TerminalError{ - Reason: common.InvalidConfigurationMachineError, - Message: fmt.Sprintf("Failed to parse MachineSpec, due to %v", err), + return nil, vltErrorToTerminalError(resp.StatusCode, err) + } + resp.Body.Close() + for _, instance := range instances { + if sets.NewString(instance.Tags...).Has(string(machine.UID)) { + return &vultrPhysicalMachine{instance: &instance}, nil } } + return nil, cloudprovidererrors.ErrInstanceNotFound +} +func (p *provider) getVirtualMachine(ctx context.Context, c *Config, machine *clusterv1alpha1.Machine) (*vultrVirtualMachine, error) { client := getClient(ctx, c.APIKey) - 
instances, _, err := client.Instance.List(ctx, &govultr.ListOptions{ + instances, _, resp, err := client.Instance.List(ctx, &govultr.ListOptions{ Tag: string(machine.UID), }) if err != nil { - return nil, vltErrorToTerminalError(err, "failed to list servers") + return nil, vltErrorToTerminalError(resp.StatusCode, err) } + resp.Body.Close() for _, instance := range instances { - for _, tag := range instance.Tags { - if tag == string(machine.UID) { - return &vultrInstance{instance: &instance}, nil - } + if sets.NewString(instance.Tags...).Has(string(machine.UID)) && + instance.Label == machine.Name { + return &vultrVirtualMachine{instance: &instance}, nil } } return nil, cloudprovidererrors.ErrInstanceNotFound } -func (p *provider) Get(ctx context.Context, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { - return p.get(ctx, machine) +func (p *provider) Get(ctx context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData) (instance.Instance, error) { + c, _, err := p.getConfig(machine.Spec.ProviderSpec) + if err != nil { + return nil, cloudprovidererrors.TerminalError{ + Reason: common.InvalidConfigurationMachineError, + Message: fmt.Sprintf("Failed to parse MachineSpec, due to %v", err), + } + } + if !c.PhysicalMachine { + return p.getVirtualMachine(ctx, c, machine) + } + + return p.getPhysicalMachine(ctx, c, machine) +} + +func (p *provider) waitForInstanceCreation(ctx context.Context, c *Config, instance instance.Instance, machine *clusterv1alpha1.Machine) error { + return wait.PollUntilContextTimeout(ctx, createCheckPeriod, createCheckTimeout, false, func(ctx context.Context) (bool, error) { + var err error + if !c.PhysicalMachine { + _, err = p.getVirtualMachine(ctx, c, machine) + } else { + _, err = p.getPhysicalMachine(ctx, c, machine) + } + + if err != nil { + if cloudprovidererrors.IsNotFound(err) { + // Continue the loop as the instances was successfully fetched 
+ // just that our instance was not found + return false, nil + } + if isTerminalErr, _, _ := cloudprovidererrors.IsTerminalError(err); isTerminalErr { + return true, err + } + // Wait for some time as instance creation is successful + // just that we are not able to fetch it + time.Sleep(createCheckFailedWaitPeriod) + return false, fmt.Errorf("instance %q created but controller failed to fetch instance details", instance.Name()) + } + return true, nil + }) +} + +func (p *provider) createVirtualMachine(ctx context.Context, client *govultr.Client, c *Config, machine *clusterv1alpha1.Machine, osid int, userdata string) (*vultrVirtualMachine, error) { + tags := sets.List[string](sets.New(c.Tags...).Insert(string(machine.UID))) + + instanceCreateRequest := govultr.InstanceCreateReq{ + Region: c.Region, + Plan: c.Plan, + OsID: osid, + + Label: machine.Spec.Name, + UserData: base64.StdEncoding.EncodeToString([]byte(userdata)), + Tags: tags, + + EnableIPv6: &c.EnableIPv6, + EnableVPC: &c.EnableVPC, + AttachVPC: c.VpcID, + EnableVPC2: &c.EnableVPC2, + AttachVPC2: c.Vpc2ID, + } + instance, resp, err := client.Instance.Create(ctx, &instanceCreateRequest) + if err != nil { + return nil, vltErrorToTerminalError(resp.StatusCode, err) + } + resp.Body.Close() + + return &vultrVirtualMachine{instance: instance}, nil } -func (p *provider) GetCloudConfig(spec clusterv1alpha1.MachineSpec) (config string, name string, err error) { - return "", "", nil +func (p *provider) createPhysicalMachine(ctx context.Context, client *govultr.Client, c *Config, machine *clusterv1alpha1.Machine, osid int, userdata string) (*vultrPhysicalMachine, error) { + tags := sets.NewString(c.Tags...).Insert(string(machine.UID)).List() + + bareMetalCreateRequest := govultr.BareMetalCreate{ + Region: c.Region, + Plan: c.Plan, + Label: machine.Spec.Name, + UserData: base64.StdEncoding.EncodeToString([]byte(userdata)), + EnableIPv6: &c.EnableIPv6, + Tags: tags, + OsID: osid, + AttachVPC2: c.Vpc2ID, + EnableVPC2: 
&c.EnableVPC2, + } + instance, resp, err := client.BareMetalServer.Create(ctx, &bareMetalCreateRequest) + if err != nil { + return nil, vltErrorToTerminalError(resp.StatusCode, err) + } + resp.Body.Close() + return &vultrPhysicalMachine{instance: instance}, nil } -func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { +func (p *provider) Create(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, _ *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { c, pc, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return nil, cloudprovidererrors.TerminalError{ @@ -222,8 +424,6 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, } } - client := getClient(ctx, c.APIKey) - if c.OsID == "" { osID, err := getIDForOS(pc.OperatingSystem) if err != nil { @@ -234,37 +434,46 @@ func (p *provider) Create(ctx context.Context, machine *clusterv1alpha1.Machine, } c.OsID = strconv.Itoa(osID) } - - if c.Tags == nil { - c.Tags = []string{} - } - - c.Tags = append(c.Tags, string(machine.UID)) - strOsID, err := strconv.Atoi(c.OsID) if err != nil { - return nil, err + return nil, cloudprovidererrors.TerminalError{ + Reason: common.InvalidConfigurationMachineError, + Message: fmt.Sprintf("Cannot parse operating system id %q, details = %v", pc.OperatingSystem, err), + } } + client := getClient(ctx, c.APIKey) - instanceCreateRequest := govultr.InstanceCreateReq{ - Region: c.Region, - Plan: c.Plan, - Label: machine.Spec.Name, - UserData: userdata, - Tags: c.Tags, - OsID: strOsID, + var instance instance.Instance + if !c.PhysicalMachine { + instance, err = p.createVirtualMachine(ctx, client, c, machine, strOsID, userdata) + if err != nil { + return nil, err + } + } else { + instance, err = p.createPhysicalMachine(ctx, client, c, machine, strOsID, userdata) + if err != nil { + return nil, err + 
} } - res, err := client.Instance.Create(ctx, &instanceCreateRequest) + err = p.waitForInstanceCreation(ctx, c, instance, machine) if err != nil { - return nil, vltErrorToTerminalError(err, "failed to create server") + if !c.PhysicalMachine { + if err := client.Instance.Delete(ctx, instance.ID()); err != nil { + log.Error("Failed to cleanup instance after failed creation: %v", err) + } + } else { + if err := client.BareMetalServer.Delete(ctx, instance.ID()); err != nil { + log.Error("Failed to cleanup bare metal instance after failed creation: %v", err) + } + } + return nil, err } - - return &vultrInstance{instance: res}, nil + return instance, nil } -func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (bool, error) { - instance, err := p.Get(ctx, machine, data) +func (p *provider) Cleanup(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (bool, error) { + instance, err := p.Get(ctx, log, machine, data) if err != nil { if errors.Is(err, cloudprovidererrors.ErrInstanceNotFound) { return true, nil @@ -281,8 +490,14 @@ func (p *provider) Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine } client := getClient(ctx, c.APIKey) - if err = client.Instance.Delete(ctx, instance.ID()); err != nil { - return false, vltErrorToTerminalError(err, "failed to delete server") + if !c.PhysicalMachine { + if err := client.Instance.Delete(ctx, instance.ID()); err != nil { + return false, fmt.Errorf("failed to delete instance: %w", err) + } + } else { + if err := client.BareMetalServer.Delete(ctx, instance.ID()); err != nil { + return false, fmt.Errorf("failed to delete bare metal instance: %w", err) + } } return false, nil @@ -300,55 +515,88 @@ func (p *provider) MachineMetricsLabels(machine *clusterv1alpha1.Machine) (map[s return labels, err } -func (p *provider) MigrateUID(ctx context.Context, machine *clusterv1alpha1.Machine, newUID 
types.UID) error { +func (p *provider) MigrateUID(ctx context.Context, _ *zap.SugaredLogger, machine *clusterv1alpha1.Machine, newUID types.UID) error { c, _, err := p.getConfig(machine.Spec.ProviderSpec) if err != nil { return fmt.Errorf("failed to decode providerconfig: %w", err) } client := getClient(ctx, c.APIKey) - instances, _, err := client.Instance.List(ctx, &govultr.ListOptions{PerPage: 1000}) - if err != nil { - return fmt.Errorf("failed to list instances: %w", err) - } - for _, instance := range instances { - if instance.Label == machine.Spec.Name && sets.NewString(instance.Tags...).Has(string(machine.UID)) { - _, err = client.Instance.Update(ctx, instance.ID, &govultr.InstanceUpdateReq{ - Tags: sets.NewString(instance.Tags...).Delete(string(machine.UID)).Insert(string(newUID)).List(), - }) - if err != nil { - return fmt.Errorf("failed to tag instance with new UID tag: %w", err) - } + if !c.PhysicalMachine { + instance, err := p.getVirtualMachine(ctx, c, machine) + if err != nil { + return err + } + _, resp, err := client.Instance.Update(ctx, instance.instance.ID, &govultr.InstanceUpdateReq{ + Tags: sets.NewString(instance.instance.Tags...).Delete(string(machine.UID)).Insert(string(newUID)).List(), + }) + if err != nil { + return vltErrorToTerminalError(resp.StatusCode, err) } + resp.Body.Close() + return nil } - + instance, err := p.getPhysicalMachine(ctx, c, machine) + if err != nil { + return fmt.Errorf("failed to get instance with UID tag: %w", err) + } + _, resp, err := client.BareMetalServer.Update(ctx, instance.instance.ID, &govultr.BareMetalUpdate{ + Tags: sets.NewString(instance.instance.Tags...).Delete(string(machine.UID)).Insert(string(newUID)).List(), + }) + if err != nil { + return vltErrorToTerminalError(resp.StatusCode, err) + } + resp.Body.Close() return nil } -type vultrInstance struct { +type vultrVirtualMachine struct { instance *govultr.Instance } +type vultrPhysicalMachine struct { + instance *govultr.BareMetalServer +} -func (v 
*vultrInstance) Name() string { +func (v *vultrVirtualMachine) Name() string { + return v.instance.Label +} +func (v *vultrPhysicalMachine) Name() string { return v.instance.Label } -func (v *vultrInstance) ID() string { +func (v *vultrVirtualMachine) ID() string { + return v.instance.ID +} +func (v *vultrPhysicalMachine) ID() string { return v.instance.ID } -func (v *vultrInstance) ProviderID() string { +func (v *vultrVirtualMachine) ProviderID() string { + if v.instance == nil || v.instance.ID == "" { + return "" + } + return "vultr://" + v.instance.ID +} +func (v *vultrPhysicalMachine) ProviderID() string { + if v.instance == nil || v.instance.ID == "" { + return "" + } return "vultr://" + v.instance.ID } -func (v *vultrInstance) Addresses() map[string]v1.NodeAddressType { - addresses := map[string]v1.NodeAddressType{} - addresses[v.instance.MainIP] = v1.NodeExternalIP - addresses[v.instance.InternalIP] = v1.NodeInternalIP +func (v *vultrVirtualMachine) Addresses() map[string]corev1.NodeAddressType { + addresses := map[string]corev1.NodeAddressType{} + addresses[v.instance.MainIP] = corev1.NodeExternalIP + addresses[v.instance.InternalIP] = corev1.NodeInternalIP + return addresses +} +func (v *vultrPhysicalMachine) Addresses() map[string]corev1.NodeAddressType { + addresses := map[string]corev1.NodeAddressType{} + addresses[v.instance.MainIP] = corev1.NodeExternalIP return addresses } -func (v *vultrInstance) Status() instance.Status { +func (v *vultrVirtualMachine) Status() instance.Status { switch v.instance.Status { case "active": return instance.StatusRunning @@ -359,17 +607,30 @@ func (v *vultrInstance) Status() instance.Status { return instance.StatusUnknown } } - -func vltErrorToTerminalError(err error, msg string) error { - prepareAndReturnError := func() error { - return fmt.Errorf("%s, due to %w", msg, err) +func (v *vultrPhysicalMachine) Status() instance.Status { + switch v.instance.Status { + case "active": + return instance.StatusRunning + case 
"pending": + return instance.StatusCreating + // "suspending" or "resizing" + default: + return instance.StatusUnknown } - if err != nil { - return prepareAndReturnError() +} + +func vltErrorToTerminalError(status int, err error) error { + switch status { + case http.StatusUnauthorized: + return cloudprovidererrors.TerminalError{ + Reason: common.InvalidConfigurationMachineError, + Message: "A request has been rejected due to invalid credentials which were taken from the MachineSpec", + } + default: + return err } - return err } -func (p *provider) SetMetricsForMachines(machines clusterv1alpha1.MachineList) error { +func (p *provider) SetMetricsForMachines(_ clusterv1alpha1.MachineList) error { return nil } diff --git a/pkg/cloudprovider/testing/testing.go b/pkg/cloudprovider/testing/testing.go index aae583cff..25ba4f08a 100644 --- a/pkg/cloudprovider/testing/testing.go +++ b/pkg/cloudprovider/testing/testing.go @@ -19,7 +19,7 @@ package testing import ( "testing" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -35,18 +35,18 @@ type Creator struct { ProviderSpecGetter ProviderSpecGetter } -func (c Creator) CreateMachine(t *testing.T) *v1alpha1.Machine { - return &v1alpha1.Machine{ +func (c Creator) CreateMachine(t *testing.T) *clusterv1alpha1.Machine { + return &clusterv1alpha1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: c.Name, Namespace: c.Namespace, }, - Spec: v1alpha1.MachineSpec{ + Spec: clusterv1alpha1.MachineSpec{ ObjectMeta: metav1.ObjectMeta{ Name: c.Name, Namespace: c.Namespace, }, - ProviderSpec: v1alpha1.ProviderSpec{ + ProviderSpec: clusterv1alpha1.ProviderSpec{ Value: &runtime.RawExtension{ Raw: c.ProviderSpecGetter(t), }, diff --git a/pkg/cloudprovider/types/types.go b/pkg/cloudprovider/types/types.go index adc9010b1..e9e3c241e 100644 --- a/pkg/cloudprovider/types/types.go 
+++ b/pkg/cloudprovider/types/types.go @@ -18,9 +18,12 @@ package types import ( "context" + "fmt" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/instance" + "go.uber.org/zap" + + "k8c.io/machine-controller/pkg/cloudprovider/instance" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/types" @@ -31,13 +34,13 @@ import ( // Provider exposed all required functions to interact with a cloud provider. type Provider interface { // AddDefaults will read the MachineSpec and apply defaults for provider specific fields - AddDefaults(spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) + AddDefaults(log *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) // Validate validates the given machine's specification. // // In case of any error a "terminal" error should be set, // See v1alpha1.MachineStatus for more info - Validate(ctx context.Context, machinespec clusterv1alpha1.MachineSpec) error + Validate(ctx context.Context, log *zap.SugaredLogger, machinespec clusterv1alpha1.MachineSpec) error // Get gets a node that is associated with the given machine. // @@ -45,20 +48,17 @@ type Provider interface { // which indicates that a manual interaction is required to recover from this state. 
// See v1alpha1.MachineStatus for more info and TerminalError type // - // In case the instance cannot be found, github.com/kubermatic/machine-controller/pkg/cloudprovider/errors/ErrInstanceNotFound will be returned - Get(ctx context.Context, machine *clusterv1alpha1.Machine, data *ProviderData) (instance.Instance, error) - - // GetCloudConfig will return the cloud provider specific cloud-config, which gets consumed by the kubelet - GetCloudConfig(spec clusterv1alpha1.MachineSpec) (config string, name string, err error) + // In case the instance cannot be found, k8c.io/machine-controller/pkg/cloudprovider/errors/ErrInstanceNotFound will be returned + Get(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, data *ProviderData) (instance.Instance, error) // Create creates a cloud instance according to the given machine - Create(ctx context.Context, machine *clusterv1alpha1.Machine, data *ProviderData, userdata string) (instance.Instance, error) + Create(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, data *ProviderData, userdata string) (instance.Instance, error) // Cleanup will delete the instance associated with the machine and all associated resources. // If all resources have been cleaned up, true will be returned. // In case the cleanup involves asynchronous deletion of resources & those resources are not gone yet, // false should be returned. This is to indicate that the cleanup is not done, but needs to be called again at a later point - Cleanup(ctx context.Context, machine *clusterv1alpha1.Machine, data *ProviderData) (bool, error) + Cleanup(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, data *ProviderData) (bool, error) // MachineMetricsLabels returns labels used for the Prometheus metrics // about created machines, e.g. 
instance type, instance size, region @@ -69,7 +69,7 @@ type Provider interface { // MigrateUID is called when the controller migrates types and the UID of the machine object changes // All cloud providers that use Machine.UID to uniquely identify resources must implement this - MigrateUID(ctx context.Context, machine *clusterv1alpha1.Machine, newUID types.UID) error + MigrateUID(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, newUID types.UID) error // SetMetricsForMachines allows providers to provide provider-specific metrics. This may be implemented // as no-op @@ -100,7 +100,7 @@ func GetMachineUpdater(ctx context.Context, client ctrlruntimeclient.Client) Mac namespacedName := types.NamespacedName{Namespace: machine.Namespace, Name: machine.Name} return retry.RetryOnConflict(retry.DefaultBackoff, func() error { if err := client.Get(ctx, namespacedName, machine); err != nil { - return err + return fmt.Errorf("failed to get machine: %w", err) } // Check if we actually change something and only update if that is the case. 
diff --git a/pkg/cloudprovider/util/cloud_init_settings.go b/pkg/cloudprovider/util/cloud_init_settings.go index ed32c6e5a..46f185d16 100644 --- a/pkg/cloudprovider/util/cloud_init_settings.go +++ b/pkg/cloudprovider/util/cloud_init_settings.go @@ -24,7 +24,7 @@ import ( "gopkg.in/yaml.v3" corev1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/clientcmd" @@ -58,7 +58,7 @@ func ExtractTokenAndAPIServer(ctx context.Context, userdata string, client ctrlr func CreateMachineCloudInitSecret(ctx context.Context, userdata, machineName string, client ctrlruntimeclient.Client) error { secret := &corev1.Secret{} if err := client.Get(ctx, types.NamespacedName{Namespace: CloudInitNamespace, Name: machineName}, secret); err != nil { - if kerrors.IsNotFound(err) { + if apierrors.IsNotFound(err) { secret = &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: machineName, diff --git a/pkg/cloudprovider/util/net.go b/pkg/cloudprovider/util/net.go index bdb1fb2a7..fc4ac076f 100644 --- a/pkg/cloudprovider/util/net.go +++ b/pkg/cloudprovider/util/net.go @@ -23,11 +23,6 @@ import ( "net" ) -const ( - ErrIPv6OnlyUnsupported = "IPv6 only network family not supported yet" - ErrUnknownNetworkFamily = "Unknown IP family %q only IPv4,IPv6,IPv4+IPv6 are valid values" -) - func CIDRToIPAndNetMask(ipv4 string) (string, string, int, error) { ip, ipNet, err := net.ParseCIDR(ipv4) if err != nil { @@ -59,32 +54,3 @@ func GenerateRandMAC() (net.HardwareAddr, error) { return mac, nil } - -// IPFamily IPv4 | IPv6 | IPv4+IPv6. 
-type IPFamily string - -const ( - IPFamilyUnspecified IPFamily = "" // interpreted as IPv4 - IPFamilyIPv4 IPFamily = "IPv4" // IPv4 only - IPFamilyIPv6 IPFamily = "IPv6" // IPv6 only - IPFamilyIPv4IPv6 IPFamily = "IPv4+IPv6" // dualstack with IPv4 as primary - IPFamilyIPv6IPv4 IPFamily = "IPv6+IPv4" // dualstack with IPv6 as primary -) - -func (f IPFamily) HasIPv6() bool { - return f == IPFamilyIPv6 || f == IPFamilyIPv4IPv6 || f == IPFamilyIPv6IPv4 -} - -func (f IPFamily) HasIPv4() bool { - return f == IPFamilyUnspecified || f == IPFamilyIPv4 || f == IPFamilyIPv4IPv6 || f == IPFamilyIPv6IPv4 -} - -func (f IPFamily) IsDualstack() bool { - return f == IPFamilyIPv4IPv6 || f == IPFamilyIPv6IPv4 -} - -// IsLinkLocal checks if given ip address is link local.. -func IsLinkLocal(ipAddr string) bool { - addr := net.ParseIP(ipAddr) - return addr.IsLinkLocalMulticast() || addr.IsLinkLocalUnicast() -} diff --git a/pkg/cloudprovider/util/util.go b/pkg/cloudprovider/util/util.go index 63a15a83b..2801f835b 100644 --- a/pkg/cloudprovider/util/util.go +++ b/pkg/cloudprovider/util/util.go @@ -19,20 +19,20 @@ package util import ( "fmt" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - cloudprovidertypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/types" - kuberneteshelper "github.com/kubermatic/machine-controller/pkg/kubernetes" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + kuberneteshelper "k8c.io/machine-controller/pkg/kubernetes" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" ) // RemoveFinalizerOnInstanceNotFound checks whether a finalizer exists and removes it on demand. 
func RemoveFinalizerOnInstanceNotFound(finalizer string, - machine *v1alpha1.Machine, + machine *clusterv1alpha1.Machine, provider *cloudprovidertypes.ProviderData) (bool, error) { if !kuberneteshelper.HasFinalizer(machine, finalizer) { return true, nil } - if err := provider.Update(machine, func(updatedMachine *v1alpha1.Machine) { + if err := provider.Update(machine, func(updatedMachine *clusterv1alpha1.Machine) { updatedMachine.Finalizers = kuberneteshelper.RemoveFinalizer(updatedMachine.Finalizers, finalizer) }); err != nil { return false, fmt.Errorf("failed updating machine %v finzaliers: %w", machine.Name, err) diff --git a/pkg/cloudprovider/util/util_test.go b/pkg/cloudprovider/util/util_test.go index 1c15707bd..c6367d2d6 100644 --- a/pkg/cloudprovider/util/util_test.go +++ b/pkg/cloudprovider/util/util_test.go @@ -21,9 +21,8 @@ import ( "reflect" "testing" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - cloudprovidertypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/types" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -39,7 +38,7 @@ func TestRemoveFinalizerOnInstanceNotFound(t *testing.T) { var fakeClient = fakectrlruntimeclient. NewClientBuilder(). WithScheme(scheme.Scheme). 
- WithObjects(&v1alpha1.Machine{ + WithObjects(&clusterv1alpha1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "test_machine", Finalizers: []string{ @@ -51,13 +50,13 @@ func TestRemoveFinalizerOnInstanceNotFound(t *testing.T) { var testCases = []struct { name string - machine *v1alpha1.Machine - expectedMachine *v1alpha1.Machine + machine *clusterv1alpha1.Machine + expectedMachine *clusterv1alpha1.Machine providerData *cloudprovidertypes.ProviderData }{ { name: "Test remove machine finalizer", - machine: &v1alpha1.Machine{ + machine: &clusterv1alpha1.Machine{ ObjectMeta: metav1.ObjectMeta{ UID: "123456", Name: "test_machine", @@ -66,7 +65,7 @@ func TestRemoveFinalizerOnInstanceNotFound(t *testing.T) { "test_finalizer_2"}, }, }, - expectedMachine: &v1alpha1.Machine{ + expectedMachine: &clusterv1alpha1.Machine{ ObjectMeta: metav1.ObjectMeta{ UID: "123456", Name: "test_machine", @@ -87,7 +86,7 @@ func TestRemoveFinalizerOnInstanceNotFound(t *testing.T) { t.Fatalf("failed removing finalizer: %v", err) } - foundMachine := &v1alpha1.Machine{} + foundMachine := &clusterv1alpha1.Machine{} if err := fakeClient.Get( context.Background(), types.NamespacedName{Name: "test_machine"}, diff --git a/pkg/cloudprovider/validationwrapper.go b/pkg/cloudprovider/validationwrapper.go index 965e2205b..8f98e54f8 100644 --- a/pkg/cloudprovider/validationwrapper.go +++ b/pkg/cloudprovider/validationwrapper.go @@ -18,14 +18,16 @@ package cloudprovider import ( "context" + "errors" "fmt" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/instance" - cloudprovidertypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/types" + "go.uber.org/zap" + + "k8c.io/machine-controller/pkg/cloudprovider/instance" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" ) type 
cachingValidationWrapper struct { @@ -38,61 +40,61 @@ func NewValidationCacheWrappingCloudProvider(actualProvider cloudprovidertypes.P } // AddDefaults just calls the underlying cloudproviders AddDefaults. -func (w *cachingValidationWrapper) AddDefaults(spec v1alpha1.MachineSpec) (v1alpha1.MachineSpec, error) { - return w.actualProvider.AddDefaults(spec) +func (w *cachingValidationWrapper) AddDefaults(log *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) (clusterv1alpha1.MachineSpec, error) { + return w.actualProvider.AddDefaults(log, spec) } // Validate tries to get the validation result from the cache and if not found, calls the // cloudproviders Validate and saves that to the cache. -func (w *cachingValidationWrapper) Validate(ctx context.Context, spec v1alpha1.MachineSpec) error { +func (w *cachingValidationWrapper) Validate(ctx context.Context, log *zap.SugaredLogger, spec clusterv1alpha1.MachineSpec) error { result, exists, err := cache.Get(spec) if err != nil { return fmt.Errorf("error getting validation result from cache: %w", err) } if exists { - klog.V(6).Infof("Got cache hit for validation") + log.Debug("Got cache hit for validation") return result } - klog.V(6).Infof("Got cache miss for validation") - err = w.actualProvider.Validate(ctx, spec) - if err := cache.Set(spec, err); err != nil { - return fmt.Errorf("failed to set cache after validation: %w", err) + log.Debug("Got cache miss for validation") + err = w.actualProvider.Validate(ctx, log, spec) + + // do not cache canceled contexts (e.g. the validation request was canceled client-side) + // and timeouts (assumed to be temporary) + if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) { + if err := cache.Set(spec, err); err != nil { + return fmt.Errorf("failed to set cache after validation: %w", err) + } } return err } // Get just calls the underlying cloudproviders Get. 
-func (w *cachingValidationWrapper) Get(ctx context.Context, machine *v1alpha1.Machine, data *cloudprovidertypes.ProviderData) (instance.Instance, error) { - return w.actualProvider.Get(ctx, machine, data) -} - -// GetCloudConfig just calls the underlying cloudproviders GetCloudConfig. -func (w *cachingValidationWrapper) GetCloudConfig(spec v1alpha1.MachineSpec) (string, string, error) { - return w.actualProvider.GetCloudConfig(spec) +func (w *cachingValidationWrapper) Get(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData) (instance.Instance, error) { + return w.actualProvider.Get(ctx, log, machine, data) } // Create just calls the underlying cloudproviders Create. -func (w *cachingValidationWrapper) Create(ctx context.Context, machine *v1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { - return w.actualProvider.Create(ctx, machine, data, userdata) +func (w *cachingValidationWrapper) Create(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine, data *cloudprovidertypes.ProviderData, userdata string) (instance.Instance, error) { + return w.actualProvider.Create(ctx, log, machine, data, userdata) } // Cleanup just calls the underlying cloudproviders Cleanup. -func (w *cachingValidationWrapper) Cleanup(ctx context.Context, m *v1alpha1.Machine, mcd *cloudprovidertypes.ProviderData) (bool, error) { - return w.actualProvider.Cleanup(ctx, m, mcd) +func (w *cachingValidationWrapper) Cleanup(ctx context.Context, log *zap.SugaredLogger, m *clusterv1alpha1.Machine, mcd *cloudprovidertypes.ProviderData) (bool, error) { + return w.actualProvider.Cleanup(ctx, log, m, mcd) } // MigrateUID just calls the underlying cloudproviders MigrateUID. 
-func (w *cachingValidationWrapper) MigrateUID(ctx context.Context, m *v1alpha1.Machine, newUID types.UID) error { - return w.actualProvider.MigrateUID(ctx, m, newUID) +func (w *cachingValidationWrapper) MigrateUID(ctx context.Context, log *zap.SugaredLogger, m *clusterv1alpha1.Machine, newUID types.UID) error { + return w.actualProvider.MigrateUID(ctx, log, m, newUID) } // MachineMetricsLabels just calls the underlying cloudproviders MachineMetricsLabels. -func (w *cachingValidationWrapper) MachineMetricsLabels(machine *v1alpha1.Machine) (map[string]string, error) { +func (w *cachingValidationWrapper) MachineMetricsLabels(machine *clusterv1alpha1.Machine) (map[string]string, error) { return w.actualProvider.MachineMetricsLabels(machine) } -func (w *cachingValidationWrapper) SetMetricsForMachines(machines v1alpha1.MachineList) error { +func (w *cachingValidationWrapper) SetMetricsForMachines(machines clusterv1alpha1.MachineList) error { return w.actualProvider.SetMetricsForMachines(machines) } diff --git a/pkg/clusterinfo/configmap.go b/pkg/clusterinfo/configmap.go index 399de944f..4792f043a 100644 --- a/pkg/clusterinfo/configmap.go +++ b/pkg/clusterinfo/configmap.go @@ -24,19 +24,20 @@ import ( "os" "strconv" - corev1 "k8s.io/api/core/v1" + "go.uber.org/zap" + + discoveryv1 "k8s.io/api/discovery/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" - "k8s.io/klog" ) const ( - configMapName = "cluster-info" - kubernetesEndpointsName = "kubernetes" - securePortName = "https" + configMapName = "cluster-info" + kubernetesService = "kubernetes" + securePortName = "https" ) func New(clientConfig *rest.Config, kubeClient kubernetes.Interface) *KubeconfigProvider { @@ -49,17 +50,16 @@ func New(clientConfig *rest.Config, kubeClient kubernetes.Interface) *Kubeconfig type KubeconfigProvider struct { clientConfig *rest.Config // We use a 
kubeClient to not accidentally create listers in the ctrlruntimeclient for - // secrets, configmaps and endpoints, as that would result in a lot of traffic we don't + // secrets, configmaps and endpointslices, as that would result in a lot of traffic we don't // care about kubeClient kubernetes.Interface } -func (p *KubeconfigProvider) GetKubeconfig(ctx context.Context) (*clientcmdapi.Config, error) { +func (p *KubeconfigProvider) GetKubeconfig(ctx context.Context, log *zap.SugaredLogger) (*clientcmdapi.Config, error) { cm, err := p.getKubeconfigFromConfigMap(ctx) if err != nil { - klog.V(6).Infof("could not get cluster-info kubeconfig from configmap: %v", err) - klog.V(6).Info("falling back to retrieval via endpoint") - return p.buildKubeconfigFromEndpoint(ctx) + log.Debugw("Failed to get cluster-info kubeconfig from configmap; falling back to retrieval via endpointslice", zap.Error(err)) + return p.buildKubeconfigFromEndpointSlice(ctx) } return cm, nil } @@ -76,65 +76,76 @@ func (p *KubeconfigProvider) getKubeconfigFromConfigMap(ctx context.Context) (*c return clientcmd.Load([]byte(data)) } -func (p *KubeconfigProvider) buildKubeconfigFromEndpoint(ctx context.Context) (*clientcmdapi.Config, error) { - e, err := p.kubeClient.CoreV1().Endpoints(metav1.NamespaceDefault).Get(ctx, kubernetesEndpointsName, metav1.GetOptions{}) +func (p *KubeconfigProvider) buildKubeconfigFromEndpointSlice(ctx context.Context) (*clientcmdapi.Config, error) { + slices, err := p.kubeClient.DiscoveryV1().EndpointSlices(metav1.NamespaceDefault).List(ctx, + metav1.ListOptions{LabelSelector: discoveryv1.LabelServiceName + "=" + kubernetesService}) if err != nil { - return nil, fmt.Errorf("failed to get endpoint from lister: %w", err) + return nil, fmt.Errorf("failed to list endpointslices: %w", err) } - if len(e.Subsets) == 0 { - return nil, errors.New("no subsets in the kubernetes endpoints resource") + if len(slices.Items) == 0 { + return nil, errors.New("no endpointslices found for 
kubernetes service") } - subset := e.Subsets[0] - if len(subset.Addresses) == 0 { - return nil, errors.New("no addresses in the first subset of the kubernetes endpoints resource") + caData, err := getCAData(p.clientConfig) + if err != nil { + return nil, fmt.Errorf("failed to get ca data from config: %w", err) } - address := subset.Addresses[0] - ip := net.ParseIP(address.IP) - if ip == nil { - return nil, errors.New("could not parse ip from ") - } + for _, slice := range slices.Items { + port := getSecurePortFromSlice(slice.Ports) + if port == nil { + continue + } + + // Find a ready endpoint with a valid address + for _, endpoint := range slice.Endpoints { + if endpoint.Conditions.Ready == nil || !*endpoint.Conditions.Ready { + continue + } + + if len(endpoint.Addresses) == 0 { + continue + } - getSecurePort := func(endpointSubset corev1.EndpointSubset) *corev1.EndpointPort { - for _, p := range subset.Ports { - if p.Name == securePortName { - return &p + ip := net.ParseIP(endpoint.Addresses[0]) + if ip == nil { + continue } + + url := fmt.Sprintf("https://%s", net.JoinHostPort(ip.String(), strconv.Itoa(int(*port.Port)))) + + return &clientcmdapi.Config{ + Kind: "Config", + APIVersion: "v1", + Clusters: map[string]*clientcmdapi.Cluster{ + "": { + Server: url, + CertificateAuthorityData: caData, + }, + }, + }, nil } - return nil } - port := getSecurePort(subset) - if port == nil { - return nil, errors.New("no secure port in the subset") - } - url := fmt.Sprintf("https://%s", net.JoinHostPort(ip.String(), strconv.Itoa(int(port.Port)))) + return nil, errors.New("no ready endpoint found in kubernetes endpointslices") +} - caData, err := getCAData(p.clientConfig) - if err != nil { - return nil, fmt.Errorf("failed to get ca data from config: %w", err) +func getSecurePortFromSlice(ports []discoveryv1.EndpointPort) *discoveryv1.EndpointPort { + for _, p := range ports { + if p.Name != nil && *p.Name == securePortName && p.Port != nil { + return &p + } } - - return 
&clientcmdapi.Config{ - Kind: "Config", - APIVersion: "v1", - Clusters: map[string]*clientcmdapi.Cluster{ - "": { - Server: url, - CertificateAuthorityData: caData, - }, - }, - }, nil + return nil } func getCAData(config *rest.Config) ([]byte, error) { - if len(config.TLSClientConfig.CAData) > 0 { - return config.TLSClientConfig.CAData, nil + if len(config.CAData) > 0 { + return config.CAData, nil } - return os.ReadFile(config.TLSClientConfig.CAFile) + return os.ReadFile(config.CAFile) } func (p *KubeconfigProvider) GetBearerToken() string { diff --git a/pkg/clusterinfo/configmap_test.go b/pkg/clusterinfo/configmap_test.go index ef45b3a8f..abd94e87a 100644 --- a/pkg/clusterinfo/configmap_test.go +++ b/pkg/clusterinfo/configmap_test.go @@ -22,13 +22,16 @@ import ( "github.com/go-test/deep" "github.com/pmezard/go-difflib/difflib" + "go.uber.org/zap" corev1 "k8s.io/api/core/v1" + discoveryv1 "k8s.io/api/discovery/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" + "k8s.io/utils/ptr" ) var ( @@ -41,7 +44,6 @@ clusters: contexts: null current-context: "" kind: Config -preferences: {} users: null ` clusterInfoKubeconfig2 = `apiVersion: v1 @@ -53,7 +55,6 @@ clusters: contexts: null current-context: "" kind: Config -preferences: {} users: null ` ) @@ -80,27 +81,76 @@ func TestKubeconfigProvider_GetKubeconfig(t *testing.T) { resConfig: clusterInfoKubeconfig1, }, { - name: "successful from in-cluster via endpoints - clusterIP", - objects: []runtime.Object{&corev1.Endpoints{ + name: "successful from in-cluster via endpointslice - clusterIP", + objects: []runtime.Object{&discoveryv1.EndpointSlice{ ObjectMeta: metav1.ObjectMeta{ - Name: "kubernetes", + Name: "kubernetes-abc", Namespace: "default", + Labels: map[string]string{ + discoveryv1.LabelServiceName: "kubernetes", + }, }, - Subsets: []corev1.EndpointSubset{ + AddressType: 
discoveryv1.AddressTypeIPv4, + Endpoints: []discoveryv1.Endpoint{ { - Addresses: []corev1.EndpointAddress{ - { - IP: "192.168.1.2", - }, + Addresses: []string{"192.168.1.2"}, + Conditions: discoveryv1.EndpointConditions{ + Ready: ptr.To(true), }, - Ports: []corev1.EndpointPort{ - { - Name: "https", - Port: 8443, - }, + }, + }, + Ports: []discoveryv1.EndpointPort{ + { + Name: ptr.To("https"), + Port: ptr.To(int32(8443)), + Protocol: ptr.To(corev1.ProtocolTCP), + }, + }, + }}, + clientConfig: &rest.Config{ + TLSClientConfig: rest.TLSClientConfig{ + CAData: []byte( + "foo", + ), + }, + }, + err: nil, + resConfig: clusterInfoKubeconfig2, + }, + { + name: "skips not-ready endpoint and uses ready one", + objects: []runtime.Object{&discoveryv1.EndpointSlice{ + ObjectMeta: metav1.ObjectMeta{ + Name: "kubernetes-abc", + Namespace: "default", + Labels: map[string]string{ + discoveryv1.LabelServiceName: "kubernetes", + }, + }, + AddressType: discoveryv1.AddressTypeIPv4, + Endpoints: []discoveryv1.Endpoint{ + { + // Not-ready endpoint should be skipped + Addresses: []string{"192.168.1.99"}, + Conditions: discoveryv1.EndpointConditions{ + Ready: ptr.To(false), + }, + }, + { + // Ready endpoint should be used + Addresses: []string{"192.168.1.2"}, + Conditions: discoveryv1.EndpointConditions{ + Ready: ptr.To(true), }, }, }, + Ports: []discoveryv1.EndpointPort{ + { + Name: ptr.To("https"), + Port: ptr.To(int32(8443)), + Protocol: ptr.To(corev1.ProtocolTCP), + }, + }, }}, clientConfig: &rest.Config{ TLSClientConfig: rest.TLSClientConfig{ @@ -117,14 +167,14 @@ func TestKubeconfigProvider_GetKubeconfig(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { ctx := context.Background() - client := fake.NewSimpleClientset(test.objects...) + client := fake.NewClientset(test.objects...) 
provider := KubeconfigProvider{ clientConfig: test.clientConfig, kubeClient: client, } - resConfig, err := provider.GetKubeconfig(ctx) + resConfig, err := provider.GetKubeconfig(ctx, zap.NewNop().Sugar()) if diff := deep.Equal(err, test.err); diff != nil { t.Error(diff) } diff --git a/pkg/containerruntime/config.go b/pkg/containerruntime/config.go deleted file mode 100644 index f93a54d9f..000000000 --- a/pkg/containerruntime/config.go +++ /dev/null @@ -1,128 +0,0 @@ -/* -Copyright 2022 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package containerruntime - -import ( - "context" - "encoding/json" - "fmt" - "net/url" - "regexp" - "strings" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" - ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" -) - -type Opts struct { - ContainerRuntime string - ContainerdVersion string - InsecureRegistries string - RegistryMirrors string - RegistryCredentialsSecret string - PauseImage string - ContainerdRegistryMirrors RegistryMirrorsFlags -} - -func BuildConfig(opts Opts) (Config, error) { - var insecureRegistries []string - for _, registry := range strings.Split(opts.InsecureRegistries, ",") { - if trimmedRegistry := strings.TrimSpace(registry); trimmedRegistry != "" { - insecureRegistries = append(insecureRegistries, trimmedRegistry) - } - } - - // we want to match e.g. 
docker.io=registry.docker-cn.com, having docker.io as the first - // match group and registry.docker-cn.com as the second one. - registryMirrorRegexp := regexp.MustCompile(`^([a-zA-Z0-9\.-]+)=(.*)`) - - if opts.ContainerdRegistryMirrors == nil { - opts.ContainerdRegistryMirrors = make(RegistryMirrorsFlags) - } - - for _, mirror := range strings.Split(opts.RegistryMirrors, ",") { - if trimmedMirror := strings.TrimSpace(mirror); trimmedMirror != "" { - registry := "docker.io" - - if matches := registryMirrorRegexp.FindStringSubmatch(trimmedMirror); matches != nil { - registry = matches[1] - trimmedMirror = matches[2] - } - - if !strings.HasPrefix(trimmedMirror, "http") { - trimmedMirror = "https://" + trimmedMirror - } - - _, err := url.Parse(trimmedMirror) - if err != nil { - return Config{}, fmt.Errorf("incorrect mirror provided: %w", err) - } - - if opts.ContainerdRegistryMirrors[registry] == nil { - opts.ContainerdRegistryMirrors[registry] = make([]string, 0, 1) - } - - opts.ContainerdRegistryMirrors[registry] = append(opts.ContainerdRegistryMirrors[registry], trimmedMirror) - } - } - - // Only validate registry credential here - if opts.RegistryCredentialsSecret != "" { - if secRef := strings.Split(opts.RegistryCredentialsSecret, "/"); len(secRef) != 2 { - return Config{}, fmt.Errorf("-node-registry-credentials-secret is in incorrect format %q, should be in 'namespace/secretname'", opts.RegistryCredentialsSecret) - } - } - - return get( - opts.ContainerRuntime, - withInsecureRegistries(insecureRegistries), - withRegistryMirrors(opts.ContainerdRegistryMirrors), - withSandboxImage(opts.PauseImage), - withContainerdVersion(opts.ContainerdVersion), - ), nil -} - -func GetContainerdAuthConfig(ctx context.Context, client ctrlruntimeclient.Client, registryCredentialsSecret string) (map[string]AuthConfig, error) { - registryCredentials := map[string]AuthConfig{} - - if secRef := strings.SplitN(registryCredentialsSecret, "/", 2); len(secRef) == 2 { - var credsSecret 
corev1.Secret - err := client.Get(ctx, types.NamespacedName{Namespace: secRef[0], Name: secRef[1]}, &credsSecret) - if err != nil { - return nil, fmt.Errorf("failed to retrieve registry credentials secret object: %w", err) - } - - switch credsSecret.Type { - case corev1.SecretTypeDockerConfigJson: - var regCred DockerCfgJSON - if err := json.Unmarshal(credsSecret.Data[".dockerconfigjson"], ®Cred); err != nil { - return nil, fmt.Errorf("failed to unmarshal registry credentials: %w", err) - } - registryCredentials = regCred.Auths - default: - for registry, data := range credsSecret.Data { - var regCred AuthConfig - if err := json.Unmarshal(data, ®Cred); err != nil { - return nil, fmt.Errorf("failed to unmarshal registry credentials: %w", err) - } - registryCredentials[registry] = regCred - } - } - } - return registryCredentials, nil -} diff --git a/pkg/containerruntime/config_test.go b/pkg/containerruntime/config_test.go deleted file mode 100644 index 4ee6ecd79..000000000 --- a/pkg/containerruntime/config_test.go +++ /dev/null @@ -1,134 +0,0 @@ -/* -Copyright 2022 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package containerruntime - -import ( - "errors" - "fmt" - "testing" -) - -func TestContainerdRegistryMirror(t *testing.T) { - type testCase struct { - desc string - flag string - expectedMirrors map[string][]string - expectedError error - } - - testCases := []testCase{ - { - desc: "no registry mirrors set", - flag: "", - expectedMirrors: map[string][]string{}, - expectedError: nil, - }, - - { - desc: "registry mirror without name and protocol", - flag: "registry-v1.docker.io", - expectedMirrors: map[string][]string{ - "docker.io": {"https://registry-v1.docker.io"}, - }, - expectedError: nil, - }, - { - desc: "multiple registry mirrors without name, with and without protocol", - flag: "registry-v1.docker.io,http://registry.docker-cn.com", - expectedMirrors: map[string][]string{ - "docker.io": { - "https://registry-v1.docker.io", - "http://registry.docker-cn.com", - }, - }, - expectedError: nil, - }, - - { - desc: "registry mirror with name and without protocol", - flag: "quay.io=my-quay-io-mirror.example.com", - expectedMirrors: map[string][]string{ - "quay.io": {"https://my-quay-io-mirror.example.com"}, - }, - expectedError: nil, - }, - { - desc: "registry mirror with name and protocol", - flag: "quay.io=http://my-quay-io-mirror.example.com", - expectedMirrors: map[string][]string{ - "quay.io": {"http://my-quay-io-mirror.example.com"}, - }, - expectedError: nil, - }, - { - desc: "multiple registry mirrors with same name", - flag: "quay.io=http://my-quay-io-mirror.example.com,quay.io=example.net", - expectedMirrors: map[string][]string{ - "quay.io": { - "http://my-quay-io-mirror.example.com", - "https://example.net", - }, - }, - expectedError: nil, - }, - - { - desc: "complex example", - flag: "quay.io=http://my-quay-io-mirror.example.com,quay.io=example.net," + - "registry-v1.docker.io,http://registry.docker-cn.com," + - "ghcr.io=http://foo/bar", - expectedMirrors: map[string][]string{ - "quay.io": { - "http://my-quay-io-mirror.example.com", - 
"https://example.net", - }, - "docker.io": { - "https://registry-v1.docker.io", - "http://registry.docker-cn.com", - }, - "ghcr.io": { - "http://foo/bar", - }, - }, - expectedError: nil, - }, - } - - for _, tc := range testCases { - t.Run(tc.desc, func(t *testing.T) { - opts := Opts{ - ContainerRuntime: containerdName, - RegistryMirrors: tc.flag, - } - - config, err := BuildConfig(opts) - if tc.expectedError != nil { - if !errors.Is(err, tc.expectedError) { - t.Errorf("expected error %q but got %q", tc.expectedError, err) - } - } - - if err != nil { - t.Errorf("expected success but got error: %q", err) - } - - if fmt.Sprint(config.RegistryMirrors) != fmt.Sprint(tc.expectedMirrors) { - t.Errorf("expected to get %v instead got: %v", tc.expectedMirrors, config.RegistryMirrors) - } - }) - } -} diff --git a/pkg/containerruntime/containerd.go b/pkg/containerruntime/containerd.go deleted file mode 100644 index 02bc3e85f..000000000 --- a/pkg/containerruntime/containerd.go +++ /dev/null @@ -1,297 +0,0 @@ -/* -Copyright 2020 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package containerruntime - -import ( - "fmt" - "strings" - "text/template" - - "github.com/BurntSushi/toml" - - "github.com/kubermatic/machine-controller/pkg/providerconfig/types" -) - -const ( - DefaultContainerdVersion = "1.6*" -) - -type Containerd struct { - insecureRegistries []string - registryMirrors map[string][]string - sandboxImage string - registryCredentials map[string]AuthConfig - version string -} - -func (eng *Containerd) ConfigFileName() string { - return "/etc/containerd/config.toml" -} - -func (eng *Containerd) AuthConfig() (string, error) { - return "", nil -} - -func (eng *Containerd) AuthConfigFileName() string { - return "" -} - -func (eng *Containerd) KubeletFlags() []string { - return []string{ - "--container-runtime=remote", - "--container-runtime-endpoint=unix:///run/containerd/containerd.sock", - } -} - -func (eng *Containerd) ScriptFor(os types.OperatingSystem) (string, error) { - var buf strings.Builder - - args := struct { - ContainerdVersion string - }{ - ContainerdVersion: DefaultContainerdVersion, - } - - if eng.version != "" { - args.ContainerdVersion = eng.version - } - - switch os { - case types.OperatingSystemAmazonLinux2: - err := containerdAmzn2Template.Execute(&buf, args) - return buf.String(), err - case types.OperatingSystemCentOS, types.OperatingSystemRHEL, types.OperatingSystemRockyLinux: - err := containerdYumTemplate.Execute(&buf, args) - return buf.String(), err - case types.OperatingSystemUbuntu: - err := containerdAptTemplate.Execute(&buf, args) - return buf.String(), err - case types.OperatingSystemFlatcar: - err := containedFlatcarTemplate.Execute(&buf, args) - return buf.String(), err - } - - return "", fmt.Errorf("unknown OS: %s", os) -} - -var ( - containedFlatcarTemplate = template.Must(template.New("containerd-flatcar").Parse(` -mkdir -p /etc/systemd/system/containerd.service.d - -cat <= 1.24") - - switch { - case moreThan124.Check(kubeletVersion) || cfg.Containerd != nil: - // docker support has been 
removed in Kubernetes 1.24 - return containerd - case cfg.Docker != nil: - return docker - } - - return docker -} diff --git a/pkg/containerruntime/docker.go b/pkg/containerruntime/docker.go deleted file mode 100644 index 801e8e0ea..000000000 --- a/pkg/containerruntime/docker.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright 2020 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package containerruntime - -import ( - "encoding/json" - "fmt" - "strings" - "text/template" - - "github.com/kubermatic/machine-controller/pkg/providerconfig/types" - "github.com/kubermatic/machine-controller/pkg/userdata/helper" -) - -const ( - LegacyDockerContainerdVersion = "1.4*" - DefaultDockerContainerdVersion = "1.6*" - DefaultDockerVersion = "20.10" - LegacyDockerVersion = "19.03" -) - -type Docker struct { - insecureRegistries []string - registryMirrors []string - containerLogMaxFiles string - containerLogMaxSize string - registryCredentials map[string]AuthConfig - containerdVersion string -} - -type DockerCfgJSON struct { - Auths map[string]AuthConfig `json:"auths,omitempty"` -} - -func (eng *Docker) Config() (string, error) { - return helper.DockerConfig(eng.insecureRegistries, eng.registryMirrors, eng.containerLogMaxFiles, eng.containerLogMaxSize) -} - -func (eng *Docker) ConfigFileName() string { - return "/etc/docker/daemon.json" -} - -func (eng *Docker) AuthConfig() (string, error) { - if eng.registryCredentials == nil { - return "", nil - } - - cfg := 
DockerCfgJSON{ - Auths: eng.registryCredentials, - } - b, err := json.MarshalIndent(cfg, "", " ") - - return string(b), err -} - -func (eng *Docker) AuthConfigFileName() string { - return "/root/.docker/config.json" -} - -func (eng *Docker) KubeletFlags() []string { - return []string{ - "--container-runtime=docker", - "--container-runtime-endpoint=unix:///var/run/dockershim.sock", - } -} - -func (eng *Docker) ScriptFor(os types.OperatingSystem) (string, error) { - var buf strings.Builder - - args := struct { - DockerVersion string - ContainerdVersion string - }{ - DockerVersion: DefaultDockerVersion, - ContainerdVersion: DefaultDockerContainerdVersion, - } - - if eng.containerdVersion != "" { - args.ContainerdVersion = eng.containerdVersion - } - - switch os { - case types.OperatingSystemAmazonLinux2: - args.ContainerdVersion = LegacyDockerContainerdVersion - err := dockerAmazonTemplate.Execute(&buf, args) - return buf.String(), err - case types.OperatingSystemCentOS, types.OperatingSystemRHEL, types.OperatingSystemRockyLinux: - err := dockerYumTemplate.Execute(&buf, args) - return buf.String(), err - case types.OperatingSystemUbuntu: - err := dockerAptTemplate.Execute(&buf, args) - return buf.String(), err - case types.OperatingSystemFlatcar: - err := dockerFlatcarTemplate.Execute(&buf, args) - return buf.String(), err - } - - return "", fmt.Errorf("unknown OS: %s", os) -} - -var ( - dockerFlatcarTemplate = template.Must(template.New("docker-flatcar").Parse(` -systemctl daemon-reload -systemctl enable --now docker -`)) - - dockerAmazonTemplate = template.Must(template.New("docker-yum-amzn2").Parse(` -mkdir -p /etc/systemd/system/containerd.service.d /etc/systemd/system/docker.service.d - -cat < r.skipEvictionAfter { - klog.V(0).Infof("Skipping eviction for machine %q since the deletion got triggered %.2f minutes ago", machine.Name, r.skipEvictionAfter.Minutes()) + if machine.DeletionTimestamp != nil && time.Since(machine.DeletionTimestamp.Time) > 
r.skipEvictionAfter { + log.Infow("Skipping eviction since the deletion got triggered too long ago", "threshold", r.skipEvictionAfter) return false, nil } - // No node - Nothing to evict - if machine.Status.NodeRef == nil { - klog.V(4).Infof("Skipping eviction for machine %q since it does not have a node", machine.Name) - return false, nil + hasMachine, err := r.machineHasValidNode(ctx, machine) + if err != nil { + return false, err } - node := &corev1.Node{} - if err := r.client.Get(ctx, types.NamespacedName{Name: machine.Status.NodeRef.Name}, node); err != nil { - // Node does not exist - Nothing to evict - if kerrors.IsNotFound(err) { - klog.V(4).Infof("Skipping eviction for machine %q since it does not have a node", machine.Name) - return false, nil - } - return false, fmt.Errorf("failed to get node %q", machine.Status.NodeRef.Name) + if !hasMachine { + log.Debug("Skipping eviction since it does not have a node") + return false, nil } // We must check if an eviction is actually possible and only then return true @@ -543,17 +572,31 @@ func (r *Reconciler) shouldEvict(ctx context.Context, machine *clusterv1alpha1.M // If we arrived here we didn't find any machine without a NodeRef and we didn't // find any node that is schedulable, so eviction can't succeed - klog.V(4).Infof("Skipping eviction for machine %q since there is no possible target for an eviction", machine.Name) + log.Debug("Skipping eviction since there is no possible target for an eviction") return false, nil } // deleteMachine makes sure that an instance has gone in a series of steps. 
-func (r *Reconciler) deleteMachine(ctx context.Context, prov cloudprovidertypes.Provider, providerName providerconfigtypes.CloudProvider, machine *clusterv1alpha1.Machine) (*reconcile.Result, error) { - shouldEvict, err := r.shouldEvict(ctx, machine) - if err != nil { - return nil, err +func (r *Reconciler) deleteMachine( + ctx context.Context, + log *zap.SugaredLogger, + prov cloudprovidertypes.Provider, + providerName providerconfig.CloudProvider, + machine *clusterv1alpha1.Machine, + skipEviction bool, +) (*reconcile.Result, error) { + var ( + shouldEvict bool + err error + ) + + if !skipEviction { + shouldEvict, err = r.shouldEvict(ctx, log, machine) + if err != nil { + return nil, err + } } - shouldCleanUpVolumes, err := r.shouldCleanupVolumes(ctx, machine, providerName) + shouldCleanUpVolumes, err := r.shouldCleanupVolumes(ctx, log, machine, providerName) if err != nil { return nil, err } @@ -561,13 +604,13 @@ func (r *Reconciler) deleteMachine(ctx context.Context, prov cloudprovidertypes. var evictedSomething, deletedSomething bool var volumesFree = true if shouldEvict { - evictedSomething, err = eviction.New(ctx, machine.Status.NodeRef.Name, r.client, r.kubeClient).Run() + evictedSomething, err = eviction.New(machine.Status.NodeRef.Name, r.client, r.kubeClient).Run(ctx, log) if err != nil { return nil, fmt.Errorf("failed to evict node %s: %w", machine.Status.NodeRef.Name, err) } } if shouldCleanUpVolumes { - deletedSomething, volumesFree, err = poddeletion.New(ctx, machine.Status.NodeRef.Name, r.client, r.kubeClient).Run() + deletedSomething, volumesFree, err = poddeletion.New(machine.Status.NodeRef.Name, r.client, r.kubeClient).Run(ctx, log) if err != nil { return nil, fmt.Errorf("failed to delete pods bound to volumes running on node %s: %w", machine.Status.NodeRef.Name, err) } @@ -577,7 +620,7 @@ func (r *Reconciler) deleteMachine(ctx context.Context, prov cloudprovidertypes. 
return &reconcile.Result{RequeueAfter: 10 * time.Second}, nil } - if result, err := r.deleteCloudProviderInstance(ctx, prov, machine); result != nil || err != nil { + if result, err := r.deleteCloudProviderInstance(ctx, log, prov, machine); result != nil || err != nil { return result, err } @@ -590,15 +633,21 @@ func (r *Reconciler) deleteMachine(ctx context.Context, prov cloudprovidertypes. return nil, nil } - nodes, err := r.retrieveNodesRelatedToMachine(ctx, machine) + nodes, err := r.retrieveNodesRelatedToMachine(ctx, log, machine) if err != nil { return nil, err } - return nil, r.deleteNodeForMachine(ctx, nodes, machine) + if err := r.deleteNodeForMachine(ctx, log, nodes, machine); err != nil { + return nil, err + } + + r.metrics.Deprovisioning.Observe(time.Until(machine.DeletionTimestamp.Time).Abs().Seconds()) + + return nil, nil } -func (r *Reconciler) retrieveNodesRelatedToMachine(ctx context.Context, machine *clusterv1alpha1.Machine) ([]*corev1.Node, error) { +func (r *Reconciler) retrieveNodesRelatedToMachine(ctx context.Context, log *zap.SugaredLogger, machine *clusterv1alpha1.Machine) ([]*corev1.Node, error) { nodes := make([]*corev1.Node, 0) // If there's NodeRef on the Machine object, retrieve the node by using the @@ -608,10 +657,10 @@ func (r *Reconciler) retrieveNodesRelatedToMachine(ctx context.Context, machine objKey := ctrlruntimeclient.ObjectKey{Name: machine.Status.NodeRef.Name} node := &corev1.Node{} if err := r.client.Get(ctx, objKey, node); err != nil { - if !kerrors.IsNotFound(err) { + if !apierrors.IsNotFound(err) { return nil, fmt.Errorf("failed to get node %s: %w", machine.Status.NodeRef.Name, err) } - klog.V(2).Infof("node %q does not longer exist for machine %q", machine.Status.NodeRef.Name, machine.Spec.Name) + log.Debugw("Node does not longer exist for machine", "node", machine.Status.NodeRef.Name) } else { nodes = append(nodes, node) } @@ -627,7 +676,7 @@ func (r *Reconciler) retrieveNodesRelatedToMachine(ctx context.Context, 
machine } if len(nodeList.Items) == 0 { // We just want log that we didn't found the node. - klog.V(3).Infof("No node found for the machine %s", machine.Spec.Name) + log.Debug("No node found for the machine") } for i := range nodeList.Items { @@ -638,37 +687,36 @@ func (r *Reconciler) retrieveNodesRelatedToMachine(ctx context.Context, machine return nodes, nil } -func (r *Reconciler) deleteCloudProviderInstance(ctx context.Context, prov cloudprovidertypes.Provider, machine *clusterv1alpha1.Machine) (*reconcile.Result, error) { +func (r *Reconciler) deleteCloudProviderInstance(ctx context.Context, log *zap.SugaredLogger, prov cloudprovidertypes.Provider, machine *clusterv1alpha1.Machine) (*reconcile.Result, error) { finalizers := sets.NewString(machine.Finalizers...) if !finalizers.Has(FinalizerDeleteInstance) { return nil, nil } // Delete the instance - completelyGone, err := prov.Cleanup(ctx, machine, r.providerData) + completelyGone, err := prov.Cleanup(ctx, log, machine, r.providerData) if err != nil { message := fmt.Sprintf("%v. Please manually delete %s finalizer from the machine object.", err, FinalizerDeleteInstance) return nil, r.updateMachineErrorIfTerminalError(machine, common.DeleteMachineError, message, err, "failed to delete machine at cloud provider") } - if !completelyGone { // As the instance is not completely gone yet, we need to recheck in a few seconds. 
return &reconcile.Result{RequeueAfter: deletionRetryWaitPeriod}, nil } - machineConfig, err := providerconfigtypes.GetConfig(machine.Spec.ProviderSpec) + machineConfig, err := providerconfig.GetConfig(machine.Spec.ProviderSpec) if err != nil { return nil, fmt.Errorf("failed to get provider config: %w", err) } - if machineConfig.OperatingSystem == providerconfigtypes.OperatingSystemRHEL { + if machineConfig.OperatingSystem == providerconfig.OperatingSystemRHEL { rhelConfig, err := rhel.LoadConfig(machineConfig.OperatingSystemSpec) if err != nil { return nil, fmt.Errorf("failed to get rhel os specs: %w", err) } machineName := machine.Name - if machineConfig.CloudProvider == providerconfigtypes.CloudProviderAWS { + if machineConfig.CloudProvider == providerconfig.CloudProviderAWS { for _, address := range machine.Status.Addresses { if address.Type == corev1.NodeInternalDNS { machineName = address.Address @@ -703,19 +751,25 @@ func (r *Reconciler) deleteCloudProviderInstance(ctx context.Context, prov cloud return nil, r.updateMachine(machine, func(m *clusterv1alpha1.Machine) { finalizers := sets.NewString(m.Finalizers...) - finalizers.Delete(FinalizerDeleteInstance) - m.Finalizers = finalizers.List() + // If a machine deployment belongs to an external cloud provider, the 'machine-delete-finalizer' must be manually + // removed by an administrator or an external service. This is because the machine controller lacks access to cloud + // instances and cannot ensure their deletion. If the external service fails to delete the instance, it may result + // in orphaned resources or nodes without a machine reference. 
+ if machineConfig.CloudProvider != providerconfig.CloudProviderExternal { + finalizers.Delete(FinalizerDeleteInstance) + m.Finalizers = finalizers.List() + } }) } -func (r *Reconciler) deleteNodeForMachine(ctx context.Context, nodes []*corev1.Node, machine *clusterv1alpha1.Machine) error { +func (r *Reconciler) deleteNodeForMachine(ctx context.Context, log *zap.SugaredLogger, nodes []*corev1.Node, machine *clusterv1alpha1.Machine) error { // iterates on all nodes and delete them. Finally, remove the finalizer on the machine for _, node := range nodes { if err := r.client.Delete(ctx, node); err != nil { - if !kerrors.IsNotFound(err) { + if !apierrors.IsNotFound(err) { return err } - klog.V(2).Infof("node %q does not longer exist for machine %q", machine.Status.NodeRef.Name, machine.Spec.Name) + log.Infow("Node does not longer exist for machine", "node", machine.Status.NodeRef.Name) } } @@ -731,144 +785,67 @@ func (r *Reconciler) deleteNodeForMachine(ctx context.Context, nodes []*corev1.N func (r *Reconciler) ensureInstanceExistsForMachine( ctx context.Context, + log *zap.SugaredLogger, prov cloudprovidertypes.Provider, machine *clusterv1alpha1.Machine, - userdataPlugin userdataplugin.Provider, - providerConfig *providerconfigtypes.Config, + providerConfig *providerconfig.Config, ) (*reconcile.Result, error) { - klog.V(6).Infof("Requesting instance for machine '%s' from cloudprovider because no associated node with status ready found...", machine.Name) + log.Debug("Requesting instance for machine from cloudprovider because no associated node with status ready found...") - providerInstance, err := prov.Get(ctx, machine, r.providerData) + providerInstance, err := prov.Get(ctx, log, machine, r.providerData) // case 2: retrieving instance from provider was not successful if err != nil { // case 2.1: instance was not found and we are going to create one if errors.Is(err, cloudprovidererrors.ErrInstanceNotFound) { - klog.V(3).Infof("Validated machine spec of %s", 
machine.Name) - - var kubeconfig *clientcmdapi.Config + log.Debug("Validated machine spec") - // an external provider will take care of the bootstrap kubeconfig and token by itself. - if !r.useExternalBootstrap { - kubeconfig, err = r.createBootstrapKubeconfig(ctx, machine.Name) - if err != nil { - return nil, fmt.Errorf("failed to create bootstrap kubeconfig: %w", err) - } - } - - // grab kubelet featureGates from the annotations - kubeletFeatureGates := common.GetKubeletFeatureGates(machine.GetAnnotations()) - if len(kubeletFeatureGates) == 0 { - // fallback to command-line input - kubeletFeatureGates = r.nodeSettings.KubeletFeatureGates - } - - // grab kubelet general options from the annotations - kubeletFlags := common.GetKubeletFlags(machine.GetAnnotations()) - kubeletConfigs := common.GetKubeletConfigs(machine.GetAnnotations()) - - // look up for ExternalCloudProvider feature, with fallback to command-line input - externalCloudProvider := r.nodeSettings.ExternalCloudProvider - if val, ok := kubeletFlags[common.ExternalCloudProviderKubeletFlag]; ok { - externalCloudProvider, _ = strconv.ParseBool(val) - } - - cloudConfig, kubeletCloudProviderName, err := prov.GetCloudConfig(machine.Spec) - if err != nil { - return nil, fmt.Errorf("failed to render cloud config: %w", err) - } - - if providerConfig.CloudProvider == providerconfigtypes.CloudProviderVsphere && externalCloudProvider { - cloudConfig = "" - } - - registryCredentials, err := containerruntime.GetContainerdAuthConfig(ctx, r.client, r.nodeSettings.RegistryCredentialsSecretRef) + // Here we do stuff! 
+ var userdata string + referencedMachineDeployment, machineDeploymentRevision, err := controllerutil.GetMachineDeploymentNameAndRevisionForMachine(ctx, machine, r.client) if err != nil { - return nil, fmt.Errorf("failed to get containerd auth config: %w", err) + return nil, fmt.Errorf("failed to find machine's MachineDployment: %w", err) } - crRuntime := r.nodeSettings.ContainerRuntime - crRuntime.RegistryCredentials = registryCredentials - - if val, ok := kubeletConfigs[common.ContainerLogMaxSizeKubeletConfig]; ok { - crRuntime.ContainerLogMaxSize = val + bootstrapSecretName := fmt.Sprintf(bootstrap.CloudConfigSecretNamePattern, + referencedMachineDeployment, + machine.Namespace, + bootstrap.BootstrapCloudConfig) + + bootstrapSecret := &corev1.Secret{} + if err := r.client.Get(ctx, + types.NamespacedName{Name: bootstrapSecretName, Namespace: util.CloudInitNamespace}, + bootstrapSecret); err != nil { + log.Errorw("cloud-init configuration: cloud config is not ready yet", "secret", bootstrap.BootstrapCloudConfig) + return &reconcile.Result{RequeueAfter: 3 * time.Second}, nil } - if val, ok := kubeletConfigs[common.ContainerLogMaxFilesKubeletConfig]; ok { - crRuntime.ContainerLogMaxFiles = val + bootstrapSecretRevision := bootstrapSecret.Annotations[bootstrap.MachineDeploymentRevision] + if bootstrapSecretRevision != machineDeploymentRevision { + return nil, fmt.Errorf("cloud-init configuration: cloud config %q is not ready yet", bootstrap.BootstrapCloudConfig) } - // Here we do stuff! 
- var userdata string - - if r.useExternalBootstrap { - referencedMachineDeployment, machineDeploymentRevision, err := controllerutil.GetMachineDeploymentNameAndRevisionForMachine(ctx, machine, r.client) - if err != nil { - return nil, fmt.Errorf("failed to find machine's MachineDployment: %w", err) - } - - bootstrapSecretName := fmt.Sprintf(bootstrap.CloudConfigSecretNamePattern, - referencedMachineDeployment, - machine.Namespace, - bootstrap.BootstrapCloudConfig) - - bootstrapSecret := &corev1.Secret{} - if err := r.client.Get(ctx, - types.NamespacedName{Name: bootstrapSecretName, Namespace: util.CloudInitNamespace}, - bootstrapSecret); err != nil { - klog.Errorf(CloudInitNotReadyError, bootstrap.BootstrapCloudConfig, machine.Name) - return nil, err - } - - bootstrapSecretRevision := bootstrapSecret.Annotations[bootstrap.MachineDeploymentRevision] - if bootstrapSecretRevision != machineDeploymentRevision { - return nil, fmt.Errorf(CloudInitNotReadyError, bootstrap.BootstrapCloudConfig, machine.Name) - } - - userdata = getOSMBootstrapUserdata(machine.Spec.Name, *bootstrapSecret) - } else { - req := plugin.UserDataRequest{ - MachineSpec: machine.Spec, - Kubeconfig: kubeconfig, - CloudConfig: cloudConfig, - CloudProviderName: string(providerConfig.CloudProvider), - ExternalCloudProvider: externalCloudProvider, - DNSIPs: r.nodeSettings.ClusterDNSIPs, - PauseImage: r.nodeSettings.PauseImage, - KubeletCloudProviderName: kubeletCloudProviderName, - KubeletFeatureGates: kubeletFeatureGates, - KubeletConfigs: kubeletConfigs, - NoProxy: r.nodeSettings.NoProxy, - HTTPProxy: r.nodeSettings.HTTPProxy, - ContainerRuntime: crRuntime, - NodePortRange: r.nodePortRange, - } - - userdata, err = userdataPlugin.UserData(req) - if err != nil { - return nil, fmt.Errorf("failed get userdata: %w", err) - } - } + userdata = getOSMBootstrapUserdata(machine.Spec.Name, *bootstrapSecret) // Create the instance - if _, err = r.createProviderInstance(ctx, prov, machine, userdata); err != nil { 
- message := fmt.Sprintf("%v. Unable to create a machine.", err) + if _, err = r.createProviderInstance(ctx, log, prov, machine, userdata); err != nil { + message := fmt.Sprintf("%v. Failed to create a machine.", err) return nil, r.updateMachineErrorIfTerminalError(machine, common.CreateMachineError, message, err, "failed to create machine at cloudprovider") } - if providerConfig.OperatingSystem == providerconfigtypes.OperatingSystemRHEL { + if providerConfig.OperatingSystem == providerconfig.OperatingSystemRHEL { if err := rhsm.AddRHELSubscriptionFinalizer(machine, r.updateMachine); err != nil { return nil, fmt.Errorf("failed to add redhat subscription finalizer: %w", err) } } r.recorder.Event(machine, corev1.EventTypeNormal, "Created", "Successfully created instance") - klog.V(3).Infof("Created machine %s at cloud provider", machine.Name) + log.Info("Created machine at cloud provider") // Reqeue the machine to make sure we notice if creation failed silently return &reconcile.Result{RequeueAfter: 30 * time.Second}, nil } // case 2.2: terminal error was returned and manual interaction is required to recover if ok, _, _ := cloudprovidererrors.IsTerminalError(err); ok { - message := fmt.Sprintf("%v. Unable to create a machine.", err) + message := fmt.Sprintf("%v. 
Failed to create a machine.", err) return nil, r.updateMachineErrorIfTerminalError(machine, common.CreateMachineError, message, err, "failed to get instance from provider") } @@ -878,7 +855,7 @@ func (r *Reconciler) ensureInstanceExistsForMachine( // Instance exists, so ensure finalizer does as well machine, err = r.ensureDeleteFinalizerExists(machine) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to add %q finalizer: %w", FinalizerDeleteInstance, err) } // case 3: retrieving the instance from cloudprovider was successful @@ -907,16 +884,42 @@ func (r *Reconciler) ensureInstanceExistsForMachine( for address, addressType := range addresses { machineAddresses = append(machineAddresses, corev1.NodeAddress{Address: address, Type: addressType}) } + + // Addresses from the provider are a map; prevent needless updates by sorting them. + sort.Slice(machineAddresses, func(i, j int) bool { + a := machineAddresses[i] + b := machineAddresses[j] + + if a.Type == b.Type { + return a.Address < b.Address + } + + return a.Type < b.Type + }) + + var providerID string + if machine.Spec.ProviderID == nil { + inTree := providerconfig.IntreeCloudProviderImplementationSupported(providerConfig.CloudProvider) + // If both external and internal CCM are not available. We set provider-id for the machine explicitly. 
+ if !inTree && !r.nodeSettings.ExternalCloudProvider { + providerID = fmt.Sprintf(ProviderIDPattern, providerConfig.CloudProvider, machine.UID) + } + } + if err := r.updateMachine(machine, func(m *clusterv1alpha1.Machine) { m.Status.Addresses = machineAddresses + if providerID != "" { + m.Spec.ProviderID = &providerID + } }); err != nil { - return nil, fmt.Errorf("failed to update machine after setting .status.addresses: %w", err) + return nil, fmt.Errorf("failed to update machine after setting .status.addresses and providerID: %w", err) } - return r.ensureNodeOwnerRef(ctx, providerInstance, machine, providerConfig) + + return r.ensureNodeOwnerRef(ctx, log, providerInstance, machine, providerConfig) } -func (r *Reconciler) ensureNodeOwnerRef(ctx context.Context, providerInstance instance.Instance, machine *clusterv1alpha1.Machine, providerConfig *providerconfigtypes.Config) (*reconcile.Result, error) { - node, exists, err := r.getNode(ctx, providerInstance, providerConfig.CloudProvider) +func (r *Reconciler) ensureNodeOwnerRef(ctx context.Context, log *zap.SugaredLogger, providerInstance instance.Instance, machine *clusterv1alpha1.Machine, providerConfig *providerconfig.Config) (*reconcile.Result, error) { + node, exists, err := r.getNode(ctx, log, providerInstance, providerConfig.CloudProvider) if err != nil { return nil, fmt.Errorf("failed to get node for machine %s: %w", machine.Name, err) } @@ -938,7 +941,7 @@ func (r *Reconciler) ensureNodeOwnerRef(ctx context.Context, providerInstance in // Check if the machine is a potential candidate for triggering deletion if r.joinClusterTimeout != nil && ownerReferencesHasMachineSetKind(machine.OwnerReferences) { if time.Since(machine.CreationTimestamp.Time) > *r.joinClusterTimeout { - klog.V(3).Infof("Join cluster timeout expired for machine %s, deleting it", machine.Name) + log.Info("Join cluster timeout expired for machine; deleting it", "timeout", *r.joinClusterTimeout) if err := r.client.Delete(ctx, machine); err 
!= nil { return nil, fmt.Errorf("failed to delete machine %s/%s that didn't join cluster within expected period of %s: %w", machine.Namespace, machine.Name, r.joinClusterTimeout.String(), err) @@ -961,7 +964,7 @@ func ownerReferencesHasMachineSetKind(ownerReferences []metav1.OwnerReference) b return false } -func (r *Reconciler) ensureNodeLabelsAnnotationsAndTaints(ctx context.Context, node *corev1.Node, machine *clusterv1alpha1.Machine) error { +func (r *Reconciler) ensureNodeLabelsAnnotationsAndTaints(ctx context.Context, nodeLog *zap.SugaredLogger, node *corev1.Node, machine *clusterv1alpha1.Machine) error { var modifiers []func(*corev1.Node) for k, v := range machine.Spec.Labels { @@ -995,14 +998,6 @@ func (r *Reconciler) ensureNodeLabelsAnnotationsAndTaints(ctx context.Context, n modifiers = append(modifiers, f(AnnotationAutoscalerIdentifier, autoscalerAnnotationValue)) } - taintExists := func(node *corev1.Node, taint corev1.Taint) bool { - for _, t := range node.Spec.Taints { - if t.MatchTaint(&taint) { - return true - } - } - return false - } for _, t := range machine.Spec.Taints { if !taintExists(node, t) { f := func(t corev1.Taint) func(*corev1.Node) { @@ -1019,7 +1014,7 @@ func (r *Reconciler) ensureNodeLabelsAnnotationsAndTaints(ctx context.Context, n return fmt.Errorf("failed to update node %s after setting labels/annotations/taints: %w", node.Name, err) } r.recorder.Event(machine, corev1.EventTypeNormal, "LabelsAnnotationsTaintsUpdated", "Successfully updated labels/annotations/taints") - klog.V(3).Infof("Added labels/annotations/taints to node %s (machine %s)", node.Name, machine.Name) + nodeLog.Info("Added labels/annotations/taints") } return nil @@ -1048,7 +1043,7 @@ func (r *Reconciler) updateMachineStatus(machine *clusterv1alpha1.Machine, node return nil } -func (r *Reconciler) getNode(ctx context.Context, instance instance.Instance, provider providerconfigtypes.CloudProvider) (node *corev1.Node, exists bool, err error) { +func (r *Reconciler) 
getNode(ctx context.Context, log *zap.SugaredLogger, instance instance.Instance, provider providerconfig.CloudProvider) (node *corev1.Node, exists bool, err error) { if instance == nil { return nil, false, fmt.Errorf("getNode called with nil provider instance") } @@ -1060,7 +1055,7 @@ func (r *Reconciler) getNode(ctx context.Context, instance instance.Instance, pr for _, node := range nodes.Items { // Try to find Node by providerID. Should work if CCM is deployed. if node := findNodeByProviderID(instance, provider, nodes.Items); node != nil { - klog.V(4).Infof("Found node %q by providerID", node.Name) + log.Debugw("Found node by providerID", "node", node.Name) return node, true, nil } @@ -1086,11 +1081,11 @@ func (r *Reconciler) getNode(ctx context.Context, instance instance.Instance, pr // TODO: We should do this for other providers, but there are providers where // the node and the instance names will not match, so it requires further // investigation (e.g. AWS). - if provider == providerconfigtypes.CloudProviderHetzner && node.Name != instance.Name() { + if provider == providerconfig.CloudProviderHetzner && node.Name != instance.Name() { continue } if nodeAddress.Address == instanceAddress { - klog.V(4).Infof("Found node %q by IP address", node.Name) + log.Debugw("Found node by IP address", "node", node.Name) return node.DeepCopy(), true, nil } } @@ -1099,7 +1094,7 @@ func (r *Reconciler) getNode(ctx context.Context, instance instance.Instance, pr return nil, false, nil } -func findNodeByProviderID(instance instance.Instance, provider providerconfigtypes.CloudProvider, nodes []corev1.Node) *corev1.Node { +func findNodeByProviderID(instance instance.Instance, provider providerconfig.CloudProvider, nodes []corev1.Node) *corev1.Node { providerID := instance.ProviderID() if providerID == "" { return nil @@ -1114,7 +1109,7 @@ func findNodeByProviderID(instance instance.Instance, provider providerconfigtyp // * aws://// // * aws:/// // The first case is handled above, 
while the second here is handled here. - if provider == providerconfigtypes.CloudProviderAWS { + if provider == providerconfig.CloudProviderAWS { pid := strings.Split(node.Spec.ProviderID, "aws:///") if len(pid) == 2 && pid[1] == instance.ID() { return node.DeepCopy() @@ -1125,29 +1120,38 @@ func findNodeByProviderID(instance instance.Instance, provider providerconfigtyp return nil } +func taintExists(node *corev1.Node, taint corev1.Taint) bool { + for _, t := range node.Spec.Taints { + if t.MatchTaint(&taint) { + return true + } + } + return false +} + func (r *Reconciler) ReadinessChecks(ctx context.Context) map[string]healthcheck.Check { return map[string]healthcheck.Check{ "valid-info-kubeconfig": func() error { - cm, err := r.kubeconfigProvider.GetKubeconfig(ctx) + cm, err := r.kubeconfigProvider.GetKubeconfig(ctx, r.log) if err != nil { err := fmt.Errorf("failed to get cluster-info configmap: %w", err) - klog.V(2).Info(err) + r.log.Error(err) return err } if len(cm.Clusters) != 1 { err := errors.New("invalid kubeconfig: no clusters found") - klog.V(2).Info(err) + r.log.Error(err) return err } for name, c := range cm.Clusters { if len(c.CertificateAuthorityData) == 0 { err := fmt.Errorf("invalid kubeconfig: no certificate authority data was specified for kuberconfig.clusters.['%s']", name) - klog.V(2).Info(err) + r.log.Error(err) return err } if len(c.Server) == 0 { err := fmt.Errorf("invalid kubeconfig: no server was specified for kuberconfig.clusters.['%s']", name) - klog.V(2).Info(err) + r.log.Error(err) return err } } @@ -1157,17 +1161,19 @@ func (r *Reconciler) ReadinessChecks(ctx context.Context) map[string]healthcheck } func (r *Reconciler) ensureDeleteFinalizerExists(machine *clusterv1alpha1.Machine) (*clusterv1alpha1.Machine, error) { - if !sets.NewString(machine.Finalizers...).Has(FinalizerDeleteInstance) { + finalizers := sets.NewString(machine.Finalizers...) 
+ length := finalizers.Len() + + finalizers.Insert(FinalizerDeleteInstance, FinalizerDeleteNode) + + if finalizers.Len() > length { if err := r.updateMachine(machine, func(m *clusterv1alpha1.Machine) { - finalizers := sets.NewString(m.Finalizers...) - finalizers.Insert(FinalizerDeleteInstance) - finalizers.Insert(FinalizerDeleteNode) m.Finalizers = finalizers.List() }); err != nil { - return nil, fmt.Errorf("failed to update machine after adding the delete instance finalizer: %w", err) + return nil, err } - klog.V(3).Infof("Added delete finalizer to machine %s", machine.Name) } + return machine, nil } @@ -1184,3 +1190,46 @@ func (r *Reconciler) updateNode(ctx context.Context, node *corev1.Node, modifier return r.client.Update(ctx, node) }) } + +// handleNodeFailuresWithExternalCCM reacts to node status discovery of CCM's node lifecycle controller. +// If an instance at cloud provider is not found then it waits till CCM deletes node objects, that allows: +// - create a new instance at cloud provider +// - initialize a new node object - the object should not be reused between instance creation +// for example, instance foo that got deleted and recreated should initialize a completely new node object +// instead of reusing the old one as it can cause problems to update node's metadata, like IP address. 
+// +// If node is shut-down it allows MC to react accordingly to specific cloud provider requirements, those are: +// - wait for node to become online again or +// - delete a machine which cannot be recovered +func (r *Reconciler) handleNodeFailuresWithExternalCCM( + ctx context.Context, + log *zap.SugaredLogger, + prov cloudprovidertypes.Provider, + provConfig *providerconfig.Config, + node *corev1.Node, + machine *clusterv1alpha1.Machine, +) (*reconcile.Result, error) { + taintShutdown := corev1.Taint{ + Key: ccmapi.TaintNodeShutdown, + Effect: corev1.TaintEffectNoSchedule, + } + + _, err := prov.Get(ctx, log, machine, r.providerData) + if err != nil { + if cloudprovidererrors.IsNotFound(err) { + log.Info("The node does not have corresponding instance, waiting for CCM to delete it") + return &reconcile.Result{RequeueAfter: deletionRetryWaitPeriod}, nil + } + return nil, err + } else if taintExists(node, taintShutdown) { + switch provConfig.CloudProvider { + case providerconfig.CloudProviderKubeVirt: + log.Infof("Deleting a shut-down machine %q that cannot recover", machine.Name) + skipEviction := true + return r.deleteMachine(ctx, log, prov, providerconfig.CloudProviderKubeVirt, machine, skipEviction) + } + } + + log.Debug("Waiting for a node to become %q", corev1.NodeReady) + return &reconcile.Result{RequeueAfter: deletionRetryWaitPeriod}, err +} diff --git a/pkg/controller/machine/machine_test.go b/pkg/controller/machine/controller_test.go similarity index 93% rename from pkg/controller/machine/machine_test.go rename to pkg/controller/machine/controller_test.go index 9d109ffe0..c5d9834d7 100644 --- a/pkg/controller/machine/machine_test.go +++ b/pkg/controller/machine/controller_test.go @@ -23,27 +23,26 @@ import ( "time" "github.com/go-test/deep" + "go.uber.org/zap" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/instance" - cloudprovidertypes 
"github.com/kubermatic/machine-controller/pkg/cloudprovider/types" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + "k8c.io/machine-controller/pkg/cloudprovider/instance" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + providerconfigtypes "k8c.io/machine-controller/sdk/providerconfig" corev1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/tools/record" - "k8s.io/klog" ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" - ctrlruntimefake "sigs.k8s.io/controller-runtime/pkg/client/fake" fakectrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" ) func init() { if err := clusterv1alpha1.AddToScheme(scheme.Scheme); err != nil { - klog.Fatalf("failed to add clusterv1alpha1 api to scheme: %v", err) + panic(fmt.Sprintf("failed to add clusterv1alpha1 api to scheme: %v", err)) } } @@ -202,7 +201,7 @@ func TestController_GetNode(t *testing.T) { reconciler := Reconciler{client: client} - node, exists, err := reconciler.getNode(ctx, test.instance, test.provider) + node, exists, err := reconciler.getNode(ctx, zap.NewNop().Sugar(), test.instance, test.provider) if diff := deep.Equal(err, test.err); diff != nil { t.Errorf("expected to get %v instead got: %v", test.err, err) } @@ -314,12 +313,12 @@ func TestControllerDeletesMachinesOnJoinTimeout(t *testing.T) { joinClusterTimeout: test.joinTimeoutConfig, } - if _, err := reconciler.ensureNodeOwnerRef(ctx, instance, machine, providerConfig); err != nil { + if _, err := reconciler.ensureNodeOwnerRef(ctx, zap.NewNop().Sugar(), instance, machine, providerConfig); err != nil { t.Fatalf("failed to call ensureNodeOwnerRef: %v", err) } err := client.Get(ctx, 
types.NamespacedName{Name: machine.Name}, &clusterv1alpha1.Machine{}) - wasDeleted := kerrors.IsNotFound(err) + wasDeleted := apierrors.IsNotFound(err) if wasDeleted != test.getsDeleted { t.Errorf("Machine was deleted: %v, but expectedDeletion: %v", wasDeleted, test.getsDeleted) @@ -335,6 +334,7 @@ func durationPtr(d time.Duration) *time.Duration { func TestControllerShouldEvict(t *testing.T) { threeHoursAgo := metav1.NewTime(time.Now().Add(-3 * time.Hour)) now := metav1.Now() + finalizer := "test" tests := []struct { name string @@ -354,6 +354,7 @@ func TestControllerShouldEvict(t *testing.T) { machine: &clusterv1alpha1.Machine{ ObjectMeta: metav1.ObjectMeta{ DeletionTimestamp: &threeHoursAgo, + Finalizers: []string{finalizer}, }, Status: clusterv1alpha1.MachineStatus{ NodeRef: &corev1.ObjectReference{Name: "existing-node"}, @@ -366,6 +367,7 @@ func TestControllerShouldEvict(t *testing.T) { machine: &clusterv1alpha1.Machine{ ObjectMeta: metav1.ObjectMeta{ DeletionTimestamp: &now, + Finalizers: []string{finalizer}, }, Status: clusterv1alpha1.MachineStatus{ NodeRef: nil, @@ -378,6 +380,7 @@ func TestControllerShouldEvict(t *testing.T) { machine: &clusterv1alpha1.Machine{ ObjectMeta: metav1.ObjectMeta{ DeletionTimestamp: &now, + Finalizers: []string{finalizer}, }, Status: clusterv1alpha1.MachineStatus{ NodeRef: &corev1.ObjectReference{Name: "non-existing-node"}, @@ -394,6 +397,7 @@ func TestControllerShouldEvict(t *testing.T) { machine: &clusterv1alpha1.Machine{ ObjectMeta: metav1.ObjectMeta{ DeletionTimestamp: &now, + Finalizers: []string{finalizer}, }, Status: clusterv1alpha1.MachineStatus{ NodeRef: &corev1.ObjectReference{Name: "existing-node"}, @@ -414,6 +418,7 @@ func TestControllerShouldEvict(t *testing.T) { machine: &clusterv1alpha1.Machine{ ObjectMeta: metav1.ObjectMeta{ DeletionTimestamp: &now, + Finalizers: []string{finalizer}, }, Status: clusterv1alpha1.MachineStatus{ NodeRef: &corev1.ObjectReference{Name: "existing-node"}, @@ -434,6 +439,7 @@ func 
TestControllerShouldEvict(t *testing.T) { machine: &clusterv1alpha1.Machine{ ObjectMeta: metav1.ObjectMeta{ DeletionTimestamp: &now, + Finalizers: []string{finalizer}, }, Status: clusterv1alpha1.MachineStatus{ NodeRef: &corev1.ObjectReference{Name: "existing-node"}, @@ -457,7 +463,7 @@ func TestControllerShouldEvict(t *testing.T) { objects = append(objects, test.existingNodes...) objects = append(objects, test.additionalMachines...) - client := ctrlruntimefake.NewClientBuilder(). + client := fakectrlruntimeclient.NewClientBuilder(). WithScheme(scheme.Scheme). WithObjects(objects...). Build() @@ -467,7 +473,7 @@ func TestControllerShouldEvict(t *testing.T) { skipEvictionAfter: 2 * time.Hour, } - shouldEvict, err := reconciler.shouldEvict(ctx, test.machine) + shouldEvict, err := reconciler.shouldEvict(ctx, zap.NewNop().Sugar(), test.machine) if err != nil { t.Fatal(err) } @@ -591,6 +597,8 @@ func TestControllerDeleteNodeForMachine(t *testing.T) { }, } + log := zap.NewNop().Sugar() + for _, test := range tests { t.Run(test.name, func(t *testing.T) { ctx := context.Background() @@ -602,6 +610,7 @@ func TestControllerDeleteNodeForMachine(t *testing.T) { client := fakectrlruntimeclient.NewClientBuilder(). WithScheme(scheme.Scheme). + WithStatusSubresource(). WithObjects(objects...). 
Build() @@ -617,12 +626,12 @@ func TestControllerDeleteNodeForMachine(t *testing.T) { providerData: providerData, } - nodes, err := reconciler.retrieveNodesRelatedToMachine(ctx, test.machine) + nodes, err := reconciler.retrieveNodesRelatedToMachine(ctx, log, test.machine) if err != nil { return } - err = reconciler.deleteNodeForMachine(ctx, nodes, test.machine) + err = reconciler.deleteNodeForMachine(ctx, log, nodes, test.machine) if diff := deep.Equal(err, test.err); diff != nil { t.Errorf("expected to get %v instead got: %v", test.err, err) } @@ -632,7 +641,7 @@ func TestControllerDeleteNodeForMachine(t *testing.T) { if test.shouldDeleteNode != "" { err = client.Get(ctx, types.NamespacedName{Name: test.shouldDeleteNode}, &corev1.Node{}) - if !kerrors.IsNotFound(err) { + if !apierrors.IsNotFound(err) { t.Errorf("expected node %q to be deleted, but got: %v", test.shouldDeleteNode, err) } } else { diff --git a/pkg/controller/machine/kubeconfig.go b/pkg/controller/machine/kubeconfig.go deleted file mode 100644 index d1926fee4..000000000 --- a/pkg/controller/machine/kubeconfig.go +++ /dev/null @@ -1,254 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controller - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "time" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/selection" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/client-go/kubernetes/scheme" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" - ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" -) - -const ( - secretTypeBootstrapToken corev1.SecretType = "bootstrap.kubernetes.io/token" - machineNameLabelKey string = "machine.k8s.io/machine.name" - tokenIDKey string = "token-id" - tokenSecretKey string = "token-secret" - expirationKey string = "expiration" - tokenFormatter string = "%s.%s" - // Keep this short, userdata is limited. - contextIdentifier string = "c" -) - -func (r *Reconciler) createBootstrapKubeconfig(ctx context.Context, name string) (*clientcmdapi.Config, error) { - var token string - var err error - - if r.bootstrapTokenServiceAccountName != nil { - token, err = r.getTokenFromServiceAccount(ctx, *r.bootstrapTokenServiceAccountName) - if err != nil { - return nil, fmt.Errorf("failed to get token from ServiceAccount %s/%s: %w", r.bootstrapTokenServiceAccountName.Namespace, r.bootstrapTokenServiceAccountName.Name, err) - } - } else { - token, err = r.createBootstrapToken(ctx, name) - if err != nil { - return nil, fmt.Errorf("failed to create bootstrap token: %w", err) - } - } - - infoKubeconfig, err := r.kubeconfigProvider.GetKubeconfig(ctx) - if err != nil { - return nil, err - } - - outConfig := infoKubeconfig.DeepCopy() - - // Some consumers expect a valid `Contexts` map and the serialization - // for the Context ignores empty string fields, hence we must make sure - // both the Cluster and the User have a non-empty key. 
- clusterContextName := "" - // This is supposed to have a length of 1. We have code further down the - // line that extracts the CA cert and errors out if that is not the case, - // so we can simply iterate over it here. - for key := range infoKubeconfig.Clusters { - clusterContextName = key - } - cluster := outConfig.Clusters[clusterContextName].DeepCopy() - delete(outConfig.Clusters, clusterContextName) - outConfig.Clusters[contextIdentifier] = cluster - - outConfig.AuthInfos = map[string]*clientcmdapi.AuthInfo{ - contextIdentifier: { - Token: token, - }, - } - - // This is supposed to have a length of 1. We have code further down the - // line that extracts the CA cert and errors out if that is not the case. - // - // This handles a very special case in which we want to override the API server - // address that will be used in the `bootstrap-kubelet.conf` in the worker nodes for - // our E2E tests that run in KIND clusters. - if r.overrideBootstrapKubeletAPIServer != "" { - for key := range outConfig.Clusters { - outConfig.Clusters[key].Server = r.overrideBootstrapKubeletAPIServer - } - } - - outConfig.Contexts = map[string]*clientcmdapi.Context{contextIdentifier: {Cluster: contextIdentifier, AuthInfo: contextIdentifier}} - outConfig.CurrentContext = contextIdentifier - - return outConfig, nil -} - -func (r *Reconciler) getTokenFromServiceAccount(ctx context.Context, name types.NamespacedName) (string, error) { - sa := &corev1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: name.Name, Namespace: name.Namespace}} - raw, err := r.getAsUnstructured(ctx, sa) - if err != nil { - return "", fmt.Errorf("failed to get serviceAccount %q: %w", name.String(), err) - } - sa = raw.(*corev1.ServiceAccount) - for _, serviceAccountSecretName := range sa.Secrets { - serviceAccountSecret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: sa.Namespace, Name: serviceAccountSecretName.Name}} - raw, err = r.getAsUnstructured(ctx, serviceAccountSecret) - if err != nil { - 
return "", fmt.Errorf("failed to get serviceAccountSecret: %w", err) - } - serviceAccountSecret = raw.(*corev1.Secret) - if serviceAccountSecret.Type != corev1.SecretTypeServiceAccountToken { - continue - } - return string(serviceAccountSecret.Data[corev1.ServiceAccountTokenKey]), nil - } - return "", errors.New("no serviceAccountSecret found") -} - -func (r *Reconciler) createBootstrapToken(ctx context.Context, name string) (string, error) { - existingSecret, err := r.getSecretIfExists(ctx, name) - if err != nil { - return "", err - } - if existingSecret != nil { - return r.updateSecretExpirationAndGetToken(ctx, existingSecret) - } - - tokenID := rand.String(6) - tokenSecret := rand.String(16) - - secret := corev1.Secret{ - ObjectMeta: metav1.ObjectMeta{ - Name: fmt.Sprintf("bootstrap-token-%s", tokenID), - Namespace: metav1.NamespaceSystem, - Labels: map[string]string{machineNameLabelKey: name}, - }, - Type: secretTypeBootstrapToken, - Data: map[string][]byte{ - "description": []byte("bootstrap token for " + name), - tokenIDKey: []byte(tokenID), - tokenSecretKey: []byte(tokenSecret), - expirationKey: []byte(metav1.Now().Add(1 * time.Hour).Format(time.RFC3339)), - "usage-bootstrap-authentication": []byte("true"), - "usage-bootstrap-signing": []byte("true"), - "auth-extra-groups": []byte("system:bootstrappers:machine-controller:default-node-token"), - }, - } - - if err := r.client.Create(ctx, &secret); err != nil { - return "", fmt.Errorf("failed to create bootstrap token secret: %w", err) - } - - return fmt.Sprintf(tokenFormatter, tokenID, tokenSecret), nil -} - -func (r *Reconciler) updateSecretExpirationAndGetToken(ctx context.Context, secret *corev1.Secret) (string, error) { - if secret.Data == nil { - secret.Data = map[string][]byte{} - } - tokenID := string(secret.Data[tokenIDKey]) - tokenSecret := string(secret.Data[tokenSecretKey]) - token := fmt.Sprintf(tokenFormatter, tokenID, tokenSecret) - - expirationTime, err := time.Parse(time.RFC3339, 
string(secret.Data[expirationKey])) - if err != nil { - return "", err - } - - // If the token is close to expire, reset it's expiration time - if time.Until(expirationTime).Minutes() < 30 { - secret.Data[expirationKey] = []byte(metav1.Now().Add(1 * time.Hour).Format(time.RFC3339)) - } else { - return token, nil - } - - if err := r.client.Update(ctx, secret); err != nil { - return "", fmt.Errorf("failed to update secret: %w", err) - } - return token, nil -} - -func (r *Reconciler) getSecretIfExists(ctx context.Context, name string) (*corev1.Secret, error) { - req, err := labels.NewRequirement(machineNameLabelKey, selection.Equals, []string{name}) - if err != nil { - return nil, err - } - selector := labels.NewSelector().Add(*req) - secrets := &corev1.SecretList{} - if err := r.client.List(ctx, secrets, - &ctrlruntimeclient.ListOptions{ - Namespace: metav1.NamespaceSystem, - LabelSelector: selector}); err != nil { - return nil, err - } - - if len(secrets.Items) == 0 { - return nil, nil - } - if len(secrets.Items) > 1 { - return nil, fmt.Errorf("expected to find exactly one secret for the given machine name =%s but found %d", name, len(secrets.Items)) - } - return &secrets.Items[0], nil -} - -// getAsUnstructured is a helper to get an object as unstrucuted.Unstructered from the client. -// The purpose of this is to avoid establishing a lister, which the cache-backed client automatically -// does. The object passed in must have name and namespace set. The returned object will -// be the same as the passed in one, if there was no error. 
-func (r *Reconciler) getAsUnstructured(ctx context.Context, obj runtime.Object) (runtime.Object, error) { - metaObj, ok := obj.(metav1.Object) - if !ok { - return nil, errors.New("can not assert object as metav1.Object") - } - kinds, _, err := scheme.Scheme.ObjectKinds(obj) - if err != nil { - return nil, fmt.Errorf("failed to get kinds for object: %w", err) - } - if len(kinds) == 0 { - return nil, fmt.Errorf("found no kind for object %t", obj) - } - apiVersion, kind := kinds[0].ToAPIVersionAndKind() - - target := &unstructured.Unstructured{} - target.SetAPIVersion(apiVersion) - target.SetKind(kind) - name := types.NamespacedName{Name: metaObj.GetName(), Namespace: metaObj.GetNamespace()} - - if err := r.client.Get(ctx, name, target); err != nil { - return nil, fmt.Errorf("failed to get object: %w", err) - } - - rawJSON, err := target.MarshalJSON() - if err != nil { - return nil, fmt.Errorf("failed to marshal unstructured.Unstructured: %w", err) - } - if err := json.Unmarshal(rawJSON, obj); err != nil { - return nil, fmt.Errorf("failed to marshal unstructured.Unstructued into %T: %w", obj, err) - } - return obj, nil -} diff --git a/pkg/controller/machine/kubeconfig_test.go b/pkg/controller/machine/kubeconfig_test.go deleted file mode 100644 index a98b702a1..000000000 --- a/pkg/controller/machine/kubeconfig_test.go +++ /dev/null @@ -1,100 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package controller - -import ( - "bytes" - "context" - "testing" - "time" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/kubernetes/scheme" - ctrlruntimefake "sigs.k8s.io/controller-runtime/pkg/client/fake" -) - -func TestUpdateSecretExpirationAndGetToken(t *testing.T) { - tests := []struct { - initialExpirationTime time.Time - shouldRenew bool - }{ - { - initialExpirationTime: time.Now().Add(1 * time.Hour), - shouldRenew: false, - }, - { - initialExpirationTime: time.Now().Add(25 * time.Minute), - shouldRenew: true, - }, - { - initialExpirationTime: time.Now().Add(-25 * time.Minute), - shouldRenew: true, - }, - } - - reconciler := Reconciler{} - - for _, testCase := range tests { - ctx := context.Background() - secret := &corev1.Secret{} - secret.Name = "secret" - secret.Namespace = metav1.NamespaceSystem - data := map[string][]byte{} - data[tokenSecretKey] = []byte("tokenSecret") - data[tokenIDKey] = []byte("tokenID") - data[expirationKey] = []byte(testCase.initialExpirationTime.Format(time.RFC3339)) - secret.Data = data - reconciler.client = ctrlruntimefake. - NewClientBuilder(). - WithScheme(scheme.Scheme). - WithObjects(secret). 
- Build() - - if _, err := reconciler.updateSecretExpirationAndGetToken(ctx, secret); err != nil { - t.Fatalf("Unexpected error running updateSecretExpirationAndGetToken: %v", err) - } - - updatedSecret := &corev1.Secret{} - if err := reconciler.client.Get(ctx, types.NamespacedName{ - Namespace: metav1.NamespaceSystem, - Name: "secret", - }, updatedSecret); err != nil { - t.Fatalf("Unsexpected error getting secret: %v", err) - } - - if testCase.shouldRenew && - bytes.Equal(updatedSecret.Data[expirationKey], []byte(testCase.initialExpirationTime.Format(time.RFC3339))) { - t.Errorf("Error, token secret did not update but was expected to!") - } - - if !testCase.shouldRenew && - !bytes.Equal(updatedSecret.Data[expirationKey], []byte(testCase.initialExpirationTime.Format(time.RFC3339))) { - t.Errorf("Error, token secret was expected to get updated, but did not happen!") - } - - expirationTimeParsed, err := time.Parse(time.RFC3339, string(secret.Data[expirationKey])) - if err != nil { - t.Fatalf("Failed to parse timestamp from secret: %v", err) - } - - if time.Until(expirationTimeParsed).Minutes() < 0 { - t.Errorf("Error, secret expiration is in the past!") - } - } -} diff --git a/pkg/controller/machine/metrics.go b/pkg/controller/machine/metrics.go index a5d375a93..ff09db81d 100644 --- a/pkg/controller/machine/metrics.go +++ b/pkg/controller/machine/metrics.go @@ -23,10 +23,10 @@ import ( "github.com/prometheus/client_golang/prometheus" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/cloudprovider" - "github.com/kubermatic/machine-controller/pkg/providerconfig" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + "k8c.io/machine-controller/pkg/cloudprovider" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + "k8c.io/machine-controller/sdk/providerconfig" + "k8c.io/machine-controller/sdk/providerconfig/configvar" 
"k8s.io/apimachinery/pkg/api/equality" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -50,6 +50,16 @@ func NewMachineControllerMetrics() *MetricsCollection { Name: metricsPrefix + "errors_total", Help: "The total number or unexpected errors the controller encountered", }), + Provisioning: prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: metricsPrefix + "provisioning_time_seconds", + Help: "Histogram of times spent from creating a Machine to ready state in the cluster", + Buckets: prometheus.ExponentialBuckets(32, 1.5, 10), + }), + Deprovisioning: prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: metricsPrefix + "deprovisioning_time_seconds", + Help: "Histogram of times spent from deleting a Machine to be removed from cluster and cloud provider", + Buckets: prometheus.ExponentialBuckets(32, 1.5, 10), + }), } // Set default values, so that these metrics always show up @@ -70,8 +80,8 @@ type MachineCollector struct { type machineMetricLabels struct { KubeletVersion string - CloudProvider providerconfigtypes.CloudProvider - OperatingSystem providerconfigtypes.OperatingSystem + CloudProvider providerconfig.CloudProvider + OperatingSystem providerconfig.OperatingSystem ProviderLabels map[string]string } @@ -113,7 +123,7 @@ func (l *machineMetricLabels) Counter(value uint) prometheus.Counter { func NewMachineCollector(ctx context.Context, client ctrlruntimeclient.Client) *MachineCollector { // Start periodically calling the providers SetMetricsForMachines in a dedicated go routine - skg := providerconfig.NewConfigVarResolver(ctx, client) + configResolver := configvar.NewResolver(ctx, client) go func() { metricGatheringExecutor := func() { machines := &clusterv1alpha1.MachineList{} @@ -129,9 +139,9 @@ func NewMachineCollector(ctx context.Context, client ctrlruntimeclient.Client) * return } - providerMachineMap := map[providerconfigtypes.CloudProvider]*clusterv1alpha1.MachineList{} + providerMachineMap := 
map[providerconfig.CloudProvider]*clusterv1alpha1.MachineList{} for _, machine := range machines.Items { - providerConfig, err := providerconfigtypes.GetConfig(machine.Spec.ProviderSpec) + providerConfig, err := providerconfig.GetConfig(machine.Spec.ProviderSpec) if err != nil { utilruntime.HandleError(fmt.Errorf("failed to get providerSpec for SetMetricsForMachines: %w", err)) continue @@ -143,7 +153,7 @@ func NewMachineCollector(ctx context.Context, client ctrlruntimeclient.Client) * } for provider, providerMachineList := range providerMachineMap { - prov, err := cloudprovider.ForProvider(provider, skg) + prov, err := cloudprovider.ForProvider(provider, configResolver) if err != nil { utilruntime.HandleError(fmt.Errorf("failed to get cloud provider for SetMetricsForMachines:: %q: %w", provider, err)) continue @@ -196,7 +206,7 @@ func (mc MachineCollector) Collect(ch chan<- prometheus.Metric) { return } - cvr := providerconfig.NewConfigVarResolver(mc.ctx, mc.client) + configResolver := configvar.NewResolver(mc.ctx, mc.client) machineCountByLabels := make(map[*machineMetricLabels]uint) for _, machine := range machines.Items { @@ -216,13 +226,13 @@ func (mc MachineCollector) Collect(ch chan<- prometheus.Metric) { ) } - providerConfig, err := providerconfigtypes.GetConfig(machine.Spec.ProviderSpec) + providerConfig, err := providerconfig.GetConfig(machine.Spec.ProviderSpec) if err != nil { utilruntime.HandleError(fmt.Errorf("failed to determine providerSpec for machine %s: %w", machine.Name, err)) continue } - provider, err := cloudprovider.ForProvider(providerConfig.CloudProvider, cvr) + provider, err := cloudprovider.ForProvider(providerConfig.CloudProvider, configResolver) if err != nil { utilruntime.HandleError(fmt.Errorf("failed to determine provider provider: %w", err)) continue diff --git a/pkg/controller/machinedeployment/controller.go b/pkg/controller/machinedeployment/controller.go new file mode 100644 index 000000000..1ff2def02 --- /dev/null +++ 
b/pkg/controller/machinedeployment/controller.go @@ -0,0 +1,326 @@ +/* +Copyright 2019 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package machinedeployment + +import ( + "context" + "reflect" + + "github.com/go-logr/logr" + "github.com/go-logr/zapr" + "github.com/pkg/errors" + "go.uber.org/zap" + + "k8c.io/machine-controller/sdk/apis/cluster/common" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + + corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/tools/record" + ctrlruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/controller" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/reconcile" +) + +// controllerName is the name of this controller. +const controllerName = "machinedeployment-controller" + +var ( + // controllerKind contains the schema.GroupVersionKind for this controller type. + controllerKind = clusterv1alpha1.SchemeGroupVersion.WithKind("MachineDeployment") +) + +// ReconcileMachineDeployment reconciles a MachineDeployment object. 
+type ReconcileMachineDeployment struct { + ctrlruntimeclient.Client + log *zap.SugaredLogger + scheme *runtime.Scheme + recorder record.EventRecorder +} + +// newReconciler returns a new reconcile.Reconciler. +func newReconciler(mgr manager.Manager, log *zap.SugaredLogger) *ReconcileMachineDeployment { + return &ReconcileMachineDeployment{ + Client: mgr.GetClient(), + log: log.Named(controllerName), + scheme: mgr.GetScheme(), + recorder: mgr.GetEventRecorderFor(controllerName), + } +} + +// Add creates a new MachineDeployment Controller and adds it to the Manager with default RBAC. +func Add(mgr manager.Manager, log *zap.SugaredLogger) error { + r := newReconciler(mgr, log) + return add(mgr, r, r.MachineSetToDeployments()) +} + +// add adds a new Controller to mgr with r as the reconcile.Reconciler. +func add(mgr manager.Manager, r reconcile.Reconciler, mapFn handler.MapFunc) error { + _, err := builder.ControllerManagedBy(mgr). + Named(controllerName). + WithOptions(controller.Options{ + LogConstructor: func(*reconcile.Request) logr.Logger { + // we log ourselves + return zapr.NewLogger(zap.NewNop()) + }, + }). + // Watch for changes to MachineDeployment. + For(&clusterv1alpha1.MachineDeployment{}). + // Watch for changes to MachineSet and reconcile the owner MachineDeployment. + Owns(&clusterv1alpha1.MachineSet{}). + // Watch for changes to MachineSets using a mapping function to MachineDeployment. + // This watcher is required for use cases like adoption. In case a MachineSet doesn't have + // a controller reference, it'll look for potential matching MachineDeployments to reconcile. + Watches(&clusterv1alpha1.MachineSet{}, handler.EnqueueRequestsFromMapFunc(mapFn)). + Build(r) + + return err +} + +// Reconcile reads that state of the cluster for a MachineDeployment object and makes changes based on the state read +// and what is in the MachineDeployment.Spec. 
+// +// +kubebuilder:rbac:groups=cluster.k8s.io,resources=machinedeployments;machinedeployments/status,verbs=get;list;watch;create;update;patch;delete +func (r *ReconcileMachineDeployment) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + log := r.log.With("machinedeployment", request.NamespacedName) + log.Debug("Reconciling") + + // Fetch the MachineDeployment instance + deployment := &clusterv1alpha1.MachineDeployment{} + if err := r.Get(ctx, request.NamespacedName, deployment); err != nil { + if apierrors.IsNotFound(err) { + // Object not found, return. Created objects are automatically garbage collected. + // For additional cleanup logic use finalizers. + return reconcile.Result{}, nil + } + // Error reading the object - requeue the request. + log.Errorw("Failed to get MachineDeployment", zap.Error(err)) + return reconcile.Result{}, err + } + + // Ignore deleted MachineDeployments, this can happen when foregroundDeletion + // is enabled + if deployment.DeletionTimestamp != nil { + return reconcile.Result{}, nil + } + + result, err := r.reconcile(ctx, log, deployment) + if err != nil { + log.Errorw("Reconciling failed", zap.Error(err)) + r.recorder.Eventf(deployment, corev1.EventTypeWarning, "ReconcileError", "%v", err) + } + + return result, err +} + +func (r *ReconcileMachineDeployment) reconcile(ctx context.Context, log *zap.SugaredLogger, d *clusterv1alpha1.MachineDeployment) (reconcile.Result, error) { + clusterv1alpha1.PopulateDefaultsMachineDeployment(d) + + everything := metav1.LabelSelector{} + if reflect.DeepEqual(d.Spec.Selector, &everything) { + if d.Status.ObservedGeneration < d.Generation { + d.Status.ObservedGeneration = d.Generation + if err := r.Status().Update(ctx, d); err != nil { + return reconcile.Result{}, err + } + } + return reconcile.Result{Requeue: true}, nil + } + + // Make sure that label selector can match the template's labels. 
+ // TODO(vincepri): Move to a validation (admission) webhook when supported. + selector, err := metav1.LabelSelectorAsSelector(&d.Spec.Selector) + if err != nil { + return reconcile.Result{}, errors.Wrapf(err, "failed to parse MachineDeployment %q label selector", d.Name) + } + + if !selector.Matches(labels.Set(d.Spec.Template.Labels)) { + return reconcile.Result{}, errors.Errorf("failed validation on MachineDeployment %q label selector, cannot match Machine template labels", d.Name) + } + + if !contains(d.Finalizers, metav1.FinalizerDeleteDependents) { + d.Finalizers = append(d.Finalizers, metav1.FinalizerDeleteDependents) + if err := r.Update(ctx, d); err != nil { + return reconcile.Result{}, err + } + + // Since adding the finalizer updates the object return to avoid later update issues + return reconcile.Result{Requeue: true}, nil + } + + msList, err := r.getMachineSetsForDeployment(ctx, log, d) + if err != nil { + return reconcile.Result{}, err + } + + if d.DeletionTimestamp != nil { + return reconcile.Result{}, r.sync(ctx, log, d, msList) + } + + if d.Spec.Paused { + return reconcile.Result{}, r.sync(ctx, log, d, msList) + } + + switch d.Spec.Strategy.Type { + case common.RollingUpdateMachineDeploymentStrategyType: + return reconcile.Result{}, r.rolloutRolling(ctx, log, d, msList) + } + + return reconcile.Result{}, errors.Errorf("unexpected deployment strategy type: %s", d.Spec.Strategy.Type) +} + +// getMachineSetsForDeployment returns a list of MachineSets associated with a MachineDeployment. +func (r *ReconcileMachineDeployment) getMachineSetsForDeployment(ctx context.Context, log *zap.SugaredLogger, d *clusterv1alpha1.MachineDeployment) ([]*clusterv1alpha1.MachineSet, error) { + // List all MachineSets to find those we own but that no longer match our selector. 
+ machineSets := &clusterv1alpha1.MachineSetList{} + listOptions := &ctrlruntimeclient.ListOptions{Namespace: d.Namespace} + if err := r.List(ctx, machineSets, listOptions); err != nil { + return nil, err + } + + filtered := make([]*clusterv1alpha1.MachineSet, 0, len(machineSets.Items)) + for idx := range machineSets.Items { + ms := &machineSets.Items[idx] + msLog := log.With("machineset", ctrlruntimeclient.ObjectKeyFromObject(ms)) + + selector, err := metav1.LabelSelectorAsSelector(&d.Spec.Selector) + if err != nil { + msLog.Errorw("Skipping MachineSet, failed to get label selector from spec selector", zap.Error(err)) + continue + } + + // If a MachineDeployment with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() { + msLog.Info("Skipping MachineSet as the selector is empty") + continue + } + + if !selector.Matches(labels.Set(ms.Labels)) { + msLog.Debug("Skipping MachineSet, label mismatch") + continue + } + + // Attempt to adopt machine if it meets previous conditions and it has no controller references. + if metav1.GetControllerOf(ms) == nil { + if err := r.adoptOrphan(ctx, d, ms); err != nil { + msLog.Infow("Failed to adopt MachineSet into MachineDeployment", zap.Error(err)) + continue + } + } + + if !metav1.IsControlledBy(ms, d) { + continue + } + + filtered = append(filtered, ms) + } + + return filtered, nil +} + +// adoptOrphan sets the MachineDeployment as a controller OwnerReference to the MachineSet. +func (r *ReconcileMachineDeployment) adoptOrphan(ctx context.Context, deployment *clusterv1alpha1.MachineDeployment, machineSet *clusterv1alpha1.MachineSet) error { + newRef := *metav1.NewControllerRef(deployment, controllerKind) + machineSet.OwnerReferences = append(machineSet.OwnerReferences, newRef) + return r.Update(ctx, machineSet) +} + +// getMachineDeploymentsForMachineSet returns a list of MachineDeployments that could potentially match a MachineSet. 
+func (r *ReconcileMachineDeployment) getMachineDeploymentsForMachineSet(ctx context.Context, log *zap.SugaredLogger, ms *clusterv1alpha1.MachineSet) []*clusterv1alpha1.MachineDeployment {
+	if len(ms.Labels) == 0 {
+		log.Info("No MachineDeployments found for MachineSet because it has no labels")
+		return nil
+	}
+
+	dList := &clusterv1alpha1.MachineDeploymentList{}
+	listOptions := &ctrlruntimeclient.ListOptions{Namespace: ms.Namespace}
+	if err := r.List(ctx, dList, listOptions); err != nil {
+		log.Errorw("Failed to list MachineDeployments", zap.Error(err))
+		return nil
+	}
+
+	deployments := make([]*clusterv1alpha1.MachineDeployment, 0, len(dList.Items))
+	for idx, d := range dList.Items {
+		selector, err := metav1.LabelSelectorAsSelector(&d.Spec.Selector)
+		if err != nil {
+			continue
+		}
+
+		// If a deployment with a nil or empty selector creeps in, it should match nothing, not everything.
+		if selector.Empty() || !selector.Matches(labels.Set(ms.Labels)) {
+			continue
+		}
+
+		deployments = append(deployments, &dList.Items[idx])
+	}
+
+	return deployments
+}
+
+// MachineSetToDeployments is a handler.MapFunc to be used to enqueue requests for reconciliation
+// for MachineDeployments that might adopt an orphaned MachineSet.
+func (r *ReconcileMachineDeployment) MachineSetToDeployments() handler.MapFunc {
+	return func(ctx context.Context, o ctrlruntimeclient.Object) []ctrlruntime.Request {
+		result := []reconcile.Request{}
+
+		ms := &clusterv1alpha1.MachineSet{}
+		key := ctrlruntimeclient.ObjectKey{Namespace: o.GetNamespace(), Name: o.GetName()}
+		if err := r.Get(ctx, key, ms); err != nil {
+			if !apierrors.IsNotFound(err) {
+				r.log.Errorw("Failed to retrieve MachineSet for possible MachineDeployment adoption", "machineset", key, zap.Error(err))
+			}
+			return nil
+		}
+
+		// Check if the controller reference is already set and
+		// return an empty result when one is found.
+ for _, ref := range ms.OwnerReferences { + if ref.Controller != nil && *ref.Controller { + return result + } + } + + mds := r.getMachineDeploymentsForMachineSet(ctx, r.log.With("machineset", key), ms) + if len(mds) == 0 { + r.log.Debugw("Found no MachineDeployments for MachineSet", "machineset", key) + return nil + } + + for _, md := range mds { + name := ctrlruntimeclient.ObjectKey{Namespace: md.Namespace, Name: md.Name} + result = append(result, reconcile.Request{NamespacedName: name}) + } + + return result + } +} + +func contains(list []string, strToSearch string) bool { + for _, item := range list { + if item == strToSearch { + return true + } + } + return false +} diff --git a/pkg/controller/machinedeployment/machinedeployment_controller.go b/pkg/controller/machinedeployment/machinedeployment_controller.go deleted file mode 100644 index 37528d471..000000000 --- a/pkg/controller/machinedeployment/machinedeployment_controller.go +++ /dev/null @@ -1,381 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package machinedeployment - -import ( - "context" - "reflect" - - "github.com/pkg/errors" - - "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/tools/record" - "k8s.io/klog" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" - "sigs.k8s.io/controller-runtime/pkg/manager" - "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" -) - -// controllerName is the name of this controller. -const controllerName = "machinedeployment-controller" - -var ( - // controllerKind contains the schema.GroupVersionKind for this controller type. - controllerKind = v1alpha1.SchemeGroupVersion.WithKind("MachineDeployment") -) - -// ReconcileMachineDeployment reconciles a MachineDeployment object. -type ReconcileMachineDeployment struct { - client.Client - scheme *runtime.Scheme - recorder record.EventRecorder -} - -// newReconciler returns a new reconcile.Reconciler. -func newReconciler(mgr manager.Manager) *ReconcileMachineDeployment { - return &ReconcileMachineDeployment{Client: mgr.GetClient(), scheme: mgr.GetScheme(), recorder: mgr.GetEventRecorderFor(controllerName)} -} - -// Add creates a new MachineDeployment Controller and adds it to the Manager with default RBAC. -func Add(mgr manager.Manager) error { - r := newReconciler(mgr) - return add(mgr, newReconciler(mgr), r.MachineSetToDeployments) -} - -// add adds a new Controller to mgr with r as the reconcile.Reconciler. -func add(mgr manager.Manager, r reconcile.Reconciler, mapFn handler.MapFunc) error { - // Create a new controller. 
- c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r}) - if err != nil { - return err - } - - // Watch for changes to MachineDeployment. - err = c.Watch(&source.Kind{ - Type: &v1alpha1.MachineDeployment{}}, - &handler.EnqueueRequestForObject{}, - ) - if err != nil { - return err - } - - // Watch for changes to MachineSet and reconcile the owner MachineDeployment. - err = c.Watch( - &source.Kind{Type: &v1alpha1.MachineSet{}}, - &handler.EnqueueRequestForOwner{OwnerType: &v1alpha1.MachineDeployment{}, IsController: true}, - ) - if err != nil { - return err - } - - // Watch for changes to MachineSets using a mapping function to MachineDeployment. - // This watcher is required for use cases like adoption. In case a MachineSet doesn't have - // a controller reference, it'll look for potential matching MachineDeployments to reconcile. - err = c.Watch( - &source.Kind{Type: &v1alpha1.MachineSet{}}, - handler.EnqueueRequestsFromMapFunc(mapFn), - ) - if err != nil { - return err - } - - return nil -} - -// Reconcile reads that state of the cluster for a MachineDeployment object and makes changes based on the state read -// and what is in the MachineDeployment.Spec. -// -// +kubebuilder:rbac:groups=cluster.k8s.io,resources=machinedeployments;machinedeployments/status,verbs=get;list;watch;create;update;patch;delete -func (r *ReconcileMachineDeployment) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - // Fetch the MachineDeployment instance - d := &v1alpha1.MachineDeployment{} - if err := r.Get(ctx, request.NamespacedName, d); err != nil { - if apierrors.IsNotFound(err) { - // Object not found, return. Created objects are automatically garbage collected. - // For additional cleanup logic use finalizers. - return reconcile.Result{}, nil - } - // Error reading the object - requeue the request. 
- return reconcile.Result{}, err - } - - // Ignore deleted MachineDeployments, this can happen when foregroundDeletion - // is enabled - if d.DeletionTimestamp != nil { - return reconcile.Result{}, nil - } - - result, err := r.reconcile(ctx, d) - if err != nil { - klog.Errorf("Failed to reconcile MachineDeployment %q: %v", request.NamespacedName, err) - r.recorder.Eventf(d, corev1.EventTypeWarning, "ReconcileError", "%v", err) - } - - return result, err -} - -func (r *ReconcileMachineDeployment) reconcile(ctx context.Context, d *v1alpha1.MachineDeployment) (reconcile.Result, error) { - v1alpha1.PopulateDefaultsMachineDeployment(d) - - everything := metav1.LabelSelector{} - if reflect.DeepEqual(d.Spec.Selector, &everything) { - if d.Status.ObservedGeneration < d.Generation { - d.Status.ObservedGeneration = d.Generation - if err := r.Status().Update(ctx, d); err != nil { - klog.Warningf("Failed to update status for MachineDeployment %q: %v", d.Name, err) - return reconcile.Result{}, err - } - } - return reconcile.Result{Requeue: true}, nil - } - - // Make sure that label selector can match the template's labels. - // TODO(vincepri): Move to a validation (admission) webhook when supported. 
- selector, err := metav1.LabelSelectorAsSelector(&d.Spec.Selector) - if err != nil { - return reconcile.Result{}, errors.Wrapf(err, "failed to parse MachineDeployment %q label selector", d.Name) - } - - if !selector.Matches(labels.Set(d.Spec.Template.Labels)) { - return reconcile.Result{}, errors.Errorf("failed validation on MachineDeployment %q label selector, cannot match Machine template labels", d.Name) - } - - if !contains(d.Finalizers, metav1.FinalizerDeleteDependents) { - d.Finalizers = append(d.ObjectMeta.Finalizers, metav1.FinalizerDeleteDependents) - if err := r.Client.Update(ctx, d); err != nil { - klog.Infof("Failed to add finalizers to MachineSet %q: %v", d.Name, err) - return reconcile.Result{}, err - } - - // Since adding the finalizer updates the object return to avoid later update issues - return reconcile.Result{Requeue: true}, nil - } - - msList, err := r.getMachineSetsForDeployment(ctx, d) - if err != nil { - return reconcile.Result{}, err - } - - machineMap, err := r.getMachineMapForDeployment(ctx, d, msList) - if err != nil { - return reconcile.Result{}, err - } - - if d.DeletionTimestamp != nil { - return reconcile.Result{}, r.sync(ctx, d, msList, machineMap) - } - - if d.Spec.Paused { - return reconcile.Result{}, r.sync(ctx, d, msList, machineMap) - } - - switch d.Spec.Strategy.Type { - case common.RollingUpdateMachineDeploymentStrategyType: - return reconcile.Result{}, r.rolloutRolling(ctx, d, msList, machineMap) - } - - return reconcile.Result{}, errors.Errorf("unexpected deployment strategy type: %s", d.Spec.Strategy.Type) -} - -// getMachineSetsForDeployment returns a list of MachineSets associated with a MachineDeployment. -func (r *ReconcileMachineDeployment) getMachineSetsForDeployment(ctx context.Context, d *v1alpha1.MachineDeployment) ([]*v1alpha1.MachineSet, error) { - // List all MachineSets to find those we own but that no longer match our selector. 
- machineSets := &v1alpha1.MachineSetList{} - listOptions := &client.ListOptions{Namespace: d.Namespace} - if err := r.Client.List(ctx, machineSets, listOptions); err != nil { - return nil, err - } - - filtered := make([]*v1alpha1.MachineSet, 0, len(machineSets.Items)) - for idx := range machineSets.Items { - ms := &machineSets.Items[idx] - - selector, err := metav1.LabelSelectorAsSelector(&d.Spec.Selector) - if err != nil { - klog.Errorf("Skipping MachineSet %q, failed to get label selector from spec selector: %v", ms.Name, err) - continue - } - - // If a MachineDeployment with a nil or empty selector creeps in, it should match nothing, not everything. - if selector.Empty() { - klog.Warningf("Skipping MachineSet %q as the selector is empty", ms.Name) - continue - } - - if !selector.Matches(labels.Set(ms.Labels)) { - klog.V(4).Infof("Skipping MachineSet %v, label mismatch", ms.Name) - continue - } - - // Attempt to adopt machine if it meets previous conditions and it has no controller references. - if metav1.GetControllerOf(ms) == nil { - if err := r.adoptOrphan(ctx, d, ms); err != nil { - klog.Warningf("Failed to adopt MachineSet %q into MachineDeployment %q: %v", ms.Name, d.Name, err) - continue - } - } - - if !metav1.IsControlledBy(ms, d) { - continue - } - - filtered = append(filtered, ms) - } - - return filtered, nil -} - -// adoptOrphan sets the MachineDeployment as a controller OwnerReference to the MachineSet. -func (r *ReconcileMachineDeployment) adoptOrphan(ctx context.Context, deployment *v1alpha1.MachineDeployment, machineSet *v1alpha1.MachineSet) error { - newRef := *metav1.NewControllerRef(deployment, controllerKind) - machineSet.OwnerReferences = append(machineSet.OwnerReferences, newRef) - return r.Client.Update(ctx, machineSet) -} - -// getMachineMapForDeployment returns the Machines managed by a Deployment. 
-// -// It returns a map from MachineSet UID to a list of Machines controlled by that MachineSet, -// according to the Machine's ControllerRef. -func (r *ReconcileMachineDeployment) getMachineMapForDeployment(ctx context.Context, d *v1alpha1.MachineDeployment, msList []*v1alpha1.MachineSet) (map[types.UID]*v1alpha1.MachineList, error) { - // TODO(droot): double check if previous selector maps correctly to new one. - // _, err := metav1.LabelSelectorAsSelector(&d.Spec.Selector) - - // Get all Machines that potentially belong to this Deployment. - selector, err := metav1.LabelSelectorAsMap(&d.Spec.Selector) - if err != nil { - return nil, err - } - - machines := &v1alpha1.MachineList{} - listOptions := &client.ListOptions{Namespace: d.Namespace} - if err = r.Client.List(ctx, machines, listOptions, client.MatchingLabels(selector)); err != nil { - return nil, err - } - - // Group Machines by their controller (if it's in msList). - machineMap := make(map[types.UID]*v1alpha1.MachineList, len(msList)) - for _, ms := range msList { - machineMap[ms.UID] = &v1alpha1.MachineList{} - } - - for idx := range machines.Items { - machine := &machines.Items[idx] - - // Do not ignore inactive Machines because Recreate Deployments need to verify that no - // Machines from older versions are running before spinning up new Machines. - controllerRef := metav1.GetControllerOf(machine) - if controllerRef == nil { - continue - } - - // Only append if we care about this UID. - if machineList, ok := machineMap[controllerRef.UID]; ok { - machineList.Items = append(machineList.Items, *machine) - } - } - - return machineMap, nil -} - -// getMachineDeploymentsForMachineSet returns a list of MachineDeployments that could potentially match a MachineSet. 
-func (r *ReconcileMachineDeployment) getMachineDeploymentsForMachineSet(ctx context.Context, ms *v1alpha1.MachineSet) []*v1alpha1.MachineDeployment { - if len(ms.Labels) == 0 { - klog.Warningf("No machine deployments found for MachineSet %q because it has no labels", ms.Name) - return nil - } - - dList := &v1alpha1.MachineDeploymentList{} - listOptions := &client.ListOptions{Namespace: ms.Namespace} - if err := r.Client.List(ctx, dList, listOptions); err != nil { - klog.Warningf("Failed to list machine deployments: %v", err) - return nil - } - - deployments := make([]*v1alpha1.MachineDeployment, 0, len(dList.Items)) - for idx, d := range dList.Items { - selector, err := metav1.LabelSelectorAsSelector(&d.Spec.Selector) - if err != nil { - continue - } - - // If a deployment with a nil or empty selector creeps in, it should match nothing, not everything. - if selector.Empty() || !selector.Matches(labels.Set(ms.Labels)) { - continue - } - - deployments = append(deployments, &dList.Items[idx]) - } - - return deployments -} - -// MachineSetTodeployments is a handler.MapFunc to be used to enqeue requests for reconciliation -// for MachineDeployments that might adopt an orphaned MachineSet. -func (r *ReconcileMachineDeployment) MachineSetToDeployments(o client.Object) []reconcile.Request { - result := []reconcile.Request{} - ctx := context.Background() - - ms := &v1alpha1.MachineSet{} - key := client.ObjectKey{Namespace: o.GetNamespace(), Name: o.GetName()} - if err := r.Client.Get(ctx, key, ms); err != nil { - if !apierrors.IsNotFound(err) { - klog.Errorf("Unable to retrieve MachineSet %q for possible MachineDeployment adoption: %v", key, err) - } - return nil - } - - // Check if the controller reference is already set and - // return an empty result when one is found. 
- for _, ref := range ms.ObjectMeta.OwnerReferences { - if ref.Controller != nil && *ref.Controller { - return result - } - } - - mds := r.getMachineDeploymentsForMachineSet(ctx, ms) - if len(mds) == 0 { - klog.V(4).Infof("Found no machine set for machine: %v", ms.Name) - return nil - } - - for _, md := range mds { - name := client.ObjectKey{Namespace: md.Namespace, Name: md.Name} - result = append(result, reconcile.Request{NamespacedName: name}) - } - - return result -} - -func contains(list []string, strToSearch string) bool { - for _, item := range list { - if item == strToSearch { - return true - } - } - return false -} diff --git a/pkg/controller/machinedeployment/metrics.go b/pkg/controller/machinedeployment/metrics.go new file mode 100644 index 000000000..2b6a98169 --- /dev/null +++ b/pkg/controller/machinedeployment/metrics.go @@ -0,0 +1,114 @@ +/* +Copyright 2025 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package machinedeployment
+
+import (
+	"context"
+
+	"github.com/prometheus/client_golang/prometheus"
+
+	clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1"
+
+	ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+const metricsPrefix = "machine_deployment_"
+
+type Collector struct {
+	ctx    context.Context
+	client ctrlruntimeclient.Client
+
+	replicas          *prometheus.Desc
+	availableReplicas *prometheus.Desc
+	readyReplicas     *prometheus.Desc
+	updatedReplicas   *prometheus.Desc
+}
+
+// NewCollector creates new machine deployment collector for metrics collection.
+func NewCollector(ctx context.Context, client ctrlruntimeclient.Client) *Collector {
+	return &Collector{
+		ctx:    ctx,
+		client: client,
+		replicas: prometheus.NewDesc(
+			metricsPrefix+"replicas",
+			"The number of replicas defined for a machine deployment",
+			[]string{"name", "namespace"}, nil,
+		),
+		availableReplicas: prometheus.NewDesc(
+			metricsPrefix+"available_replicas",
+			"The number of available replicas for a machine deployment",
+			[]string{"name", "namespace"}, nil,
+		),
+		readyReplicas: prometheus.NewDesc(
+			metricsPrefix+"ready_replicas",
+			"The number of ready replicas for a machine deployment",
+			[]string{"name", "namespace"}, nil,
+		),
+		updatedReplicas: prometheus.NewDesc(
+			metricsPrefix+"updated_replicas",
+			"The number of replicas updated for a machine deployment",
+			[]string{"name", "namespace"}, nil,
+		),
+	}
+}
+
+// Describe implements the prometheus.Collector interface.
+func (c *Collector) Describe(desc chan<- *prometheus.Desc) {
+	desc <- c.replicas
+	desc <- c.readyReplicas
+	desc <- c.availableReplicas
+	desc <- c.updatedReplicas
+}
+
+// Collect implements the prometheus.Collector interface.
+func (c *Collector) Collect(metrics chan<- prometheus.Metric) { + machineDeployments := &clusterv1alpha1.MachineDeploymentList{} + if err := c.client.List(c.ctx, machineDeployments); err != nil { + return + } + + for _, machineDeployment := range machineDeployments.Items { + metrics <- prometheus.MustNewConstMetric( + c.replicas, + prometheus.GaugeValue, + float64(machineDeployment.Status.Replicas), + machineDeployment.Name, + machineDeployment.Namespace, + ) + metrics <- prometheus.MustNewConstMetric( + c.readyReplicas, + prometheus.GaugeValue, + float64(machineDeployment.Status.ReadyReplicas), + machineDeployment.Name, + machineDeployment.Namespace, + ) + metrics <- prometheus.MustNewConstMetric( + c.availableReplicas, + prometheus.GaugeValue, + float64(machineDeployment.Status.AvailableReplicas), + machineDeployment.Name, + machineDeployment.Namespace, + ) + metrics <- prometheus.MustNewConstMetric( + c.updatedReplicas, + prometheus.GaugeValue, + float64(machineDeployment.Status.UpdatedReplicas), + machineDeployment.Name, + machineDeployment.Namespace, + ) + } +} diff --git a/pkg/controller/machinedeployment/rolling.go b/pkg/controller/machinedeployment/rolling.go index 3267d9487..cc3703adf 100644 --- a/pkg/controller/machinedeployment/rolling.go +++ b/pkg/controller/machinedeployment/rolling.go @@ -21,18 +21,18 @@ import ( "sort" "github.com/pkg/errors" + "go.uber.org/zap" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - dutil "github.com/kubermatic/machine-controller/pkg/controller/util" + "k8c.io/machine-controller/pkg/controller/util" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" "k8s.io/utils/integer" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) // rolloutRolling implements the logic for rolling a new machine set. 
-func (r *ReconcileMachineDeployment) rolloutRolling(ctx context.Context, d *v1alpha1.MachineDeployment, msList []*v1alpha1.MachineSet, machineMap map[types.UID]*v1alpha1.MachineList) error { - newMS, oldMSs, err := r.getAllMachineSetsAndSyncRevision(ctx, d, msList, machineMap, true) +func (r *ReconcileMachineDeployment) rolloutRolling(ctx context.Context, log *zap.SugaredLogger, d *clusterv1alpha1.MachineDeployment, msList []*clusterv1alpha1.MachineSet) error { + newMS, oldMSs, err := r.getAllMachineSetsAndSyncRevision(ctx, log, d, msList, true) if err != nil { return err } @@ -56,7 +56,7 @@ func (r *ReconcileMachineDeployment) rolloutRolling(ctx context.Context, d *v1al } // Scale down, if we can. - if err := r.reconcileOldMachineSets(ctx, allMSs, oldMSs, newMS, d); err != nil { + if err := r.reconcileOldMachineSets(ctx, log.With("newmachineset", ctrlruntimeclient.ObjectKeyFromObject(newMS)), allMSs, oldMSs, newMS, d); err != nil { return err } @@ -64,8 +64,8 @@ func (r *ReconcileMachineDeployment) rolloutRolling(ctx context.Context, d *v1al return err } - if dutil.DeploymentComplete(d, &d.Status) { - if err := r.cleanupDeployment(ctx, oldMSs, d); err != nil { + if util.DeploymentComplete(d, &d.Status) { + if err := r.cleanupDeployment(ctx, log, oldMSs, d); err != nil { return err } } @@ -73,7 +73,7 @@ func (r *ReconcileMachineDeployment) rolloutRolling(ctx context.Context, d *v1al return nil } -func (r *ReconcileMachineDeployment) reconcileNewMachineSet(ctx context.Context, allMSs []*v1alpha1.MachineSet, newMS *v1alpha1.MachineSet, deployment *v1alpha1.MachineDeployment) error { +func (r *ReconcileMachineDeployment) reconcileNewMachineSet(ctx context.Context, allMSs []*clusterv1alpha1.MachineSet, newMS *clusterv1alpha1.MachineSet, deployment *clusterv1alpha1.MachineDeployment) error { if deployment.Spec.Replicas == nil { return errors.Errorf("spec replicas for deployment set %v is nil, this is unexpected", deployment.Name) } @@ -93,7 +93,7 @@ func (r 
*ReconcileMachineDeployment) reconcileNewMachineSet(ctx context.Context, return err } - newReplicasCount, err := dutil.NewMSNewReplicas(deployment, allMSs, newMS) + newReplicasCount, err := util.NewMSNewReplicas(deployment, allMSs, newMS) if err != nil { return err } @@ -101,7 +101,7 @@ func (r *ReconcileMachineDeployment) reconcileNewMachineSet(ctx context.Context, return err } -func (r *ReconcileMachineDeployment) reconcileOldMachineSets(ctx context.Context, allMSs []*v1alpha1.MachineSet, oldMSs []*v1alpha1.MachineSet, newMS *v1alpha1.MachineSet, deployment *v1alpha1.MachineDeployment) error { +func (r *ReconcileMachineDeployment) reconcileOldMachineSets(ctx context.Context, log *zap.SugaredLogger, allMSs []*clusterv1alpha1.MachineSet, oldMSs []*clusterv1alpha1.MachineSet, newMS *clusterv1alpha1.MachineSet, deployment *clusterv1alpha1.MachineDeployment) error { if deployment.Spec.Replicas == nil { return errors.Errorf("spec replicas for deployment set %v is nil, this is unexpected", deployment.Name) } @@ -110,15 +110,15 @@ func (r *ReconcileMachineDeployment) reconcileOldMachineSets(ctx context.Context return errors.Errorf("spec replicas for machine set %v is nil, this is unexpected", newMS.Name) } - oldMachinesCount := dutil.GetReplicaCountForMachineSets(oldMSs) + oldMachinesCount := util.GetReplicaCountForMachineSets(oldMSs) if oldMachinesCount == 0 { // Can't scale down further return nil } - allMachinesCount := dutil.GetReplicaCountForMachineSets(allMSs) - klog.V(4).Infof("New machine set %s/%s has %d available machines.", newMS.Namespace, newMS.Name, newMS.Status.AvailableReplicas) - maxUnavailable := dutil.MaxUnavailable(*deployment) + allMachinesCount := util.GetReplicaCountForMachineSets(allMSs) + log.Debugw("New machine set status", "replicas", newMS.Status.AvailableReplicas) + maxUnavailable := util.MaxUnavailable(*deployment) // Check if we can scale down. 
We can scale down in the following 2 cases: // * Some old machine sets have unhealthy replicas, we could safely scale down those unhealthy replicas since that won't further @@ -159,27 +159,27 @@ func (r *ReconcileMachineDeployment) reconcileOldMachineSets(ctx context.Context // Clean up unhealthy replicas first, otherwise unhealthy replicas will block deployment // and cause timeout. See https://github.com/kubernetes/kubernetes/issues/16737 - oldMSs, cleanupCount, err := r.cleanupUnhealthyReplicas(ctx, oldMSs, deployment, maxScaledDown) + oldMSs, cleanupCount, err := r.cleanupUnhealthyReplicas(ctx, log, oldMSs, deployment, maxScaledDown) if err != nil { return nil } - klog.V(4).Infof("Cleaned up unhealthy replicas from old MachineSets by %d", cleanupCount) + log.Debugw("Cleaned up unhealthy replicas from old MachineSets", "reduction", cleanupCount) // Scale down old machine sets, need check maxUnavailable to ensure we can scale down allMSs = append(oldMSs, newMS) - scaledDownCount, err := r.scaleDownOldMachineSetsForRollingUpdate(ctx, allMSs, oldMSs, deployment) + scaledDownCount, err := r.scaleDownOldMachineSetsForRollingUpdate(ctx, log, allMSs, oldMSs, deployment) if err != nil { return err } - klog.V(4).Infof("Scaled down old MachineSets of deployment %s by %d", deployment.Name, scaledDownCount) + log.Debugw("Scaled down old MachineSets", "reduction", scaledDownCount) return nil } // cleanupUnhealthyReplicas will scale down old machine sets with unhealthy replicas, so that all unhealthy replicas will be deleted. 
-func (r *ReconcileMachineDeployment) cleanupUnhealthyReplicas(ctx context.Context, oldMSs []*v1alpha1.MachineSet, deployment *v1alpha1.MachineDeployment, maxCleanupCount int32) ([]*v1alpha1.MachineSet, int32, error) { - sort.Sort(dutil.MachineSetsByCreationTimestamp(oldMSs)) +func (r *ReconcileMachineDeployment) cleanupUnhealthyReplicas(ctx context.Context, log *zap.SugaredLogger, oldMSs []*clusterv1alpha1.MachineSet, deployment *clusterv1alpha1.MachineDeployment, maxCleanupCount int32) ([]*clusterv1alpha1.MachineSet, int32, error) { + sort.Sort(util.MachineSetsByCreationTimestamp(oldMSs)) // Safely scale down all old machine sets with unhealthy replicas. Replica set will sort the machines in the order // such that not-ready < ready, unscheduled < scheduled, and pending < running. This ensures that unhealthy replicas will @@ -202,7 +202,7 @@ func (r *ReconcileMachineDeployment) cleanupUnhealthyReplicas(ctx context.Contex } oldMSAvailableReplicas := targetMS.Status.AvailableReplicas - klog.V(4).Infof("Found %d available machines in old MS %s/%s", oldMSAvailableReplicas, targetMS.Namespace, targetMS.Name) + log.Debugw("Available machines in old MachineSet", "oldmachineset", ctrlruntimeclient.ObjectKeyFromObject(targetMS), "replicas", oldMSAvailableReplicas) if oldMSReplicas == oldMSAvailableReplicas { // no unhealthy replicas found, no scaling required. continue @@ -229,26 +229,26 @@ func (r *ReconcileMachineDeployment) cleanupUnhealthyReplicas(ctx context.Contex // scaleDownOldMachineSetsForRollingUpdate scales down old machine sets when deployment strategy is "RollingUpdate". // Need check maxUnavailable to ensure availability. 
-func (r *ReconcileMachineDeployment) scaleDownOldMachineSetsForRollingUpdate(ctx context.Context, allMSs []*v1alpha1.MachineSet, oldMSs []*v1alpha1.MachineSet, deployment *v1alpha1.MachineDeployment) (int32, error) { +func (r *ReconcileMachineDeployment) scaleDownOldMachineSetsForRollingUpdate(ctx context.Context, log *zap.SugaredLogger, allMSs []*clusterv1alpha1.MachineSet, oldMSs []*clusterv1alpha1.MachineSet, deployment *clusterv1alpha1.MachineDeployment) (int32, error) { if deployment.Spec.Replicas == nil { return 0, errors.Errorf("spec replicas for deployment %v is nil, this is unexpected", deployment.Name) } - maxUnavailable := dutil.MaxUnavailable(*deployment) + maxUnavailable := util.MaxUnavailable(*deployment) // Check if we can scale down. minAvailable := *(deployment.Spec.Replicas) - maxUnavailable // Find the number of available machines. - availableMachineCount := dutil.GetAvailableReplicaCountForMachineSets(allMSs) + availableMachineCount := util.GetAvailableReplicaCountForMachineSets(allMSs) if availableMachineCount <= minAvailable { // Cannot scale down. 
return 0, nil } - klog.V(4).Infof("Found %d available machines in deployment %s, scaling down old MSes", availableMachineCount, deployment.Name) + log.Debugw("Found available machines, scaling down old MachineSets", "replicas", availableMachineCount) - sort.Sort(dutil.MachineSetsByCreationTimestamp(oldMSs)) + sort.Sort(util.MachineSetsByCreationTimestamp(oldMSs)) totalScaledDown := int32(0) totalScaleDownCount := availableMachineCount - minAvailable diff --git a/pkg/controller/machinedeployment/sync.go b/pkg/controller/machinedeployment/sync.go index f0865ef51..73fe95359 100644 --- a/pkg/controller/machinedeployment/sync.go +++ b/pkg/controller/machinedeployment/sync.go @@ -24,9 +24,10 @@ import ( "strconv" "github.com/pkg/errors" + "go.uber.org/zap" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - dutil "github.com/kubermatic/machine-controller/pkg/controller/util" + dutil "k8c.io/machine-controller/pkg/controller/util" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" "k8s.io/apimachinery/pkg/api/equality" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -35,26 +36,25 @@ import ( apirand "k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/util/retry" - "k8s.io/klog" - "sigs.k8s.io/controller-runtime/pkg/client" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) // sync is responsible for reconciling deployments on scaling events or when they // are paused. 
-func (r *ReconcileMachineDeployment) sync(ctx context.Context, d *clusterv1alpha1.MachineDeployment, msList []*clusterv1alpha1.MachineSet, machineMap map[types.UID]*clusterv1alpha1.MachineList) error { - newMS, oldMSs, err := r.getAllMachineSetsAndSyncRevision(ctx, d, msList, machineMap, false) +func (r *ReconcileMachineDeployment) sync(ctx context.Context, log *zap.SugaredLogger, d *clusterv1alpha1.MachineDeployment, msList []*clusterv1alpha1.MachineSet) error { + newMS, oldMSs, err := r.getAllMachineSetsAndSyncRevision(ctx, log, d, msList, false) if err != nil { return err } - if err := r.scale(ctx, d, newMS, oldMSs); err != nil { + if err := r.scale(ctx, log, d, newMS, oldMSs); err != nil { // If we get an error while trying to scale, the deployment will be requeued // so we can abort this resync return err } // - // // TODO: Clean up the deployment when it's paused and no rollback is in flight. + // TODO: Clean up the deployment when it's paused and no rollback is in flight. // allMSs := append(oldMSs, newMS) return r.syncDeploymentStatus(ctx, allMSs, newMS, d) @@ -72,11 +72,11 @@ func (r *ReconcileMachineDeployment) sync(ctx context.Context, d *clusterv1alpha // // Note that currently the deployment controller is using caches to avoid querying the server for reads. // This may lead to stale reads of machine sets, thus incorrect deployment status. 
-func (r *ReconcileMachineDeployment) getAllMachineSetsAndSyncRevision(ctx context.Context, d *clusterv1alpha1.MachineDeployment, msList []*clusterv1alpha1.MachineSet, machineMap map[types.UID]*clusterv1alpha1.MachineList, createIfNotExisted bool) (*clusterv1alpha1.MachineSet, []*clusterv1alpha1.MachineSet, error) { +func (r *ReconcileMachineDeployment) getAllMachineSetsAndSyncRevision(ctx context.Context, log *zap.SugaredLogger, d *clusterv1alpha1.MachineDeployment, msList []*clusterv1alpha1.MachineSet, createIfNotExisted bool) (*clusterv1alpha1.MachineSet, []*clusterv1alpha1.MachineSet, error) { _, allOldMSs := dutil.FindOldMachineSets(d, msList) // Get new machine set with the updated revision number - newMS, err := r.getNewMachineSet(ctx, d, msList, allOldMSs, createIfNotExisted) + newMS, err := r.getNewMachineSet(ctx, log, d, msList, allOldMSs, createIfNotExisted) if err != nil { return nil, nil, err } @@ -89,11 +89,11 @@ func (r *ReconcileMachineDeployment) getAllMachineSetsAndSyncRevision(ctx contex // 2. If there's existing new MS, update its revision number if it's smaller than (maxOldRevision + 1), where maxOldRevision is the max revision number among all old MSes. // 3. If there's no existing new MS and createIfNotExisted is true, create one with appropriate revision number (maxOldRevision + 1) and replicas. // Note that the machine-template-hash will be added to adopted MSes and machines. 
-func (r *ReconcileMachineDeployment) getNewMachineSet(ctx context.Context, d *clusterv1alpha1.MachineDeployment, msList, oldMSs []*clusterv1alpha1.MachineSet, createIfNotExisted bool) (*clusterv1alpha1.MachineSet, error) { +func (r *ReconcileMachineDeployment) getNewMachineSet(ctx context.Context, log *zap.SugaredLogger, d *clusterv1alpha1.MachineDeployment, msList, oldMSs []*clusterv1alpha1.MachineSet, createIfNotExisted bool) (*clusterv1alpha1.MachineSet, error) { existingNewMS := dutil.FindNewMachineSet(d, msList) // Calculate the max revision number among all old MSes - maxOldRevision := dutil.MaxRevision(oldMSs) + maxOldRevision := dutil.MaxRevision(log, oldMSs) // Calculate revision number for this new machine set newRevision := strconv.FormatInt(maxOldRevision+1, 10) @@ -106,7 +106,7 @@ func (r *ReconcileMachineDeployment) getNewMachineSet(ctx context.Context, d *cl msCopy := existingNewMS.DeepCopy() // Set existing new machine set's annotation - annotationsUpdated := dutil.SetNewMachineSetAnnotations(d, msCopy, newRevision, true) + annotationsUpdated := dutil.SetNewMachineSetAnnotations(log, d, msCopy, newRevision, true) minReadySecondsNeedsUpdate := msCopy.Spec.MinReadySeconds != *d.Spec.MinReadySeconds if annotationsUpdated || minReadySecondsNeedsUpdate { @@ -115,8 +115,8 @@ func (r *ReconcileMachineDeployment) getNewMachineSet(ctx context.Context, d *cl } // Apply revision annotation from existingNewMS if it is missing from the deployment. 
- err := r.updateMachineDeployment(ctx, d, func(innerDeployment *clusterv1alpha1.MachineDeployment) { - dutil.SetDeploymentRevision(d, msCopy.Annotations[dutil.RevisionAnnotation]) + err := r.updateMachineDeployment(ctx, d, func(md *clusterv1alpha1.MachineDeployment) { + dutil.SetDeploymentRevision(md, msCopy.Annotations[dutil.RevisionAnnotation]) }) return msCopy, err } @@ -171,7 +171,7 @@ func (r *ReconcileMachineDeployment) getNewMachineSet(ctx context.Context, d *cl *(newMS.Spec.Replicas) = newReplicasCount // Set new machine set's annotation - dutil.SetNewMachineSetAnnotations(d, &newMS, newRevision, false) + dutil.SetNewMachineSetAnnotations(log, d, &newMS, newRevision, false) // Create the new MachineSet. If it already exists, then we need to check for possible // hash collisions. If there is any other error, we need to report it in the status of // the Deployment. @@ -184,7 +184,7 @@ func (r *ReconcileMachineDeployment) getNewMachineSet(ctx context.Context, d *cl alreadyExists = true ms := &clusterv1alpha1.MachineSet{} - msErr := r.Get(ctx, client.ObjectKey{Namespace: newMS.Namespace, Name: newMS.Name}, ms) + msErr := r.Get(ctx, ctrlruntimeclient.ObjectKey{Namespace: newMS.Namespace, Name: newMS.Name}, ms) if msErr != nil { return nil, msErr } @@ -201,16 +201,16 @@ func (r *ReconcileMachineDeployment) getNewMachineSet(ctx context.Context, d *cl return nil, err case err != nil: - klog.V(4).Infof("Failed to create new machine set %q: %v", newMS.Name, err) + log.Errorw("Failed to create new MachineSet", "machineset", ctrlruntimeclient.ObjectKeyFromObject(&newMS), zap.Error(err)) return nil, err } if !alreadyExists { - klog.V(4).Infof("Created new machine set %q", createdMS.Name) + log.Debugw("Created new MachineSet", "machineset", ctrlruntimeclient.ObjectKeyFromObject(createdMS)) } - err = r.updateMachineDeployment(ctx, d, func(innerDeployment *clusterv1alpha1.MachineDeployment) { - dutil.SetDeploymentRevision(d, newRevision) + err = 
r.updateMachineDeployment(ctx, d, func(md *clusterv1alpha1.MachineDeployment) { + dutil.SetDeploymentRevision(md, newRevision) }) return createdMS, err @@ -221,7 +221,7 @@ func (r *ReconcileMachineDeployment) getNewMachineSet(ctx context.Context, d *cl // have the effect of hastening the rollout progress, which could produce a higher proportion of unavailable // replicas in the event of a problem with the rolled out template. Should run only on scaling events or // when a deployment is paused and not during the normal rollout process. -func (r *ReconcileMachineDeployment) scale(ctx context.Context, deployment *clusterv1alpha1.MachineDeployment, newMS *clusterv1alpha1.MachineSet, oldMSs []*clusterv1alpha1.MachineSet) error { +func (r *ReconcileMachineDeployment) scale(ctx context.Context, log *zap.SugaredLogger, deployment *clusterv1alpha1.MachineDeployment, newMS *clusterv1alpha1.MachineSet, oldMSs []*clusterv1alpha1.MachineSet) error { if deployment.Spec.Replicas == nil { return errors.Errorf("spec replicas for deployment %v is nil, this is unexpected", deployment.Name) } @@ -269,21 +269,6 @@ func (r *ReconcileMachineDeployment) scale(ctx context.Context, deployment *clus // machine sets. deploymentReplicasToAdd := allowedSize - totalMSReplicas - // The additional replicas should be distributed proportionally amongst the active - // machine sets from the larger to the smaller in size machine set. Scaling direction - // drives what happens in case we are trying to scale machine sets of the same size. - // In such a case when scaling up, we should scale up newer machine sets first, and - // when scaling down, we should scale down older machine sets first. 
- var scalingOperation string - switch { - case deploymentReplicasToAdd > 0: - sort.Sort(dutil.MachineSetsBySizeNewer(allMSs)) - scalingOperation = "up" - case deploymentReplicasToAdd < 0: - sort.Sort(dutil.MachineSetsBySizeOlder(allMSs)) - scalingOperation = "down" - } - // Iterate over all active machine sets and estimate proportions for each of them. // The absolute value of deploymentReplicasAdded should never exceed the absolute // value of deploymentReplicasToAdd. @@ -292,14 +277,14 @@ func (r *ReconcileMachineDeployment) scale(ctx context.Context, deployment *clus for i := range allMSs { ms := allMSs[i] if ms.Spec.Replicas == nil { - klog.Errorf("spec replicas for machine set %v is nil, this is unexpected.", ms.Name) + log.Errorw("spec.replicas for MachineSet is nil, this is unexpected.", "machineset", ctrlruntimeclient.ObjectKeyFromObject(ms)) continue } // Estimate proportions if we have replicas to add, otherwise simply populate // nameToSize with the current sizes for each machine set. if deploymentReplicasToAdd != 0 { - proportion := dutil.GetProportion(ms, *deployment, deploymentReplicasToAdd, deploymentReplicasAdded) + proportion := dutil.GetProportion(log, ms, *deployment, deploymentReplicasToAdd, deploymentReplicasAdded) nameToSize[ms.Name] = *(ms.Spec.Replicas) + proportion deploymentReplicasAdded += proportion } else { @@ -321,7 +306,7 @@ func (r *ReconcileMachineDeployment) scale(ctx context.Context, deployment *clus } // TODO: Use transactions when we have them. 
- if _, err := r.scaleMachineSetOperation(ctx, ms, nameToSize[ms.Name], deployment, scalingOperation); err != nil { + if _, err := r.scaleMachineSetOperation(ctx, ms, nameToSize[ms.Name], deployment); err != nil { // Return as soon as we fail, the deployment is requeued return err } @@ -376,18 +361,10 @@ func (r *ReconcileMachineDeployment) scaleMachineSet(ctx context.Context, ms *cl if *(ms.Spec.Replicas) == newScale { return false, nil } - - var scalingOperation string - if *(ms.Spec.Replicas) < newScale { - scalingOperation = "up" - } else { - scalingOperation = "down" - } - - return r.scaleMachineSetOperation(ctx, ms, newScale, deployment, scalingOperation) + return r.scaleMachineSetOperation(ctx, ms, newScale, deployment) } -func (r *ReconcileMachineDeployment) scaleMachineSetOperation(ctx context.Context, ms *clusterv1alpha1.MachineSet, newScale int32, deployment *clusterv1alpha1.MachineDeployment, scaleOperation string) (bool, error) { +func (r *ReconcileMachineDeployment) scaleMachineSetOperation(ctx context.Context, ms *clusterv1alpha1.MachineSet, newScale int32, deployment *clusterv1alpha1.MachineDeployment) (bool, error) { if ms.Spec.Replicas == nil { return false, errors.Errorf("spec replicas for machine set %v is nil, this is unexpected", ms.Name) } @@ -421,14 +398,14 @@ func (r *ReconcileMachineDeployment) scaleMachineSetOperation(ctx context.Contex // cleanupDeployment is responsible for cleaning up a deployment i.e. retains all but the latest N old machine sets // where N=d.Spec.RevisionHistoryLimit. Old machine sets are older versions of the machinetemplate of a deployment kept // around by default 1) for historical reasons and 2) for the ability to rollback a deployment. 
-func (r *ReconcileMachineDeployment) cleanupDeployment(ctx context.Context, oldMSs []*clusterv1alpha1.MachineSet, deployment *clusterv1alpha1.MachineDeployment) error { +func (r *ReconcileMachineDeployment) cleanupDeployment(ctx context.Context, log *zap.SugaredLogger, oldMSs []*clusterv1alpha1.MachineSet, deployment *clusterv1alpha1.MachineDeployment) error { if deployment.Spec.RevisionHistoryLimit == nil { return nil } // Avoid deleting machine set with deletion timestamp set aliveFilter := func(ms *clusterv1alpha1.MachineSet) bool { - return ms != nil && ms.ObjectMeta.DeletionTimestamp == nil + return ms != nil && ms.DeletionTimestamp == nil } cleanableMSes := dutil.FilterMachineSets(oldMSs, aliveFilter) @@ -439,12 +416,12 @@ func (r *ReconcileMachineDeployment) cleanupDeployment(ctx context.Context, oldM } sort.Sort(dutil.MachineSetsByCreationTimestamp(cleanableMSes)) - klog.V(4).Infof("Looking to cleanup old machine sets for deployment %q", deployment.Name) + log.Debug("Looking to cleanup old MachineSets for MachineDeployment") for i := int32(0); i < diff; i++ { ms := cleanableMSes[i] if ms.Spec.Replicas == nil { - return errors.Errorf("spec replicas for machine set %v is nil, this is unexpected", ms.Name) + return errors.Errorf("spec replicas for MachineSets %v is nil, this is unexpected", ms.Name) } // Avoid delete machine set with non-zero replica counts @@ -452,7 +429,7 @@ func (r *ReconcileMachineDeployment) cleanupDeployment(ctx context.Context, oldM continue } - klog.V(4).Infof("Trying to cleanup machine set %q for deployment %q", ms.Name, deployment.Name) + log.Debugw("Trying to cleanup MachineSet for MachineDeployment", "machineset", ctrlruntimeclient.ObjectKeyFromObject(ms)) if err := r.Delete(ctx, ms); err != nil && !apierrors.IsNotFound(err) { // Return error instead of aggregating and continuing DELETEs on the theory // that we may be overloading the api server. 
@@ -468,7 +445,7 @@ func (r *ReconcileMachineDeployment) updateMachineDeployment(ctx context.Context } // We have this as standalone variant to be able to use it from the tests. -func updateMachineDeployment(ctx context.Context, c client.Client, d *clusterv1alpha1.MachineDeployment, modify func(*clusterv1alpha1.MachineDeployment)) error { +func updateMachineDeployment(ctx context.Context, c ctrlruntimeclient.Client, d *clusterv1alpha1.MachineDeployment, modify func(*clusterv1alpha1.MachineDeployment)) error { dCopy := d.DeepCopy() modify(dCopy) if equality.Semantic.DeepEqual(dCopy, d) { diff --git a/pkg/controller/machineset/machineset_controller.go b/pkg/controller/machineset/controller.go similarity index 64% rename from pkg/controller/machineset/machineset_controller.go rename to pkg/controller/machineset/controller.go index e43fe6def..9db8acbb2 100644 --- a/pkg/controller/machineset/machineset_controller.go +++ b/pkg/controller/machineset/controller.go @@ -23,9 +23,12 @@ import ( "sync" "time" + "github.com/go-logr/logr" + "github.com/go-logr/zapr" "github.com/pkg/errors" + "go.uber.org/zap" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -34,13 +37,13 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/record" - "k8s.io/klog" - "sigs.k8s.io/controller-runtime/pkg/client" + ctrlruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/builder" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" ) // controllerName is the name of this controller. 
@@ -60,54 +63,48 @@ var ( // Add creates a new MachineSet Controller and adds it to the Manager with default RBAC. // The Manager will set fields on the Controller and Start it when the Manager is Started. -func Add(mgr manager.Manager) error { - r := newReconciler(mgr) - return add(mgr, r, r.MachineToMachineSets) +func Add(mgr manager.Manager, log *zap.SugaredLogger) error { + r := newReconciler(mgr, log) + return add(mgr, r, r.MachineToMachineSets()) } // newReconciler returns a new reconcile.Reconciler. -func newReconciler(mgr manager.Manager) *ReconcileMachineSet { - return &ReconcileMachineSet{Client: mgr.GetClient(), scheme: mgr.GetScheme(), recorder: mgr.GetEventRecorderFor(controllerName)} +func newReconciler(mgr manager.Manager, log *zap.SugaredLogger) *ReconcileMachineSet { + return &ReconcileMachineSet{ + Client: mgr.GetClient(), + scheme: mgr.GetScheme(), + log: log.Named(controllerName), + recorder: mgr.GetEventRecorderFor(controllerName), + } } // add adds a new Controller to mgr with r as the reconcile.Reconciler. func add(mgr manager.Manager, r reconcile.Reconciler, mapFn handler.MapFunc) error { - // Create a new controller. - c, err := controller.New(controllerName, mgr, controller.Options{Reconciler: r}) - if err != nil { - return err - } - - // Watch for changes to MachineSet. - err = c.Watch( - &source.Kind{Type: &clusterv1alpha1.MachineSet{}}, - &handler.EnqueueRequestForObject{}, - ) - if err != nil { - return err - } - - // Watch for changes to Machines and reconcile the owner MachineSet. - err = c.Watch( - &source.Kind{Type: &clusterv1alpha1.Machine{}}, - &handler.EnqueueRequestForOwner{IsController: true, OwnerType: &clusterv1alpha1.MachineSet{}}, - ) - if err != nil { - return err - } - - // Watch for changes to Machines using a mapping function to MachineSets. - // This watcher is required for use cases like adoption. In case a Machine doesn't have - // a controller reference, it'll look for potential matching MachineSet to reconcile. 
- return c.Watch( - &source.Kind{Type: &clusterv1alpha1.Machine{}}, - handler.EnqueueRequestsFromMapFunc(mapFn), - ) + _, err := builder.ControllerManagedBy(mgr). + Named(controllerName). + WithOptions(controller.Options{ + LogConstructor: func(*reconcile.Request) logr.Logger { + // we log ourselves + return zapr.NewLogger(zap.NewNop()) + }, + }). + // Watch for changes to MachineSet. + For(&clusterv1alpha1.MachineSet{}). + // Watch for changes to Machines and reconcile the owner MachineSet. + Owns(&clusterv1alpha1.Machine{}). + // Watch for changes to Machines using a mapping function to MachineSets. + // This watcher is required for use cases like adoption. In case a Machine doesn't have + // a controller reference, it'll look for potential matching MachineSet to reconcile. + Watches(&clusterv1alpha1.Machine{}, handler.EnqueueRequestsFromMapFunc(mapFn)). + Build(r) + + return err } // ReconcileMachineSet reconciles a MachineSet object. type ReconcileMachineSet struct { - client.Client + ctrlruntimeclient.Client + log *zap.SugaredLogger scheme *runtime.Scheme recorder record.EventRecorder } @@ -118,6 +115,9 @@ type ReconcileMachineSet struct { // +kubebuilder:rbac:groups=cluster.k8s.io,resources=machinesets;machinesets/status,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=cluster.k8s.io,resources=machines,verbs=get;list;watch;create;update;patch;delete func (r *ReconcileMachineSet) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { + log := r.log.With("machineset", request.NamespacedName) + log.Debug("Reconciling") + // Fetch the MachineSet instance machineSet := &clusterv1alpha1.MachineSet{} if err := r.Get(ctx, request.NamespacedName, machineSet); err != nil { @@ -127,6 +127,7 @@ func (r *ReconcileMachineSet) Reconcile(ctx context.Context, request reconcile.R return reconcile.Result{}, nil } // Error reading the object - requeue the request. 
+ log.Errorw("Failed to get MachineSet", zap.Error(err)) return reconcile.Result{}, err } @@ -136,19 +137,19 @@ func (r *ReconcileMachineSet) Reconcile(ctx context.Context, request reconcile.R return reconcile.Result{}, nil } - result, err := r.reconcile(ctx, machineSet) + result, err := r.reconcile(ctx, log, machineSet) if err != nil { - klog.Errorf("Failed to reconcile MachineSet %q: %v", request.NamespacedName, err) + log.Errorw("Reconciling failed", zap.Error(err)) r.recorder.Eventf(machineSet, corev1.EventTypeWarning, "ReconcileError", "%v", err) } return result, err } -func (r *ReconcileMachineSet) reconcile(ctx context.Context, machineSet *clusterv1alpha1.MachineSet) (reconcile.Result, error) { - klog.V(4).Infof("Reconcile machineset %v", machineSet.Name) +func (r *ReconcileMachineSet) reconcile(ctx context.Context, log *zap.SugaredLogger, machineSet *clusterv1alpha1.MachineSet) (reconcile.Result, error) { + log.Debug("Reconcile MachineSet") allMachines := &clusterv1alpha1.MachineList{} - if err := r.Client.List(ctx, allMachines, client.InNamespace(machineSet.Namespace)); err != nil { + if err := r.List(ctx, allMachines, ctrlruntimeclient.InNamespace(machineSet.Namespace)); err != nil { return reconcile.Result{}, errors.Wrap(err, "failed to list machines") } @@ -165,10 +166,9 @@ func (r *ReconcileMachineSet) reconcile(ctx context.Context, machineSet *cluster // Add foregroundDeletion finalizer if !contains(machineSet.Finalizers, metav1.FinalizerDeleteDependents) { - machineSet.Finalizers = append(machineSet.ObjectMeta.Finalizers, metav1.FinalizerDeleteDependents) + machineSet.Finalizers = append(machineSet.Finalizers, metav1.FinalizerDeleteDependents) - if err := r.Client.Update(ctx, machineSet); err != nil { - klog.Infof("Failed to add finalizers to MachineSet %q: %v", machineSet.Name, err) + if err := r.Update(ctx, machineSet); err != nil { return reconcile.Result{}, err } @@ -177,7 +177,7 @@ func (r *ReconcileMachineSet) reconcile(ctx context.Context, 
machineSet *cluster } // Return early if the MachineSet is deleted. - if !machineSet.ObjectMeta.DeletionTimestamp.IsZero() { + if !machineSet.DeletionTimestamp.IsZero() { return reconcile.Result{}, nil } @@ -185,14 +185,16 @@ func (r *ReconcileMachineSet) reconcile(ctx context.Context, machineSet *cluster filteredMachines := make([]*clusterv1alpha1.Machine, 0, len(allMachines.Items)) for idx := range allMachines.Items { machine := &allMachines.Items[idx] - if shouldExcludeMachine(machineSet, machine) { + machineLog := log.With("machine", ctrlruntimeclient.ObjectKeyFromObject(machine)) + + if shouldExcludeMachine(machineLog, machineSet, machine) { continue } // Attempt to adopt machine if it meets previous conditions and it has no controller references. if metav1.GetControllerOf(machine) == nil { if err := r.adoptOrphan(ctx, machineSet, machine); err != nil { - klog.Warningf("Failed to adopt MachineSet %q into MachineSet %q: %v", machine.Name, machineSet.Name, err) + machineLog.Errorw("Failed to adopt Machine into MachineSet", zap.Error(err)) continue } } @@ -200,13 +202,13 @@ func (r *ReconcileMachineSet) reconcile(ctx context.Context, machineSet *cluster filteredMachines = append(filteredMachines, machine) } - syncErr := r.syncReplicas(ctx, machineSet, filteredMachines) + syncErr := r.syncReplicas(ctx, log, machineSet, filteredMachines) ms := machineSet.DeepCopy() - newStatus := r.calculateStatus(ctx, ms, filteredMachines) + newStatus := r.calculateStatus(ctx, log, ms, filteredMachines) // Always updates status as machines come up or die. - updatedMS, err := updateMachineSetStatus(ctx, r.Client, machineSet, newStatus) + updatedMS, err := updateMachineSetStatus(ctx, log, r.Client, machineSet, newStatus) if err != nil { if syncErr != nil { return reconcile.Result{}, errors.Wrapf(err, "failed to sync machines: %v. 
failed to update machine set status", syncErr) @@ -239,27 +241,26 @@ func (r *ReconcileMachineSet) reconcile(ctx context.Context, machineSet *cluster } // syncReplicas scales Machine resources up or down. -func (r *ReconcileMachineSet) syncReplicas(ctx context.Context, ms *clusterv1alpha1.MachineSet, machines []*clusterv1alpha1.Machine) error { +func (r *ReconcileMachineSet) syncReplicas(ctx context.Context, log *zap.SugaredLogger, ms *clusterv1alpha1.MachineSet, machines []*clusterv1alpha1.Machine) error { if ms.Spec.Replicas == nil { return errors.Errorf("the Replicas field in Spec for machineset %v is nil, this should not be allowed", ms.Name) } diff := len(machines) - int(*(ms.Spec.Replicas)) + replicasLog := log.With("spec", *(ms.Spec.Replicas), "current", len(machines)) if diff < 0 { diff *= -1 - klog.Infof("Too few replicas for %v %s/%s, need %d, creating %d", - controllerKind, ms.Namespace, ms.Name, *(ms.Spec.Replicas), diff) + replicasLog.Infow("Too few replicas, creating more", "diff", diff) var machineList []*clusterv1alpha1.Machine var errstrings []string for i := 0; i < diff; i++ { - klog.Infof("Creating machine %d of %d, ( spec.replicas(%d) > currentMachineCount(%d) )", - i+1, diff, *(ms.Spec.Replicas), len(machines)) + replicasLog.Infow("Creating new machine", "index", i+1) machine := r.createMachine(ms) - if err := r.Client.Create(ctx, machine); err != nil { - klog.Errorf("Unable to create Machine %q: %v", machine.Name, err) + if err := r.Create(ctx, machine); err != nil { + log.Errorw("Failed to create Machine", "machine", ctrlruntimeclient.ObjectKeyFromObject(machine), zap.Error(err)) errstrings = append(errstrings, err.Error()) continue } @@ -271,16 +272,15 @@ func (r *ReconcileMachineSet) syncReplicas(ctx context.Context, ms *clusterv1alp return errors.New(strings.Join(errstrings, "; ")) } - return r.waitForMachineCreation(ctx, machineList) + return r.waitForMachineCreation(ctx, log, machineList) } else if diff > 0 { - klog.Infof("Too many 
replicas for %v %s/%s, need %d, deleting %d", - controllerKind, ms.Namespace, ms.Name, *(ms.Spec.Replicas), diff) + replicasLog.Infow("Too many replicas, deleting extras", "diff", diff, "deletepolicy", ms.Spec.DeletePolicy) deletePriorityFunc, err := getDeletePriorityFunc(ms) if err != nil { return err } - klog.Infof("Found %s delete policy", ms.Spec.DeletePolicy) + // Choose which Machines to delete. machinesToDelete := getMachinesToDeletePrioritized(machines, diff, deletePriorityFunc) @@ -291,9 +291,9 @@ func (r *ReconcileMachineSet) syncReplicas(ctx context.Context, ms *clusterv1alp for _, machine := range machinesToDelete { go func(targetMachine *clusterv1alpha1.Machine) { defer wg.Done() - err := r.Client.Delete(ctx, targetMachine) + err := r.Delete(ctx, targetMachine) if err != nil { - klog.Errorf("Unable to delete Machine %s: %v", targetMachine.Name, err) + log.Errorw("Failed to delete Machine", "machine", ctrlruntimeclient.ObjectKeyFromObject(targetMachine), zap.Error(err)) errCh <- err } }(machine) @@ -327,25 +327,25 @@ func (r *ReconcileMachineSet) createMachine(machineSet *clusterv1alpha1.MachineS ObjectMeta: machineSet.Spec.Template.ObjectMeta, Spec: machineSet.Spec.Template.Spec, } - machine.ObjectMeta.GenerateName = fmt.Sprintf("%s-", machineSet.Name) - machine.ObjectMeta.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(machineSet, controllerKind)} + machine.GenerateName = fmt.Sprintf("%s-", machineSet.Name) + machine.OwnerReferences = []metav1.OwnerReference{*metav1.NewControllerRef(machineSet, controllerKind)} machine.Namespace = machineSet.Namespace return machine } // shouldExcludeMachine returns true if the machine should be filtered out, false otherwise. -func shouldExcludeMachine(machineSet *clusterv1alpha1.MachineSet, machine *clusterv1alpha1.Machine) bool { +func shouldExcludeMachine(machineLog *zap.SugaredLogger, machineSet *clusterv1alpha1.MachineSet, machine *clusterv1alpha1.Machine) bool { // Ignore inactive machines. 
if metav1.GetControllerOf(machine) != nil && !metav1.IsControlledBy(machine, machineSet) { - klog.V(4).Infof("%s not controlled by %v", machine.Name, machineSet.Name) + machineLog.Debug("Machine not controlled by MachineSet") return true } - if machine.ObjectMeta.DeletionTimestamp != nil { + if machine.DeletionTimestamp != nil { return true } - if !hasMatchingLabels(machineSet, machine) { + if !hasMatchingLabels(machineLog, machineSet, machine) { return true } @@ -356,19 +356,19 @@ func shouldExcludeMachine(machineSet *clusterv1alpha1.MachineSet, machine *clust func (r *ReconcileMachineSet) adoptOrphan(ctx context.Context, machineSet *clusterv1alpha1.MachineSet, machine *clusterv1alpha1.Machine) error { newRef := *metav1.NewControllerRef(machineSet, controllerKind) machine.OwnerReferences = append(machine.OwnerReferences, newRef) - return r.Client.Update(ctx, machine) + return r.Update(ctx, machine) } -func (r *ReconcileMachineSet) waitForMachineCreation(ctx context.Context, machineList []*clusterv1alpha1.Machine) error { +func (r *ReconcileMachineSet) waitForMachineCreation(ctx context.Context, log *zap.SugaredLogger, machineList []*clusterv1alpha1.Machine) error { for _, machine := range machineList { - pollErr := wait.PollImmediate(stateConfirmationInterval, stateConfirmationTimeout, func() (bool, error) { - key := client.ObjectKey{Namespace: machine.Namespace, Name: machine.Name} + pollErr := wait.PollUntilContextTimeout(ctx, stateConfirmationInterval, stateConfirmationTimeout, false, func(ctx context.Context) (bool, error) { + key := ctrlruntimeclient.ObjectKey{Namespace: machine.Namespace, Name: machine.Name} - if err := r.Client.Get(ctx, key, &clusterv1alpha1.Machine{}); err != nil { + if err := r.Get(ctx, key, &clusterv1alpha1.Machine{}); err != nil { if apierrors.IsNotFound(err) { return false, nil } - klog.Error(err) + log.Error(err) return false, err } @@ -376,7 +376,6 @@ func (r *ReconcileMachineSet) waitForMachineCreation(ctx context.Context, machin }) 
if pollErr != nil { - klog.Error(pollErr) return errors.Wrap(pollErr, "failed waiting for machine object to be created") } } @@ -386,11 +385,11 @@ func (r *ReconcileMachineSet) waitForMachineCreation(ctx context.Context, machin func (r *ReconcileMachineSet) waitForMachineDeletion(ctx context.Context, machineList []*clusterv1alpha1.Machine) error { for _, machine := range machineList { - pollErr := wait.PollImmediate(stateConfirmationInterval, stateConfirmationTimeout, func() (bool, error) { + pollErr := wait.PollUntilContextTimeout(ctx, stateConfirmationInterval, stateConfirmationTimeout, false, func(ctx context.Context) (bool, error) { m := &clusterv1alpha1.Machine{} - key := client.ObjectKey{Namespace: machine.Namespace, Name: machine.Name} + key := ctrlruntimeclient.ObjectKey{Namespace: machine.Namespace, Name: machine.Name} - err := r.Client.Get(ctx, key, m) + err := r.Get(ctx, key, m) if apierrors.IsNotFound(err) || !m.DeletionTimestamp.IsZero() { return true, nil } @@ -399,48 +398,50 @@ func (r *ReconcileMachineSet) waitForMachineDeletion(ctx context.Context, machin }) if pollErr != nil { - klog.Error(pollErr) return errors.Wrap(pollErr, "failed waiting for machine object to be deleted") } } return nil } -// MachineToMachineSets is a handler.ToRequestsFunc to be used to enqeue requests for reconciliation +// MachineToMachineSets is a handler.ToRequestsFunc to be used to enqueue requests for reconciliation // for MachineSets that might adopt an orphaned Machine. 
-func (r *ReconcileMachineSet) MachineToMachineSets(o client.Object) []reconcile.Request { - result := []reconcile.Request{} - ctx := context.Background() - - m := &clusterv1alpha1.Machine{} - key := client.ObjectKey{Namespace: o.GetNamespace(), Name: o.GetName()} - if err := r.Client.Get(ctx, key, m); err != nil { - if !apierrors.IsNotFound(err) { - klog.Errorf("Unable to retrieve Machine %q for possible MachineSet adoption: %v", key, err) +func (r *ReconcileMachineSet) MachineToMachineSets() handler.MapFunc { + return func(ctx context.Context, o ctrlruntimeclient.Object) []ctrlruntime.Request { + result := []reconcile.Request{} + + m := &clusterv1alpha1.Machine{} + key := ctrlruntimeclient.ObjectKey{Namespace: o.GetNamespace(), Name: o.GetName()} + machineLog := r.log.With("machine", key) + + if err := r.Get(ctx, key, m); err != nil { + if !apierrors.IsNotFound(err) { + machineLog.Errorw("Failed to retrieve Machine for possible MachineSet adoption", zap.Error(err)) + } + return nil + } + + // Check if the controller reference is already set and + // return an empty result when one is found. + for _, ref := range m.OwnerReferences { + if ref.Controller != nil && *ref.Controller { + return result + } } - return nil - } - // Check if the controller reference is already set and - // return an empty result when one is found. 
- for _, ref := range m.ObjectMeta.OwnerReferences { - if ref.Controller != nil && *ref.Controller { - return result + mss := r.getMachineSetsForMachine(ctx, machineLog, m) + if len(mss) == 0 { + machineLog.Debug("Found no MachineSet for Machine") + return nil } - } - mss := r.getMachineSetsForMachine(ctx, m) - if len(mss) == 0 { - klog.V(4).Infof("Found no MachineSet for Machine %q", m.Name) - return nil - } + for _, ms := range mss { + name := ctrlruntimeclient.ObjectKey{Namespace: ms.Namespace, Name: ms.Name} + result = append(result, reconcile.Request{NamespacedName: name}) + } - for _, ms := range mss { - name := client.ObjectKey{Namespace: ms.Namespace, Name: ms.Name} - result = append(result, reconcile.Request{NamespacedName: name}) + return result } - - return result } func contains(list []string, strToSearch string) bool { diff --git a/pkg/controller/machineset/delete_policy.go b/pkg/controller/machineset/delete_policy.go index 44fee2cd3..77ed4e6d8 100644 --- a/pkg/controller/machineset/delete_policy.go +++ b/pkg/controller/machineset/delete_policy.go @@ -22,14 +22,14 @@ import ( "github.com/pkg/errors" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) type ( deletePriority float64 - deletePriorityFunc func(machine *v1alpha1.Machine) deletePriority + deletePriorityFunc func(machine *clusterv1alpha1.Machine) deletePriority ) const ( @@ -47,31 +47,31 @@ const ( ) // maps the creation timestamp onto the 0-100 priority range. 
-func oldestDeletePriority(machine *v1alpha1.Machine) deletePriority { +func oldestDeletePriority(machine *clusterv1alpha1.Machine) deletePriority { if machine.DeletionTimestamp != nil && !machine.DeletionTimestamp.IsZero() { return mustDelete } - if machine.ObjectMeta.Annotations != nil && machine.ObjectMeta.Annotations[DeleteNodeAnnotation] != "" { + if machine.Annotations != nil && machine.Annotations[DeleteNodeAnnotation] != "" { return mustDelete } if machine.Status.ErrorReason != nil || machine.Status.ErrorMessage != nil { return mustDelete } - if machine.ObjectMeta.CreationTimestamp.Time.IsZero() { + if machine.CreationTimestamp.Time.IsZero() { return mustNotDelete } - d := metav1.Now().Sub(machine.ObjectMeta.CreationTimestamp.Time) + d := metav1.Now().Sub(machine.CreationTimestamp.Time) if d.Seconds() < 0 { return mustNotDelete } return deletePriority(float64(mustDelete) * (1.0 - math.Exp(-d.Seconds()/secondsPerTenDays))) } -func newestDeletePriority(machine *v1alpha1.Machine) deletePriority { +func newestDeletePriority(machine *clusterv1alpha1.Machine) deletePriority { if machine.DeletionTimestamp != nil && !machine.DeletionTimestamp.IsZero() { return mustDelete } - if machine.ObjectMeta.Annotations != nil && machine.ObjectMeta.Annotations[DeleteNodeAnnotation] != "" { + if machine.Annotations != nil && machine.Annotations[DeleteNodeAnnotation] != "" { return mustDelete } if machine.Status.ErrorReason != nil || machine.Status.ErrorMessage != nil { @@ -80,11 +80,11 @@ func newestDeletePriority(machine *v1alpha1.Machine) deletePriority { return mustDelete - oldestDeletePriority(machine) } -func randomDeletePolicy(machine *v1alpha1.Machine) deletePriority { +func randomDeletePolicy(machine *clusterv1alpha1.Machine) deletePriority { if machine.DeletionTimestamp != nil && !machine.DeletionTimestamp.IsZero() { return mustDelete } - if machine.ObjectMeta.Annotations != nil && machine.ObjectMeta.Annotations[DeleteNodeAnnotation] != "" { + if machine.Annotations != 
nil && machine.Annotations[DeleteNodeAnnotation] != "" { return betterDelete } if machine.Status.ErrorReason != nil || machine.Status.ErrorMessage != nil { @@ -94,7 +94,7 @@ func randomDeletePolicy(machine *v1alpha1.Machine) deletePriority { } type sortableMachines struct { - machines []*v1alpha1.Machine + machines []*clusterv1alpha1.Machine priority deletePriorityFunc } @@ -106,11 +106,11 @@ func (m sortableMachines) Less(i, j int) bool { return m.priority(m.machines[j]) < m.priority(m.machines[i]) // high to low } -func getMachinesToDeletePrioritized(filteredMachines []*v1alpha1.Machine, diff int, fun deletePriorityFunc) []*v1alpha1.Machine { +func getMachinesToDeletePrioritized(filteredMachines []*clusterv1alpha1.Machine, diff int, fun deletePriorityFunc) []*clusterv1alpha1.Machine { if diff >= len(filteredMachines) { return filteredMachines } else if diff <= 0 { - return []*v1alpha1.Machine{} + return []*clusterv1alpha1.Machine{} } sortable := sortableMachines{ @@ -122,14 +122,14 @@ func getMachinesToDeletePrioritized(filteredMachines []*v1alpha1.Machine, diff i return sortable.machines[:diff] } -func getDeletePriorityFunc(ms *v1alpha1.MachineSet) (deletePriorityFunc, error) { +func getDeletePriorityFunc(ms *clusterv1alpha1.MachineSet) (deletePriorityFunc, error) { // Map the Spec.DeletePolicy value to the appropriate delete priority function - switch msdp := v1alpha1.MachineSetDeletePolicy(ms.Spec.DeletePolicy); msdp { - case v1alpha1.RandomMachineSetDeletePolicy: + switch msdp := clusterv1alpha1.MachineSetDeletePolicy(ms.Spec.DeletePolicy); msdp { + case clusterv1alpha1.RandomMachineSetDeletePolicy: return randomDeletePolicy, nil - case v1alpha1.NewestMachineSetDeletePolicy: + case clusterv1alpha1.NewestMachineSetDeletePolicy: return newestDeletePriority, nil - case v1alpha1.OldestMachineSetDeletePolicy: + case clusterv1alpha1.OldestMachineSetDeletePolicy: return oldestDeletePriority, nil case "": return randomDeletePolicy, nil diff --git 
a/pkg/controller/machineset/machine.go b/pkg/controller/machineset/machine.go index c64167c66..0215a273f 100644 --- a/pkg/controller/machineset/machine.go +++ b/pkg/controller/machineset/machine.go @@ -19,35 +19,36 @@ package machineset import ( "context" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" + "go.uber.org/zap" + + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/klog" - "sigs.k8s.io/controller-runtime/pkg/client" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) -func (c *ReconcileMachineSet) getMachineSetsForMachine(ctx context.Context, m *v1alpha1.Machine) []*v1alpha1.MachineSet { +func (c *ReconcileMachineSet) getMachineSetsForMachine(ctx context.Context, machineLog *zap.SugaredLogger, m *clusterv1alpha1.Machine) []*clusterv1alpha1.MachineSet { if len(m.Labels) == 0 { - klog.Warningf("No machine sets found for Machine %v because it has no labels", m.Name) + machineLog.Infow("No MachineSets found for Machine because it has no labels") return nil } - msList := &v1alpha1.MachineSetList{} - listOptions := &client.ListOptions{ + msList := &clusterv1alpha1.MachineSetList{} + listOptions := &ctrlruntimeclient.ListOptions{ Namespace: m.Namespace, } - err := c.Client.List(ctx, msList, listOptions) + err := c.List(ctx, msList, listOptions) if err != nil { - klog.Errorf("Failed to list machine sets, %v", err) + machineLog.Errorw("Failed to list MachineSets", zap.Error(err)) return nil } - var mss []*v1alpha1.MachineSet + var mss []*clusterv1alpha1.MachineSet for idx := range msList.Items { ms := &msList.Items[idx] - if hasMatchingLabels(ms, m) { + if hasMatchingLabels(machineLog, ms, m) { mss = append(mss, ms) } } @@ -55,21 +56,21 @@ func (c *ReconcileMachineSet) getMachineSetsForMachine(ctx context.Context, m *v return mss } -func hasMatchingLabels(machineSet *v1alpha1.MachineSet, machine *v1alpha1.Machine) bool { 
+func hasMatchingLabels(machineLog *zap.SugaredLogger, machineSet *clusterv1alpha1.MachineSet, machine *clusterv1alpha1.Machine) bool { selector, err := metav1.LabelSelectorAsSelector(&machineSet.Spec.Selector) if err != nil { - klog.Warningf("unable to convert selector: %v", err) + machineLog.Errorw("Failed to convert selector", zap.Error(err)) return false } // If a deployment with a nil or empty selector creeps in, it should match nothing, not everything. if selector.Empty() { - klog.V(2).Infof("%v machineset has empty selector", machineSet.Name) + machineLog.Info("MachineSet has empty selector") return false } if !selector.Matches(labels.Set(machine.Labels)) { - klog.V(4).Infof("%v machine has mismatch labels", machine.Name) + machineLog.Debug("Machine has mismatch labels") return false } diff --git a/pkg/controller/machineset/status.go b/pkg/controller/machineset/status.go index b8829174e..981cf5875 100644 --- a/pkg/controller/machineset/status.go +++ b/pkg/controller/machineset/status.go @@ -18,18 +18,17 @@ package machineset import ( "context" - "fmt" "time" "github.com/pkg/errors" + "go.uber.org/zap" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" - "k8s.io/klog" - "sigs.k8s.io/controller-runtime/pkg/client" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) const ( @@ -37,7 +36,7 @@ const ( statusUpdateRetries = 1 ) -func (c *ReconcileMachineSet) calculateStatus(ctx context.Context, ms *v1alpha1.MachineSet, filteredMachines []*v1alpha1.Machine) v1alpha1.MachineSetStatus { +func (c *ReconcileMachineSet) calculateStatus(ctx context.Context, log *zap.SugaredLogger, ms *clusterv1alpha1.MachineSet, filteredMachines []*clusterv1alpha1.Machine) clusterv1alpha1.MachineSetStatus { newStatus := ms.Status // Count the number of machines that have labels 
matching the labels of the machine // template of the replica set, the matching machines may have more @@ -54,7 +53,7 @@ func (c *ReconcileMachineSet) calculateStatus(ctx context.Context, ms *v1alpha1. } node, err := c.getMachineNode(ctx, machine) if err != nil { - klog.V(4).Infof("Unable to get node for machine %v, %v", machine.Name, err) + log.Debugw("Failed to get node for machine", "machine", ctrlruntimeclient.ObjectKeyFromObject(machine), zap.Error(err)) continue } if isNodeReady(node) { @@ -73,7 +72,7 @@ func (c *ReconcileMachineSet) calculateStatus(ctx context.Context, ms *v1alpha1. } // updateMachineSetStatus attempts to update the Status.Replicas of the given MachineSet, with a single GET/PUT retry. -func updateMachineSetStatus(ctx context.Context, c client.Client, ms *v1alpha1.MachineSet, newStatus v1alpha1.MachineSetStatus) (*v1alpha1.MachineSet, error) { +func updateMachineSetStatus(ctx context.Context, log *zap.SugaredLogger, c ctrlruntimeclient.Client, ms *clusterv1alpha1.MachineSet, newStatus clusterv1alpha1.MachineSetStatus) (*clusterv1alpha1.MachineSet, error) { // This is the steady state. It happens when the MachineSet doesn't have any expectations, since // we do a periodic relist every 30s. If the generations differ but the replicas are // the same, a caller might've resized to the same replica count. 
@@ -97,12 +96,20 @@ func updateMachineSetStatus(ctx context.Context, c client.Client, ms *v1alpha1.M if ms.Spec.Replicas != nil { replicas = *ms.Spec.Replicas } - klog.V(4).Infof(fmt.Sprintf("Updating status for %v: %s/%s, ", ms.Kind, ms.Namespace, ms.Name) + - fmt.Sprintf("replicas %d->%d (need %d), ", ms.Status.Replicas, newStatus.Replicas, replicas) + - fmt.Sprintf("fullyLabeledReplicas %d->%d, ", ms.Status.FullyLabeledReplicas, newStatus.FullyLabeledReplicas) + - fmt.Sprintf("readyReplicas %d->%d, ", ms.Status.ReadyReplicas, newStatus.ReadyReplicas) + - fmt.Sprintf("availableReplicas %d->%d, ", ms.Status.AvailableReplicas, newStatus.AvailableReplicas) + - fmt.Sprintf("sequence No: %v->%v", ms.Status.ObservedGeneration, newStatus.ObservedGeneration)) + + log.Debugw("Updating status", + "specreplicas", replicas, + "oldreplicas", ms.Status.Replicas, + "newreplicas", newStatus.Replicas, + "oldlabeledreplicas", ms.Status.FullyLabeledReplicas, + "newlabeledreplicas", newStatus.FullyLabeledReplicas, + "oldreadyreplicas", ms.Status.ReadyReplicas, + "newreadyreplicas", newStatus.ReadyReplicas, + "oldavailablereplicas", ms.Status.AvailableReplicas, + "newavailablereplicas", newStatus.AvailableReplicas, + "oldobservedgeneration", ms.Status.ObservedGeneration, + "newobservedgeneration", newStatus.ObservedGeneration, + ) ms.Status = newStatus updateErr = c.Status().Update(ctx, ms) @@ -114,7 +121,7 @@ func updateMachineSetStatus(ctx context.Context, c client.Client, ms *v1alpha1.M break } // Update the MachineSet with the latest resource version for the next poll - if getErr = c.Get(ctx, client.ObjectKey{Namespace: ms.Namespace, Name: ms.Name}, ms); getErr != nil { + if getErr = c.Get(ctx, ctrlruntimeclient.ObjectKey{Namespace: ms.Namespace, Name: ms.Name}, ms); getErr != nil { // If the GET fails we can't trust status.Replicas anymore. This error // is bound to be more interesting than the update failure. 
return nil, getErr @@ -124,14 +131,14 @@ func updateMachineSetStatus(ctx context.Context, c client.Client, ms *v1alpha1.M return nil, updateErr } -func (c *ReconcileMachineSet) getMachineNode(ctx context.Context, machine *v1alpha1.Machine) (*corev1.Node, error) { +func (c *ReconcileMachineSet) getMachineNode(ctx context.Context, machine *clusterv1alpha1.Machine) (*corev1.Node, error) { nodeRef := machine.Status.NodeRef if nodeRef == nil { return nil, errors.New("machine has no node ref") } node := &corev1.Node{} - err := c.Client.Get(ctx, client.ObjectKey{Name: nodeRef.Name}, node) + err := c.Get(ctx, ctrlruntimeclient.ObjectKey{Name: nodeRef.Name}, node) return node, err } diff --git a/pkg/controller/nodecsrapprover/node_csr_approver.go b/pkg/controller/nodecsrapprover/controller.go similarity index 75% rename from pkg/controller/nodecsrapprover/node_csr_approver.go rename to pkg/controller/nodecsrapprover/controller.go index 5997a105e..0ec752af5 100644 --- a/pkg/controller/nodecsrapprover/node_csr_approver.go +++ b/pkg/controller/nodecsrapprover/controller.go @@ -23,26 +23,28 @@ import ( "fmt" "strings" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" + "github.com/go-logr/logr" + "github.com/go-logr/zapr" + "go.uber.org/zap" + + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" certificatesv1 "k8s.io/api/certificates/v1" corev1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" certificatesv1client "k8s.io/client-go/kubernetes/typed/certificates/v1" - "k8s.io/klog" - "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/builder" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/controller" - "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/manager" 
"sigs.k8s.io/controller-runtime/pkg/reconcile" - "sigs.k8s.io/controller-runtime/pkg/source" ) const ( // ControllerName is name of the NodeCSRApprover controller. - ControllerName = "node_csr_autoapprover" + ControllerName = "node-csr-approver-controller" nodeUser = "system:node" nodeUserPrefix = nodeUser + ":" @@ -60,52 +62,66 @@ var ( ) type reconciler struct { - client.Client + ctrlruntimeclient.Client + log *zap.SugaredLogger // Have to use the typed client because csr approval is a subresource // the dynamic client does not approve certClient certificatesv1client.CertificateSigningRequestInterface } -func Add(mgr manager.Manager) error { +func Add(mgr manager.Manager, log *zap.SugaredLogger) error { certClient, err := certificatesv1client.NewForConfig(mgr.GetConfig()) if err != nil { return fmt.Errorf("failed to create certificate client: %w", err) } - rec := &reconciler{Client: mgr.GetClient(), certClient: certClient.CertificateSigningRequests()} - watchType := &certificatesv1.CertificateSigningRequest{} - - cntrl, err := controller.New(ControllerName, mgr, controller.Options{Reconciler: rec}) - if err != nil { - return fmt.Errorf("failed to construct controller: %w", err) + rec := &reconciler{ + Client: mgr.GetClient(), + log: log.Named(ControllerName), + certClient: certClient.CertificateSigningRequests(), } - return cntrl.Watch(&source.Kind{Type: watchType}, &handler.EnqueueRequestForObject{}) + _, err = builder.ControllerManagedBy(mgr). + Named(ControllerName). + WithOptions(controller.Options{ + LogConstructor: func(*reconcile.Request) logr.Logger { + // we log ourselves + return zapr.NewLogger(zap.NewNop()) + }, + }). + For(&certificatesv1.CertificateSigningRequest{}). 
+ Build(rec) + + return err } func (r *reconciler) Reconcile(ctx context.Context, request reconcile.Request) (reconcile.Result, error) { - err := r.reconcile(ctx, request) - if err != nil { - klog.Errorf("Reconciliation of request %s failed: %v", request.NamespacedName.String(), err) - } - return reconcile.Result{}, err -} + log := r.log.With("csr", request.NamespacedName) + log.Debug("Reconciling") -func (r *reconciler) reconcile(ctx context.Context, request reconcile.Request) error { // Get the CSR object csr := &certificatesv1.CertificateSigningRequest{} if err := r.Get(ctx, request.NamespacedName, csr); err != nil { - if kerrors.IsNotFound(err) { - return nil + if apierrors.IsNotFound(err) { + return reconcile.Result{}, nil } - return err + log.Errorw("Failed to get CertificateSigningRequest", zap.Error(err)) + return reconcile.Result{}, err + } + + err := r.reconcile(ctx, log, csr) + if err != nil { + log.Errorw("Reconciling failed", zap.Error(err)) } - klog.V(4).Infof("Reconciling CSR %s", csr.ObjectMeta.Name) + return reconcile.Result{}, err +} + +func (r *reconciler) reconcile(ctx context.Context, log *zap.SugaredLogger, csr *certificatesv1.CertificateSigningRequest) error { // If CSR is approved, skip it for _, condition := range csr.Status.Conditions { if condition.Type == certificatesv1.CertificateApproved { - klog.V(4).Infof("CSR %s already approved, skipping reconciling", csr.ObjectMeta.Name) + log.Debug("CSR already approved, skipping reconciling") return nil } } @@ -113,7 +129,7 @@ func (r *reconciler) reconcile(ctx context.Context, request reconcile.Request) e // Validate the CSR object and get the node name nodeName, err := r.validateCSRObject(csr) if err != nil { - klog.V(4).Infof("Skipping reconciling CSR '%s' because CSR object is not valid: %v", csr.ObjectMeta.Name, err) + log.Debugw("Skipping reconciling CSR because object is invalid", zap.Error(err)) return nil } @@ -145,7 +161,8 @@ func (r *reconciler) reconcile(ctx context.Context, request 
reconcile.Request) e } // Approve CSR - klog.V(4).Infof("Approving CSR %s", csr.ObjectMeta.Name) + nodeLog := log.With("node", nodeName) + nodeLog.Debug("Approving CSR") approvalCondition := certificatesv1.CertificateSigningRequestCondition{ Type: certificatesv1.CertificateApproved, Reason: "machine-controller NodeCSRApprover controller approved node serving cert", @@ -157,7 +174,7 @@ func (r *reconciler) reconcile(ctx context.Context, request reconcile.Request) e return fmt.Errorf("failed to approve CSR %q: %w", csr.Name, err) } - klog.Infof("Successfully approved CSR %s", csr.ObjectMeta.Name) + nodeLog.Info("Successfully approved CSR") return nil } @@ -180,10 +197,7 @@ func (r *reconciler) validateCSRObject(csr *certificatesv1.CertificateSigningReq return "", fmt.Errorf("'%s' and/or '%s' are not in its groups", nodeGroup, authenticatedGroup) } - // Check are present usages matching allowed usages - if len(csr.Spec.Usages) != 3 { - return "", fmt.Errorf("there are no exactly three usages defined") - } + // Check that the present usages match the allowed usages for _, usage := range csr.Spec.Usages { if !isUsageInUsageList(usage, allowedUsages) { return "", fmt.Errorf("usage %v is not in the list of allowed usages (%v)", usage, allowedUsages) @@ -195,7 +209,7 @@ func (r *reconciler) validateCSRObject(csr *certificatesv1.CertificateSigningReq // validateX509CSR validates the certificate request by comparing CN with username, // and organization with groups. -func (r *reconciler) validateX509CSR(csr *certificatesv1.CertificateSigningRequest, certReq *x509.CertificateRequest, machine v1alpha1.Machine) error { +func (r *reconciler) validateX509CSR(csr *certificatesv1.CertificateSigningRequest, certReq *x509.CertificateRequest, machine clusterv1alpha1.Machine) error { // Validate Subject CommonName. 
if certReq.Subject.CommonName != csr.Spec.Username { return fmt.Errorf("commonName '%s' is different then CSR username '%s'", certReq.Subject.CommonName, csr.Spec.Username) @@ -237,11 +251,11 @@ func (r *reconciler) validateX509CSR(csr *certificatesv1.CertificateSigningReque return nil } -func (r *reconciler) getMachineForNode(ctx context.Context, nodeName string) (v1alpha1.Machine, bool, error) { +func (r *reconciler) getMachineForNode(ctx context.Context, nodeName string) (clusterv1alpha1.Machine, bool, error) { // List all Machines in all namespaces. - machines := &v1alpha1.MachineList{} - if err := r.Client.List(ctx, machines); err != nil { - return v1alpha1.Machine{}, false, fmt.Errorf("failed to list all machine objects: %w", err) + machines := &clusterv1alpha1.MachineList{} + if err := r.List(ctx, machines); err != nil { + return clusterv1alpha1.Machine{}, false, fmt.Errorf("failed to list all machine objects: %w", err) } for _, machine := range machines.Items { @@ -250,7 +264,7 @@ func (r *reconciler) getMachineForNode(ctx context.Context, nodeName string) (v1 } } - return v1alpha1.Machine{}, false, fmt.Errorf("failed to get machine for given node name '%s'", nodeName) + return clusterv1alpha1.Machine{}, false, fmt.Errorf("failed to get machine for given node name '%s'", nodeName) } func isUsageInUsageList(usage certificatesv1.KeyUsage, usageList []certificatesv1.KeyUsage) bool { diff --git a/pkg/controller/nodecsrapprover/node_csr_approver_test.go b/pkg/controller/nodecsrapprover/controller_test.go similarity index 92% rename from pkg/controller/nodecsrapprover/node_csr_approver_test.go rename to pkg/controller/nodecsrapprover/controller_test.go index 18bbb5625..82ca2c7c5 100644 --- a/pkg/controller/nodecsrapprover/node_csr_approver_test.go +++ b/pkg/controller/nodecsrapprover/controller_test.go @@ -22,7 +22,7 @@ import ( "fmt" "testing" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" + clusterv1alpha1 
"k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" certificatesv1 "k8s.io/api/certificates/v1" corev1 "k8s.io/api/core/v1" @@ -289,54 +289,6 @@ func TestValidateCSRObject(t *testing.T) { nodeName: "", err: fmt.Errorf("'%s' and/or '%s' are not in its groups", nodeGroup, authenticatedGroup), }, - { - name: "validate csr with less than 3 usages", - csr: &certificatesv1.CertificateSigningRequest{ - ObjectMeta: metav1.ObjectMeta{ - Name: "csr", - Namespace: metav1.NamespaceSystem, - }, - Spec: certificatesv1.CertificateSigningRequestSpec{ - Request: []byte(testValidCSR), - Usages: []certificatesv1.KeyUsage{ - certificatesv1.UsageDigitalSignature, - certificatesv1.UsageKeyEncipherment, - }, - Username: "system:node:ip-172-31-114-48.eu-west-3.compute.internal", - Groups: []string{ - "system:nodes", - "system:authenticated", - }, - }, - }, - nodeName: "", - err: fmt.Errorf("there are no exactly three usages defined"), - }, - { - name: "validate csr with more than 3 usages", - csr: &certificatesv1.CertificateSigningRequest{ - ObjectMeta: metav1.ObjectMeta{ - Name: "csr", - Namespace: metav1.NamespaceSystem, - }, - Spec: certificatesv1.CertificateSigningRequestSpec{ - Request: []byte(testValidCSR), - Usages: []certificatesv1.KeyUsage{ - certificatesv1.UsageDigitalSignature, - certificatesv1.UsageKeyEncipherment, - certificatesv1.UsageServerAuth, - certificatesv1.UsageClientAuth, - }, - Username: "system:node:ip-172-31-114-48.eu-west-3.compute.internal", - Groups: []string{ - "system:nodes", - "system:authenticated", - }, - }, - }, - nodeName: "", - err: fmt.Errorf("there are no exactly three usages defined"), - }, { name: "validate csr with usages not matching expected usages", csr: &certificatesv1.CertificateSigningRequest{ @@ -379,13 +331,13 @@ func TestValidateCSRObject(t *testing.T) { } func TestValidateX509CSR(t *testing.T) { - machine := v1alpha1.Machine{ + machine := clusterv1alpha1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "test-machine", Namespace: 
metav1.NamespaceSystem, }, - Spec: v1alpha1.MachineSpec{}, - Status: v1alpha1.MachineStatus{ + Spec: clusterv1alpha1.MachineSpec{}, + Status: clusterv1alpha1.MachineStatus{ NodeRef: &corev1.ObjectReference{ APIVersion: "v1", Kind: "Node", @@ -407,7 +359,7 @@ func TestValidateX509CSR(t *testing.T) { testCases := []struct { name string csr *certificatesv1.CertificateSigningRequest - machine v1alpha1.Machine + machine clusterv1alpha1.Machine err error }{ { @@ -455,13 +407,13 @@ func TestValidateX509CSR(t *testing.T) { }, }, }, - machine: v1alpha1.Machine{ + machine: clusterv1alpha1.Machine{ ObjectMeta: metav1.ObjectMeta{ Name: "test-machine", Namespace: metav1.NamespaceSystem, }, - Spec: v1alpha1.MachineSpec{}, - Status: v1alpha1.MachineStatus{ + Spec: clusterv1alpha1.MachineSpec{}, + Status: clusterv1alpha1.MachineStatus{ NodeRef: &corev1.ObjectReference{ APIVersion: "v1", Kind: "Node", diff --git a/pkg/controller/util/machine.go b/pkg/controller/util/machine.go index 06afef450..cee33f100 100644 --- a/pkg/controller/util/machine.go +++ b/pkg/controller/util/machine.go @@ -20,16 +20,13 @@ import ( "context" "fmt" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" "k8s.io/apimachinery/pkg/types" - "sigs.k8s.io/controller-runtime/pkg/client" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) -// LegacyMachineControllerUserDataLabel is set to true when machine-controller is used for managing machine configuration. 
-const LegacyMachineControllerUserDataLabel = "machine.clusters.k8s.io/legacy-machine-controller-user-data" - -func GetMachineDeploymentNameAndRevisionForMachine(ctx context.Context, machine *clusterv1alpha1.Machine, c client.Client) (string, string, error) { +func GetMachineDeploymentNameAndRevisionForMachine(ctx context.Context, machine *clusterv1alpha1.Machine, c ctrlruntimeclient.Client) (string, string, error) { var ( machineSetName string machineDeploymentName string @@ -42,7 +39,7 @@ func GetMachineDeploymentNameAndRevisionForMachine(ctx context.Context, machine if machineSetName != "" { machineSet := &clusterv1alpha1.MachineSet{} - if err := c.Get(ctx, types.NamespacedName{Name: machineSetName, Namespace: "kube-system"}, machineSet); err != nil { + if err := c.Get(ctx, types.NamespacedName{Name: machineSetName, Namespace: machine.Namespace}, machineSet); err != nil { return "", "", err } diff --git a/pkg/controller/util/machine_deployment.go b/pkg/controller/util/machine_deployment.go index 034510ca7..ee6df5c10 100644 --- a/pkg/controller/util/machine_deployment.go +++ b/pkg/controller/util/machine_deployment.go @@ -25,53 +25,36 @@ import ( "strings" "github.com/davecgh/go-spew/spew" + "go.uber.org/zap" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" + sdkclustercommon "k8c.io/machine-controller/sdk/apis/cluster/common" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" intstrutil "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/klog" "k8s.io/utils/integer" + ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) const ( - DefaultMachineDeploymentUniqueLabelKey = "machine-template-hash" - - // RevisionAnnotation is the 
revision annotation of a machine deployment's machine sets which records its rollout sequence. - RevisionAnnotation = "machinedeployment.clusters.k8s.io/revision" - // RevisionHistoryAnnotation maintains the history of all old revisions that a machine set has served for a machine deployment. - RevisionHistoryAnnotation = "machinedeployment.clusters.k8s.io/revision-history" - // DesiredReplicasAnnotation is the desired replicas for a machine deployment recorded as an annotation - // in its machine sets. Helps in separating scaling events from the rollout process and for - // determining if the new machine set for a deployment is really saturated. - DesiredReplicasAnnotation = "machinedeployment.clusters.k8s.io/desired-replicas" - // MaxReplicasAnnotation is the maximum replicas a deployment can have at a given point, which - // is machinedeployment.spec.replicas + maxSurge. Used by the underlying machine sets to estimate their - // proportions in case the deployment has surge replicas. - MaxReplicasAnnotation = "machinedeployment.clusters.k8s.io/max-replicas" - - // FailedMSCreateReason is added in a machine deployment when it cannot create a new machine set. - FailedMSCreateReason = "MachineSetCreateError" - // FoundNewMSReason is added in a machine deployment when it adopts an existing machine set. - FoundNewMSReason = "FoundNewMachineSet" - // PausedDeployReason is added in a deployment when it is paused. Lack of progress shouldn't be - // estimated once a deployment is paused. - PausedDeployReason = "DeploymentPaused" - - // MinimumReplicasAvailable is added in a deployment when it has its minimum replicas required available. - MinimumReplicasAvailable = "MinimumReplicasAvailable" - // MinimumReplicasUnavailable is added in a deployment when it doesn't have the minimum required replicas - // available. 
- MinimumReplicasUnavailable = "MinimumReplicasUnavailable" + DefaultMachineDeploymentUniqueLabelKey = sdkclustercommon.DefaultMachineDeploymentUniqueLabelKey + RevisionAnnotation = sdkclustercommon.RevisionAnnotation + RevisionHistoryAnnotation = sdkclustercommon.RevisionHistoryAnnotation + DesiredReplicasAnnotation = sdkclustercommon.DesiredReplicasAnnotation + MaxReplicasAnnotation = sdkclustercommon.MaxReplicasAnnotation + FailedMSCreateReason = sdkclustercommon.FailedMSCreateReason + FoundNewMSReason = sdkclustercommon.FoundNewMSReason + PausedDeployReason = sdkclustercommon.PausedDeployReason + MinimumReplicasAvailable = sdkclustercommon.MinimumReplicasAvailable + MinimumReplicasUnavailable = sdkclustercommon.MinimumReplicasUnavailable ) // MachineSetsByCreationTimestamp sorts a list of MachineSet by creation timestamp, using their names as a tie breaker. -type MachineSetsByCreationTimestamp []*v1alpha1.MachineSet +type MachineSetsByCreationTimestamp []*clusterv1alpha1.MachineSet func (o MachineSetsByCreationTimestamp) Len() int { return len(o) } @@ -86,7 +69,7 @@ func (o MachineSetsByCreationTimestamp) Less(i, j int) bool { // MachineSetsBySizeOlder sorts a list of MachineSet by size in descending order, using their creation timestamp or name as a tie breaker. // By using the creation timestamp, this sorts from old to new machine sets. -type MachineSetsBySizeOlder []*v1alpha1.MachineSet +type MachineSetsBySizeOlder []*clusterv1alpha1.MachineSet func (o MachineSetsBySizeOlder) Len() int { return len(o) } @@ -101,7 +84,7 @@ func (o MachineSetsBySizeOlder) Less(i, j int) bool { // MachineSetsBySizeNewer sorts a list of MachineSet by size in descending order, using their creation timestamp or name as a tie breaker. // By using the creation timestamp, this sorts from new to old machine sets. 
-type MachineSetsBySizeNewer []*v1alpha1.MachineSet +type MachineSetsBySizeNewer []*clusterv1alpha1.MachineSet func (o MachineSetsBySizeNewer) Len() int { return len(o) } @@ -115,7 +98,7 @@ func (o MachineSetsBySizeNewer) Less(i, j int) bool { } // SetDeploymentRevision updates the revision for a deployment. -func SetDeploymentRevision(deployment *v1alpha1.MachineDeployment, revision string) bool { +func SetDeploymentRevision(deployment *clusterv1alpha1.MachineDeployment, revision string) bool { updated := false if deployment.Annotations == nil { @@ -130,17 +113,20 @@ func SetDeploymentRevision(deployment *v1alpha1.MachineDeployment, revision stri } // MaxRevision finds the highest revision in the machine sets. -func MaxRevision(allMSs []*v1alpha1.MachineSet) int64 { - max := int64(0) +func MaxRevision(log *zap.SugaredLogger, allMSs []*clusterv1alpha1.MachineSet) int64 { + maxRev := int64(0) for _, ms := range allMSs { if v, err := Revision(ms); err != nil { - // Skip the machine sets when it failed to parse their revision information - klog.V(4).Infof("Error: %v. Couldn't parse revision for machine set %#v, deployment controller will skip it when reconciling revisions.", err, ms) - } else if v > max { - max = v + log.Debugw( + "Failed to parse revision for MachineSet, deployment controller will skip it when reconciling revisions", + "machineset", ctrlruntimeclient.ObjectKeyFromObject(ms), + zap.Error(err), + ) + } else if v > maxRev { + maxRev = v } } - return max + return maxRev } // Revision returns the revision number of the input object. 
@@ -157,11 +143,11 @@ func Revision(obj runtime.Object) (int64, error) { } var annotationsToSkip = map[string]bool{ - v1.LastAppliedConfigAnnotation: true, - RevisionAnnotation: true, - RevisionHistoryAnnotation: true, - DesiredReplicasAnnotation: true, - MaxReplicasAnnotation: true, + corev1.LastAppliedConfigAnnotation: true, + RevisionAnnotation: true, + RevisionHistoryAnnotation: true, + DesiredReplicasAnnotation: true, + MaxReplicasAnnotation: true, } // skipCopyAnnotation returns true if we should skip copying the annotation with the given annotation key @@ -175,7 +161,7 @@ func skipCopyAnnotation(key string) bool { // copyDeploymentAnnotationsToMachineSet copies deployment's annotations to machine set's annotations, // and returns true if machine set's annotation is changed. // Note that apply and revision annotations are not copied. -func copyDeploymentAnnotationsToMachineSet(deployment *v1alpha1.MachineDeployment, ms *v1alpha1.MachineSet) bool { +func copyDeploymentAnnotationsToMachineSet(deployment *clusterv1alpha1.MachineDeployment, ms *clusterv1alpha1.MachineSet) bool { msAnnotationsChanged := false if ms.Annotations == nil { ms.Annotations = make(map[string]string) @@ -194,22 +180,22 @@ func copyDeploymentAnnotationsToMachineSet(deployment *v1alpha1.MachineDeploymen } // GetDesiredReplicasAnnotation returns the number of desired replicas. 
-func GetDesiredReplicasAnnotation(ms *v1alpha1.MachineSet) (int32, bool) { - return getIntFromAnnotation(ms, DesiredReplicasAnnotation) +func GetDesiredReplicasAnnotation(log *zap.SugaredLogger, ms *clusterv1alpha1.MachineSet) (int32, bool) { + return getIntFromAnnotation(log, ms, DesiredReplicasAnnotation) } -func getMaxReplicasAnnotation(ms *v1alpha1.MachineSet) (int32, bool) { - return getIntFromAnnotation(ms, MaxReplicasAnnotation) +func getMaxReplicasAnnotation(log *zap.SugaredLogger, ms *clusterv1alpha1.MachineSet) (int32, bool) { + return getIntFromAnnotation(log, ms, MaxReplicasAnnotation) } -func getIntFromAnnotation(ms *v1alpha1.MachineSet, annotationKey string) (int32, bool) { +func getIntFromAnnotation(log *zap.SugaredLogger, ms *clusterv1alpha1.MachineSet, annotationKey string) (int32, bool) { annotationValue, ok := ms.Annotations[annotationKey] if !ok { return int32(0), false } intValue, err := strconv.Atoi(annotationValue) if err != nil { - klog.V(2).Infof("Cannot convert the value %q with annotation key %q for the machine set %q", annotationValue, annotationKey, ms.Name) + log.Infow("Failed to convert annotation to integer", "key", annotationKey, "value", annotationValue, zap.Error(err)) return int32(0), false } return int32(intValue), true @@ -217,7 +203,7 @@ func getIntFromAnnotation(ms *v1alpha1.MachineSet, annotationKey string) (int32, // SetNewMachineSetAnnotations sets new machine set's annotations appropriately by updating its revision and // copying required deployment annotations to it; it returns true if machine set's annotation is changed. 
-func SetNewMachineSetAnnotations(deployment *v1alpha1.MachineDeployment, newMS *v1alpha1.MachineSet, newRevision string, exists bool) bool { +func SetNewMachineSetAnnotations(mdLog *zap.SugaredLogger, deployment *clusterv1alpha1.MachineDeployment, newMS *clusterv1alpha1.MachineSet, newRevision string, exists bool) bool { // First, copy deployment's annotations (except for apply and revision annotations) annotationChanged := copyDeploymentAnnotationsToMachineSet(deployment, newMS) // Then, update machine set's revision annotation @@ -229,25 +215,31 @@ func SetNewMachineSetAnnotations(deployment *v1alpha1.MachineDeployment, newMS * // of all old MSes + 1). However, it's possible that some of the old MSes are deleted after the newMS revision being updated, and // newRevision becomes smaller than newMS's revision. We should only update newMS revision when it's smaller than newRevision. + msLog := mdLog.With("machineset", ctrlruntimeclient.ObjectKeyFromObject(newMS)) + oldRevisionInt, err := strconv.ParseInt(oldRevision, 10, 64) if err != nil { if oldRevision != "" { - klog.Warningf("Updating machine set revision OldRevision not int %s", err) + msLog.Infow("MachineSet revision annotation is not a valid integer", "value", oldRevision, zap.Error(err)) return false } - //If the MS annotation is empty then initialise it to 0 + // If the MS annotation is empty then initialise it to 0 oldRevisionInt = 0 } + newRevisionInt, err := strconv.ParseInt(newRevision, 10, 64) if err != nil { - klog.Warningf("Updating machine set revision NewRevision not int %s", err) + // This should never happen, as newRevision is calculated by the machine-controller itself. 
+ msLog.Errorw("New MachineSet revision annotation is not a valid integer", "value", newRevision, zap.Error(err)) return false } + if oldRevisionInt < newRevisionInt { newMS.Annotations[RevisionAnnotation] = newRevision annotationChanged = true - klog.V(4).Infof("Updating machine set %q revision to %s", newMS.Name, newRevision) + msLog.Debugw("Updating MachineSet revision", "revision", newRevision) } + // If a revision annotation already existed and this machine set was updated with a new revision // then that means we are rolling back to this machine set. We need to preserve the old revisions // for historical information. @@ -271,7 +263,7 @@ func SetNewMachineSetAnnotations(deployment *v1alpha1.MachineDeployment, newMS * // FindOneActiveOrLatest returns the only active or the latest machine set in case there is at most one active // machine set. If there are more than one active machine sets, return nil so machine sets can be scaled down // to the point where there is only one active machine set. -func FindOneActiveOrLatest(newMS *v1alpha1.MachineSet, oldMSs []*v1alpha1.MachineSet) *v1alpha1.MachineSet { +func FindOneActiveOrLatest(newMS *clusterv1alpha1.MachineSet, oldMSs []*clusterv1alpha1.MachineSet) *clusterv1alpha1.MachineSet { if newMS == nil && len(oldMSs) == 0 { return nil } @@ -294,7 +286,7 @@ func FindOneActiveOrLatest(newMS *v1alpha1.MachineSet, oldMSs []*v1alpha1.Machin } // SetReplicasAnnotations sets the desiredReplicas and maxReplicas into the annotations. -func SetReplicasAnnotations(ms *v1alpha1.MachineSet, desiredReplicas, maxReplicas int32) bool { +func SetReplicasAnnotations(ms *clusterv1alpha1.MachineSet, desiredReplicas, maxReplicas int32) bool { updated := false if ms.Annotations == nil { ms.Annotations = make(map[string]string) @@ -313,7 +305,7 @@ func SetReplicasAnnotations(ms *v1alpha1.MachineSet, desiredReplicas, maxReplica } // AnnotationsNeedUpdate return true if ReplicasAnnotations need to be updated. 
-func ReplicasAnnotationsNeedUpdate(ms *v1alpha1.MachineSet, desiredReplicas, maxReplicas int32) bool { +func ReplicasAnnotationsNeedUpdate(ms *clusterv1alpha1.MachineSet, desiredReplicas, maxReplicas int32) bool { if ms.Annotations == nil { return true } @@ -329,7 +321,7 @@ func ReplicasAnnotationsNeedUpdate(ms *v1alpha1.MachineSet, desiredReplicas, max } // MaxUnavailable returns the maximum unavailable machines a rolling deployment can take. -func MaxUnavailable(deployment v1alpha1.MachineDeployment) int32 { +func MaxUnavailable(deployment clusterv1alpha1.MachineDeployment) int32 { if !IsRollingUpdate(&deployment) || *(deployment.Spec.Replicas) == 0 { return int32(0) } @@ -342,7 +334,7 @@ func MaxUnavailable(deployment v1alpha1.MachineDeployment) int32 { } // MaxSurge returns the maximum surge machines a rolling deployment can take. -func MaxSurge(deployment v1alpha1.MachineDeployment) int32 { +func MaxSurge(deployment clusterv1alpha1.MachineDeployment) int32 { if !IsRollingUpdate(&deployment) { return int32(0) } @@ -354,12 +346,12 @@ func MaxSurge(deployment v1alpha1.MachineDeployment) int32 { // GetProportion will estimate the proportion for the provided machine set using 1. the current size // of the parent deployment, 2. the replica count that needs be added on the machine sets of the // deployment, and 3. the total replicas added in the machine sets of the deployment so far. 
-func GetProportion(ms *v1alpha1.MachineSet, d v1alpha1.MachineDeployment, deploymentReplicasToAdd, deploymentReplicasAdded int32) int32 { +func GetProportion(log *zap.SugaredLogger, ms *clusterv1alpha1.MachineSet, d clusterv1alpha1.MachineDeployment, deploymentReplicasToAdd, deploymentReplicasAdded int32) int32 { if ms == nil || *(ms.Spec.Replicas) == 0 || deploymentReplicasToAdd == 0 || deploymentReplicasToAdd == deploymentReplicasAdded { return int32(0) } - msFraction := getMachineSetFraction(*ms, d) + msFraction := getMachineSetFraction(log, *ms, d) allowed := deploymentReplicasToAdd - deploymentReplicasAdded if deploymentReplicasToAdd > 0 { @@ -376,14 +368,14 @@ func GetProportion(ms *v1alpha1.MachineSet, d v1alpha1.MachineDeployment, deploy // getMachineSetFraction estimates the fraction of replicas a machine set can have in // 1. a scaling event during a rollout or 2. when scaling a paused deployment. -func getMachineSetFraction(ms v1alpha1.MachineSet, d v1alpha1.MachineDeployment) int32 { +func getMachineSetFraction(log *zap.SugaredLogger, ms clusterv1alpha1.MachineSet, d clusterv1alpha1.MachineDeployment) int32 { // If we are scaling down to zero then the fraction of this machine set is its whole size (negative) if *(d.Spec.Replicas) == int32(0) { return -*(ms.Spec.Replicas) } deploymentReplicas := *(d.Spec.Replicas) + MaxSurge(d) - annotatedReplicas, ok := getMaxReplicasAnnotation(&ms) + annotatedReplicas, ok := getMaxReplicasAnnotation(log, &ms) if !ok { // If we cannot find the annotation then fallback to the current deployment size. Note that this // will not be an accurate proportion estimation in case other machine sets have different values @@ -403,7 +395,7 @@ func getMachineSetFraction(ms v1alpha1.MachineSet, d v1alpha1.MachineDeployment) // 1. The hash result would be different upon machineTemplateSpec API changes // (e.g. the addition of a new field will cause the hash code to change) // 2. The deployment template won't have hash labels. 
-func EqualIgnoreHash(template1, template2 *v1alpha1.MachineTemplateSpec) bool { +func EqualIgnoreHash(template1, template2 *clusterv1alpha1.MachineTemplateSpec) bool { t1Copy := template1.DeepCopy() t2Copy := template2.DeepCopy() // Remove hash labels from template.Labels before comparing. @@ -413,7 +405,7 @@ func EqualIgnoreHash(template1, template2 *v1alpha1.MachineTemplateSpec) bool { } // FindNewMachineSet returns the new MS this given deployment targets (the one with the same machine template). -func FindNewMachineSet(deployment *v1alpha1.MachineDeployment, msList []*v1alpha1.MachineSet) *v1alpha1.MachineSet { +func FindNewMachineSet(deployment *clusterv1alpha1.MachineDeployment, msList []*clusterv1alpha1.MachineSet) *clusterv1alpha1.MachineSet { sort.Sort(MachineSetsByCreationTimestamp(msList)) for i := range msList { if EqualIgnoreHash(&msList[i].Spec.Template, &deployment.Spec.Template) { @@ -432,9 +424,9 @@ func FindNewMachineSet(deployment *v1alpha1.MachineDeployment, msList []*v1alpha // Returns two list of machine sets // - the first contains all old machine sets with all non-zero replicas // - the second contains all old machine sets -func FindOldMachineSets(deployment *v1alpha1.MachineDeployment, msList []*v1alpha1.MachineSet) ([]*v1alpha1.MachineSet, []*v1alpha1.MachineSet) { - var requiredMSs []*v1alpha1.MachineSet - allMSs := make([]*v1alpha1.MachineSet, 0, len(msList)) +func FindOldMachineSets(deployment *clusterv1alpha1.MachineDeployment, msList []*clusterv1alpha1.MachineSet) ([]*clusterv1alpha1.MachineSet, []*clusterv1alpha1.MachineSet) { + var requiredMSs []*clusterv1alpha1.MachineSet + allMSs := make([]*clusterv1alpha1.MachineSet, 0, len(msList)) newMS := FindNewMachineSet(deployment, msList) for _, ms := range msList { // Filter out new machine set @@ -450,7 +442,7 @@ func FindOldMachineSets(deployment *v1alpha1.MachineDeployment, msList []*v1alph } // GetReplicaCountForMachineSets returns the sum of Replicas of the given machine sets. 
-func GetReplicaCountForMachineSets(machineSets []*v1alpha1.MachineSet) int32 { +func GetReplicaCountForMachineSets(machineSets []*clusterv1alpha1.MachineSet) int32 { totalReplicas := int32(0) for _, ms := range machineSets { if ms != nil { @@ -461,7 +453,7 @@ func GetReplicaCountForMachineSets(machineSets []*v1alpha1.MachineSet) int32 { } // GetActualReplicaCountForMachineSets returns the sum of actual replicas of the given machine sets. -func GetActualReplicaCountForMachineSets(machineSets []*v1alpha1.MachineSet) int32 { +func GetActualReplicaCountForMachineSets(machineSets []*clusterv1alpha1.MachineSet) int32 { totalActualReplicas := int32(0) for _, ms := range machineSets { if ms != nil { @@ -472,7 +464,7 @@ func GetActualReplicaCountForMachineSets(machineSets []*v1alpha1.MachineSet) int } // GetReadyReplicaCountForMachineSets returns the number of ready machines corresponding to the given machine sets. -func GetReadyReplicaCountForMachineSets(machineSets []*v1alpha1.MachineSet) int32 { +func GetReadyReplicaCountForMachineSets(machineSets []*clusterv1alpha1.MachineSet) int32 { totalReadyReplicas := int32(0) for _, ms := range machineSets { if ms != nil { @@ -483,7 +475,7 @@ func GetReadyReplicaCountForMachineSets(machineSets []*v1alpha1.MachineSet) int3 } // GetAvailableReplicaCountForMachineSets returns the number of available machines corresponding to the given machine sets. -func GetAvailableReplicaCountForMachineSets(machineSets []*v1alpha1.MachineSet) int32 { +func GetAvailableReplicaCountForMachineSets(machineSets []*clusterv1alpha1.MachineSet) int32 { totalAvailableReplicas := int32(0) for _, ms := range machineSets { if ms != nil { @@ -494,13 +486,13 @@ func GetAvailableReplicaCountForMachineSets(machineSets []*v1alpha1.MachineSet) } // IsRollingUpdate returns true if the strategy type is a rolling update. 
-func IsRollingUpdate(deployment *v1alpha1.MachineDeployment) bool { - return deployment.Spec.Strategy.Type == common.RollingUpdateMachineDeploymentStrategyType +func IsRollingUpdate(deployment *clusterv1alpha1.MachineDeployment) bool { + return deployment.Spec.Strategy.Type == sdkclustercommon.RollingUpdateMachineDeploymentStrategyType } // DeploymentComplete considers a deployment to be complete once all of its desired replicas // are updated and available, and no old machines are running. -func DeploymentComplete(deployment *v1alpha1.MachineDeployment, newStatus *v1alpha1.MachineDeploymentStatus) bool { +func DeploymentComplete(deployment *clusterv1alpha1.MachineDeployment, newStatus *clusterv1alpha1.MachineDeploymentStatus) bool { return newStatus.UpdatedReplicas == *(deployment.Spec.Replicas) && newStatus.Replicas == *(deployment.Spec.Replicas) && newStatus.AvailableReplicas == *(deployment.Spec.Replicas) && @@ -511,9 +503,9 @@ func DeploymentComplete(deployment *v1alpha1.MachineDeployment, newStatus *v1alp // When one of the following is true, we're rolling out the deployment; otherwise, we're scaling it. // 1) The new MS is saturated: newMS's replicas == deployment's replicas // 2) Max number of machines allowed is reached: deployment's replicas + maxSurge == all MSs' replicas. -func NewMSNewReplicas(deployment *v1alpha1.MachineDeployment, allMSs []*v1alpha1.MachineSet, newMS *v1alpha1.MachineSet) (int32, error) { +func NewMSNewReplicas(deployment *clusterv1alpha1.MachineDeployment, allMSs []*clusterv1alpha1.MachineSet, newMS *clusterv1alpha1.MachineSet) (int32, error) { switch deployment.Spec.Strategy.Type { - case common.RollingUpdateMachineDeploymentStrategyType: + case sdkclustercommon.RollingUpdateMachineDeploymentStrategyType: // Check if we can scale up. 
maxSurge, err := intstrutil.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), true) if err != nil { @@ -557,7 +549,7 @@ func NewMSNewReplicas(deployment *v1alpha1.MachineDeployment, allMSs []*v1alpha1 // Both the deployment and the machine set have to believe this machine set can own all of the desired // replicas in the deployment and the annotation helps in achieving that. All machines of the MachineSet // need to be available. -func IsSaturated(deployment *v1alpha1.MachineDeployment, ms *v1alpha1.MachineSet) bool { +func IsSaturated(deployment *clusterv1alpha1.MachineDeployment, ms *clusterv1alpha1.MachineSet) bool { if ms == nil { return false } @@ -602,18 +594,18 @@ func ResolveFenceposts(maxSurge, maxUnavailable *intstrutil.IntOrString, desired } // FilterActiveMachineSets returns machine sets that have (or at least ought to have) machines. -func FilterActiveMachineSets(machineSets []*v1alpha1.MachineSet) []*v1alpha1.MachineSet { - activeFilter := func(ms *v1alpha1.MachineSet) bool { +func FilterActiveMachineSets(machineSets []*clusterv1alpha1.MachineSet) []*clusterv1alpha1.MachineSet { + activeFilter := func(ms *clusterv1alpha1.MachineSet) bool { return ms != nil && ms.Spec.Replicas != nil && *(ms.Spec.Replicas) > 0 } return FilterMachineSets(machineSets, activeFilter) } -type filterMS func(ms *v1alpha1.MachineSet) bool +type filterMS func(ms *clusterv1alpha1.MachineSet) bool // FilterMachineSets returns machine sets that are filtered by filterFn (all returned ones should match filterFn). 
-func FilterMachineSets(MSes []*v1alpha1.MachineSet, filterFn filterMS) []*v1alpha1.MachineSet { - var filtered []*v1alpha1.MachineSet +func FilterMachineSets(MSes []*clusterv1alpha1.MachineSet, filterFn filterMS) []*clusterv1alpha1.MachineSet { + var filtered []*clusterv1alpha1.MachineSet for i := range MSes { if filterFn(MSes[i]) { filtered = append(filtered, MSes[i]) @@ -692,7 +684,7 @@ func DeepHashObject(hasher hash.Hash, objectToWrite interface{}) { printer.Fprintf(hasher, "%#v", objectToWrite) } -func ComputeHash(template *v1alpha1.MachineTemplateSpec) uint32 { +func ComputeHash(template *clusterv1alpha1.MachineTemplateSpec) uint32 { machineTemplateSpecHasher := fnv.New32a() DeepHashObject(machineTemplateSpecHasher, *template) diff --git a/pkg/health/readiness.go b/pkg/health/readiness.go index d79a21c6c..85081fd42 100644 --- a/pkg/health/readiness.go +++ b/pkg/health/readiness.go @@ -21,7 +21,9 @@ import ( "fmt" "net/http" - machinecontroller "github.com/kubermatic/machine-controller/pkg/controller/machine" + "go.uber.org/zap" + + machinecontroller "k8c.io/machine-controller/pkg/controller/machine" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -32,18 +34,18 @@ func ApiserverReachable(client kubernetes.Interface) healthz.Checker { return func(req *http.Request) error { _, err := client.CoreV1().Nodes().List(req.Context(), metav1.ListOptions{}) if err != nil { - return fmt.Errorf("unable to list nodes check: %w", err) + return fmt.Errorf("failed to list nodes check: %w", err) } return nil } } -func KubeconfigAvailable(kubeconfigProvider machinecontroller.KubeconfigProvider) healthz.Checker { +func KubeconfigAvailable(kubeconfigProvider machinecontroller.KubeconfigProvider, log *zap.SugaredLogger) healthz.Checker { return func(req *http.Request) error { - cm, err := kubeconfigProvider.GetKubeconfig(req.Context()) + cm, err := kubeconfigProvider.GetKubeconfig(req.Context(), log) if err != nil { - return fmt.Errorf("unable to get 
kubeconfig: %w", err) + return fmt.Errorf("failed to get kubeconfig: %w", err) } if len(cm.Clusters) != 1 { diff --git a/pkg/ini/escape_test.go b/pkg/ini/escape_test.go deleted file mode 100644 index 725b6b9b8..000000000 --- a/pkg/ini/escape_test.go +++ /dev/null @@ -1,87 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package ini - -import ( - "bytes" - "testing" - "text/template" - - "github.com/Masterminds/sprig/v3" - "github.com/sethvargo/go-password/password" - "gopkg.in/gcfg.v1" -) - -const ( - testTpl = `[Global] -Password = {{ .Global.Password | iniEscape }} -` -) - -type globalSection struct { - Password string -} - -type testData struct { - Global globalSection -} - -// TestINIEscape will ensure that we hopefully cover every case. 
-func TestINIEscape(t *testing.T) { - // We'll simply generate 1000 times a password with special chars, - // Put it into a OpenStack cloud config, - // Marshal it, - // Unmarshal it, - // Compare if the input & output password match - for i := 0; i <= 1000; i++ { - pw, err := password.Generate(64, 10, len(password.Symbols), false, false) - if err != nil { - t.Fatal(err) - } - - t.Logf("testing with pw: %s", pw) - - before := &testData{ - Global: globalSection{ - Password: pw, - }, - } - - funcMap := sprig.TxtFuncMap() - funcMap["iniEscape"] = Escape - - tpl, err := template.New("test").Funcs(funcMap).Parse(testTpl) - if err != nil { - t.Fatalf("failed to parse the cloud config template: %v", err) - } - - buf := &bytes.Buffer{} - if err := tpl.Execute(buf, before); err != nil { - t.Fatalf("failed to execute cloud config template: %v", err) - } - - after := &testData{} - if err := gcfg.ReadStringInto(after, buf.String()); err != nil { - t.Logf("\n%s", after) - t.Fatalf("failed to load string into config object: %v", err) - } - - if before.Global.Password != after.Global.Password { - t.Fatalf("after unmarshalling the config into a string an reading it back in, the value changed. Password before:\n%s Password after:\n%s", before.Global.Password, after.Global.Password) - } - } -} diff --git a/pkg/log/zap.go b/pkg/log/zap.go new file mode 100644 index 000000000..1d13484ea --- /dev/null +++ b/pkg/log/zap.go @@ -0,0 +1,168 @@ +/* +Copyright 2019 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package log has been graciously copied from KKP (Copyright 2020 The Kubermatic Kubernetes Platform contributors). +package log + +import ( + "flag" + "fmt" + "os" + "strings" + + "github.com/spf13/pflag" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + + ctrlruntimelzap "sigs.k8s.io/controller-runtime/pkg/log/zap" +) + +func init() { + Logger = NewDefault().Sugar() +} + +var Logger *zap.SugaredLogger + +// Options exports an options struct to be used by cmds. +type Options struct { + // Enable debug logs + Debug bool + // Log format (JSON or plain text) + Format Format +} + +func NewDefaultOptions() Options { + return Options{ + Debug: false, + Format: FormatJSON, + } +} + +func (o *Options) AddFlags(fs *flag.FlagSet) { + fs.BoolVar(&o.Debug, "log-debug", o.Debug, "Enables more verbose logging") + fs.Var(&o.Format, "log-format", "Log format, one of "+AvailableFormats.String()) +} + +func (o *Options) AddPFlags(fs *pflag.FlagSet) { + fs.BoolVar(&o.Debug, "log-debug", o.Debug, "Enables more verbose logging") + fs.Var(&o.Format, "log-format", "Log format, one of "+AvailableFormats.String()) +} + +func (o *Options) Validate() error { + if !AvailableFormats.Contains(o.Format) { + return fmt.Errorf("invalid log-format specified %q; available: %s", o.Format, AvailableFormats.String()) + } + return nil +} + +type Format string + +// Type implements the pflag.Value interfaces. +func (f *Format) Type() string { + return "string" +} + +// String implements the cli.Value and flag.Value interfaces. +func (f *Format) String() string { + return string(*f) +} + +// Set implements the cli.Value and flag.Value interfaces. 
+func (f *Format) Set(s string) error { + switch strings.ToLower(s) { + case "json": + *f = FormatJSON + return nil + case "console": + *f = FormatConsole + return nil + default: + return fmt.Errorf("invalid format '%s'", s) + } +} + +type Formats []Format + +const ( + FormatJSON Format = "JSON" + FormatConsole Format = "Console" +) + +var ( + AvailableFormats = Formats{FormatJSON, FormatConsole} +) + +func (f Formats) String() string { + const separator = ", " + var s string + for _, format := range f { + s = s + separator + string(format) + } + return strings.TrimPrefix(s, separator) +} + +func (f Formats) Contains(s Format) bool { + for _, format := range f { + if s == format { + return true + } + } + return false +} + +func NewFromOptions(o Options) *zap.Logger { + return New(o.Debug, o.Format) +} + +func New(debug bool, format Format) *zap.Logger { + // this basically mimics NewConfig, but with a custom sink + sink := zapcore.AddSync(os.Stderr) + + // Level - We only support setting Info+ or Debug+ + lvl := zap.NewAtomicLevelAt(zap.InfoLevel) + if debug { + lvl = zap.NewAtomicLevelAt(zap.DebugLevel) + } + + encCfg := zap.NewProductionEncoderConfig() + // Having a date format makes it easier to look at logs outside of something like Kibana + encCfg.TimeKey = "time" + encCfg.EncodeTime = zapcore.ISO8601TimeEncoder + + // production config encodes durations as a float of the seconds value, but we want a more + // readable, precise representation + encCfg.EncodeDuration = zapcore.StringDurationEncoder + + var enc zapcore.Encoder + if format == FormatJSON { + enc = zapcore.NewJSONEncoder(encCfg) + } else { + enc = zapcore.NewConsoleEncoder(encCfg) + } + + opts := []zap.Option{ + zap.AddCaller(), + zap.ErrorOutput(sink), + } + + coreLog := zapcore.NewCore(&ctrlruntimelzap.KubeAwareEncoder{Encoder: enc}, sink, lvl) + return zap.New(coreLog, opts...) +} + +// NewDefault creates a new default logger. 
+func NewDefault() *zap.Logger { + return New(false, FormatJSON) +} diff --git a/pkg/apis/cluster/v1alpha1/migrations/migrations.go b/pkg/migrations/migrations.go similarity index 67% rename from pkg/apis/cluster/v1alpha1/migrations/migrations.go rename to pkg/migrations/migrations.go index 284bfa18a..de790f7d9 100644 --- a/pkg/apis/cluster/v1alpha1/migrations/migrations.go +++ b/pkg/migrations/migrations.go @@ -22,37 +22,37 @@ import ( "fmt" "time" - machinecontrolleradmission "github.com/kubermatic/machine-controller/pkg/admission" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1/conversions" - "github.com/kubermatic/machine-controller/pkg/cloudprovider" - cloudprovidertypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/types" - machinecontroller "github.com/kubermatic/machine-controller/pkg/controller/machine" - "github.com/kubermatic/machine-controller/pkg/machines" - machinesv1alpha1 "github.com/kubermatic/machine-controller/pkg/machines/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/providerconfig" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + "go.uber.org/zap" + + machinecontrolleradmission "k8c.io/machine-controller/pkg/admission" + "k8c.io/machine-controller/pkg/cloudprovider" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + machinecontroller "k8c.io/machine-controller/pkg/controller/machine" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1/conversions" + "k8c.io/machine-controller/sdk/apis/machines" + machinesv1alpha1 "k8c.io/machine-controller/sdk/apis/machines/v1alpha1" + "k8c.io/machine-controller/sdk/providerconfig" + "k8c.io/machine-controller/sdk/providerconfig/configvar" corev1 "k8s.io/api/core/v1" apiextensionsv1 
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/api/equality" - kerrors "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/wait" dynamicclient "k8s.io/client-go/dynamic" - "k8s.io/client-go/kubernetes" restclient "k8s.io/client-go/rest" "k8s.io/client-go/util/retry" - "k8s.io/klog" "sigs.k8s.io/controller-runtime/pkg/cache" ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) -func MigrateProviderConfigToProviderSpecIfNecessary(ctx context.Context, config *restclient.Config, client ctrlruntimeclient.Client) error { - klog.Infof("Starting to migrate providerConfigs to providerSpecs") +func MigrateProviderConfigToProviderSpecIfNecessary(ctx context.Context, log *zap.SugaredLogger, config *restclient.Config, client ctrlruntimeclient.Client) error { + log.Info("Starting to migrate providerConfigs to providerSpecs") dynamicClient, err := dynamicclient.NewForConfig(config) if err != nil { return fmt.Errorf("failed to construct dynamic client: %w", err) @@ -67,6 +67,8 @@ func MigrateProviderConfigToProviderSpecIfNecessary(ctx context.Context, config return fmt.Errorf("failed to list machine objects: %w", err) } for _, machine := range machines.Items { + machineLog := log.With("machine", ctrlruntimeclient.ObjectKeyFromObject(&machine)) + marshalledObject, err := machine.MarshalJSON() if err != nil { return fmt.Errorf("failed to marshal unstructured machine %s: %w", machine.GetName(), err) @@ -76,7 +78,7 @@ func MigrateProviderConfigToProviderSpecIfNecessary(ctx context.Context, config return fmt.Errorf("failed to convert machine: %w", err) } if wasConverted { - klog.Infof("Converted providerConfig -> providerSpec for machine %s/%s, attempting to update", convertedMachine.Namespace, convertedMachine.Name) + 
machineLog.Info("Converted providerConfig -> providerSpec, attempting to update") if convertedMachine.Annotations == nil { convertedMachine.Annotations = map[string]string{} } @@ -86,7 +88,7 @@ func MigrateProviderConfigToProviderSpecIfNecessary(ctx context.Context, config if err := client.Update(ctx, convertedMachine); err != nil { return fmt.Errorf("failed to update converted machine %s: %w", convertedMachine.Name, err) } - klog.Infof("Successfully updated machine %s/%s after converting providerConfig -> providerSpec", convertedMachine.Namespace, convertedMachine.Name) + machineLog.Info("Successfully updated machine after converting providerConfig -> providerSpec") } } @@ -95,6 +97,8 @@ func MigrateProviderConfigToProviderSpecIfNecessary(ctx context.Context, config return fmt.Errorf("failed to list MachineSets: %w", err) } for _, machineSet := range machineSets.Items { + machineSetLog := log.With("machineset", ctrlruntimeclient.ObjectKeyFromObject(&machineSet)) + marshalledObject, err := machineSet.MarshalJSON() if err != nil { return fmt.Errorf("failed to marshal unstructured MachineSet %s: %w", machineSet.GetName(), err) @@ -104,11 +108,11 @@ func MigrateProviderConfigToProviderSpecIfNecessary(ctx context.Context, config return fmt.Errorf("failed to convert MachineSet %s/%s: %w", machineSet.GetNamespace(), machineSet.GetName(), err) } if machineSetWasConverted { - klog.Infof("Converted providerConfig -> providerSpec for MachineSet %s/%s, attempting to update", convertedMachineSet.Namespace, convertedMachineSet.Name) + machineSetLog.Info("Converted providerConfig -> providerSpec, attempting to update") if err := client.Update(ctx, convertedMachineSet); err != nil { return fmt.Errorf("failed to update MachineSet %s/%s after converting providerConfig -> providerSpec: %w", convertedMachineSet.Namespace, convertedMachineSet.Name, err) } - klog.Infof("Successfully updated MachineSet %s/%s after converting providerConfig -> providerSpec", 
convertedMachineSet.Namespace, convertedMachineSet.Name) + machineSetLog.Info("Successfully updated MachineSet after converting providerConfig -> providerSpec") } } @@ -117,6 +121,8 @@ func MigrateProviderConfigToProviderSpecIfNecessary(ctx context.Context, config return fmt.Errorf("failed to list MachineDeplyoments: %w", err) } for _, machineDeployment := range machineDeployments.Items { + machineDeploymentLog := log.With("machinedeployment", ctrlruntimeclient.ObjectKeyFromObject(&machineDeployment)) + marshalledObject, err := machineDeployment.MarshalJSON() if err != nil { return fmt.Errorf("failed to marshal unstructured MachineDeployment %s: %w", machineDeployment.GetName(), err) @@ -126,21 +132,21 @@ func MigrateProviderConfigToProviderSpecIfNecessary(ctx context.Context, config return fmt.Errorf("failed to convert MachineDeployment %s/%s: %w", machineDeployment.GetNamespace(), machineDeployment.GetName(), err) } if machineDeploymentWasConverted { - klog.Infof("Converted providerConfig -> providerSpec for MachineDeployment %s/%s, attempting to update", convertedMachineDeployment.Namespace, convertedMachineDeployment.Name) + machineDeploymentLog.Info("Converted providerConfig -> providerSpec, attempting to update") if err := client.Update(ctx, convertedMachineDeployment); err != nil { return fmt.Errorf("failed to update MachineDeployment %s/%s after converting providerConfig -> providerSpec: %w", convertedMachineDeployment.Namespace, convertedMachineDeployment.Name, err) } - klog.Infof("Successfully updated MachineDeployment %s/%s after converting providerConfig -> providerSpec", convertedMachineDeployment.Namespace, convertedMachineDeployment.Name) + machineDeploymentLog.Info("Successfully updated MachineDeployment after converting providerConfig -> providerSpec") } } - klog.Infof("Successfully migrated providerConfigs to providerSpecs") + log.Info("Successfully migrated providerConfigs to providerSpecs") return nil } func 
MigrateMachinesv1Alpha1MachineToClusterv1Alpha1MachineIfNecessary( - ctx context.Context, client ctrlruntimeclient.Client, - kubeClient kubernetes.Interface, + ctx context.Context, log *zap.SugaredLogger, + client ctrlruntimeclient.Client, providerData *cloudprovidertypes.ProviderData) error { var ( cachePopulatingInterval = 15 * time.Second @@ -148,17 +154,19 @@ func MigrateMachinesv1Alpha1MachineToClusterv1Alpha1MachineIfNecessary( noMigrationNeed = false ) - err := wait.Poll(cachePopulatingInterval, cachePopulatingTimeout, func() (done bool, err error) { - err = client.Get(ctx, types.NamespacedName{Name: machines.CRDName}, &apiextensionsv1.CustomResourceDefinition{}) + crdLog := log.With("crd", machines.CRDName) + + err := wait.PollUntilContextTimeout(ctx, cachePopulatingInterval, cachePopulatingTimeout, false, func(ctx context.Context) (bool, error) { + err := client.Get(ctx, types.NamespacedName{Name: machines.CRDName}, &apiextensionsv1.CustomResourceDefinition{}) if err != nil { - if kerrors.IsNotFound(err) { + if apierrors.IsNotFound(err) { noMigrationNeed = true return true, nil } var cerr *cache.ErrCacheNotStarted if errors.As(err, &cerr) { - klog.Info("Cache hasn't started yet, trying in 5 seconds") + log.Info("Cache hasn't started yet, trying in 5 seconds") return false, nil } @@ -168,12 +176,12 @@ func MigrateMachinesv1Alpha1MachineToClusterv1Alpha1MachineIfNecessary( }) if err != nil { - klog.Errorf("Failed waiting for caches to be populated: %v", err) + crdLog.Errorw("Failed waiting for caches to be populated", zap.Error(err)) return err } if noMigrationNeed { - klog.Infof("CRD %s not present, no migration needed", machines.CRDName) + crdLog.Info("CRD not present, no migration needed") return nil } @@ -182,34 +190,36 @@ func MigrateMachinesv1Alpha1MachineToClusterv1Alpha1MachineIfNecessary( return fmt.Errorf("error when checking for existence of 'machines.cluster.k8s.io' crd: %w", err) } - if err := migrateMachines(ctx, client, kubeClient, 
providerData); err != nil { + if err := migrateMachines(ctx, log, client, providerData); err != nil { return fmt.Errorf("failed to migrate machines: %w", err) } - klog.Infof("Attempting to delete CRD %s", machines.CRDName) + crdLog.Info("Attempting to delete CRD") if err := client.Delete(ctx, &apiextensionsv1.CustomResourceDefinition{ObjectMeta: metav1.ObjectMeta{Name: machines.CRDName}}); err != nil { return fmt.Errorf("failed to delete machinesv1alpha1.machine crd: %w", err) } - klog.Infof("Successfully deleted CRD %s", machines.CRDName) + crdLog.Info("Successfully deleted CRD") return nil } -func migrateMachines(ctx context.Context, client ctrlruntimeclient.Client, kubeClient kubernetes.Interface, providerData *cloudprovidertypes.ProviderData) error { - klog.Infof("Starting migration for machine.machines.k8s.io/v1alpha1 to machine.cluster.k8s.io/v1alpha1") +func migrateMachines(ctx context.Context, log *zap.SugaredLogger, client ctrlruntimeclient.Client, providerData *cloudprovidertypes.ProviderData) error { + log.Info("Starting migration for machine.machines.k8s.io/v1alpha1 to machine.cluster.k8s.io/v1alpha1") // Get machinesv1Alpha1Machines - klog.Infof("Getting existing machine.machines.k8s.io/v1alpha1 to migrate") + log.Info("Getting existing machine.machines.k8s.io/v1alpha1 to migrate") machinesv1Alpha1Machines := &machinesv1alpha1.MachineList{} if err := client.List(ctx, machinesv1Alpha1Machines); err != nil { return fmt.Errorf("failed to list machinesV1Alpha1 machines: %w", err) } - klog.Infof("Found %v machine.machines.k8s.io/v1alpha1", len(machinesv1Alpha1Machines.Items)) + log.Infof("Found %d machine.machines.k8s.io/v1alpha1 resources", len(machinesv1Alpha1Machines.Items)) // Convert the machine, create the new machine, delete the old one, wait for it to be absent // We do this in one loop to avoid ending up having all machines in both the new and the old format if deletion // fails for whatever reason for _, machinesV1Alpha1Machine := range 
machinesv1Alpha1Machines.Items { - klog.Infof("Starting migration for machine.machines.k8s.io/v1alpha1 %s", machinesV1Alpha1Machine.Name) + machineLog := log.With("machine", machinesV1Alpha1Machine.Name) + machineLog.Info("Starting migration") + convertedClusterv1alpha1Machine := &clusterv1alpha1.Machine{} err := conversions.Convert_MachinesV1alpha1Machine_To_ClusterV1alpha1Machine(&machinesV1Alpha1Machine, convertedClusterv1alpha1Machine) @@ -221,16 +231,18 @@ func migrateMachines(ctx context.Context, client ctrlruntimeclient.Client, kubeC // Some providers need to update the provider instance to the new UID, we get the provider as early as possible // to not fail in a half-migrated state when the providerconfig is invalid - providerConfig, err := providerconfigtypes.GetConfig(convertedClusterv1alpha1Machine.Spec.ProviderSpec) + providerConfig, err := providerconfig.GetConfig(convertedClusterv1alpha1Machine.Spec.ProviderSpec) if err != nil { return fmt.Errorf("failed to get provider config: %w", err) } - skg := providerconfig.NewConfigVarResolver(ctx, client) - prov, err := cloudprovider.ForProvider(providerConfig.CloudProvider, skg) + configResolver := configvar.NewResolver(ctx, client) + prov, err := cloudprovider.ForProvider(providerConfig.CloudProvider, configResolver) if err != nil { return fmt.Errorf("failed to get cloud provider %q: %w", providerConfig.CloudProvider, err) } + machineLog = machineLog.With("provider", providerConfig.CloudProvider) + // We will set that to what's finally in the apisever, be that a created a clusterv1alpha1machine // or a preexisting one, because the migration got interrupted // It is required to set the ownerRef of the node @@ -239,8 +251,8 @@ func migrateMachines(ctx context.Context, client ctrlruntimeclient.Client, kubeC // Do a get first to cover the case the new machine was already created but then something went wrong // If that is the case and the clusterv1alpha1machine != machinesv1alpha1machine we error out and the 
operator // has to manually delete either the new or the old machine - klog.Infof("Checking if machine.cluster.k8s.io/v1alpha1 %s/%s already exists", - convertedClusterv1alpha1Machine.Namespace, convertedClusterv1alpha1Machine.Name) + machineLog = machineLog.With("converted", ctrlruntimeclient.ObjectKeyFromObject(convertedClusterv1alpha1Machine)) + machineLog.Info("Checking if converted machine already exists") existingClusterV1alpha1Machine := &clusterv1alpha1.Machine{} err = client.Get(ctx, @@ -248,19 +260,17 @@ func migrateMachines(ctx context.Context, client ctrlruntimeclient.Client, kubeC existingClusterV1alpha1Machine) if err != nil { // Some random error occurred - if !kerrors.IsNotFound(err) { + if !apierrors.IsNotFound(err) { return fmt.Errorf("failed to check if converted machine %s already exists: %w", convertedClusterv1alpha1Machine.Name, err) } // ClusterV1alpha1Machine does not exist yet - klog.Infof("Machine.cluster.k8s.io/v1alpha1 %s/%s does not yet exist, attempting to create it", - convertedClusterv1alpha1Machine.Namespace, convertedClusterv1alpha1Machine.Name) + machineLog.Info("Converted machine does not yet exist, attempting to create it") if err := client.Create(ctx, convertedClusterv1alpha1Machine); err != nil { return fmt.Errorf("failed to create clusterv1alpha1.machine %s: %w", convertedClusterv1alpha1Machine.Name, err) } - klog.Infof("Successfully created machine.cluster.k8s.io/v1alpha1 %s/%s", - convertedClusterv1alpha1Machine.Namespace, convertedClusterv1alpha1Machine.Name) + machineLog.Info("Successfully created converted machine") finalClusterV1Alpha1Machine = convertedClusterv1alpha1Machine } else { // ClusterV1alpha1Machine already exists @@ -272,34 +282,32 @@ func migrateMachines(ctx context.Context, client ctrlruntimeclient.Client, kubeC existingClusterV1alpha1Machine.Annotations = convertedClusterv1alpha1Machine.Annotations existingClusterV1alpha1Machine.Finalizers = convertedClusterv1alpha1Machine.Finalizers - 
klog.Infof("Updating existing machine.cluster.k8s.io/v1alpha1 %s/%s", - existingClusterV1alpha1Machine.Namespace, existingClusterV1alpha1Machine.Name) + machineLog.Info("Updating existing converted machine") if err := client.Update(ctx, existingClusterV1alpha1Machine); err != nil { return fmt.Errorf("failed to update metadata of existing clusterV1Alpha1 machine: %w", err) } - klog.Infof("Successfully updated existing machine.cluster.k8s.io/v1alpha1 %s/%s", - existingClusterV1alpha1Machine.Namespace, existingClusterV1alpha1Machine.Name) + machineLog.Info("Successfully updated existing converted machine") finalClusterV1Alpha1Machine = existingClusterV1alpha1Machine } // We have to ensure there is an ownerRef to our clusterv1alpha1.Machine on the node if it exists // and that there is no ownerRef to the old machine anymore - if err := ensureClusterV1Alpha1NodeOwnership(ctx, finalClusterV1Alpha1Machine, client); err != nil { + if err := ensureClusterV1Alpha1NodeOwnership(ctx, machineLog, finalClusterV1Alpha1Machine, client); err != nil { return err } if sets.NewString(finalClusterV1Alpha1Machine.Finalizers...).Has(machinecontroller.FinalizerDeleteInstance) { - klog.Infof("Attempting to update the UID at the cloud provider for machine.cluster.k8s.io/v1alpha1 %s", machinesV1Alpha1Machine.Name) + machineLog.Info("Attempting to update the UID at the cloud provider") newMachineWithOldUID := finalClusterV1Alpha1Machine.DeepCopy() newMachineWithOldUID.UID = machinesV1Alpha1Machine.UID - if err := prov.MigrateUID(ctx, newMachineWithOldUID, finalClusterV1Alpha1Machine.UID); err != nil { + if err := prov.MigrateUID(ctx, machineLog, newMachineWithOldUID, finalClusterV1Alpha1Machine.UID); err != nil { return fmt.Errorf("running the provider migration for the UID failed: %w", err) } // Block until we can actually GET the instance with the new UID var isMigrated bool for i := 0; i < 100; i++ { - if _, err := prov.Get(ctx, finalClusterV1Alpha1Machine, providerData); err == nil { + if 
_, err := prov.Get(ctx, machineLog, finalClusterV1Alpha1Machine, providerData); err == nil { isMigrated = true break } @@ -308,28 +316,27 @@ func migrateMachines(ctx context.Context, client ctrlruntimeclient.Client, kubeC if !isMigrated { return fmt.Errorf("failed to GET instance for machine %s after UID migration", finalClusterV1Alpha1Machine.Name) } - klog.Infof("Successfully updated the UID at the cloud provider for machine.cluster.k8s.io/v1alpha1 %s", machinesV1Alpha1Machine.Name) + machineLog.Info("Successfully updated the UID at the cloud provider") } // All went fine, we only have to clear the old machine now - klog.Infof("Deleting machine.machines.k8s.io/v1alpha1 %s", machinesV1Alpha1Machine.Name) + machineLog.Info("Deleting old machine object") if err := deleteMachinesV1Alpha1Machine(ctx, &machinesV1Alpha1Machine, client); err != nil { return err } - klog.Infof("Successfully deleted machine.machines.k8s.io/v1alpha1 %s", machinesV1Alpha1Machine.Name) - klog.Infof("Successfully finished migration for machine.machines.k8s.io/v1alpha1 %s", machinesV1Alpha1Machine.Name) + machineLog.Info("Successfully deleted old machine object") + machineLog.Info("Successfully finished migration") } - klog.Infof("Successfully finished migration for machine.machines.k8s.io/v1alpha1 to machine.cluster.k8s.io/v1alpha1") + log.Info("Successfully finished migration for machine.machines.k8s.io/v1alpha1 to machine.cluster.k8s.io/v1alpha1") return nil } -func ensureClusterV1Alpha1NodeOwnership(ctx context.Context, machine *clusterv1alpha1.Machine, client ctrlruntimeclient.Client) error { +func ensureClusterV1Alpha1NodeOwnership(ctx context.Context, machineLog *zap.SugaredLogger, machine *clusterv1alpha1.Machine, client ctrlruntimeclient.Client) error { if machine.Spec.Name == "" { machine.Spec.Name = machine.Name } - klog.Infof("Checking if node for machines.cluster.k8s.io/v1alpha1 %s/%s exists", - machine.Namespace, machine.Name) + machineLog.Info("Checking if node for machines 
exists") nodeNameCandidates := []string{machine.Spec.Name} if machine.Status.NodeRef != nil { if machine.Status.NodeRef.Name != machine.Spec.Name { @@ -340,17 +347,17 @@ func ensureClusterV1Alpha1NodeOwnership(ctx context.Context, machine *clusterv1a for _, nodeName := range nodeNameCandidates { node := &corev1.Node{} if err := client.Get(ctx, types.NamespacedName{Name: nodeName}, node); err != nil { - if kerrors.IsNotFound(err) { - klog.Infof("No node for machines.cluster.k8s.io/v1alpha1 %s/%s found", - machine.Namespace, machine.Name) + if apierrors.IsNotFound(err) { + machineLog.Info("No node for machines found") continue } - return fmt.Errorf("Failed to get node %s for machine %s: %w", + return fmt.Errorf("failed to get node %s for machine %s: %w", machine.Spec.Name, machine.Name, err) } - klog.Infof("Found node for machines.cluster.k8s.io/v1alpha1 %s/%s: %s, removing its ownerRef and adding NodeOwnerLabel", - node.Name, machine.Namespace, machine.Name) + nodeLog := machineLog.With("node", node.Name) + nodeLog.Info("Found node for machine, removing its ownerRef and adding NodeOwnerLabel") + nodeLabels := node.Labels nodeLabels[machinecontroller.NodeOwnerLabelName] = string(machine.UID) // We retry this because nodes get frequently updated so there is a reasonable chance this may fail @@ -365,8 +372,7 @@ func ensureClusterV1Alpha1NodeOwnership(ctx context.Context, machine *clusterv1a }); err != nil { return fmt.Errorf("failed to update OwnerLabel on node %s: %w", node.Name, err) } - klog.Infof("Successfully removed ownerRef and added NodeOwnerLabelName to node %s for machines.cluster.k8s.io/v1alpha1 %s/%s", - node.Name, machine.Namespace, machine.Name) + nodeLog.Info("Successfully removed ownerRef and added NodeOwnerLabelName to node") } return nil @@ -382,7 +388,7 @@ func deleteMachinesV1Alpha1Machine(ctx context.Context, return fmt.Errorf("failed to delete machine %s: %w", machine.Name, err) } - if err := wait.Poll(500*time.Millisecond, 60*time.Second, func() 
(bool, error) { + if err := wait.PollUntilContextTimeout(ctx, 500*time.Millisecond, 60*time.Second, false, func(ctx context.Context) (bool, error) { return isMachinesV1Alpha1MachineDeleted(ctx, machine.Name, client) }); err != nil { return fmt.Errorf("failed to wait for machine %s to be deleted: %w", machine.Name, err) @@ -393,7 +399,7 @@ func deleteMachinesV1Alpha1Machine(ctx context.Context, func isMachinesV1Alpha1MachineDeleted(ctx context.Context, name string, client ctrlruntimeclient.Client) (bool, error) { if err := client.Get(ctx, types.NamespacedName{Name: name}, &machinesv1alpha1.Machine{}); err != nil { - if kerrors.IsNotFound(err) { + if apierrors.IsNotFound(err) { return true, nil } return false, err diff --git a/pkg/node/eviction/eviction.go b/pkg/node/eviction/eviction.go index d22679b84..2e513e49c 100644 --- a/pkg/node/eviction/eviction.go +++ b/pkg/node/eviction/eviction.go @@ -21,77 +21,79 @@ import ( "fmt" "sync" - evictiontypes "github.com/kubermatic/machine-controller/pkg/node/eviction/types" - "github.com/kubermatic/machine-controller/pkg/node/nodemanager" + "go.uber.org/zap" + + "k8c.io/machine-controller/pkg/node/nodemanager" + nodetypes "k8c.io/machine-controller/sdk/node" corev1 "k8s.io/api/core/v1" - policy "k8s.io/api/policy/v1beta1" - kerrors "k8s.io/apimachinery/pkg/api/errors" + policyv1beta1 "k8s.io/api/policy/v1beta1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/client-go/kubernetes" - "k8s.io/klog" ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) type NodeEviction struct { nodeManager *nodemanager.NodeManager - ctx context.Context nodeName string kubeClient kubernetes.Interface } // New returns a new NodeEviction. 
-func New(ctx context.Context, nodeName string, client ctrlruntimeclient.Client, kubeClient kubernetes.Interface) *NodeEviction { +func New(nodeName string, client ctrlruntimeclient.Client, kubeClient kubernetes.Interface) *NodeEviction { return &NodeEviction{ - nodeManager: nodemanager.New(ctx, client, nodeName), - ctx: ctx, + nodeManager: nodemanager.New(client, nodeName), nodeName: nodeName, kubeClient: kubeClient, } } // Run executes the eviction. -func (ne *NodeEviction) Run() (bool, error) { - node, err := ne.nodeManager.GetNode() +func (ne *NodeEviction) Run(ctx context.Context, log *zap.SugaredLogger) (bool, error) { + nodeLog := log.With("node", ne.nodeName) + + node, err := ne.nodeManager.GetNode(ctx) if err != nil { return false, fmt.Errorf("failed to get node from lister: %w", err) } - if _, exists := node.Annotations[evictiontypes.SkipEvictionAnnotationKey]; exists { - klog.V(3).Infof("Skipping eviction for node %s as it has a %s annotation", ne.nodeName, evictiontypes.SkipEvictionAnnotationKey) + if _, exists := node.Annotations[nodetypes.SkipEvictionAnnotationKey]; exists { + nodeLog.Infof("Skipping eviction for node as it has a %s annotation", nodetypes.SkipEvictionAnnotationKey) return false, nil } - klog.V(3).Infof("Starting to evict node %s", ne.nodeName) - if err := ne.nodeManager.CordonNode(node); err != nil { + nodeLog.Info("Starting to evict node") + + if err := ne.nodeManager.CordonNode(ctx, node); err != nil { return false, fmt.Errorf("failed to cordon node %s: %w", ne.nodeName, err) } - klog.V(6).Infof("Successfully cordoned node %s", ne.nodeName) + nodeLog.Debug("Successfully cordoned node") - podsToEvict, err := ne.getFilteredPods() + podsToEvict, err := ne.getFilteredPods(ctx) if err != nil { return false, fmt.Errorf("failed to get Pods to evict for node %s: %w", ne.nodeName, err) } - klog.V(6).Infof("Found %v pods to evict for node %s", len(podsToEvict), ne.nodeName) + nodeLog.Debugf("Found %d pods to evict for node", len(podsToEvict)) 
if len(podsToEvict) == 0 { return false, nil } // If we arrived here we have pods to evict, so tell the controller to retry later - if errs := ne.evictPods(podsToEvict); len(errs) > 0 { + if errs := ne.evictPods(ctx, nodeLog, podsToEvict); len(errs) > 0 { return true, fmt.Errorf("failed to evict pods, errors encountered: %v", errs) } - klog.V(6).Infof("Successfully created evictions for all pods on node %s!", ne.nodeName) + nodeLog.Debug("Successfully created evictions for all pods on node") return true, nil } -func (ne *NodeEviction) getFilteredPods() ([]corev1.Pod, error) { +func (ne *NodeEviction) getFilteredPods(ctx context.Context) ([]corev1.Pod, error) { // The lister-backed client from the mgr automatically creates a lister for all objects requested through it. // We explicitly do not want that for pods, hence we have to use the kubernetes core client // TODO @alvaroaleman: Add source code ref for this - pods, err := ne.kubeClient.CoreV1().Pods(metav1.NamespaceAll).List(ne.ctx, metav1.ListOptions{ + pods, err := ne.kubeClient.CoreV1().Pods(metav1.NamespaceAll).List(ctx, metav1.ListOptions{ FieldSelector: fields.SelectorFromSet(fields.Set{"spec.nodeName": ne.nodeName}).String(), }) if err != nil { @@ -106,7 +108,7 @@ func (ne *NodeEviction) getFilteredPods() ([]corev1.Pod, error) { if controllerRef := metav1.GetControllerOf(&candidatePod); controllerRef != nil && controllerRef.Kind == "DaemonSet" { continue } - if _, found := candidatePod.ObjectMeta.Annotations[corev1.MirrorPodAnnotationKey]; found { + if _, found := candidatePod.Annotations[corev1.MirrorPodAnnotationKey]; found { continue } filteredPods = append(filteredPods, candidatePod) @@ -115,7 +117,7 @@ func (ne *NodeEviction) getFilteredPods() ([]corev1.Pod, error) { return filteredPods, nil } -func (ne *NodeEviction) evictPods(pods []corev1.Pod) []error { +func (ne *NodeEviction) evictPods(ctx context.Context, log *zap.SugaredLogger, pods []corev1.Pod) []error { errCh := make(chan error, len(pods)) 
retErrs := []error{} @@ -131,17 +133,15 @@ func (ne *NodeEviction) evictPods(pods []corev1.Pod) []error { if isDone { return } - err := ne.evictPod(&p) - if err == nil || kerrors.IsNotFound(err) { - klog.V(6).Infof("Successfully evicted pod %s/%s on node %s", p.Namespace, p.Name, ne.nodeName) + err := ne.evictPod(ctx, &p) + if err == nil || apierrors.IsNotFound(err) { + log.Debugw("Successfully evicted pod on node", "pod", ctrlruntimeclient.ObjectKeyFromObject(&p)) return - } else if kerrors.IsTooManyRequests(err) { + } else if apierrors.IsTooManyRequests(err) { // PDB prevents eviction, return and make the controller retry later return - } else { - errCh <- fmt.Errorf("error evicting pod %s/%s on node %s: %w", p.Namespace, p.Name, ne.nodeName, err) - return } + errCh <- fmt.Errorf("error evicting pod %s/%s on node %s: %w", p.Namespace, p.Name, ne.nodeName, err) } }(pod) } @@ -151,22 +151,22 @@ func (ne *NodeEviction) evictPods(pods []corev1.Pod) []error { select { case <-finished: - klog.V(6).Infof("All goroutines for eviction pods on node %s finished", ne.nodeName) + log.Debug("All goroutines for eviction pods on node finished") break case err := <-errCh: - klog.V(6).Infof("Got an error from eviction goroutine for node %s: %v", ne.nodeName, err) + log.Debugw("Got an error from eviction goroutine for node", zap.Error(err)) retErrs = append(retErrs, err) } return retErrs } -func (ne *NodeEviction) evictPod(pod *corev1.Pod) error { - eviction := &policy.Eviction{ +func (ne *NodeEviction) evictPod(ctx context.Context, pod *corev1.Pod) error { + eviction := &policyv1beta1.Eviction{ ObjectMeta: metav1.ObjectMeta{ Name: pod.Name, Namespace: pod.Namespace, }, } - return ne.kubeClient.PolicyV1beta1().Evictions(eviction.Namespace).Evict(ne.ctx, eviction) + return ne.kubeClient.PolicyV1beta1().Evictions(eviction.Namespace).Evict(ctx, eviction) } diff --git a/pkg/node/eviction/eviction_test.go b/pkg/node/eviction/eviction_test.go index 61ed90a5e..481051262 100644 --- 
a/pkg/node/eviction/eviction_test.go +++ b/pkg/node/eviction/eviction_test.go @@ -17,8 +17,11 @@ limitations under the License. package eviction import ( + "context" "testing" + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" @@ -53,10 +56,10 @@ func TestEvictPods(t *testing.T) { for _, pod := range test.Pods { literalPods = append(literalPods, *(pod.(*corev1.Pod))) } - client := kubefake.NewSimpleClientset(test.Pods...) + client := kubefake.NewClientset(test.Pods...) t.Run(test.Name, func(t *testing.T) { ne := &NodeEviction{kubeClient: client, nodeName: "node1"} - if errs := ne.evictPods(literalPods); len(errs) > 0 { + if errs := ne.evictPods(context.Background(), zap.NewNop().Sugar(), literalPods); len(errs) > 0 { t.Fatalf("Got unexpected errors=%v when running evictPods", errs) } diff --git a/pkg/node/flags.go b/pkg/node/flags.go index de560d60f..f101a3afb 100644 --- a/pkg/node/flags.go +++ b/pkg/node/flags.go @@ -22,7 +22,7 @@ import ( "strconv" "strings" - machinecontroller "github.com/kubermatic/machine-controller/pkg/controller/machine" + machinecontroller "k8c.io/machine-controller/pkg/controller/machine" ) func NewFlags(flagset *flag.FlagSet) *Flags { @@ -30,7 +30,6 @@ func NewFlags(flagset *flag.FlagSet) *Flags { FlagSet: flagset, } - settings.BoolVar(&settings.externalCloudProvider, "external-cloud-provider", false, "[DEPRECATED replaced by -node-external-cloud-provider] when set, kubelets will receive --cloud-provider=external flag") settings.BoolVar(&settings.externalCloudProvider, "node-external-cloud-provider", false, "when set, kubelets will receive --cloud-provider=external flag") settings.StringVar(&settings.kubeletFeatureGates, "node-kubelet-feature-gates", "RotateKubeletServerCertificate=true", "Feature gates to set on the kubelet") diff --git a/pkg/node/nodemanager/node_manager.go b/pkg/node/nodemanager/node_manager.go index 1c69b5d90..c4942a12e 100644 --- 
a/pkg/node/nodemanager/node_manager.go +++ b/pkg/node/nodemanager/node_manager.go @@ -29,30 +29,28 @@ import ( ) type NodeManager struct { - ctx context.Context client ctrlruntimeclient.Client nodeName string } -func New(ctx context.Context, client ctrlruntimeclient.Client, nodeName string) *NodeManager { +func New(client ctrlruntimeclient.Client, nodeName string) *NodeManager { return &NodeManager{ - ctx: ctx, client: client, nodeName: nodeName, } } -func (nm *NodeManager) GetNode() (*corev1.Node, error) { +func (nm *NodeManager) GetNode(ctx context.Context) (*corev1.Node, error) { node := &corev1.Node{} - if err := nm.client.Get(nm.ctx, types.NamespacedName{Name: nm.nodeName}, node); err != nil { + if err := nm.client.Get(ctx, types.NamespacedName{Name: nm.nodeName}, node); err != nil { return nil, fmt.Errorf("failed to get node from lister: %w", err) } return node, nil } -func (nm *NodeManager) CordonNode(node *corev1.Node) error { +func (nm *NodeManager) CordonNode(ctx context.Context, node *corev1.Node) error { if !node.Spec.Unschedulable { - _, err := nm.updateNode(func(n *corev1.Node) { + _, err := nm.updateNode(ctx, func(n *corev1.Node) { n.Spec.Unschedulable = true }) if err != nil { @@ -66,9 +64,9 @@ func (nm *NodeManager) CordonNode(node *corev1.Node) error { // that is not the case, there is a small chance the scheduler schedules // pods in between, those will then get deleted upon node deletion and // not evicted - return wait.Poll(1*time.Second, 10*time.Second, func() (bool, error) { + return wait.PollUntilContextTimeout(ctx, 1*time.Second, 10*time.Second, false, func(ctx context.Context) (bool, error) { node := &corev1.Node{} - if err := nm.client.Get(nm.ctx, types.NamespacedName{Name: nm.nodeName}, node); err != nil { + if err := nm.client.Get(ctx, types.NamespacedName{Name: nm.nodeName}, node); err != nil { return false, err } if node.Spec.Unschedulable { @@ -78,16 +76,16 @@ func (nm *NodeManager) CordonNode(node *corev1.Node) error { }) } -func 
(nm *NodeManager) updateNode(modify func(*corev1.Node)) (*corev1.Node, error) { +func (nm *NodeManager) updateNode(ctx context.Context, modify func(*corev1.Node)) (*corev1.Node, error) { node := &corev1.Node{} err := retry.RetryOnConflict(retry.DefaultBackoff, func() error { - if err := nm.client.Get(nm.ctx, types.NamespacedName{Name: nm.nodeName}, node); err != nil { + if err := nm.client.Get(ctx, types.NamespacedName{Name: nm.nodeName}, node); err != nil { return err } // Apply modifications modify(node) // Update the node - return nm.client.Update(nm.ctx, node) + return nm.client.Update(ctx, node) }) return node, err diff --git a/pkg/node/poddeletion/pod_deletion.go b/pkg/node/poddeletion/pod_deletion.go index 6a8ecad62..47dec7e35 100644 --- a/pkg/node/poddeletion/pod_deletion.go +++ b/pkg/node/poddeletion/pod_deletion.go @@ -21,13 +21,14 @@ import ( "fmt" "sync" - "github.com/kubermatic/machine-controller/pkg/node/nodemanager" + "go.uber.org/zap" + + "k8c.io/machine-controller/pkg/node/nodemanager" corev1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" - "k8s.io/klog" ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -37,31 +38,31 @@ const ( type NodeVolumeAttachmentsCleanup struct { nodeManager *nodemanager.NodeManager - ctx context.Context nodeName string kubeClient kubernetes.Interface } // New returns a new NodeVolumeAttachmentsCleanup. 
-func New(ctx context.Context, nodeName string, client ctrlruntimeclient.Client, kubeClient kubernetes.Interface) *NodeVolumeAttachmentsCleanup { +func New(nodeName string, client ctrlruntimeclient.Client, kubeClient kubernetes.Interface) *NodeVolumeAttachmentsCleanup { return &NodeVolumeAttachmentsCleanup{ - nodeManager: nodemanager.New(ctx, client, nodeName), - ctx: ctx, + nodeManager: nodemanager.New(client, nodeName), nodeName: nodeName, kubeClient: kubeClient, } } // Run executes the pod deletion. -func (vc *NodeVolumeAttachmentsCleanup) Run() (bool, bool, error) { - node, err := vc.nodeManager.GetNode() +func (vc *NodeVolumeAttachmentsCleanup) Run(ctx context.Context, log *zap.SugaredLogger) (bool, bool, error) { + node, err := vc.nodeManager.GetNode(ctx) if err != nil { return false, false, fmt.Errorf("failed to get node from lister: %w", err) } - klog.V(3).Infof("Starting to cleanup node %s", vc.nodeName) + + nodeLog := log.With("node", vc.nodeName) + nodeLog.Info("Starting to cleanup node...") // if there are no more volumeAttachments related to the node, then it can be deleted. - volumeAttachmentsDeleted, err := vc.nodeCanBeDeleted() + volumeAttachmentsDeleted, err := vc.nodeCanBeDeleted(ctx, nodeLog) if err != nil { return false, false, fmt.Errorf("failed to check volumeAttachments deletion: %w", err) } @@ -70,42 +71,42 @@ func (vc *NodeVolumeAttachmentsCleanup) Run() (bool, bool, error) { } // cordon the node to be sure that the deleted pods are re-scheduled in the same node. - if err := vc.nodeManager.CordonNode(node); err != nil { + if err := vc.nodeManager.CordonNode(ctx, node); err != nil { return false, false, fmt.Errorf("failed to cordon node %s: %w", vc.nodeName, err) } - klog.V(6).Infof("Successfully cordoned node %s", vc.nodeName) + nodeLog.Debug("Successfully cordoned node.") // get all the pods that needs to be deleted (i.e. those mounting volumes attached to the node that is going to be deleted). 
- podsToDelete, errors := vc.getFilteredPods() + podsToDelete, errors := vc.getFilteredPods(ctx) if len(errors) > 0 { return false, false, fmt.Errorf("failed to get Pods to delete for node %s, errors encountered: %w", vc.nodeName, err) } - klog.V(6).Infof("Found %v pods to delete for node %s", len(podsToDelete), vc.nodeName) + nodeLog.Debugf("Found %d pods to delete for node", len(podsToDelete)) if len(podsToDelete) == 0 { return false, false, nil } // delete the previously filtered pods, then tells the controller to retry later. - if errs := vc.deletePods(podsToDelete); len(errs) > 0 { + if errs := vc.deletePods(ctx, nodeLog, podsToDelete); len(errs) > 0 { return false, false, fmt.Errorf("failed to delete pods, errors encountered: %v", errs) } - klog.V(6).Infof("Successfully deleted all pods mounting persistent volumes attached on node %s", vc.nodeName) + nodeLog.Debug("Successfully deleted all pods mounting persistent volumes attached on node") return true, false, err } -func (vc *NodeVolumeAttachmentsCleanup) getFilteredPods() ([]corev1.Pod, []error) { +func (vc *NodeVolumeAttachmentsCleanup) getFilteredPods(ctx context.Context) ([]corev1.Pod, []error) { filteredPods := []corev1.Pod{} lock := sync.Mutex{} retErrs := []error{} - volumeAttachments, err := vc.kubeClient.StorageV1().VolumeAttachments().List(vc.ctx, metav1.ListOptions{}) + volumeAttachments, err := vc.kubeClient.StorageV1().VolumeAttachments().List(ctx, metav1.ListOptions{}) if err != nil { retErrs = append(retErrs, fmt.Errorf("failed to list pods: %w", err)) return nil, retErrs } - persistentVolumeClaims, err := vc.kubeClient.CoreV1().PersistentVolumeClaims(metav1.NamespaceAll).List(vc.ctx, metav1.ListOptions{}) + persistentVolumeClaims, err := vc.kubeClient.CoreV1().PersistentVolumeClaims(metav1.NamespaceAll).List(ctx, metav1.ListOptions{}) if err != nil { retErrs = append(retErrs, fmt.Errorf("failed to list persistent volumes: %w", err)) return nil, retErrs @@ -120,9 +121,9 @@ func (vc 
*NodeVolumeAttachmentsCleanup) getFilteredPods() ([]corev1.Pod, []error wg.Add(1) go func(pvc corev1.PersistentVolumeClaim) { defer wg.Done() - pods, err := vc.kubeClient.CoreV1().Pods(pvc.Namespace).List(vc.ctx, metav1.ListOptions{}) + pods, err := vc.kubeClient.CoreV1().Pods(pvc.Namespace).List(ctx, metav1.ListOptions{}) switch { - case kerrors.IsTooManyRequests(err): + case apierrors.IsTooManyRequests(err): return case err != nil: errCh <- fmt.Errorf("failed to list pod: %w", err) @@ -151,21 +152,21 @@ func (vc *NodeVolumeAttachmentsCleanup) getFilteredPods() ([]corev1.Pod, []error } // nodeCanBeDeleted checks if all the volumeAttachments related to the node have already been collected by the external CSI driver. -func (vc *NodeVolumeAttachmentsCleanup) nodeCanBeDeleted() (bool, error) { - volumeAttachments, err := vc.kubeClient.StorageV1().VolumeAttachments().List(vc.ctx, metav1.ListOptions{}) +func (vc *NodeVolumeAttachmentsCleanup) nodeCanBeDeleted(ctx context.Context, log *zap.SugaredLogger) (bool, error) { + volumeAttachments, err := vc.kubeClient.StorageV1().VolumeAttachments().List(ctx, metav1.ListOptions{}) if err != nil { return false, fmt.Errorf("error while listing volumeAttachments: %w", err) } for _, va := range volumeAttachments.Items { if va.Spec.NodeName == vc.nodeName { - klog.V(3).Infof("waiting for the volumeAttachment %s to be deleted before deleting node %s", va.Name, vc.nodeName) + log.Infow("Waiting for VolumeAttachment to be deleted before deleting node", "volumeattachment", va.Name) return false, nil } } return true, nil } -func (vc *NodeVolumeAttachmentsCleanup) deletePods(pods []corev1.Pod) []error { +func (vc *NodeVolumeAttachmentsCleanup) deletePods(ctx context.Context, log *zap.SugaredLogger, pods []corev1.Pod) []error { errCh := make(chan error, len(pods)) retErrs := []error{} @@ -181,17 +182,15 @@ func (vc *NodeVolumeAttachmentsCleanup) deletePods(pods []corev1.Pod) []error { if isDone { return } - err := 
vc.kubeClient.CoreV1().Pods(p.Namespace).Delete(vc.ctx, p.Name, metav1.DeleteOptions{}) - if err == nil || kerrors.IsNotFound(err) { - klog.V(6).Infof("Successfully deleted pod %s/%s on node %s", p.Namespace, p.Name, vc.nodeName) + err := vc.kubeClient.CoreV1().Pods(p.Namespace).Delete(ctx, p.Name, metav1.DeleteOptions{}) + if err == nil || apierrors.IsNotFound(err) { + log.Debugw("Successfully deleted pod on node", "pod", ctrlruntimeclient.ObjectKeyFromObject(&p)) return - } else if kerrors.IsTooManyRequests(err) { + } else if apierrors.IsTooManyRequests(err) { // PDB prevents pod deletion, return and make the controller retry later. return - } else { - errCh <- fmt.Errorf("error deleting pod %s/%s on node %s: %w", p.Namespace, p.Name, vc.nodeName, err) - return } + errCh <- fmt.Errorf("error deleting pod %s/%s on node %s: %w", p.Namespace, p.Name, vc.nodeName, err) } }(pod) } diff --git a/pkg/rhsm/satellite_subscription_manager.go b/pkg/rhsm/satellite_subscription_manager.go index 396800941..18dfb6ad2 100644 --- a/pkg/rhsm/satellite_subscription_manager.go +++ b/pkg/rhsm/satellite_subscription_manager.go @@ -26,7 +26,7 @@ import ( "path" "time" - "k8s.io/klog" + "go.uber.org/zap" ) // SatelliteSubscriptionManager manages the communications between machine-controller and redhat satellite server. @@ -37,12 +37,13 @@ type SatelliteSubscriptionManager interface { // DefaultSatelliteSubscriptionManager default manager for redhat satellite server. type DefaultSatelliteSubscriptionManager struct { client *http.Client + log *zap.SugaredLogger useHTTP bool } // NewSatelliteSubscriptionManager creates a new Redhat satellite manager. 
-func NewSatelliteSubscriptionManager() SatelliteSubscriptionManager { +func NewSatelliteSubscriptionManager(log *zap.SugaredLogger) SatelliteSubscriptionManager { client := &http.Client{ Transport: &http.Transport{ TLSClientConfig: &tls.Config{ @@ -55,6 +56,7 @@ func NewSatelliteSubscriptionManager() SatelliteSubscriptionManager { return &DefaultSatelliteSubscriptionManager{ client: client, + log: log, } } @@ -68,15 +70,17 @@ func (s *DefaultSatelliteSubscriptionManager) DeleteSatelliteHost(ctx context.Co maxRetries = 15 ) + machineLog := s.log.With("machine", machineName) + for retries < maxRetries { if err := s.executeDeleteRequest(ctx, machineName, username, password, serverURL); err != nil { - klog.Errorf("failed to execute satellite subscription deletion: %v", err) + machineLog.Errorw("Failed to execute satellite subscription deletion", zap.Error(err)) retries++ time.Sleep(500 * time.Second) continue } - klog.Infof("subscription for machine %s deleted successfully", machineName) + machineLog.Info("Subscription for machine deleted successfully") return nil } @@ -92,7 +96,7 @@ func (s *DefaultSatelliteSubscriptionManager) executeDeleteRequest(ctx context.C requestURL.Host = serverURL requestURL.Path = path.Join("api", "v2", "hosts", machineName) - deleteHostRequest, err := http.NewRequest(http.MethodDelete, requestURL.String(), nil) + deleteHostRequest, err := http.NewRequestWithContext(ctx, http.MethodDelete, requestURL.String(), nil) deleteHostRequest = deleteHostRequest.WithContext(ctx) if err != nil { return fmt.Errorf("failed to create a delete host request: %w", err) @@ -110,7 +114,6 @@ func (s *DefaultSatelliteSubscriptionManager) executeDeleteRequest(ctx context.C return fmt.Errorf("error while executing request with status code: %v", response.StatusCode) } - klog.Infof("host %v has been deleted successfully", machineName) return nil } diff --git a/pkg/rhsm/satellite_subscription_manager_test.go b/pkg/rhsm/satellite_subscription_manager_test.go index 
38c3f33c1..debeab623 100644 --- a/pkg/rhsm/satellite_subscription_manager_test.go +++ b/pkg/rhsm/satellite_subscription_manager_test.go @@ -23,6 +23,8 @@ import ( "net/http/httptest" "net/url" "testing" + + "go.uber.org/zap" ) func TestDefaultRedHatSatelliteManager_DeleteSatelliteHost(t *testing.T) { @@ -48,7 +50,7 @@ func TestDefaultRedHatSatelliteManager_DeleteSatelliteHost(t *testing.T) { tt.testingServer.Close() }() - manager := NewSatelliteSubscriptionManager() + manager := NewSatelliteSubscriptionManager(zap.NewNop().Sugar()) manager.(*DefaultSatelliteSubscriptionManager).useHTTP = true parsedURL, err := url.Parse(tt.testingServer.URL) diff --git a/pkg/rhsm/subscription_manager.go b/pkg/rhsm/subscription_manager.go index 9d73bdac0..64f2da14d 100644 --- a/pkg/rhsm/subscription_manager.go +++ b/pkg/rhsm/subscription_manager.go @@ -25,9 +25,8 @@ import ( "net/http" "time" + "go.uber.org/zap" "golang.org/x/oauth2" - - "k8s.io/klog" ) const defaultTimeout = 10 * time.Second @@ -54,6 +53,7 @@ type systemsResponse struct { } type defaultRedHatSubscriptionManager struct { + log *zap.SugaredLogger apiURL string authURL string requestsLimiter int @@ -61,8 +61,9 @@ type defaultRedHatSubscriptionManager struct { var errUnauthenticatedRequest = errors.New("unauthenticated") -func NewRedHatSubscriptionManager() RedHatSubscriptionManager { +func NewRedHatSubscriptionManager(log *zap.SugaredLogger) RedHatSubscriptionManager { return &defaultRedHatSubscriptionManager{ + log: log, apiURL: "https://api.access.redhat.com/management/v1/systems", authURL: "https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token", requestsLimiter: 100, @@ -102,18 +103,20 @@ func (d *defaultRedHatSubscriptionManager) UnregisterInstance(ctx context.Contex return fmt.Errorf("failed to find system profile: %w", err) } + machineLog := d.log.With("uuid", machineUUID) + if machineUUID == "" { - klog.Infof("machine uuid %s is not found", machineUUID) + machineLog.Info("Machine UUID 
was not found") return nil } err = d.deleteSubscription(ctx, machineUUID, offlineToken) if err == nil { - klog.Infof("subscription for vm %v has been deleted successfully", machineUUID) + machineLog.Info("Subscription for VM has been deleted successfully") return nil } - klog.Errorf("failed to delete subscription for system: %s due to: %v", machineUUID, err) + machineLog.Errorw("Failed to delete subscription for system:", zap.Error(err)) time.Sleep(2 * time.Second) retries++ } @@ -142,13 +145,12 @@ func (d *defaultRedHatSubscriptionManager) findSystemsProfile(ctx context.Contex offset += len(systemsInfo.Body) } - klog.Infof("no machine name %s is found", name) return "", nil } func (d *defaultRedHatSubscriptionManager) deleteSubscription(ctx context.Context, uuid, offlineToken string) error { client := newOAuthClientWithRefreshToken(ctx, offlineToken, d.authURL) - req, err := http.NewRequest("DELETE", fmt.Sprintf("%s/%s", d.apiURL, uuid), nil) + req, err := http.NewRequestWithContext(ctx, "DELETE", fmt.Sprintf("%s/%s", d.apiURL, uuid), nil) if err != nil { return fmt.Errorf("failed to create delete system request: %w", err) } @@ -177,7 +179,7 @@ func (d *defaultRedHatSubscriptionManager) deleteSubscription(ctx context.Contex func (d *defaultRedHatSubscriptionManager) executeFindSystemsRequest(ctx context.Context, offlineToken string, offset int) (*systemsResponse, error) { client := newOAuthClientWithRefreshToken(ctx, offlineToken, d.authURL) - req, err := http.NewRequest("GET", fmt.Sprintf(d.apiURL+"?limit=%v&offset=%v", d.requestsLimiter, offset), nil) + req, err := http.NewRequestWithContext(ctx, "GET", fmt.Sprintf(d.apiURL+"?limit=%v&offset=%v", d.requestsLimiter, offset), nil) if err != nil { return nil, fmt.Errorf("failed to create fetch systems request: %w", err) } diff --git a/pkg/rhsm/subscription_manager_test.go b/pkg/rhsm/subscription_manager_test.go index e081401fd..64736b951 100644 --- a/pkg/rhsm/subscription_manager_test.go +++ 
b/pkg/rhsm/subscription_manager_test.go @@ -22,6 +22,8 @@ import ( "net/http" "net/http/httptest" "testing" + + "go.uber.org/zap" ) var ( @@ -58,7 +60,7 @@ func TestDefaultRedHatSubscriptionManager_UnregisterInstance(t *testing.T) { defer func() { tt.testingServer.Close() }() - manager := NewRedHatSubscriptionManager() + manager := NewRedHatSubscriptionManager(zap.NewNop().Sugar()) manager.(*defaultRedHatSubscriptionManager).apiURL = tt.testingServer.URL + apiPath manager.(*defaultRedHatSubscriptionManager).authURL = tt.testingServer.URL manager.(*defaultRedHatSubscriptionManager).requestsLimiter = tt.requestLimiter @@ -75,6 +77,9 @@ func createTestingServer(pagination bool) *httptest.Server { processedRequest = 1 result string ) + + const resultPrefix = "{\"pagination\": {\"offset\": 0, \"limit\": 2,\"count\": 5}, \"body\": [" + return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { case authPath: @@ -84,17 +89,17 @@ func createTestingServer(pagination bool) *httptest.Server { if pagination { switch processedRequest { case 1: - result = "{\"pagination\": {\"offset\": 0, \"limit\": 2,\"count\": 5}, \"body\": [" + + result = resultPrefix + "{\"name\": \"test-machine-1\", \"uuid\": \"4a3ee8d7-337d-4cef-a20c-dda011f28f96\"}," + "{\"name\": \"test-machine-2\", \"uuid\": \"4a3ee8d7-337d-4cef-a20c-dda011f28f91\"}" + "]}" case 2: - result = "{\"pagination\": {\"offset\": 0, \"limit\": 2,\"count\": 5}, \"body\": [" + + result = resultPrefix + "{\"name\": \"test-machine-3\", \"uuid\": \"4a3ee8d7-337d-4cef-a20c-dda011f28f98\"}," + "{\"name\": \"test-machine-4\", \"uuid\": \"4a3ee8d7-337d-4cef-a20c-dda011f28f95\"}" + "]}" case 3: - result = "{\"pagination\": {\"offset\": 0, \"limit\": 2,\"count\": 5}, \"body\": [" + + result = resultPrefix + "{\"name\": \"test-machine-5\", \"uuid\": \"4a3ee8d7-337d-4cef-a20c-dda011f28f99\"}" + "]}" } diff --git a/pkg/rhsm/util.go b/pkg/rhsm/util.go index 7fbf8f781..823c85ee2 100644 --- 
a/pkg/rhsm/util.go +++ b/pkg/rhsm/util.go @@ -17,9 +17,9 @@ limitations under the License. package rhsm import ( - "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/types" - kuberneteshelper "github.com/kubermatic/machine-controller/pkg/kubernetes" + "k8c.io/machine-controller/pkg/cloudprovider/types" + kuberneteshelper "k8c.io/machine-controller/pkg/kubernetes" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" ) const ( @@ -27,10 +27,10 @@ const ( ) // AddRHELSubscriptionFinalizer adds finalizer RedhatSubscriptionFinalizer to the machine object on rhel machine creation. -func AddRHELSubscriptionFinalizer(machine *v1alpha1.Machine, update types.MachineUpdater) error { +func AddRHELSubscriptionFinalizer(machine *clusterv1alpha1.Machine, update types.MachineUpdater) error { if !kuberneteshelper.HasFinalizer(machine, RedhatSubscriptionFinalizer) { - if err := update(machine, func(m *v1alpha1.Machine) { - machine.Finalizers = append(m.Finalizers, RedhatSubscriptionFinalizer) + if err := update(machine, func(m *clusterv1alpha1.Machine) { + m.Finalizers = append(m.Finalizers, RedhatSubscriptionFinalizer) }); err != nil { return err } @@ -40,10 +40,10 @@ func AddRHELSubscriptionFinalizer(machine *v1alpha1.Machine, update types.Machin } // RemoveRHELSubscriptionFinalizer removes finalizer RedhatSubscriptionFinalizer to the machine object on rhel machine deletion. 
-func RemoveRHELSubscriptionFinalizer(machine *v1alpha1.Machine, update types.MachineUpdater) error { +func RemoveRHELSubscriptionFinalizer(machine *clusterv1alpha1.Machine, update types.MachineUpdater) error { if kuberneteshelper.HasFinalizer(machine, RedhatSubscriptionFinalizer) { - if err := update(machine, func(m *v1alpha1.Machine) { - machine.Finalizers = kuberneteshelper.RemoveFinalizer(machine.Finalizers, RedhatSubscriptionFinalizer) + if err := update(machine, func(m *clusterv1alpha1.Machine) { + m.Finalizers = kuberneteshelper.RemoveFinalizer(m.Finalizers, RedhatSubscriptionFinalizer) }); err != nil { return err } diff --git a/pkg/userdata/amzn2/provider.go b/pkg/userdata/amzn2/provider.go deleted file mode 100644 index 67fb2f115..000000000 --- a/pkg/userdata/amzn2/provider.go +++ /dev/null @@ -1,337 +0,0 @@ -/* -Copyright 2021 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// -// UserData plugin for Amazon Linux 2. -// - -package amzn2 - -import ( - "errors" - "fmt" - "strings" - "text/template" - - "github.com/Masterminds/semver/v3" - - "github.com/kubermatic/machine-controller/pkg/apis/plugin" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" - userdatahelper "github.com/kubermatic/machine-controller/pkg/userdata/helper" -) - -// Provider is a pkg/userdata/plugin.Provider implementation. -type Provider struct{} - -// UserData renders user-data template to string. 
-func (p Provider) UserData(req plugin.UserDataRequest) (string, error) { - tmpl, err := template.New("user-data").Funcs(userdatahelper.TxtFuncMap()).Parse(userDataTemplate) - if err != nil { - return "", fmt.Errorf("failed to parse user-data template: %w", err) - } - - kubeletVersion, err := semver.NewVersion(req.MachineSpec.Versions.Kubelet) - if err != nil { - return "", fmt.Errorf("invalid kubelet version: %w", err) - } - - pconfig, err := providerconfigtypes.GetConfig(req.MachineSpec.ProviderSpec) - if err != nil { - return "", fmt.Errorf("failed to get provider config: %w", err) - } - - if pconfig.OverwriteCloudConfig != nil { - req.CloudConfig = *pconfig.OverwriteCloudConfig - } - - if pconfig.Network.IsStaticIPConfig() { - return "", errors.New("static IP config is not supported with Amazon Linux 2") - } - - amznConfig, err := LoadConfig(pconfig.OperatingSystemSpec) - if err != nil { - return "", fmt.Errorf("failed to parse OperatingSystemSpec: %w", err) - } - - kubeconfigString, err := userdatahelper.StringifyKubeconfig(req.Kubeconfig) - if err != nil { - return "", err - } - - kubernetesCACert, err := userdatahelper.GetCACert(req.Kubeconfig) - if err != nil { - return "", fmt.Errorf("error extracting cacert: %w", err) - } - - crEngine := req.ContainerRuntime.Engine(kubeletVersion) - crScript, err := crEngine.ScriptFor(providerconfigtypes.OperatingSystemAmazonLinux2) - if err != nil { - return "", fmt.Errorf("failed to generate container runtime install script: %w", err) - } - - crConfig, err := crEngine.Config() - if err != nil { - return "", fmt.Errorf("failed to generate container runtime config: %w", err) - } - - crAuthConfig, err := crEngine.AuthConfig() - if err != nil { - return "", fmt.Errorf("failed to generate container runtime auth config: %w", err) - } - - data := struct { - plugin.UserDataRequest - ProviderSpec *providerconfigtypes.Config - OSConfig *Config - KubeletVersion string - Kubeconfig string - KubernetesCACert string - NodeIPScript 
string - ExtraKubeletFlags []string - ContainerRuntimeScript string - ContainerRuntimeConfigFileName string - ContainerRuntimeConfig string - ContainerRuntimeAuthConfigFileName string - ContainerRuntimeAuthConfig string - ContainerRuntimeName string - }{ - UserDataRequest: req, - ProviderSpec: pconfig, - OSConfig: amznConfig, - KubeletVersion: kubeletVersion.String(), - Kubeconfig: kubeconfigString, - KubernetesCACert: kubernetesCACert, - NodeIPScript: userdatahelper.SetupNodeIPEnvScript(pconfig.Network.GetIPFamily()), - ExtraKubeletFlags: crEngine.KubeletFlags(), - ContainerRuntimeScript: crScript, - ContainerRuntimeConfigFileName: crEngine.ConfigFileName(), - ContainerRuntimeConfig: crConfig, - ContainerRuntimeAuthConfigFileName: crEngine.AuthConfigFileName(), - ContainerRuntimeAuthConfig: crAuthConfig, - ContainerRuntimeName: crEngine.String(), - } - - buf := strings.Builder{} - if err = tmpl.Execute(&buf, data); err != nil { - return "", fmt.Errorf("failed to execute user-data template: %w", err) - } - - return userdatahelper.CleanupTemplateOutput(buf.String()) -} - -// UserData template. -const userDataTemplate = `#cloud-config -{{ if ne .CloudProviderName "aws" }} -hostname: {{ .MachineSpec.Name }} -{{- /* Never set the hostname on AWS nodes. Kubernetes(kube-proxy) requires the hostname to be the private dns name */}} -{{ end }} - -{{- if .OSConfig.DistUpgradeOnBoot }} -package_upgrade: true -package_reboot_if_required: true -{{- end }} - -ssh_pwauth: false - -{{- if ne (len .ProviderSpec.SSHPublicKeys) 0 }} -ssh_authorized_keys: -{{- range .ProviderSpec.SSHPublicKeys }} - - "{{ . 
}}" -{{- end }} -{{- end }} - -write_files: -{{- if .HTTPProxy }} -- path: "/etc/environment" - content: | -{{ proxyEnvironment .HTTPProxy .NoProxy | indent 4 }} -{{- end }} - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | -{{ journalDConfig | indent 4 }} - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | -{{ kernelModulesScript | indent 4 }} - -- path: "/etc/sysctl.d/k8s.conf" - content: | -{{ kernelSettings | indent 4 }} - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - -{{- /* As we added some modules and don't want to reboot, restart the service */}} - systemctl restart systemd-modules-load.service - sysctl --system - {{ if ne .CloudProviderName "aws" }} -{{- /* The normal way of setting it via cloud-init is broken, see */}} -{{- /* https://bugs.launchpad.net/cloud-init/+bug/1662542 */}} - hostnamectl set-hostname {{ .MachineSpec.Name }} - {{ end }} - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - {{- if or (eq .CloudProviderName "vsphere") (eq .CloudProviderName "vmware-cloud-director") }} - open-vm-tools \ - {{- end }} - ipvsadm - -{{ .ContainerRuntimeScript | indent 4 }} - -{{ safeDownloadBinariesScript .KubeletVersion | indent 4 }} - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - {{ if eq .CloudProviderName "vsphere" }} - systemctl enable --now vmtoolsd.service - {{ end -}} - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | -{{ kubeletSystemdUnit .ContainerRuntimeName .KubeletVersion .KubeletCloudProviderName .MachineSpec.Name .DNSIPs .ExternalCloudProvider .ProviderSpec.Network.GetIPFamily .PauseImage .MachineSpec.Taints .ExtraKubeletFlags true | indent 4 }} - -{{- if ne (len .CloudConfig) 0 }} -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | -{{ .CloudConfig | indent 4 }} -{{- end }} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | -{{ .NodeIPScript | indent 4 }} - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | -{{ .Kubeconfig | indent 4 }} - -- path: "/etc/kubernetes/kubelet.conf" - content: | -{{ kubeletConfiguration "cluster.local" .DNSIPs .KubeletFeatureGates .KubeletConfigs .ContainerRuntimeName | indent 4 }} - -- path: "/etc/kubernetes/pki/ca.crt" - content: | -{{ .KubernetesCACert | indent 4 }} - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: {{ .ContainerRuntimeConfigFileName }} - permissions: "0644" - content: | -{{ .ContainerRuntimeConfig | indent 4 }} - -{{- if and (eq .ContainerRuntimeName "docker") .ContainerRuntimeAuthConfig }} - -- path: {{ .ContainerRuntimeAuthConfigFileName }} - permissions: "0600" - content: | -{{ .ContainerRuntimeAuthConfig | indent 4 }} -{{- end }} - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | -{{ kubeletHealthCheckSystemdUnit | indent 4 }} - -{{- with .ProviderSpec.CAPublicKey }} - -- path: 
"/etc/ssh/trusted-user-ca-keys.pem" - content: | -{{ . | indent 4 }} - -- path: "/etc/ssh/sshd_config" - content: | -{{ sshConfigAddendum | indent 4 }} - append: true -{{- end }} - -runcmd: -- systemctl enable --now setup.service -` diff --git a/pkg/userdata/amzn2/provider_test.go b/pkg/userdata/amzn2/provider_test.go deleted file mode 100644 index 4f656d1d4..000000000 --- a/pkg/userdata/amzn2/provider_test.go +++ /dev/null @@ -1,276 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// -// UserData plugin for Amazon Linux 2. 
-// - -package amzn2 - -import ( - "flag" - "net" - "testing" - - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/apis/plugin" - "github.com/kubermatic/machine-controller/pkg/containerruntime" - testhelper "github.com/kubermatic/machine-controller/pkg/test" - "github.com/kubermatic/machine-controller/pkg/userdata/convert" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" -) - -var ( - update = flag.Bool("update", false, "update testdata files") - - pemCertificate = `-----BEGIN CERTIFICATE----- -MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV -BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG -A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 -DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 -NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG -cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv -c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B -AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS -R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT -ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk -JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 -mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW -caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G -A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt -hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB -MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES -MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv -bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h -U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao -eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 
-UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD -58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n -sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF -kPe6XoSbiLm/kxk32T0= ------END CERTIFICATE-----` -) - -// fakeCloudConfigProvider simulates cloud config provider for test. -type fakeCloudConfigProvider struct { - config string - name string - err error -} - -func (p *fakeCloudConfigProvider) GetCloudConfig(spec clusterv1alpha1.MachineSpec) (config string, name string, err error) { - return p.config, p.name, p.err -} - -// userDataTestCase contains the data for a table-driven test. -type userDataTestCase struct { - name string - spec clusterv1alpha1.MachineSpec - clusterDNSIPs []net.IP - cloudProviderName *string - externalCloudProvider bool - httpProxy string - noProxy string - insecureRegistries string - registryMirrors string - pauseImage string - containerruntime string -} - -// TestUserDataGeneration runs the data generation for different -// environments. 
-func TestUserDataGeneration(t *testing.T) { - t.Parallel() - - tests := []userDataTestCase{ - { - name: "kubelet-v1.24.9-aws", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.9", - }, - }, - }, - { - name: "kubelet-v1.24.9-aws-external", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.9", - }, - }, - externalCloudProvider: true, - }, - { - name: "kubelet-v1.24.9-vsphere", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.9", - }, - }, - cloudProviderName: stringPtr("vsphere"), - }, - { - name: "kubelet-v1.24.9-vsphere-proxy", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.9", - }, - }, - cloudProviderName: stringPtr("vsphere"), - httpProxy: "http://192.168.100.100:3128", - noProxy: "192.168.1.0", - insecureRegistries: "192.168.100.100:5000, 10.0.0.1:5000", - pauseImage: "192.168.100.100:5000/kubernetes/pause:v3.1", - }, - { - name: "kubelet-v1.24.9-vsphere-mirrors", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.9", - }, - }, - cloudProviderName: stringPtr("vsphere"), - httpProxy: "http://192.168.100.100:3128", - noProxy: "192.168.1.0", - registryMirrors: "https://registry.docker-cn.com", - pauseImage: "192.168.100.100:5000/kubernetes/pause:v3.1", - }, - { - name: "kubelet-v1.24-aws", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.0", - }, - }, - }, - { - name: "kubelet-v1.25-aws", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: 
"node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.25.0", - }, - }, - }, - { - name: "kubelet-v1.26-aws", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.26.0", - }, - }, - }, - } - - defaultCloudProvider := &fakeCloudConfigProvider{ - name: "aws", - config: "{aws-config:true}", - err: nil, - } - kubeconfig := &clientcmdapi.Config{ - Clusters: map[string]*clientcmdapi.Cluster{ - "": { - Server: "https://server:443", - CertificateAuthorityData: []byte(pemCertificate), - }, - }, - AuthInfos: map[string]*clientcmdapi.AuthInfo{ - "": { - Token: "my-token", - }, - }, - } - provider := Provider{} - - kubeletFeatureGates := map[string]bool{ - "RotateKubeletServerCertificate": true, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - emptyProviderSpec := clusterv1alpha1.ProviderSpec{ - Value: &runtime.RawExtension{}, - } - test.spec.ProviderSpec = emptyProviderSpec - var cloudProvider *fakeCloudConfigProvider - if test.cloudProviderName != nil { - cloudProvider = &fakeCloudConfigProvider{ - name: *test.cloudProviderName, - config: "{config:true}", - err: nil, - } - } else { - cloudProvider = defaultCloudProvider - } - cloudConfig, cloudProviderName, err := cloudProvider.GetCloudConfig(test.spec) - if err != nil { - t.Fatalf("failed to get cloud config: %v", err) - } - - containerRuntimeOpts := containerruntime.Opts{ - ContainerRuntime: test.containerruntime, - InsecureRegistries: test.insecureRegistries, - RegistryMirrors: test.registryMirrors, - } - containerRuntimeConfig, err := containerruntime.BuildConfig(containerRuntimeOpts) - if err != nil { - t.Fatalf("failed to generate container runtime config: %v", err) - } - - req := plugin.UserDataRequest{ - MachineSpec: test.spec, - Kubeconfig: kubeconfig, - CloudConfig: cloudConfig, - CloudProviderName: cloudProviderName, - KubeletCloudProviderName: cloudProviderName, - DNSIPs: 
test.clusterDNSIPs, - ExternalCloudProvider: test.externalCloudProvider, - HTTPProxy: test.httpProxy, - NoProxy: test.noProxy, - PauseImage: test.pauseImage, - KubeletFeatureGates: kubeletFeatureGates, - ContainerRuntime: containerRuntimeConfig, - } - - s, err := provider.UserData(req) - if err != nil { - t.Errorf("error getting userdata: '%v'", err) - } - - // Check if we can gzip it. - if _, err := convert.GzipString(s); err != nil { - t.Fatal(err) - } - goldenName := test.name + ".yaml" - testhelper.CompareOutput(t, goldenName, s, *update) - }) - } -} - -// stringPtr returns pointer to given string. -func stringPtr(a string) *string { - return &a -} diff --git a/pkg/userdata/amzn2/testdata/kubelet-v1.24-aws.yaml b/pkg/userdata/amzn2/testdata/kubelet-v1.24-aws.yaml deleted file mode 100644 index 6cbbe4ad6..000000000 --- a/pkg/userdata/amzn2/testdata/kubelet-v1.24-aws.yaml +++ /dev/null @@ -1,453 +0,0 @@ -#cloud-config - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. 
- # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. - SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - ipvsadm - - - mkdir -p /etc/systemd/system/containerd.service.d - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! 
"$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=aws \ - --cloud-config=/etc/kubernetes/cloud-config \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {aws-config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP 
address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VG
ZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - 
nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - 
MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - 
WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/amzn2/testdata/kubelet-v1.24.9-aws-external.yaml b/pkg/userdata/amzn2/testdata/kubelet-v1.24.9-aws-external.yaml deleted file mode 100644 index 0e2a2237b..000000000 --- a/pkg/userdata/amzn2/testdata/kubelet-v1.24.9-aws-external.yaml +++ /dev/null @@ -1,453 +0,0 @@ -#cloud-config - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - ipvsadm - - - mkdir -p /etc/systemd/system/containerd.service.d - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=external \ - --hostname-override=${KUBELET_HOSTNAME} \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {aws-config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e 
"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVM
KTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - 
shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - 
U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/amzn2/testdata/kubelet-v1.24.9-aws.yaml b/pkg/userdata/amzn2/testdata/kubelet-v1.24.9-aws.yaml deleted file mode 
100644 index 43a9853b2..000000000 --- a/pkg/userdata/amzn2/testdata/kubelet-v1.24.9-aws.yaml +++ /dev/null @@ -1,453 +0,0 @@ -#cloud-config - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - ipvsadm - - - mkdir -p /etc/systemd/system/containerd.service.d - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=aws \ - --cloud-config=/etc/kubernetes/cloud-config \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {aws-config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e 
"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVM
KTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - 
shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - 
U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/amzn2/testdata/kubelet-v1.24.9-vsphere-mirrors.yaml 
b/pkg/userdata/amzn2/testdata/kubelet-v1.24.9-vsphere-mirrors.yaml deleted file mode 100644 index b90489c90..000000000 --- a/pkg/userdata/amzn2/testdata/kubelet-v1.24.9-vsphere-mirrors.yaml +++ /dev/null @@ -1,470 +0,0 @@ -#cloud-config - -hostname: node1 - - -ssh_pwauth: false - -write_files: -- path: "/etc/environment" - content: | - HTTP_PROXY=http://192.168.100.100:3128 - http_proxy=http://192.168.100.100:3128 - HTTPS_PROXY=http://192.168.100.100:3128 - https_proxy=http://192.168.100.100:3128 - NO_PROXY=192.168.1.0 - no_proxy=192.168.1.0 - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - hostnamectl set-hostname node1 - - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - open-vm-tools \ - ipvsadm - - - mkdir -p /etc/systemd/system/containerd.service.d - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - - systemctl enable --now vmtoolsd.service - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=vsphere \ - --cloud-config=/etc/kubernetes/cloud-config \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --pod-infra-container-image=192.168.100.100:5000/kubernetes/pause:v3.1 \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if 
grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnB
jMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - 
shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - 
U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry.docker-cn.com"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/amzn2/testdata/kubelet-v1.24.9-vsphere-proxy.yaml 
b/pkg/userdata/amzn2/testdata/kubelet-v1.24.9-vsphere-proxy.yaml deleted file mode 100644 index 5e0a08c15..000000000 --- a/pkg/userdata/amzn2/testdata/kubelet-v1.24.9-vsphere-proxy.yaml +++ /dev/null @@ -1,477 +0,0 @@ -#cloud-config - -hostname: node1 - - -ssh_pwauth: false - -write_files: -- path: "/etc/environment" - content: | - HTTP_PROXY=http://192.168.100.100:3128 - http_proxy=http://192.168.100.100:3128 - HTTPS_PROXY=http://192.168.100.100:3128 - https_proxy=http://192.168.100.100:3128 - NO_PROXY=192.168.1.0 - no_proxy=192.168.1.0 - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - hostnamectl set-hostname node1 - - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - open-vm-tools \ - ipvsadm - - - mkdir -p /etc/systemd/system/containerd.service.d - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - - systemctl enable --now vmtoolsd.service - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=vsphere \ - --cloud-config=/etc/kubernetes/cloud-config \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --pod-infra-container-image=192.168.100.100:5000/kubernetes/pause:v3.1 \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if 
grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnB
jMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - 
shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - 
U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - [plugins."io.containerd.grpc.v1.cri".registry.configs] - [plugins."io.containerd.grpc.v1.cri".registry.configs."10.0.0.1:5000"] - [plugins."io.containerd.grpc.v1.cri".registry.configs."10.0.0.1:5000".tls] - insecure_skip_verify = true - [plugins."io.containerd.grpc.v1.cri".registry.configs."192.168.100.100:5000"] - [plugins."io.containerd.grpc.v1.cri".registry.configs."192.168.100.100:5000".tls] - insecure_skip_verify = true - - -- path: 
/etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/amzn2/testdata/kubelet-v1.24.9-vsphere.yaml b/pkg/userdata/amzn2/testdata/kubelet-v1.24.9-vsphere.yaml deleted file mode 100644 index 880f0ec4b..000000000 --- a/pkg/userdata/amzn2/testdata/kubelet-v1.24.9-vsphere.yaml +++ /dev/null @@ -1,461 +0,0 @@ -#cloud-config - -hostname: node1 - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - hostnamectl set-hostname node1 - - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - open-vm-tools \ - ipvsadm - - - mkdir -p /etc/systemd/system/containerd.service.d - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - - systemctl enable --now vmtoolsd.service - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=vsphere \ - --cloud-config=/etc/kubernetes/cloud-config \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e 
"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVM
KTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - 
shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - 
U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/amzn2/testdata/kubelet-v1.25-aws.yaml b/pkg/userdata/amzn2/testdata/kubelet-v1.25-aws.yaml deleted file mode 
100644 index 318b1b646..000000000 --- a/pkg/userdata/amzn2/testdata/kubelet-v1.25-aws.yaml +++ /dev/null @@ -1,453 +0,0 @@ -#cloud-config - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - ipvsadm - - - mkdir -p /etc/systemd/system/containerd.service.d - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=aws \ - --cloud-config=/etc/kubernetes/cloud-config \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {aws-config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e 
"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVM
KTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - 
shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - 
U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/amzn2/testdata/kubelet-v1.26-aws.yaml b/pkg/userdata/amzn2/testdata/kubelet-v1.26-aws.yaml deleted file mode 
100644 index 4115f30da..000000000 --- a/pkg/userdata/amzn2/testdata/kubelet-v1.26-aws.yaml +++ /dev/null @@ -1,453 +0,0 @@ -#cloud-config - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - ipvsadm - - - mkdir -p /etc/systemd/system/containerd.service.d - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=aws \ - --cloud-config=/etc/kubernetes/cloud-config \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {aws-config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e 
"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVM
KTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - 
shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - 
U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/centos/centos.go b/pkg/userdata/centos/centos.go deleted file mode 100644 index 0350c43b5..000000000 --- 
a/pkg/userdata/centos/centos.go +++ /dev/null @@ -1,59 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package centos - -import ( - "encoding/json" - - "k8s.io/apimachinery/pkg/runtime" -) - -// Config contains specific configuration for CentOS. -type Config struct { - DistUpgradeOnBoot bool `json:"distUpgradeOnBoot"` -} - -func DefaultConfig(operatingSystemSpec runtime.RawExtension) runtime.RawExtension { - if operatingSystemSpec.Raw == nil { - operatingSystemSpec.Raw, _ = json.Marshal(Config{}) - } - - return operatingSystemSpec -} - -// LoadConfig retrieves the CentOS configuration from raw data. -func LoadConfig(r runtime.RawExtension) (*Config, error) { - r = DefaultConfig(r) - cfg := Config{} - - if err := json.Unmarshal(r.Raw, &cfg); err != nil { - return nil, err - } - return &cfg, nil -} - -// Spec return the configuration as raw data. -func (cfg *Config) Spec() (*runtime.RawExtension, error) { - ext := &runtime.RawExtension{} - b, err := json.Marshal(cfg) - if err != nil { - return nil, err - } - - ext.Raw = b - return ext, nil -} diff --git a/pkg/userdata/centos/provider.go b/pkg/userdata/centos/provider.go deleted file mode 100644 index 353e8ed68..000000000 --- a/pkg/userdata/centos/provider.go +++ /dev/null @@ -1,391 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// -// UserData plugin for CentOS. -// - -package centos - -import ( - "errors" - "fmt" - "strings" - "text/template" - - "github.com/Masterminds/semver/v3" - - "github.com/kubermatic/machine-controller/pkg/apis/plugin" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" - userdatahelper "github.com/kubermatic/machine-controller/pkg/userdata/helper" -) - -// Provider is a pkg/userdata/plugin.Provider implementation. -type Provider struct{} - -// UserData renders user-data template to string. -func (p Provider) UserData(req plugin.UserDataRequest) (string, error) { - tmpl, err := template.New("user-data").Funcs(userdatahelper.TxtFuncMap()).Parse(userDataTemplate) - if err != nil { - return "", fmt.Errorf("failed to parse user-data template: %w", err) - } - - kubeletVersion, err := semver.NewVersion(req.MachineSpec.Versions.Kubelet) - if err != nil { - return "", fmt.Errorf("invalid kubelet version: %w", err) - } - - pconfig, err := providerconfigtypes.GetConfig(req.MachineSpec.ProviderSpec) - if err != nil { - return "", fmt.Errorf("failed to get provider config: %w", err) - } - - if pconfig.OverwriteCloudConfig != nil { - req.CloudConfig = *pconfig.OverwriteCloudConfig - } - - if pconfig.Network.IsStaticIPConfig() { - return "", errors.New("static IP config is not supported with CentOS") - } - - centosConfig, err := LoadConfig(pconfig.OperatingSystemSpec) - if err != nil { - return "", fmt.Errorf("failed to parse OperatingSystemSpec: %w", err) - } - - kubeconfigString, err := userdatahelper.StringifyKubeconfig(req.Kubeconfig) - 
if err != nil { - return "", err - } - - kubernetesCACert, err := userdatahelper.GetCACert(req.Kubeconfig) - if err != nil { - return "", fmt.Errorf("error extracting cacert: %w", err) - } - - crEngine := req.ContainerRuntime.Engine(kubeletVersion) - crScript, err := crEngine.ScriptFor(providerconfigtypes.OperatingSystemCentOS) - if err != nil { - return "", fmt.Errorf("failed to generate container runtime install script: %w", err) - } - - crConfig, err := crEngine.Config() - if err != nil { - return "", fmt.Errorf("failed to generate container runtime config: %w", err) - } - - crAuthConfig, err := crEngine.AuthConfig() - if err != nil { - return "", fmt.Errorf("failed to generate container runtime auth config: %w", err) - } - - data := struct { - plugin.UserDataRequest - ProviderSpec *providerconfigtypes.Config - OSConfig *Config - KubeletVersion string - Kubeconfig string - KubernetesCACert string - NodeIPScript string - ExtraKubeletFlags []string - ContainerRuntimeScript string - ContainerRuntimeConfigFileName string - ContainerRuntimeConfig string - ContainerRuntimeAuthConfigFileName string - ContainerRuntimeAuthConfig string - ContainerRuntimeName string - }{ - UserDataRequest: req, - ProviderSpec: pconfig, - OSConfig: centosConfig, - KubeletVersion: kubeletVersion.String(), - Kubeconfig: kubeconfigString, - KubernetesCACert: kubernetesCACert, - NodeIPScript: userdatahelper.SetupNodeIPEnvScript(pconfig.Network.GetIPFamily()), - ExtraKubeletFlags: crEngine.KubeletFlags(), - ContainerRuntimeScript: crScript, - ContainerRuntimeConfigFileName: crEngine.ConfigFileName(), - ContainerRuntimeConfig: crConfig, - ContainerRuntimeAuthConfigFileName: crEngine.AuthConfigFileName(), - ContainerRuntimeAuthConfig: crAuthConfig, - ContainerRuntimeName: crEngine.String(), - } - - buf := strings.Builder{} - if err = tmpl.Execute(&buf, data); err != nil { - return "", fmt.Errorf("failed to execute user-data template: %w", err) - } - - return 
userdatahelper.CleanupTemplateOutput(buf.String()) -} - -// UserData template. -const userDataTemplate = `#cloud-config -{{ if ne .CloudProviderName "aws" }} -hostname: {{ .MachineSpec.Name }} -{{- /* Never set the hostname on AWS nodes. Kubernetes(kube-proxy) requires the hostname to be the private dns name */}} -{{ end }} - -{{- if .OSConfig.DistUpgradeOnBoot }} -package_upgrade: true -package_reboot_if_required: true -{{- end }} - -ssh_pwauth: false - -{{- if ne (len .ProviderSpec.SSHPublicKeys) 0 }} -ssh_authorized_keys: -{{- range .ProviderSpec.SSHPublicKeys }} - - "{{ . }}" -{{- end }} -{{- end }} - -write_files: -{{- if .HTTPProxy }} -- path: "/etc/environment" - content: | -{{ proxyEnvironment .HTTPProxy .NoProxy | indent 4 }} -{{- end }} - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | -{{ journalDConfig | indent 4 }} - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | -{{ kernelModulesScript | indent 4 }} - -- path: "/etc/sysctl.d/k8s.conf" - content: | -{{ kernelSettings | indent 4 }} - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - -{{- /* As we added some modules and don't want to reboot, restart the service */}} - systemctl restart systemd-modules-load.service - sysctl --system - - {{ if ne .CloudProviderName "aws" }} -{{- /* The normal way of setting it via cloud-init is broken, see */}} -{{- /* https://bugs.launchpad.net/cloud-init/+bug/1662542 */}} - hostnamectl set-hostname {{ .MachineSpec.Name }} - {{ end }} - -{{- /* CentOS 8 has reached EOL and all packages were moved to vault.centos.org -- https://www.centos.org/centos-linux-eol/ */}} - source /etc/os-release - if [ "$ID" == "centos" ] && [ "$VERSION_ID" == "8" ]; then - sudo sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-* - sudo sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-* - fi - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - {{- if or (eq .CloudProviderName "vsphere") (eq .CloudProviderName "vmware-cloud-director") }} - open-vm-tools \ - {{- end }} - {{- if eq .CloudProviderName "nutanix" }} - iscsi-initiator-utils \ - {{- end }} - ipvsadm - - {{- /* iscsid service is required on Nutanix machines for CSI driver to attach volumes. 
*/}} - {{- if eq .CloudProviderName "nutanix" }} - systemctl enable --now iscsid - {{ end }} -{{ .ContainerRuntimeScript | indent 4 }} - -{{ safeDownloadBinariesScript .KubeletVersion | indent 4 }} - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - {{ if eq .CloudProviderName "vsphere" }} - systemctl enable --now vmtoolsd.service - {{ end -}} - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - {{- if eq .CloudProviderName "kubevirt" }} - systemctl enable --now --no-block restart-kubelet.service - {{ end }} - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | -{{ kubeletSystemdUnit .ContainerRuntimeName .KubeletVersion .KubeletCloudProviderName .MachineSpec.Name .DNSIPs .ExternalCloudProvider .ProviderSpec.Network.GetIPFamily .PauseImage .MachineSpec.Taints .ExtraKubeletFlags true | indent 4 }} - -{{- if ne (len .CloudConfig) 0 }} -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | -{{ .CloudConfig | indent 4 }} -{{- end }} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | -{{ .NodeIPScript | indent 4 }} - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | -{{ .Kubeconfig | indent 4 }} - -- path: "/etc/kubernetes/kubelet.conf" - content: | -{{ kubeletConfiguration "cluster.local" .DNSIPs .KubeletFeatureGates .KubeletConfigs .ContainerRuntimeName | indent 4 }} - -- path: "/etc/kubernetes/pki/ca.crt" - content: | -{{ .KubernetesCACert | indent 4 }} - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: {{ .ContainerRuntimeConfigFileName }} - permissions: "0644" - content: | -{{ .ContainerRuntimeConfig | indent 4 }} - -{{- if and (eq .ContainerRuntimeName "docker") .ContainerRuntimeAuthConfig }} - -- path: {{ .ContainerRuntimeAuthConfigFileName }} - permissions: "0600" - content: | -{{ .ContainerRuntimeAuthConfig | indent 4 }} -{{- end }} - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | -{{ kubeletHealthCheckSystemdUnit | indent 4 }} - -{{- with .ProviderSpec.CAPublicKey }} - -- path: 
"/etc/ssh/trusted-user-ca-keys.pem" - content: | -{{ . | indent 4 }} - -- path: "/etc/ssh/sshd_config" - content: | -{{ sshConfigAddendum | indent 4 }} - append: true -{{- end }} - -{{- if eq .CloudProviderName "kubevirt" }} -- path: "/opt/bin/restart-kubelet.sh" - permissions: "0744" - content: | - #!/bin/bash - # Needed for Kubevirt provider because if the virt-launcher pod is deleted, - # the VM and DataVolume states are kept and VM is rebooted. We need to restart the kubelet - # with the new config (new IP) and run this at every boot. - set -xeuo pipefail - - # This helps us avoid an unnecessary restart for kubelet on the first boot - if [ -f /etc/kubelet_needs_restart ]; then - # restart kubelet since it's not the first boot - systemctl daemon-reload - systemctl restart kubelet.service - else - touch /etc/kubelet_needs_restart - fi - -- path: "/etc/systemd/system/restart-kubelet.service" - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - Description=Service responsible for restarting kubelet when the machine is rebooted - - [Service] - Type=oneshot - ExecStart=/opt/bin/restart-kubelet.sh - - [Install] - WantedBy=multi-user.target -{{- end }} - -runcmd: -- systemctl enable --now setup.service -` diff --git a/pkg/userdata/centos/provider_test.go b/pkg/userdata/centos/provider_test.go deleted file mode 100644 index d8fa5a6c1..000000000 --- a/pkg/userdata/centos/provider_test.go +++ /dev/null @@ -1,277 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// -// UserData plugin for CentOS. -// - -package centos - -import ( - "flag" - "net" - "testing" - - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/apis/plugin" - "github.com/kubermatic/machine-controller/pkg/containerruntime" - testhelper "github.com/kubermatic/machine-controller/pkg/test" - "github.com/kubermatic/machine-controller/pkg/userdata/convert" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" -) - -var ( - update = flag.Bool("update", false, "update testdata files") - - pemCertificate = `-----BEGIN CERTIFICATE----- -MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV -BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG -A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 -DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 -NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG -cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv -c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B -AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS -R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT -ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk -JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 -mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW -caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G -A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt -hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB -MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES -MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv 
-bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h -U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao -eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 -UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD -58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n -sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF -kPe6XoSbiLm/kxk32T0= ------END CERTIFICATE-----` -) - -// fakeCloudConfigProvider simulates cloud config provider for test. -type fakeCloudConfigProvider struct { - config string - name string - err error -} - -func (p *fakeCloudConfigProvider) GetCloudConfig(spec clusterv1alpha1.MachineSpec) (config string, name string, err error) { - return p.config, p.name, p.err -} - -// userDataTestCase contains the data for a table-driven test. -type userDataTestCase struct { - name string - spec clusterv1alpha1.MachineSpec - clusterDNSIPs []net.IP - cloudProviderName *string - externalCloudProvider bool - httpProxy string - noProxy string - insecureRegistries string - registryMirrors string - pauseImage string - containerruntime string -} - -// TestUserDataGeneration runs the data generation for different -// environments. 
-func TestUserDataGeneration(t *testing.T) { - t.Parallel() - - tests := []userDataTestCase{ - { - name: "kubelet-v1.24.9-aws", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.9", - }, - }, - }, - { - name: "kubelet-v1.24.9-nutanix", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.9", - }, - }, - cloudProviderName: stringPtr("nutanix"), - }, - { - name: "kubelet-v1.24.9-aws-external", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.9", - }, - }, - externalCloudProvider: true, - }, - { - name: "kubelet-v1.24.9-vsphere", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.9", - }, - }, - cloudProviderName: stringPtr("vsphere"), - }, - { - name: "kubelet-v1.24.9-vsphere-proxy", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.9", - }, - }, - cloudProviderName: stringPtr("vsphere"), - httpProxy: "http://192.168.100.100:3128", - noProxy: "192.168.1.0", - insecureRegistries: "192.168.100.100:5000, 10.0.0.1:5000", - pauseImage: "192.168.100.100:5000/kubernetes/pause:v3.1", - }, - { - name: "kubelet-v1.24.9-vsphere-mirrors", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.9", - }, - }, - cloudProviderName: stringPtr("vsphere"), - httpProxy: "http://192.168.100.100:3128", - noProxy: "192.168.1.0", - registryMirrors: "https://registry.docker-cn.com", - pauseImage: "192.168.100.100:5000/kubernetes/pause:v3.1", - }, - { - name: "kubelet-v1.25-aws", - spec: 
clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.25.0", - }, - }, - }, - { - name: "kubelet-v1.26-aws", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.26.0", - }, - }, - }, - } - - defaultCloudProvider := &fakeCloudConfigProvider{ - name: "aws", - config: "{aws-config:true}", - err: nil, - } - kubeconfig := &clientcmdapi.Config{ - Clusters: map[string]*clientcmdapi.Cluster{ - "": { - Server: "https://server:443", - CertificateAuthorityData: []byte(pemCertificate), - }, - }, - AuthInfos: map[string]*clientcmdapi.AuthInfo{ - "": { - Token: "my-token", - }, - }, - } - provider := Provider{} - - kubeletFeatureGates := map[string]bool{ - "RotateKubeletServerCertificate": true, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - emptyProviderSpec := clusterv1alpha1.ProviderSpec{ - Value: &runtime.RawExtension{}, - } - test.spec.ProviderSpec = emptyProviderSpec - var cloudProvider *fakeCloudConfigProvider - if test.cloudProviderName != nil { - cloudProvider = &fakeCloudConfigProvider{ - name: *test.cloudProviderName, - config: "{config:true}", - err: nil, - } - } else { - cloudProvider = defaultCloudProvider - } - cloudConfig, cloudProviderName, err := cloudProvider.GetCloudConfig(test.spec) - if err != nil { - t.Fatalf("failed to get cloud config: %v", err) - } - - containerRuntimeOpts := containerruntime.Opts{ - ContainerRuntime: test.containerruntime, - InsecureRegistries: test.insecureRegistries, - RegistryMirrors: test.registryMirrors, - } - containerRuntimeConfig, err := containerruntime.BuildConfig(containerRuntimeOpts) - if err != nil { - t.Fatalf("failed to generate container runtime config: %v", err) - } - - req := plugin.UserDataRequest{ - MachineSpec: test.spec, - Kubeconfig: kubeconfig, - CloudConfig: cloudConfig, - CloudProviderName: 
cloudProviderName, - KubeletCloudProviderName: cloudProviderName, - DNSIPs: test.clusterDNSIPs, - ExternalCloudProvider: test.externalCloudProvider, - HTTPProxy: test.httpProxy, - NoProxy: test.noProxy, - PauseImage: test.pauseImage, - KubeletFeatureGates: kubeletFeatureGates, - ContainerRuntime: containerRuntimeConfig, - } - - s, err := provider.UserData(req) - if err != nil { - t.Errorf("error getting userdata: '%v'", err) - } - - // Check if we can gzip it. - if _, err := convert.GzipString(s); err != nil { - t.Fatal(err) - } - goldenName := test.name + ".yaml" - testhelper.CompareOutput(t, goldenName, s, *update) - }) - } -} - -// stringPtr returns pointer to given string. -func stringPtr(a string) *string { - return &a -} diff --git a/pkg/userdata/centos/testdata/kubelet-v1.24.9-aws-external.yaml b/pkg/userdata/centos/testdata/kubelet-v1.24.9-aws-external.yaml deleted file mode 100644 index e9998c449..000000000 --- a/pkg/userdata/centos/testdata/kubelet-v1.24.9-aws-external.yaml +++ /dev/null @@ -1,459 +0,0 @@ -#cloud-config - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. 
- # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. - SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - - source /etc/os-release - if [ "$ID" == "centos" ] && [ "$VERSION_ID" == "8" ]; then - sudo sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-* - sudo sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-* - fi - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - ipvsadm - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! 
-x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=external \ - --hostname-override=${KUBELET_HOSTNAME} \ - --exit-on-lock-contention \ - 
--lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {aws-config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! 
-d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF
3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - 
systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD 
- 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/centos/testdata/kubelet-v1.24.9-aws.yaml b/pkg/userdata/centos/testdata/kubelet-v1.24.9-aws.yaml deleted file mode 100644 index a3213cfad..000000000 --- a/pkg/userdata/centos/testdata/kubelet-v1.24.9-aws.yaml +++ /dev/null @@ -1,459 +0,0 @@ -#cloud-config - - -ssh_pwauth: false - -write_files: - -- path: 
"/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - - source /etc/os-release - if [ "$ID" == "centos" ] && [ "$VERSION_ID" == "8" ]; then - sudo sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-* - sudo sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-* - fi - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - ipvsadm - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! 
"$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=aws \ - --cloud-config=/etc/kubernetes/cloud-config \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {aws-config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP 
address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VG
ZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - 
nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - 
MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - 
WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/centos/testdata/kubelet-v1.24.9-nutanix.yaml b/pkg/userdata/centos/testdata/kubelet-v1.24.9-nutanix.yaml deleted file mode 100644 index 98e8e1f9a..000000000 --- a/pkg/userdata/centos/testdata/kubelet-v1.24.9-nutanix.yaml +++ /dev/null @@ -1,467 +0,0 @@ -#cloud-config - -hostname: node1 - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - - hostnamectl set-hostname node1 - - source /etc/os-release - if [ "$ID" == "centos" ] && [ "$VERSION_ID" == "8" ]; then - sudo sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-* - sudo sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-* - fi - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - iscsi-initiator-utils \ - ipvsadm - systemctl enable --now iscsid - - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! 
-x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=nutanix \ - --cloud-config=/etc/kubernetes/cloud-config \ - --hostname-override=node1 \ - 
--exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! 
-d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF
3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - 
systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD 
- 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/centos/testdata/kubelet-v1.24.9-vsphere-mirrors.yaml b/pkg/userdata/centos/testdata/kubelet-v1.24.9-vsphere-mirrors.yaml deleted file mode 100644 index 78d8f31a7..000000000 --- a/pkg/userdata/centos/testdata/kubelet-v1.24.9-vsphere-mirrors.yaml +++ /dev/null @@ -1,476 +0,0 @@ -#cloud-config - -hostname: node1 - 
- -ssh_pwauth: false - -write_files: -- path: "/etc/environment" - content: | - HTTP_PROXY=http://192.168.100.100:3128 - http_proxy=http://192.168.100.100:3128 - HTTPS_PROXY=http://192.168.100.100:3128 - https_proxy=http://192.168.100.100:3128 - NO_PROXY=192.168.1.0 - no_proxy=192.168.1.0 - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - - hostnamectl set-hostname node1 - - source /etc/os-release - if [ "$ID" == "centos" ] && [ "$VERSION_ID" == "8" ]; then - sudo sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-* - sudo sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-* - fi - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - open-vm-tools \ - ipvsadm - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! 
-x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - - systemctl enable --now vmtoolsd.service - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=vsphere \ - --cloud-config=/etc/kubernetes/cloud-config \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --pod-infra-container-image=192.168.100.100:5000/kubernetes/pause:v3.1 \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if 
grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnB
jMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - 
shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - 
U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry.docker-cn.com"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/centos/testdata/kubelet-v1.24.9-vsphere-proxy.yaml 
b/pkg/userdata/centos/testdata/kubelet-v1.24.9-vsphere-proxy.yaml deleted file mode 100644 index e746b4110..000000000 --- a/pkg/userdata/centos/testdata/kubelet-v1.24.9-vsphere-proxy.yaml +++ /dev/null @@ -1,483 +0,0 @@ -#cloud-config - -hostname: node1 - - -ssh_pwauth: false - -write_files: -- path: "/etc/environment" - content: | - HTTP_PROXY=http://192.168.100.100:3128 - http_proxy=http://192.168.100.100:3128 - HTTPS_PROXY=http://192.168.100.100:3128 - https_proxy=http://192.168.100.100:3128 - NO_PROXY=192.168.1.0 - no_proxy=192.168.1.0 - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - - hostnamectl set-hostname node1 - - source /etc/os-release - if [ "$ID" == "centos" ] && [ "$VERSION_ID" == "8" ]; then - sudo sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-* - sudo sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-* - fi - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - open-vm-tools \ - ipvsadm - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! 
-x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - - systemctl enable --now vmtoolsd.service - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=vsphere \ - --cloud-config=/etc/kubernetes/cloud-config \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --pod-infra-container-image=192.168.100.100:5000/kubernetes/pause:v3.1 \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if 
grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnB
jMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - 
shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - 
U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - [plugins."io.containerd.grpc.v1.cri".registry.configs] - [plugins."io.containerd.grpc.v1.cri".registry.configs."10.0.0.1:5000"] - [plugins."io.containerd.grpc.v1.cri".registry.configs."10.0.0.1:5000".tls] - insecure_skip_verify = true - [plugins."io.containerd.grpc.v1.cri".registry.configs."192.168.100.100:5000"] - [plugins."io.containerd.grpc.v1.cri".registry.configs."192.168.100.100:5000".tls] - insecure_skip_verify = true - - -- path: 
/etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/centos/testdata/kubelet-v1.24.9-vsphere.yaml b/pkg/userdata/centos/testdata/kubelet-v1.24.9-vsphere.yaml deleted file mode 100644 index e2000bbb3..000000000 --- a/pkg/userdata/centos/testdata/kubelet-v1.24.9-vsphere.yaml +++ /dev/null @@ -1,467 +0,0 @@ -#cloud-config - -hostname: node1 - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - - hostnamectl set-hostname node1 - - source /etc/os-release - if [ "$ID" == "centos" ] && [ "$VERSION_ID" == "8" ]; then - sudo sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-* - sudo sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-* - fi - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - open-vm-tools \ - ipvsadm - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! 
-x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - - systemctl enable --now vmtoolsd.service - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=vsphere \ - --cloud-config=/etc/kubernetes/cloud-config \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e 
"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVM
KTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - 
shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - 
U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/centos/testdata/kubelet-v1.25-aws.yaml b/pkg/userdata/centos/testdata/kubelet-v1.25-aws.yaml deleted file mode 
100644 index be894d2a5..000000000 --- a/pkg/userdata/centos/testdata/kubelet-v1.25-aws.yaml +++ /dev/null @@ -1,459 +0,0 @@ -#cloud-config - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - - source /etc/os-release - if [ "$ID" == "centos" ] && [ "$VERSION_ID" == "8" ]; then - sudo sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-* - sudo sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-* - fi - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - ipvsadm - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! 
"$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=aws \ - --cloud-config=/etc/kubernetes/cloud-config \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {aws-config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP 
address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VG
ZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - 
nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - 
MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - 
WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/centos/testdata/kubelet-v1.26-aws.yaml b/pkg/userdata/centos/testdata/kubelet-v1.26-aws.yaml deleted file mode 100644 index 847bd55cf..000000000 --- a/pkg/userdata/centos/testdata/kubelet-v1.26-aws.yaml +++ /dev/null @@ -1,459 +0,0 @@ -#cloud-config - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - - source /etc/os-release - if [ "$ID" == "centos" ] && [ "$VERSION_ID" == "8" ]; then - sudo sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-* - sudo sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-* - fi - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - ipvsadm - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! 
"$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=aws \ - --cloud-config=/etc/kubernetes/cloud-config \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {aws-config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP 
address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VG
ZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - 
nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - 
MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - 
WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/convert/ignition-converter.go b/pkg/userdata/convert/ignition-converter.go deleted file mode 100644 index bb9d4c865..000000000 --- a/pkg/userdata/convert/ignition-converter.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package convert - -import ( - "encoding/json" - "fmt" - - ctconfig "github.com/coreos/container-linux-config-transpiler/config" - - pluginapi "github.com/kubermatic/machine-controller/pkg/apis/plugin" - "github.com/kubermatic/machine-controller/pkg/userdata/plugin" -) - -func NewIgnition(p plugin.Provider) *Ignition { - return &Ignition{p: p} -} - -type Ignition struct { - p plugin.Provider -} - -func (j *Ignition) UserData(req pluginapi.UserDataRequest) (string, error) { - before, err := j.p.UserData(req) - if err != nil { - return "", err - } - - return ToIgnition(before) -} - -func ToIgnition(s string) (string, error) { - // Convert to ignition - cfg, ast, report := ctconfig.Parse([]byte(s)) - if len(report.Entries) > 0 { - return "", fmt.Errorf("failed to validate coreos cloud config: %s", report.String()) - } - - ignCfg, report := ctconfig.Convert(cfg, "", ast) - if len(report.Entries) > 0 { - return "", fmt.Errorf("failed to convert container linux config to ignition: %s", report.String()) - } - - out, err := json.Marshal(ignCfg) - if err != nil { - return "", 
fmt.Errorf("failed to marshal ignition config: %w", err) - } - - return string(out), nil -} diff --git a/pkg/userdata/flatcar/provider.go b/pkg/userdata/flatcar/provider.go deleted file mode 100644 index daf3cba76..000000000 --- a/pkg/userdata/flatcar/provider.go +++ /dev/null @@ -1,845 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// -// UserData plugin for Flatcar. -// - -package flatcar - -import ( - "bytes" - "fmt" - "text/template" - - "github.com/Masterminds/semver/v3" - - "github.com/kubermatic/machine-controller/pkg/apis/plugin" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" - "github.com/kubermatic/machine-controller/pkg/userdata/convert" - userdatahelper "github.com/kubermatic/machine-controller/pkg/userdata/helper" -) - -// Provider is a pkg/userdata/plugin.Provider implementation. -type Provider struct{} - -// UserData renders user-data template to string. 
-func (p Provider) UserData(req plugin.UserDataRequest) (string, error) { - pconfig, err := providerconfigtypes.GetConfig(req.MachineSpec.ProviderSpec) - if err != nil { - return "", fmt.Errorf("failed to get provider config: %w", err) - } - - if pconfig.OverwriteCloudConfig != nil { - req.CloudConfig = *pconfig.OverwriteCloudConfig - } - - flatcarConfig, err := LoadConfig(pconfig.OperatingSystemSpec) - if err != nil { - return "", fmt.Errorf("failed to get flatcar config from provider config: %w", err) - } - - userDataTemplate, err := getUserDataTemplate(flatcarConfig.ProvisioningUtility) - if err != nil { - return "", fmt.Errorf("failed to get an appropriate user-data template: %w", err) - } - - tmpl, err := template.New("user-data").Funcs(userdatahelper.TxtFuncMap()).Parse(userDataTemplate) - if err != nil { - return "", fmt.Errorf("failed to parse user-data template: %w", err) - } - - kubeletVersion, err := semver.NewVersion(req.MachineSpec.Versions.Kubelet) - if err != nil { - return "", fmt.Errorf("invalid kubelet version: %w", err) - } - - kubeconfigString, err := userdatahelper.StringifyKubeconfig(req.Kubeconfig) - if err != nil { - return "", err - } - - kubernetesCACert, err := userdatahelper.GetCACert(req.Kubeconfig) - if err != nil { - return "", fmt.Errorf("error extracting cacert: %w", err) - } - - if flatcarConfig.DisableAutoUpdate { - flatcarConfig.DisableLocksmithD = true - flatcarConfig.DisableUpdateEngine = true - } - - crEngine := req.ContainerRuntime.Engine(kubeletVersion) - crScript, err := crEngine.ScriptFor(providerconfigtypes.OperatingSystemFlatcar) - if err != nil { - return "", fmt.Errorf("failed to generate container runtime install script: %w", err) - } - - crConfig, err := crEngine.Config() - if err != nil { - return "", fmt.Errorf("failed to generate container runtime config: %w", err) - } - - crAuthConfig, err := crEngine.AuthConfig() - if err != nil { - return "", fmt.Errorf("failed to generate container runtime auth config: %w", 
err) - } - - data := struct { - plugin.UserDataRequest - ProviderSpec *providerconfigtypes.Config - FlatcarConfig *Config - KubeletVersion string - Kubeconfig string - KubernetesCACert string - NodeIPScript string - ExtraKubeletFlags []string - ContainerRuntimeScript string - ContainerRuntimeConfigFileName string - ContainerRuntimeConfig string - ContainerRuntimeAuthConfigFileName string - ContainerRuntimeAuthConfig string - ContainerRuntimeName string - }{ - UserDataRequest: req, - ProviderSpec: pconfig, - FlatcarConfig: flatcarConfig, - KubeletVersion: kubeletVersion.String(), - Kubeconfig: kubeconfigString, - KubernetesCACert: kubernetesCACert, - NodeIPScript: userdatahelper.SetupNodeIPEnvScript(pconfig.Network.GetIPFamily()), - ExtraKubeletFlags: crEngine.KubeletFlags(), - ContainerRuntimeScript: crScript, - ContainerRuntimeConfigFileName: crEngine.ConfigFileName(), - ContainerRuntimeConfig: crConfig, - ContainerRuntimeAuthConfigFileName: crEngine.AuthConfigFileName(), - ContainerRuntimeAuthConfig: crAuthConfig, - ContainerRuntimeName: crEngine.String(), - } - - b := &bytes.Buffer{} - err = tmpl.Execute(b, data) - if err != nil { - return "", fmt.Errorf("failed to execute user-data template: %w", err) - } - - out, err := userdatahelper.CleanupTemplateOutput(b.String()) - if err != nil { - return "", fmt.Errorf("failed to cleanup user-data template: %w", err) - } - - if flatcarConfig.ProvisioningUtility == CloudInit { - return out, nil - } - - return convert.ToIgnition(out) -} - -func getUserDataTemplate(pUtil ProvisioningUtility) (string, error) { - switch pUtil { - case Ignition, "": - return userDataIgnitionTemplate, nil - case CloudInit: - return userDataCloudInitTemplate, nil - default: - return "", fmt.Errorf("invalid provisioning utility %s, allowed values are %s or %s", - pUtil, Ignition, CloudInit) - } -} - -// Ignition template. 
-const userDataIgnitionTemplate = `passwd: -{{- if ne (len .ProviderSpec.SSHPublicKeys) 0 }} - users: - - name: core - ssh_authorized_keys: - {{range .ProviderSpec.SSHPublicKeys}}- {{.}} - {{end}} -{{- end }} - -{{- if .ProviderSpec.Network.IsStaticIPConfig }} -networkd: - units: - - name: static-nic.network - contents: | - [Match] - # Because of difficulty predicting specific NIC names on different cloud providers, - # we only support static addressing on VSphere. There should be a single NIC attached - # that we will match by name prefix 'en' which denotes ethernet devices. - Name=en* - - [Network] - DHCP=no - Address={{ .ProviderSpec.Network.CIDR }} - Gateway={{ .ProviderSpec.Network.Gateway }} - {{range .ProviderSpec.Network.DNS.Servers}}DNS={{.}} - {{end}} -{{- end }} - -systemd: - units: -{{- if .FlatcarConfig.DisableUpdateEngine }} - - name: update-engine.service - mask: true -{{- end }} -{{- if .FlatcarConfig.DisableLocksmithD }} - - name: locksmithd.service - mask: true -{{- end }} - -{{- if .HTTPProxy }} - - name: update-engine.service - dropins: - - name: 50-proxy.conf - contents: | - [Service] - Environment=ALL_PROXY={{ .HTTPProxy }} -{{- end }} - - - name: setup.service - enabled: true - contents: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - Requires=nodeip.service - After=network-online.target - After=nodeip.service - - Description=Service responsible for configuring the flatcar machine - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/setup.sh - - - name: download-script.service - enabled: true - contents: | - [Unit] - Requires=network-online.target - Requires=setup.service - After=network-online.target - After=setup.service - [Service] - Type=oneshot - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/download.sh - [Install] - WantedBy=multi-user.target - - - name: kubelet-healthcheck.service - enabled: true - dropins: - - name: 
40-download.conf - contents: | - [Unit] - Requires=download-script.service - After=download-script.service - contents: | -{{ kubeletHealthCheckSystemdUnit | indent 10 }} - - - name: nodeip.service - enabled: true - contents: | - [Unit] - Description=Setup Kubelet Node IP Env - Requires=network-online.target - After=network-online.target - - [Service] - ExecStart=/opt/bin/setup_net_env.sh - RemainAfterExit=yes - Type=oneshot - [Install] - WantedBy=multi-user.target - -{{- if eq .CloudProviderName "kubevirt" }} - - name: restart-kubelet.service - enabled: true - contents: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - Description=Service responsible for restarting kubelet when the machine is rebooted - - [Service] - Type=oneshot - ExecStart=/opt/bin/restart-kubelet.sh - - [Install] - WantedBy=multi-user.target -{{- end }} - - - name: kubelet.service - enabled: true - dropins: - - name: 10-nodeip.conf - contents: | - [Service] - EnvironmentFile=/etc/kubernetes/nodeip.conf - - name: resolv.conf - contents: | - [Service] - Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" - - name: 40-download.conf - contents: | - [Unit] - Requires=download-script.service - After=download-script.service - contents: | -{{ kubeletSystemdUnit .ContainerRuntimeName .KubeletVersion .KubeletCloudProviderName .MachineSpec.Name .DNSIPs .ExternalCloudProvider .ProviderSpec.Network.GetIPFamily .PauseImage .MachineSpec.Taints .ExtraKubeletFlags false | indent 8 }} - -storage: - files: -{{- if .HTTPProxy }} - - path: /etc/environment - filesystem: root - mode: 0644 - contents: - inline: | -{{ proxyEnvironment .HTTPProxy .NoProxy | indent 10 }} -{{- end }} - - - path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - filesystem: root - mode: 0644 - contents: - inline: | -{{ journalDConfig | indent 10 }} - - - path: "/etc/kubernetes/kubelet.conf" - filesystem: root - mode: 0644 - contents: - inline: | -{{ kubeletConfiguration "cluster.local" .DNSIPs 
.KubeletFeatureGates .KubeletConfigs .ContainerRuntimeName | indent 10 }} - - - path: /opt/load-kernel-modules.sh - filesystem: root - mode: 0755 - contents: - inline: | -{{ kernelModulesScript | indent 10 }} - - - path: /etc/sysctl.d/k8s.conf - filesystem: root - mode: 0644 - contents: - inline: | -{{ kernelSettings | indent 10 }} - - - path: /proc/sys/kernel/panic_on_oops - filesystem: root - mode: 0644 - contents: - inline: | - 1 - - - path: /proc/sys/kernel/panic - filesystem: root - mode: 0644 - contents: - inline: | - 10 - - - path: /proc/sys/vm/overcommit_memory - filesystem: root - mode: 0644 - contents: - inline: | - 1 - - - path: "/opt/bin/setup_net_env.sh" - filesystem: root - mode: 0755 - contents: - inline: | -{{ .NodeIPScript | indent 10 }} - - - path: "/etc/systemd/network/zz-default.network.d/ipv6-fix.conf" - filesystem: root - mode: 0755 - contents: - inline: | - [Network] - IPv6AcceptRA=true - - path: /etc/kubernetes/bootstrap-kubelet.conf - filesystem: root - mode: 0400 - contents: - inline: | -{{ .Kubeconfig | indent 10 }} - -{{- if ne (len .CloudConfig) 0 }} - - path: /etc/kubernetes/cloud-config - filesystem: root - mode: 0400 - contents: - inline: | -{{ .CloudConfig | indent 10 }} -{{- end }} - - - path: /etc/kubernetes/pki/ca.crt - filesystem: root - mode: 0644 - contents: - inline: | -{{ .KubernetesCACert | indent 10 }} -{{ if ne .CloudProviderName "aws" }} - - path: /etc/hostname - filesystem: root - mode: 0600 - contents: - inline: '{{ .MachineSpec.Name }}' -{{- end }} - -{{- if eq .CloudProviderName "kubevirt" }} - - path: /opt/bin/restart-kubelet.sh - filesystem: root - mode: 0744 - contents: - inline: | - #!/bin/bash - # Needed for Kubevirt provider because if the virt-launcher pod is deleted, - # the VM and DataVolume states are kept and VM is rebooted. We need to restart the kubelet - # with the new config (new IP) and run this at every boot. 
- set -xeuo pipefail - - # This helps us avoid an unnecessary restart for kubelet on the first boot - if [ -f /etc/kubelet_needs_restart ]; then - # restart kubelet since it's not the first boot - systemctl daemon-reload - systemctl restart kubelet.service - else - touch /etc/kubelet_needs_restart - fi -{{- end }} - - - path: /etc/ssh/sshd_config - filesystem: root - mode: 0600 - user: - id: 0 - group: - id: 0 - contents: - inline: | - # Use most defaults for sshd configuration. - Subsystem sftp internal-sftp - ClientAliveInterval 180 - UseDNS no - UsePAM yes - PrintLastLog no # handled by PAM - PrintMotd no # handled by PAM - PasswordAuthentication no - ChallengeResponseAuthentication no - -{{- if not .FlatcarConfig.DisableAutoUpdate }} - - path: "/etc/polkit-1/rules.d/60-noreboot_norestart.rules" - filesystem: root - mode: 0644 - contents: - inline: | - polkit.addRule(function(action, subject) { - if (action.id == "org.freedesktop.login1.reboot" || - action.id == "org.freedesktop.login1.reboot-multiple-sessions") { - if (subject.user == "core") { - return polkit.Result.YES; - } else { - return polkit.Result.AUTH_ADMIN; - } - } - }); -{{- end }} - - - path: /opt/bin/setup.sh - filesystem: root - mode: 0755 - contents: - inline: | - #!/bin/bash - set -xeuo pipefail - - # We stop these services here explicitly since masking only removes the symlinks for these services so that they can't be started. - # But that wouldn't "stop" the already running services on the first boot. 
- - {{- if or .FlatcarConfig.DisableUpdateEngine .FlatcarConfig.DisableAutoUpdate }} - systemctl stop update-engine.service - {{- end }} - - {{- if or .FlatcarConfig.DisableLocksmithD .FlatcarConfig.DisableAutoUpdate }} - systemctl stop locksmithd.service - {{- end }} - systemctl disable setup.service - - - path: /opt/bin/download.sh - filesystem: root - mode: 0755 - contents: - inline: | - #!/bin/bash - set -xeuo pipefail - -{{ safeDownloadBinariesScript .KubeletVersion | indent 10 }} - mkdir -p /etc/systemd/system/containerd.service.d /etc/systemd/system/docker.service.d - cat < /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: /etc/sysctl.d/k8s.conf - permissions: "0644" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! 
-d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/systemd/network/zz-default.network.d/ipv6-fix.conf" - permissions: "0755" - content: | - # IPv6 autoconfiguration doesn't work out of the box on some versions of Flatcar - # so we enable IPv6 Router Advertisement here. - # See for details https://github.com/flatcar-linux/Flatcar/issues/384 - [Network] - IPv6AcceptRA=true - -- path: /etc/kubernetes/bootstrap-kubelet.conf - permissions: "0400" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2V
GZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: /etc/kubernetes/cloud-config - permissions: "0400" - content: | - {anexia-config:true} - -- path: /etc/kubernetes/pki/ca.crt - permissions: "0644" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - 
mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - - -- path: /etc/hostname - permissions: "0600" - content: 'node1' - -- path: /etc/ssh/sshd_config - permissions: "0600" - user: root - content: | - # Use most defaults for sshd configuration. 
- Subsystem sftp internal-sftp - ClientAliveInterval 180 - UseDNS no - UsePAM yes - PrintLastLog no # handled by PAM - PrintMotd no # handled by PAM - PasswordAuthentication no - ChallengeResponseAuthentication no - -- path: /opt/bin/download.sh - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - opt_bin=/opt/bin - usr_local_bin=/usr/local/bin - cni_bin_dir=/opt/cni/bin - mkdir -p /etc/cni/net.d /etc/kubernetes/manifests "$opt_bin" "$cni_bin_dir" - arch=${HOST_ARCH-} - if [ -z "$arch" ] - then - case $(uname -m) in - x86_64) - arch="amd64" - ;; - aarch64) - arch="arm64" - ;; - *) - echo "unsupported CPU architecture, exiting" - exit 1 - ;; - esac - fi - CNI_VERSION="${CNI_VERSION:-v1.2.0}" - cni_base_url="https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION" - cni_filename="cni-plugins-linux-$arch-$CNI_VERSION.tgz" - curl -Lfo "$cni_bin_dir/$cni_filename" "$cni_base_url/$cni_filename" - cni_sum=$(curl -Lf "$cni_base_url/$cni_filename.sha256") - cd "$cni_bin_dir" - sha256sum -c <<<"$cni_sum" - tar xvf "$cni_filename" - rm -f "$cni_filename" - cd - - CRI_TOOLS_RELEASE="${CRI_TOOLS_RELEASE:-v1.26.0}" - cri_tools_base_url="https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRI_TOOLS_RELEASE}" - cri_tools_filename="crictl-${CRI_TOOLS_RELEASE}-linux-${arch}.tar.gz" - curl -Lfo "$opt_bin/$cri_tools_filename" "$cri_tools_base_url/$cri_tools_filename" - cri_tools_sum_value=$(curl -Lf "$cri_tools_base_url/$cri_tools_filename.sha256") - cri_tools_sum="$cri_tools_sum_value $cri_tools_filename" - cd "$opt_bin" - sha256sum -c <<<"$cri_tools_sum" - tar xvf "$cri_tools_filename" - rm -f "$cri_tools_filename" - ln -sf "$opt_bin/crictl" "$usr_local_bin"/crictl || echo "symbolic link is skipped" - cd - - KUBE_VERSION="${KUBE_VERSION:-v1.24.0}" - kube_dir="$opt_bin/kubernetes-$KUBE_VERSION" - kube_base_url="https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$arch" - 
kube_sum_file="$kube_dir/sha256" - mkdir -p "$kube_dir" - : >"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - - mkdir -p /etc/systemd/system/containerd.service.d - - cat < /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: /etc/sysctl.d/k8s.conf - permissions: "0644" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! 
-d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/systemd/network/zz-default.network.d/ipv6-fix.conf" - permissions: "0755" - content: | - # IPv6 autoconfiguration doesn't work out of the box on some versions of Flatcar - # so we enable IPv6 Router Advertisement here. - # See for details https://github.com/flatcar-linux/Flatcar/issues/384 - [Network] - IPv6AcceptRA=true - -- path: /etc/kubernetes/bootstrap-kubelet.conf - permissions: "0400" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2V
GZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: /etc/kubernetes/cloud-config - permissions: "0400" - content: | - {anexia-config:true} - -- path: /etc/kubernetes/pki/ca.crt - permissions: "0644" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - 
mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - - -- path: /etc/hostname - permissions: "0600" - content: 'node1' - -- path: /etc/ssh/sshd_config - permissions: "0600" - user: root - content: | - # Use most defaults for sshd configuration. 
- Subsystem sftp internal-sftp - ClientAliveInterval 180 - UseDNS no - UsePAM yes - PrintLastLog no # handled by PAM - PrintMotd no # handled by PAM - PasswordAuthentication no - ChallengeResponseAuthentication no - -- path: /opt/bin/download.sh - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - opt_bin=/opt/bin - usr_local_bin=/usr/local/bin - cni_bin_dir=/opt/cni/bin - mkdir -p /etc/cni/net.d /etc/kubernetes/manifests "$opt_bin" "$cni_bin_dir" - arch=${HOST_ARCH-} - if [ -z "$arch" ] - then - case $(uname -m) in - x86_64) - arch="amd64" - ;; - aarch64) - arch="arm64" - ;; - *) - echo "unsupported CPU architecture, exiting" - exit 1 - ;; - esac - fi - CNI_VERSION="${CNI_VERSION:-v1.2.0}" - cni_base_url="https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION" - cni_filename="cni-plugins-linux-$arch-$CNI_VERSION.tgz" - curl -Lfo "$cni_bin_dir/$cni_filename" "$cni_base_url/$cni_filename" - cni_sum=$(curl -Lf "$cni_base_url/$cni_filename.sha256") - cd "$cni_bin_dir" - sha256sum -c <<<"$cni_sum" - tar xvf "$cni_filename" - rm -f "$cni_filename" - cd - - CRI_TOOLS_RELEASE="${CRI_TOOLS_RELEASE:-v1.26.0}" - cri_tools_base_url="https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRI_TOOLS_RELEASE}" - cri_tools_filename="crictl-${CRI_TOOLS_RELEASE}-linux-${arch}.tar.gz" - curl -Lfo "$opt_bin/$cri_tools_filename" "$cri_tools_base_url/$cri_tools_filename" - cri_tools_sum_value=$(curl -Lf "$cri_tools_base_url/$cri_tools_filename.sha256") - cri_tools_sum="$cri_tools_sum_value $cri_tools_filename" - cd "$opt_bin" - sha256sum -c <<<"$cri_tools_sum" - tar xvf "$cri_tools_filename" - rm -f "$cri_tools_filename" - ln -sf "$opt_bin/crictl" "$usr_local_bin"/crictl || echo "symbolic link is skipped" - cd - - KUBE_VERSION="${KUBE_VERSION:-v1.24.9}" - kube_dir="$opt_bin/kubernetes-$KUBE_VERSION" - kube_base_url="https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$arch" - 
kube_sum_file="$kube_dir/sha256" - mkdir -p "$kube_dir" - : >"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - - mkdir -p /etc/systemd/system/containerd.service.d - - cat < /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: /etc/sysctl.d/k8s.conf - permissions: "0644" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! 
-d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/systemd/network/zz-default.network.d/ipv6-fix.conf" - permissions: "0755" - content: | - # IPv6 autoconfiguration doesn't work out of the box on some versions of Flatcar - # so we enable IPv6 Router Advertisement here. - # See for details https://github.com/flatcar-linux/Flatcar/issues/384 - [Network] - IPv6AcceptRA=true - -- path: /etc/kubernetes/bootstrap-kubelet.conf - permissions: "0400" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2V
GZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: /etc/kubernetes/cloud-config - permissions: "0400" - content: | - {anexia-config:true} - -- path: /etc/kubernetes/pki/ca.crt - permissions: "0644" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - 
mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - - -- path: /etc/hostname - permissions: "0600" - content: 'node1' - -- path: /etc/ssh/sshd_config - permissions: "0600" - user: root - content: | - # Use most defaults for sshd configuration. 
- Subsystem sftp internal-sftp - ClientAliveInterval 180 - UseDNS no - UsePAM yes - PrintLastLog no # handled by PAM - PrintMotd no # handled by PAM - PasswordAuthentication no - ChallengeResponseAuthentication no - -- path: /opt/bin/download.sh - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - opt_bin=/opt/bin - usr_local_bin=/usr/local/bin - cni_bin_dir=/opt/cni/bin - mkdir -p /etc/cni/net.d /etc/kubernetes/manifests "$opt_bin" "$cni_bin_dir" - arch=${HOST_ARCH-} - if [ -z "$arch" ] - then - case $(uname -m) in - x86_64) - arch="amd64" - ;; - aarch64) - arch="arm64" - ;; - *) - echo "unsupported CPU architecture, exiting" - exit 1 - ;; - esac - fi - CNI_VERSION="${CNI_VERSION:-v1.2.0}" - cni_base_url="https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION" - cni_filename="cni-plugins-linux-$arch-$CNI_VERSION.tgz" - curl -Lfo "$cni_bin_dir/$cni_filename" "$cni_base_url/$cni_filename" - cni_sum=$(curl -Lf "$cni_base_url/$cni_filename.sha256") - cd "$cni_bin_dir" - sha256sum -c <<<"$cni_sum" - tar xvf "$cni_filename" - rm -f "$cni_filename" - cd - - CRI_TOOLS_RELEASE="${CRI_TOOLS_RELEASE:-v1.26.0}" - cri_tools_base_url="https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRI_TOOLS_RELEASE}" - cri_tools_filename="crictl-${CRI_TOOLS_RELEASE}-linux-${arch}.tar.gz" - curl -Lfo "$opt_bin/$cri_tools_filename" "$cri_tools_base_url/$cri_tools_filename" - cri_tools_sum_value=$(curl -Lf "$cri_tools_base_url/$cri_tools_filename.sha256") - cri_tools_sum="$cri_tools_sum_value $cri_tools_filename" - cd "$opt_bin" - sha256sum -c <<<"$cri_tools_sum" - tar xvf "$cri_tools_filename" - rm -f "$cri_tools_filename" - ln -sf "$opt_bin/crictl" "$usr_local_bin"/crictl || echo "symbolic link is skipped" - cd - - KUBE_VERSION="${KUBE_VERSION:-v1.25.0}" - kube_dir="$opt_bin/kubernetes-$KUBE_VERSION" - kube_base_url="https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$arch" - 
kube_sum_file="$kube_dir/sha256" - mkdir -p "$kube_dir" - : >"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - - mkdir -p /etc/systemd/system/containerd.service.d - - cat < /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: /etc/sysctl.d/k8s.conf - permissions: "0644" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! 
-d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/systemd/network/zz-default.network.d/ipv6-fix.conf" - permissions: "0755" - content: | - # IPv6 autoconfiguration doesn't work out of the box on some versions of Flatcar - # so we enable IPv6 Router Advertisement here. - # See for details https://github.com/flatcar-linux/Flatcar/issues/384 - [Network] - IPv6AcceptRA=true - -- path: /etc/kubernetes/bootstrap-kubelet.conf - permissions: "0400" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2V
GZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: /etc/kubernetes/cloud-config - permissions: "0400" - content: | - - -- path: /etc/kubernetes/pki/ca.crt - permissions: "0644" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - 
mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - - -- path: /etc/hostname - permissions: "0600" - content: 'node1' - -- path: /etc/ssh/sshd_config - permissions: "0600" - user: root - content: | - # Use most defaults for sshd configuration. 
- Subsystem sftp internal-sftp - ClientAliveInterval 180 - UseDNS no - UsePAM yes - PrintLastLog no # handled by PAM - PrintMotd no # handled by PAM - PasswordAuthentication no - ChallengeResponseAuthentication no - -- path: /opt/bin/download.sh - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - opt_bin=/opt/bin - usr_local_bin=/usr/local/bin - cni_bin_dir=/opt/cni/bin - mkdir -p /etc/cni/net.d /etc/kubernetes/manifests "$opt_bin" "$cni_bin_dir" - arch=${HOST_ARCH-} - if [ -z "$arch" ] - then - case $(uname -m) in - x86_64) - arch="amd64" - ;; - aarch64) - arch="arm64" - ;; - *) - echo "unsupported CPU architecture, exiting" - exit 1 - ;; - esac - fi - CNI_VERSION="${CNI_VERSION:-v1.2.0}" - cni_base_url="https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION" - cni_filename="cni-plugins-linux-$arch-$CNI_VERSION.tgz" - curl -Lfo "$cni_bin_dir/$cni_filename" "$cni_base_url/$cni_filename" - cni_sum=$(curl -Lf "$cni_base_url/$cni_filename.sha256") - cd "$cni_bin_dir" - sha256sum -c <<<"$cni_sum" - tar xvf "$cni_filename" - rm -f "$cni_filename" - cd - - CRI_TOOLS_RELEASE="${CRI_TOOLS_RELEASE:-v1.26.0}" - cri_tools_base_url="https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRI_TOOLS_RELEASE}" - cri_tools_filename="crictl-${CRI_TOOLS_RELEASE}-linux-${arch}.tar.gz" - curl -Lfo "$opt_bin/$cri_tools_filename" "$cri_tools_base_url/$cri_tools_filename" - cri_tools_sum_value=$(curl -Lf "$cri_tools_base_url/$cri_tools_filename.sha256") - cri_tools_sum="$cri_tools_sum_value $cri_tools_filename" - cd "$opt_bin" - sha256sum -c <<<"$cri_tools_sum" - tar xvf "$cri_tools_filename" - rm -f "$cri_tools_filename" - ln -sf "$opt_bin/crictl" "$usr_local_bin"/crictl || echo "symbolic link is skipped" - cd - - KUBE_VERSION="${KUBE_VERSION:-v1.24.0}" - kube_dir="$opt_bin/kubernetes-$KUBE_VERSION" - kube_base_url="https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$arch" - 
kube_sum_file="$kube_dir/sha256" - mkdir -p "$kube_dir" - : >"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - - mkdir -p /etc/systemd/system/containerd.service.d - - cat <"$kube_sum_file" - -for bin in kubelet kubeadm kubectl; do - {{- /* download kube binary */}} - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - - {{- /* download kube binary checksum */}} - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - - {{- /* save kube binary checksum */}} - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" -done - -{{- /* check kube binaries checksum */}} -sha256sum -c "$kube_sum_file" - -for bin in kubelet kubeadm kubectl; do - {{- /* link kube binaries from verioned dir to $opt_bin */}} - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin -done - -if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh -fi -` - - downloadBinariesTpl = `{{- /*setup some common directories */ -}} -mkdir -p /opt/bin/ -mkdir -p /var/lib/calico -mkdir -p /etc/kubernetes/manifests -mkdir -p /etc/cni/net.d -mkdir -p /opt/cni/bin - -{{- /* # cni */}} -if [ ! 
-f /opt/cni/bin/loopback ]; then - curl -L https://github.com/containernetworking/plugins/releases/download/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz | tar -xvzC /opt/cni/bin -f - -fi - -{{- if .DownloadKubelet }} -{{- /* kubelet */}} -if [ ! -f /opt/bin/kubelet ]; then - curl -Lfo /opt/bin/kubelet https://storage.googleapis.com/kubernetes-release/release/v{{ .KubeletVersion }}/bin/linux/amd64/kubelet - chmod +x /opt/bin/kubelet -fi -{{- end }} - -if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/8b5b66e4910a6228dfaecccaa0a3b05ec4902f8e/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh -fi -` -) - -// SafeDownloadBinariesScript returns the script which is responsible to -// download and check checksums of all required binaries. -func SafeDownloadBinariesScript(kubeVersion string) (string, error) { - tmpl, err := template.New("download-binaries").Funcs(TxtFuncMap()).Parse(safeDownloadBinariesTpl) - if err != nil { - return "", fmt.Errorf("failed to parse download-binaries template: %w", err) - } - - const ( - CNIVersion = "v1.2.0" - CRIToolsVersion = "v1.26.0" - ) - - // force v in case if it's not there - if !strings.HasPrefix(kubeVersion, "v") { - kubeVersion = "v" + kubeVersion - } - - data := struct { - KubeVersion string - CNIVersion string - CRIToolsVersion string - }{ - KubeVersion: kubeVersion, - CNIVersion: CNIVersion, - CRIToolsVersion: CRIToolsVersion, - } - - b := &bytes.Buffer{} - err = tmpl.Execute(b, data) - if err != nil { - return "", fmt.Errorf("failed to execute download-binaries template: %w", err) - } - - return b.String(), nil -} - -// DownloadBinariesScript returns the script which is responsible to download -// all required binaries. 
-func DownloadBinariesScript(kubeletVersion string, downloadKubelet bool) (string, error) { - tmpl, err := template.New("download-binaries").Funcs(TxtFuncMap()).Parse(downloadBinariesTpl) - if err != nil { - return "", fmt.Errorf("failed to parse download-binaries template: %w", err) - } - - data := struct { - KubeletVersion string - DownloadKubelet bool - }{ - KubeletVersion: kubeletVersion, - DownloadKubelet: downloadKubelet, - } - b := &bytes.Buffer{} - err = tmpl.Execute(b, data) - if err != nil { - return "", fmt.Errorf("failed to execute download-binaries template: %w", err) - } - - return b.String(), nil -} diff --git a/pkg/userdata/helper/download_binaries_script_test.go b/pkg/userdata/helper/download_binaries_script_test.go deleted file mode 100644 index 9ecb063fa..000000000 --- a/pkg/userdata/helper/download_binaries_script_test.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package helper - -import ( - "fmt" - "testing" - - "github.com/kubermatic/machine-controller/pkg/test" -) - -func TestDownloadBinariesScript(t *testing.T) { - for _, version := range versions { - name := fmt.Sprintf("download_binaries_%s", version.Original()) - t.Run(name, func(t *testing.T) { - script, err := DownloadBinariesScript(version.String(), true) - if err != nil { - t.Error(err) - } - goldenName := name + ".golden" - test.CompareOutput(t, goldenName, script, *update) - }) - } -} - -func TestSafeDownloadBinariesScript(t *testing.T) { - name := "safe_download_binaries_v1.24.9" - t.Run(name, func(t *testing.T) { - script, err := SafeDownloadBinariesScript("v1.24.9") - if err != nil { - t.Error(err) - } - goldenName := name + ".golden" - test.CompareOutput(t, goldenName, script, *update) - }) -} diff --git a/pkg/userdata/helper/helper.go b/pkg/userdata/helper/helper.go deleted file mode 100644 index 59c8af94f..000000000 --- a/pkg/userdata/helper/helper.go +++ /dev/null @@ -1,216 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package helper - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/kubermatic/machine-controller/pkg/cloudprovider/util" - - "k8s.io/client-go/tools/clientcmd" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" -) - -const ( - DefaultDockerContainerLogMaxFiles = "5" - DefaultDockerContainerLogMaxSize = "100m" -) - -func GetCACert(kubeconfig *clientcmdapi.Config) (string, error) { - if len(kubeconfig.Clusters) != 1 { - return "", fmt.Errorf("kubeconfig does not contain exactly one cluster, can not extract server address") - } - // Clusters is a map so we have to use range here. - for _, clusterConfig := range kubeconfig.Clusters { - return string(clusterConfig.CertificateAuthorityData), nil - } - - return "", fmt.Errorf("no CACert found") -} - -// StringifyKubeconfig marshals a kubeconfig to its text form. -func StringifyKubeconfig(kubeconfig *clientcmdapi.Config) (string, error) { - kubeconfigBytes, err := clientcmd.Write(*kubeconfig) - if err != nil { - return "", fmt.Errorf("error writing kubeconfig: %w", err) - } - - return string(kubeconfigBytes), nil -} - -// LoadKernelModules returns a script which is responsible for loading all required kernel modules -// The nf_conntrack_ipv4 module get removed in newer kernel versions. -func LoadKernelModulesScript() string { - return `#!/usr/bin/env bash -set -euo pipefail - -modprobe ip_vs -modprobe ip_vs_rr -modprobe ip_vs_wrr -modprobe ip_vs_sh - -if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 -else - modprobe nf_conntrack -fi -` -} - -// KernelSettings returns the list of kernel settings required for a kubernetes worker node -// inotify changes according to https://github.com/kubernetes/kubernetes/issues/10421 - better than letting the kubelet die. 
-func KernelSettings() string { - return `net.bridge.bridge-nf-call-ip6tables = 1 -net.bridge.bridge-nf-call-iptables = 1 -kernel.panic_on_oops = 1 -kernel.panic = 10 -net.ipv4.ip_forward = 1 -vm.overcommit_memory = 1 -fs.inotify.max_user_watches = 1048576 -fs.inotify.max_user_instances = 8192 -` -} - -// JournalDConfig returns the journal config preferable on every node. -func JournalDConfig() string { - // JournaldMaxUse defines the maximum space that journalD logs can occupy. - // https://www.freedesktop.org/software/systemd/man/journald.conf.html#SystemMaxUse= - return `[Journal] -SystemMaxUse=5G -` -} - -type dockerConfig struct { - ExecOpts []string `json:"exec-opts,omitempty"` - StorageDriver string `json:"storage-driver,omitempty"` - StorageOpts []string `json:"storage-opts,omitempty"` - LogDriver string `json:"log-driver,omitempty"` - LogOpts map[string]string `json:"log-opts,omitempty"` - InsecureRegistries []string `json:"insecure-registries,omitempty"` - RegistryMirrors []string `json:"registry-mirrors,omitempty"` -} - -// DockerConfig returns the docker daemon.json. 
-func DockerConfig(insecureRegistries, registryMirrors []string, logMaxFiles string, logMaxSize string) (string, error) { - if len(logMaxSize) > 0 { - // Parse log max size to ensure that it has the correct units - logMaxSize = strings.ToLower(logMaxSize) - logMaxSize = strings.ReplaceAll(logMaxSize, "ki", "k") - logMaxSize = strings.ReplaceAll(logMaxSize, "mi", "m") - logMaxSize = strings.ReplaceAll(logMaxSize, "gi", "g") - } else { - logMaxSize = DefaultDockerContainerLogMaxSize - } - - // Default if value is not provided - if len(logMaxFiles) == 0 { - logMaxFiles = DefaultDockerContainerLogMaxFiles - } - - cfg := dockerConfig{ - ExecOpts: []string{"native.cgroupdriver=systemd"}, - StorageDriver: "overlay2", - LogDriver: "json-file", - LogOpts: map[string]string{ - "max-size": logMaxSize, - "max-file": logMaxFiles, - }, - InsecureRegistries: insecureRegistries, - RegistryMirrors: registryMirrors, - } - - b, err := json.Marshal(cfg) - return string(b), err -} - -func ProxyEnvironment(proxy, noProxy string) string { - return fmt.Sprintf(`HTTP_PROXY=%s -http_proxy=%s -HTTPS_PROXY=%s -https_proxy=%s -NO_PROXY=%s -no_proxy=%s`, proxy, proxy, proxy, proxy, noProxy, noProxy) -} - -func SetupNodeIPEnvScript(ipFamily util.IPFamily) string { - const defaultIfcIPv4 = `DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+")` - - var defaultIfcIP string - switch ipFamily { - case util.IPFamilyIPv4: - defaultIfcIP = defaultIfcIPv4 - case util.IPFamilyIPv6: - defaultIfcIP = `DEFAULT_IFC_IP=$(ip -o -6 route get 1:: | grep -oP "src \K\S+")` - case util.IPFamilyIPv4IPv6: - defaultIfcIP = `DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") -DEFAULT_IFC_IP6=$(ip -o -6 route get 1:: | grep -oP "src \K\S+") -if [ -z "${DEFAULT_IFC_IP6}" ] -then - echodate "Failed to get IPv6 address for the default route interface" - exit 1 -fi -DEFAULT_IFC_IP=$DEFAULT_IFC_IP,$DEFAULT_IFC_IP6` - case util.IPFamilyIPv6IPv4: - defaultIfcIP = `DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src 
\K\S+") -DEFAULT_IFC_IP6=$(ip -o -6 route get 1:: | grep -oP "src \K\S+") -if [ -z "${DEFAULT_IFC_IP6}" ] -then - echodate "Failed to get IPv6 address for the default route interface" - exit 1 -fi -DEFAULT_IFC_IP=$DEFAULT_IFC_IP6,$DEFAULT_IFC_IP` - default: - defaultIfcIP = defaultIfcIPv4 - } - return `#!/usr/bin/env bash -echodate() { - echo "[$(date -Is)]" "$@" -} - -# get the default interface IP address -` + defaultIfcIP + ` - -# get the full hostname -FULL_HOSTNAME=$(hostname -f) - -if [ -z "${DEFAULT_IFC_IP}" ] -then - echodate "Failed to get IP address for the default route interface" - exit 1 -fi - -# write the nodeip_env file -# we need the line below because flatcar has the same string "coreos" in that file -if grep -q coreos /etc/os-release -then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf -elif [ ! -d /etc/systemd/system/kubelet.service.d ] -then - echodate "Can't find kubelet service extras directory" - exit 1 -else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf -fi - ` -} - -func SSHConfigAddendum() string { - return `TrustedUserCAKeys /etc/ssh/trusted-user-ca-keys.pem -CASignatureAlgorithms ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,ssh-ed25519,rsa-sha2-512,rsa-sha2-256,ssh-rsa` -} diff --git a/pkg/userdata/helper/kubelet.go b/pkg/userdata/helper/kubelet.go deleted file mode 100644 index 7d278637a..000000000 --- a/pkg/userdata/helper/kubelet.go +++ /dev/null @@ -1,390 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package helper - -import ( - "fmt" - "net" - "strconv" - "strings" - "text/template" - - "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/util" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/klog" - kubeletv1b1 "k8s.io/kubelet/config/v1beta1" - "k8s.io/utils/pointer" - kyaml "sigs.k8s.io/yaml" -) - -const ( - defaultKubeletContainerLogMaxSize = "100Mi" -) - -func kubeletFlagsTpl(withNodeIP bool) string { - flagsTemplate := `--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ ---kubeconfig=/var/lib/kubelet/kubeconfig \ ---config=/etc/kubernetes/kubelet.conf \ ---cert-dir=/etc/kubernetes/pki \` - - flagsTemplate += ` -{{- if or (.CloudProvider) (.IsExternal) }} -{{ cloudProviderFlags .CloudProvider .IsExternal }} \ -{{- end }}` - - flagsTemplate += `{{- if and (.Hostname) (ne .CloudProvider "aws") }} ---hostname-override={{ .Hostname }} \ -{{- else if and (eq .CloudProvider "aws") (.IsExternal) }} ---hostname-override=${KUBELET_HOSTNAME} \ -{{- end }} ---exit-on-lock-contention \ ---lock-file=/tmp/kubelet.lock \ -{{- if .PauseImage }} ---pod-infra-container-image={{ .PauseImage }} \ -{{- end }} -{{- if .InitialTaints }} ---register-with-taints={{- .InitialTaints }} \ -{{- end }} -{{- range .ExtraKubeletFlags }} -{{ . 
}} \ -{{- end }}` - - if withNodeIP { - flagsTemplate += ` ---node-ip ${KUBELET_NODE_IP}` - } - - return flagsTemplate -} - -const ( - kubeletSystemdUnitTpl = `[Unit] -After={{ .ContainerRuntime }}.service -Requires={{ .ContainerRuntime }}.service - -Description=kubelet: The Kubernetes Node Agent -Documentation=https://kubernetes.io/docs/home/ - -[Service] -User=root -Restart=always -StartLimitInterval=0 -RestartSec=10 -CPUAccounting=true -MemoryAccounting=true - -Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" -EnvironmentFile=-/etc/environment - -ExecStartPre=/bin/bash /opt/load-kernel-modules.sh -{{ if .DisableSwap }} -ExecStartPre=/bin/bash /opt/disable-swap.sh -{{ end }} -ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh -ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ -{{ kubeletFlags .KubeletVersion .CloudProvider .Hostname .ClusterDNSIPs .IsExternal .IPFamily .PauseImage .InitialTaints .ExtraKubeletFlags | indent 2 }} - -[Install] -WantedBy=multi-user.target` - - containerRuntimeHealthCheckSystemdUnitTpl = `[Unit] -Requires={{ .ContainerRuntime }}.service -After={{ .ContainerRuntime }}.service - -[Service] -ExecStart=/opt/bin/health-monitor.sh container-runtime - -[Install] -WantedBy=multi-user.target` -) - -const cpFlags = `--cloud-provider=%s \ ---cloud-config=/etc/kubernetes/cloud-config` - -// List of allowed TLS cipher suites for kubelet. 
-var kubeletTLSCipherSuites = []string{ - // TLS 1.3 cipher suites - "TLS_AES_128_GCM_SHA256", - "TLS_AES_256_GCM_SHA384", - "TLS_CHACHA20_POLY1305_SHA256", - // TLS 1.0 - 1.2 cipher suites - "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", - "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", - "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305", - "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", - "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", - "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305", -} - -func withNodeIPFlag(ipFamily util.IPFamily, cloudProvider string, external bool) bool { - // If external or in-tree CCM is in use we don't need to set --node-ip - // as the cloud provider will know what IPs to return. - if ipFamily.IsDualstack() { - if external || cloudProvider != "" { - return false - } - } - return true -} - -// CloudProviderFlags returns --cloud-provider and --cloud-config flags. -func CloudProviderFlags(cpName string, external bool) string { - if cpName == "" && !external { - return "" - } - - if external { - return `--cloud-provider=external` - } - - return fmt.Sprintf(cpFlags, cpName) -} - -// KubeletSystemdUnit returns the systemd unit for the kubelet. 
-func KubeletSystemdUnit(containerRuntime, kubeletVersion, cloudProvider, hostname string, dnsIPs []net.IP, external bool, ipFamily util.IPFamily, pauseImage string, initialTaints []corev1.Taint, extraKubeletFlags []string, disableSwap bool) (string, error) { - tmpl, err := template.New("kubelet-systemd-unit").Funcs(TxtFuncMap()).Parse(kubeletSystemdUnitTpl) - if err != nil { - return "", fmt.Errorf("failed to parse kubelet-systemd-unit template: %w", err) - } - - data := struct { - ContainerRuntime string - KubeletVersion string - CloudProvider string - Hostname string - ClusterDNSIPs []net.IP - IsExternal bool - IPFamily util.IPFamily - PauseImage string - InitialTaints []corev1.Taint - ExtraKubeletFlags []string - DisableSwap bool - }{ - ContainerRuntime: containerRuntime, - KubeletVersion: kubeletVersion, - CloudProvider: cloudProvider, - Hostname: hostname, - ClusterDNSIPs: dnsIPs, - IsExternal: external, - IPFamily: ipFamily, - PauseImage: pauseImage, - InitialTaints: initialTaints, - ExtraKubeletFlags: extraKubeletFlags, - DisableSwap: disableSwap, - } - - var buf strings.Builder - if err = tmpl.Execute(&buf, data); err != nil { - return "", fmt.Errorf("failed to execute kubelet-systemd-unit template: %w", err) - } - - return buf.String(), nil -} - -// kubeletConfiguration returns marshaled kubelet.config.k8s.io/v1beta1 KubeletConfiguration. 
-func kubeletConfiguration(clusterDomain string, clusterDNS []net.IP, featureGates map[string]bool, kubeletConfigs map[string]string, containerRuntime string) (string, error) { - clusterDNSstr := make([]string, 0, len(clusterDNS)) - for _, ip := range clusterDNS { - clusterDNSstr = append(clusterDNSstr, ip.String()) - } - - cfg := kubeletv1b1.KubeletConfiguration{ - TypeMeta: metav1.TypeMeta{ - Kind: "KubeletConfiguration", - APIVersion: kubeletv1b1.SchemeGroupVersion.String(), - }, - Authentication: kubeletv1b1.KubeletAuthentication{ - X509: kubeletv1b1.KubeletX509Authentication{ - ClientCAFile: "/etc/kubernetes/pki/ca.crt", - }, - Webhook: kubeletv1b1.KubeletWebhookAuthentication{ - Enabled: pointer.Bool(true), - }, - Anonymous: kubeletv1b1.KubeletAnonymousAuthentication{ - Enabled: pointer.Bool(false), - }, - }, - Authorization: kubeletv1b1.KubeletAuthorization{ - Mode: kubeletv1b1.KubeletAuthorizationModeWebhook, - }, - CgroupDriver: "systemd", - ClusterDNS: clusterDNSstr, - ClusterDomain: clusterDomain, - FeatureGates: featureGates, - ProtectKernelDefaults: true, - ReadOnlyPort: 0, - RotateCertificates: true, - ServerTLSBootstrap: true, - StaticPodPath: "/etc/kubernetes/manifests", - KubeReserved: map[string]string{"cpu": "200m", "memory": "200Mi", "ephemeral-storage": "1Gi"}, - SystemReserved: map[string]string{"cpu": "200m", "memory": "200Mi", "ephemeral-storage": "1Gi"}, - EvictionHard: map[string]string{"memory.available": "100Mi", "nodefs.available": "10%", "nodefs.inodesFree": "5%", "imagefs.available": "15%"}, - VolumePluginDir: "/var/lib/kubelet/volumeplugins", - TLSCipherSuites: kubeletTLSCipherSuites, - ContainerLogMaxSize: defaultKubeletContainerLogMaxSize, - } - - if kubeReserved, ok := kubeletConfigs[common.KubeReservedKubeletConfig]; ok { - for _, krPair := range strings.Split(kubeReserved, ",") { - krKV := strings.SplitN(krPair, "=", 2) - if len(krKV) != 2 { - continue - } - cfg.KubeReserved[krKV[0]] = krKV[1] - } - } - - if systemReserved, ok 
:= kubeletConfigs[common.SystemReservedKubeletConfig]; ok { - for _, srPair := range strings.Split(systemReserved, ",") { - srKV := strings.SplitN(srPair, "=", 2) - if len(srKV) != 2 { - continue - } - cfg.SystemReserved[srKV[0]] = srKV[1] - } - } - - if evictionHard, ok := kubeletConfigs[common.EvictionHardKubeletConfig]; ok { - for _, ehPair := range strings.Split(evictionHard, ",") { - ehKV := strings.SplitN(ehPair, "<", 2) - if len(ehKV) != 2 { - continue - } - cfg.EvictionHard[ehKV[0]] = ehKV[1] - } - } - - if maxPods, ok := kubeletConfigs[common.MaxPodsKubeletConfig]; ok { - mp, err := strconv.ParseInt(maxPods, 10, 32) - if err != nil { - // Instead of breaking the workflow, just print a warning and skip the configuration - klog.Warningf("Skipping invalid MaxPods value %v for Kubelet configuration", maxPods) - } else { - cfg.MaxPods = int32(mp) - } - } - - if containerLogMaxSize, ok := kubeletConfigs[common.ContainerLogMaxSizeKubeletConfig]; ok { - cfg.ContainerLogMaxSize = containerLogMaxSize - } - if containerLogMaxFiles, ok := kubeletConfigs[common.ContainerLogMaxFilesKubeletConfig]; ok { - maxFiles, err := strconv.Atoi(containerLogMaxFiles) - if err != nil || maxFiles < 0 { - // Instead of breaking the workflow, just print a warning and skip the configuration - klog.Warningf("Skipping invalid ContainerLogMaxSize value %v for Kubelet configuration", containerLogMaxFiles) - } else { - cfg.ContainerLogMaxFiles = pointer.Int32(int32(maxFiles)) - } - } - - if enabled, ok := featureGates["SeccompDefault"]; ok && enabled { - cfg.SeccompDefault = pointer.Bool(true) - } - - buf, err := kyaml.Marshal(cfg) - return string(buf), err -} - -// KubeletFlags returns the kubelet flags. -// --node-ip and --cloud-provider kubelet flags conflict in the dualstack setup. -// In general, it is not expected to need to use --node-ip with external CCMs, -// as the cloud provider is expected to know the correct IPs to return. 
-// For details read kubernetes/sig-networking channel discussion -// https://kubernetes.slack.com/archives/C09QYUH5W/p1654003958331739 -func KubeletFlags(version, cloudProvider, hostname string, dnsIPs []net.IP, external bool, ipFamily util.IPFamily, pauseImage string, initialTaints []corev1.Taint, extraKubeletFlags []string) (string, error) { - withNodeIPFlag := withNodeIPFlag(ipFamily, cloudProvider, external) - - tmpl, err := template.New("kubelet-flags").Funcs(TxtFuncMap()). - Parse(kubeletFlagsTpl(withNodeIPFlag)) - if err != nil { - return "", fmt.Errorf("failed to parse kubelet-flags template: %w", err) - } - - initialTaintsArgs := []string{} - for _, taint := range initialTaints { - initialTaintsArgs = append(initialTaintsArgs, fmt.Sprintf("%s=%s:%s", taint.Key, taint.Value, taint.Effect)) - } - - kubeletFlags := make([]string, len(extraKubeletFlags)) - copy(kubeletFlags, extraKubeletFlags) - - data := struct { - CloudProvider string - Hostname string - ClusterDNSIPs []net.IP - KubeletVersion string - IsExternal bool - IPFamily util.IPFamily - PauseImage string - InitialTaints string - ExtraKubeletFlags []string - }{ - CloudProvider: cloudProvider, - Hostname: hostname, - ClusterDNSIPs: dnsIPs, - KubeletVersion: version, - IsExternal: external, - IPFamily: ipFamily, - PauseImage: pauseImage, - InitialTaints: strings.Join(initialTaintsArgs, ","), - ExtraKubeletFlags: kubeletFlags, - } - - var buf strings.Builder - if err = tmpl.Execute(&buf, data); err != nil { - return "", fmt.Errorf("failed to execute kubelet-flags template: %w", err) - } - - return buf.String(), nil -} - -// KubeletHealthCheckSystemdUnit kubelet health checking systemd unit. -func KubeletHealthCheckSystemdUnit() string { - return `[Unit] -Requires=kubelet.service -After=kubelet.service - -[Service] -ExecStart=/opt/bin/health-monitor.sh kubelet - -[Install] -WantedBy=multi-user.target -` -} - -// ContainerRuntimeHealthCheckSystemdUnit container-runtime health checking systemd unit. 
-func ContainerRuntimeHealthCheckSystemdUnit(containerRuntime string) (string, error) { - tmpl, err := template.New("container-runtime-healthcheck-systemd-unit").Funcs(TxtFuncMap()).Parse(containerRuntimeHealthCheckSystemdUnitTpl) - if err != nil { - return "", fmt.Errorf("failed to parse container-runtime-healthcheck-systemd-unit template: %w", err) - } - - data := struct { - ContainerRuntime string - }{ - ContainerRuntime: containerRuntime, - } - - var buf strings.Builder - if err = tmpl.Execute(&buf, data); err != nil { - return "", fmt.Errorf("failed to execute container-runtime-healthcheck-systemd-unit template: %w", err) - } - return buf.String(), nil -} diff --git a/pkg/userdata/helper/kubelet_test.go b/pkg/userdata/helper/kubelet_test.go deleted file mode 100644 index f706bc9c6..000000000 --- a/pkg/userdata/helper/kubelet_test.go +++ /dev/null @@ -1,143 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package helper - -import ( - "fmt" - "net" - "testing" - - "github.com/Masterminds/semver/v3" - - "github.com/kubermatic/machine-controller/pkg/cloudprovider/util" - testhelper "github.com/kubermatic/machine-controller/pkg/test" - - corev1 "k8s.io/api/core/v1" -) - -type kubeletFlagTestCase struct { - name string - containerRuntime string - version *semver.Version - dnsIPs []net.IP - hostname string - cloudProvider string - external bool - ipFamily util.IPFamily - pauseImage string - initialTaints []corev1.Taint - extraFlags []string -} - -func TestKubeletSystemdUnit(t *testing.T) { - var tests []kubeletFlagTestCase - for _, version := range versions { - tests = append(tests, - kubeletFlagTestCase{ - name: fmt.Sprintf("version-%s", version.Original()), - version: version, - dnsIPs: []net.IP{net.ParseIP("10.10.10.10")}, - hostname: "some-test-node", - }, - kubeletFlagTestCase{ - name: fmt.Sprintf("version-%s-external", version.Original()), - version: version, - dnsIPs: []net.IP{net.ParseIP("10.10.10.10")}, - hostname: "some-test-node", - external: true, - }, - ) - } - tests = append(tests, []kubeletFlagTestCase{ - { - name: "multiple-dns-servers", - version: semver.MustParse("v1.24.9"), - dnsIPs: []net.IP{ - net.ParseIP("10.10.10.10"), - net.ParseIP("10.10.10.11"), - net.ParseIP("10.10.10.12"), - }, - hostname: "some-test-node", - }, - { - name: "cloud-provider-set", - version: semver.MustParse("v1.24.9"), - dnsIPs: []net.IP{net.ParseIP("10.10.10.10")}, - hostname: "some-test-node", - cloudProvider: "aws", - }, - { - name: "pause-image-set", - version: semver.MustParse("v1.24.9"), - dnsIPs: []net.IP{net.ParseIP("10.10.10.10")}, - hostname: "some-test-node", - cloudProvider: "aws", - pauseImage: "192.168.100.100:5000/kubernetes/pause:v3.1", - }, - { - name: "taints-set", - version: semver.MustParse("v1.24.9"), - dnsIPs: []net.IP{net.ParseIP("10.10.10.10")}, - hostname: "some-test-node", - cloudProvider: "aws", - initialTaints: []corev1.Taint{ - { - Key: 
"key1", - Value: "value1", - Effect: corev1.TaintEffectNoSchedule, - }, - { - Key: "key2", - Value: "value2", - Effect: corev1.TaintEffectNoExecute, - }, - }, - }, - }...) - - for _, test := range tests { - name := fmt.Sprintf("kublet_systemd_unit_%s", test.name) - t.Run(name, func(t *testing.T) { - out, err := KubeletSystemdUnit( - defaultTo(test.containerRuntime, "docker"), - test.version.String(), - test.cloudProvider, - test.hostname, - test.dnsIPs, - test.external, - test.ipFamily, - test.pauseImage, - test.initialTaints, - test.extraFlags, - true, - ) - if err != nil { - t.Error(err) - } - goldenName := name + ".golden" - testhelper.CompareOutput(t, goldenName, out, *update) - }) - } -} - -func defaultTo(in string, defaultValue string) string { - if in == "" { - return defaultValue - } - - return in -} diff --git a/pkg/userdata/helper/template_functions.go b/pkg/userdata/helper/template_functions.go deleted file mode 100644 index faeb1ea00..000000000 --- a/pkg/userdata/helper/template_functions.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package helper - -import ( - "regexp" - "text/template" - - "github.com/Masterminds/sprig/v3" -) - -// TxtFuncMap returns an aggregated template function map. Currently (custom functions + sprig). 
-func TxtFuncMap() template.FuncMap { - funcMap := sprig.TxtFuncMap() - - funcMap["downloadBinariesScript"] = DownloadBinariesScript - funcMap["safeDownloadBinariesScript"] = SafeDownloadBinariesScript - funcMap["kubeletSystemdUnit"] = KubeletSystemdUnit - funcMap["kubeletConfiguration"] = kubeletConfiguration - funcMap["kubeletFlags"] = KubeletFlags - funcMap["cloudProviderFlags"] = CloudProviderFlags - funcMap["kernelModulesScript"] = LoadKernelModulesScript - funcMap["kernelSettings"] = KernelSettings - funcMap["journalDConfig"] = JournalDConfig - funcMap["kubeletHealthCheckSystemdUnit"] = KubeletHealthCheckSystemdUnit - funcMap["containerRuntimeHealthCheckSystemdUnit"] = ContainerRuntimeHealthCheckSystemdUnit - funcMap["dockerConfig"] = DockerConfig - funcMap["proxyEnvironment"] = ProxyEnvironment - funcMap["sshConfigAddendum"] = SSHConfigAddendum - - return funcMap -} - -// CleanupTemplateOutput postprocesses the output of the template processing. Those -// may exist due to the working of template functions like those of the sprig package -// or template condition. -func CleanupTemplateOutput(output string) (string, error) { - // Valid YAML files are not allowed to have empty lines containing spaces or tabs. - // So far only cleanup. - woBlankLines := regexp.MustCompile(`(?m)^[ \t]+$`).ReplaceAllString(output, "") - return woBlankLines, nil -} diff --git a/pkg/userdata/helper/testdata/download_binaries_v1.24.9.golden b/pkg/userdata/helper/testdata/download_binaries_v1.24.9.golden deleted file mode 100644 index 507832442..000000000 --- a/pkg/userdata/helper/testdata/download_binaries_v1.24.9.golden +++ /dev/null @@ -1,17 +0,0 @@ -mkdir -p /opt/bin/ -mkdir -p /var/lib/calico -mkdir -p /etc/kubernetes/manifests -mkdir -p /etc/cni/net.d -mkdir -p /opt/cni/bin -if [ ! -f /opt/cni/bin/loopback ]; then - curl -L https://github.com/containernetworking/plugins/releases/download/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz | tar -xvzC /opt/cni/bin -f - -fi -if [ ! 
-f /opt/bin/kubelet ]; then - curl -Lfo /opt/bin/kubelet https://storage.googleapis.com/kubernetes-release/release/v1.24.9/bin/linux/amd64/kubelet - chmod +x /opt/bin/kubelet -fi - -if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/8b5b66e4910a6228dfaecccaa0a3b05ec4902f8e/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh -fi diff --git a/pkg/userdata/helper/testdata/download_binaries_v1.25.5.golden b/pkg/userdata/helper/testdata/download_binaries_v1.25.5.golden deleted file mode 100644 index c824f0189..000000000 --- a/pkg/userdata/helper/testdata/download_binaries_v1.25.5.golden +++ /dev/null @@ -1,17 +0,0 @@ -mkdir -p /opt/bin/ -mkdir -p /var/lib/calico -mkdir -p /etc/kubernetes/manifests -mkdir -p /etc/cni/net.d -mkdir -p /opt/cni/bin -if [ ! -f /opt/cni/bin/loopback ]; then - curl -L https://github.com/containernetworking/plugins/releases/download/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz | tar -xvzC /opt/cni/bin -f - -fi -if [ ! -f /opt/bin/kubelet ]; then - curl -Lfo /opt/bin/kubelet https://storage.googleapis.com/kubernetes-release/release/v1.25.5/bin/linux/amd64/kubelet - chmod +x /opt/bin/kubelet -fi - -if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/8b5b66e4910a6228dfaecccaa0a3b05ec4902f8e/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh -fi diff --git a/pkg/userdata/helper/testdata/download_binaries_v1.26.0.golden b/pkg/userdata/helper/testdata/download_binaries_v1.26.0.golden deleted file mode 100644 index 2d3ea0401..000000000 --- a/pkg/userdata/helper/testdata/download_binaries_v1.26.0.golden +++ /dev/null @@ -1,17 +0,0 @@ -mkdir -p /opt/bin/ -mkdir -p /var/lib/calico -mkdir -p /etc/kubernetes/manifests -mkdir -p /etc/cni/net.d -mkdir -p /opt/cni/bin -if [ ! 
-f /opt/cni/bin/loopback ]; then - curl -L https://github.com/containernetworking/plugins/releases/download/v0.8.7/cni-plugins-linux-amd64-v0.8.7.tgz | tar -xvzC /opt/cni/bin -f - -fi -if [ ! -f /opt/bin/kubelet ]; then - curl -Lfo /opt/bin/kubelet https://storage.googleapis.com/kubernetes-release/release/v1.26.0/bin/linux/amd64/kubelet - chmod +x /opt/bin/kubelet -fi - -if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/8b5b66e4910a6228dfaecccaa0a3b05ec4902f8e/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh -fi diff --git a/pkg/userdata/helper/testdata/kublet_systemd_unit_cloud-provider-set.golden b/pkg/userdata/helper/testdata/kublet_systemd_unit_cloud-provider-set.golden deleted file mode 100644 index 668276671..000000000 --- a/pkg/userdata/helper/testdata/kublet_systemd_unit_cloud-provider-set.golden +++ /dev/null @@ -1,36 +0,0 @@ -[Unit] -After=docker.service -Requires=docker.service - -Description=kubelet: The Kubernetes Node Agent -Documentation=https://kubernetes.io/docs/home/ - -[Service] -User=root -Restart=always -StartLimitInterval=0 -RestartSec=10 -CPUAccounting=true -MemoryAccounting=true - -Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" -EnvironmentFile=-/etc/environment - -ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - -ExecStartPre=/bin/bash /opt/disable-swap.sh - -ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh -ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=aws \ - --cloud-config=/etc/kubernetes/cloud-config \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --node-ip ${KUBELET_NODE_IP} - -[Install] -WantedBy=multi-user.target \ No newline at end of 
file diff --git a/pkg/userdata/helper/testdata/kublet_systemd_unit_multiple-dns-servers.golden b/pkg/userdata/helper/testdata/kublet_systemd_unit_multiple-dns-servers.golden deleted file mode 100644 index e70567560..000000000 --- a/pkg/userdata/helper/testdata/kublet_systemd_unit_multiple-dns-servers.golden +++ /dev/null @@ -1,35 +0,0 @@ -[Unit] -After=docker.service -Requires=docker.service - -Description=kubelet: The Kubernetes Node Agent -Documentation=https://kubernetes.io/docs/home/ - -[Service] -User=root -Restart=always -StartLimitInterval=0 -RestartSec=10 -CPUAccounting=true -MemoryAccounting=true - -Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" -EnvironmentFile=-/etc/environment - -ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - -ExecStartPre=/bin/bash /opt/disable-swap.sh - -ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh -ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --hostname-override=some-test-node \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --node-ip ${KUBELET_NODE_IP} - -[Install] -WantedBy=multi-user.target \ No newline at end of file diff --git a/pkg/userdata/helper/testdata/kublet_systemd_unit_pause-image-set.golden b/pkg/userdata/helper/testdata/kublet_systemd_unit_pause-image-set.golden deleted file mode 100644 index 25b83f161..000000000 --- a/pkg/userdata/helper/testdata/kublet_systemd_unit_pause-image-set.golden +++ /dev/null @@ -1,37 +0,0 @@ -[Unit] -After=docker.service -Requires=docker.service - -Description=kubelet: The Kubernetes Node Agent -Documentation=https://kubernetes.io/docs/home/ - -[Service] -User=root -Restart=always -StartLimitInterval=0 -RestartSec=10 -CPUAccounting=true -MemoryAccounting=true - 
-Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" -EnvironmentFile=-/etc/environment - -ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - -ExecStartPre=/bin/bash /opt/disable-swap.sh - -ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh -ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=aws \ - --cloud-config=/etc/kubernetes/cloud-config \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --pod-infra-container-image=192.168.100.100:5000/kubernetes/pause:v3.1 \ - --node-ip ${KUBELET_NODE_IP} - -[Install] -WantedBy=multi-user.target \ No newline at end of file diff --git a/pkg/userdata/helper/testdata/kublet_systemd_unit_taints-set.golden b/pkg/userdata/helper/testdata/kublet_systemd_unit_taints-set.golden deleted file mode 100644 index 689c9f202..000000000 --- a/pkg/userdata/helper/testdata/kublet_systemd_unit_taints-set.golden +++ /dev/null @@ -1,37 +0,0 @@ -[Unit] -After=docker.service -Requires=docker.service - -Description=kubelet: The Kubernetes Node Agent -Documentation=https://kubernetes.io/docs/home/ - -[Service] -User=root -Restart=always -StartLimitInterval=0 -RestartSec=10 -CPUAccounting=true -MemoryAccounting=true - -Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" -EnvironmentFile=-/etc/environment - -ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - -ExecStartPre=/bin/bash /opt/disable-swap.sh - -ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh -ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=aws \ - --cloud-config=/etc/kubernetes/cloud-config \ 
- --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --register-with-taints=key1=value1:NoSchedule,key2=value2:NoExecute \ - --node-ip ${KUBELET_NODE_IP} - -[Install] -WantedBy=multi-user.target \ No newline at end of file diff --git a/pkg/userdata/helper/testdata/kublet_systemd_unit_version-v1.24.9-external.golden b/pkg/userdata/helper/testdata/kublet_systemd_unit_version-v1.24.9-external.golden deleted file mode 100644 index 50f4f5138..000000000 --- a/pkg/userdata/helper/testdata/kublet_systemd_unit_version-v1.24.9-external.golden +++ /dev/null @@ -1,36 +0,0 @@ -[Unit] -After=docker.service -Requires=docker.service - -Description=kubelet: The Kubernetes Node Agent -Documentation=https://kubernetes.io/docs/home/ - -[Service] -User=root -Restart=always -StartLimitInterval=0 -RestartSec=10 -CPUAccounting=true -MemoryAccounting=true - -Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" -EnvironmentFile=-/etc/environment - -ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - -ExecStartPre=/bin/bash /opt/disable-swap.sh - -ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh -ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=external \ - --hostname-override=some-test-node \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --node-ip ${KUBELET_NODE_IP} - -[Install] -WantedBy=multi-user.target \ No newline at end of file diff --git a/pkg/userdata/helper/testdata/kublet_systemd_unit_version-v1.24.9.golden b/pkg/userdata/helper/testdata/kublet_systemd_unit_version-v1.24.9.golden deleted file mode 100644 index e70567560..000000000 --- a/pkg/userdata/helper/testdata/kublet_systemd_unit_version-v1.24.9.golden +++ /dev/null @@ -1,35 +0,0 @@ -[Unit] -After=docker.service -Requires=docker.service - 
-Description=kubelet: The Kubernetes Node Agent -Documentation=https://kubernetes.io/docs/home/ - -[Service] -User=root -Restart=always -StartLimitInterval=0 -RestartSec=10 -CPUAccounting=true -MemoryAccounting=true - -Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" -EnvironmentFile=-/etc/environment - -ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - -ExecStartPre=/bin/bash /opt/disable-swap.sh - -ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh -ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --hostname-override=some-test-node \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --node-ip ${KUBELET_NODE_IP} - -[Install] -WantedBy=multi-user.target \ No newline at end of file diff --git a/pkg/userdata/helper/testdata/kublet_systemd_unit_version-v1.25.5-external.golden b/pkg/userdata/helper/testdata/kublet_systemd_unit_version-v1.25.5-external.golden deleted file mode 100644 index 50f4f5138..000000000 --- a/pkg/userdata/helper/testdata/kublet_systemd_unit_version-v1.25.5-external.golden +++ /dev/null @@ -1,36 +0,0 @@ -[Unit] -After=docker.service -Requires=docker.service - -Description=kubelet: The Kubernetes Node Agent -Documentation=https://kubernetes.io/docs/home/ - -[Service] -User=root -Restart=always -StartLimitInterval=0 -RestartSec=10 -CPUAccounting=true -MemoryAccounting=true - -Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" -EnvironmentFile=-/etc/environment - -ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - -ExecStartPre=/bin/bash /opt/disable-swap.sh - -ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh -ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ 
- --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=external \ - --hostname-override=some-test-node \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --node-ip ${KUBELET_NODE_IP} - -[Install] -WantedBy=multi-user.target \ No newline at end of file diff --git a/pkg/userdata/helper/testdata/kublet_systemd_unit_version-v1.25.5.golden b/pkg/userdata/helper/testdata/kublet_systemd_unit_version-v1.25.5.golden deleted file mode 100644 index e70567560..000000000 --- a/pkg/userdata/helper/testdata/kublet_systemd_unit_version-v1.25.5.golden +++ /dev/null @@ -1,35 +0,0 @@ -[Unit] -After=docker.service -Requires=docker.service - -Description=kubelet: The Kubernetes Node Agent -Documentation=https://kubernetes.io/docs/home/ - -[Service] -User=root -Restart=always -StartLimitInterval=0 -RestartSec=10 -CPUAccounting=true -MemoryAccounting=true - -Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" -EnvironmentFile=-/etc/environment - -ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - -ExecStartPre=/bin/bash /opt/disable-swap.sh - -ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh -ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --hostname-override=some-test-node \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --node-ip ${KUBELET_NODE_IP} - -[Install] -WantedBy=multi-user.target \ No newline at end of file diff --git a/pkg/userdata/helper/testdata/kublet_systemd_unit_version-v1.26.0-external.golden b/pkg/userdata/helper/testdata/kublet_systemd_unit_version-v1.26.0-external.golden deleted file mode 100644 index 50f4f5138..000000000 --- a/pkg/userdata/helper/testdata/kublet_systemd_unit_version-v1.26.0-external.golden +++ /dev/null @@ -1,36 +0,0 @@ -[Unit] 
-After=docker.service -Requires=docker.service - -Description=kubelet: The Kubernetes Node Agent -Documentation=https://kubernetes.io/docs/home/ - -[Service] -User=root -Restart=always -StartLimitInterval=0 -RestartSec=10 -CPUAccounting=true -MemoryAccounting=true - -Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" -EnvironmentFile=-/etc/environment - -ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - -ExecStartPre=/bin/bash /opt/disable-swap.sh - -ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh -ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=external \ - --hostname-override=some-test-node \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --node-ip ${KUBELET_NODE_IP} - -[Install] -WantedBy=multi-user.target \ No newline at end of file diff --git a/pkg/userdata/helper/testdata/kublet_systemd_unit_version-v1.26.0.golden b/pkg/userdata/helper/testdata/kublet_systemd_unit_version-v1.26.0.golden deleted file mode 100644 index e70567560..000000000 --- a/pkg/userdata/helper/testdata/kublet_systemd_unit_version-v1.26.0.golden +++ /dev/null @@ -1,35 +0,0 @@ -[Unit] -After=docker.service -Requires=docker.service - -Description=kubelet: The Kubernetes Node Agent -Documentation=https://kubernetes.io/docs/home/ - -[Service] -User=root -Restart=always -StartLimitInterval=0 -RestartSec=10 -CPUAccounting=true -MemoryAccounting=true - -Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" -EnvironmentFile=-/etc/environment - -ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - -ExecStartPre=/bin/bash /opt/disable-swap.sh - -ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh -ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - 
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --hostname-override=some-test-node \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --node-ip ${KUBELET_NODE_IP} - -[Install] -WantedBy=multi-user.target \ No newline at end of file diff --git a/pkg/userdata/helper/testdata/safe_download_binaries_v1.24.9.golden b/pkg/userdata/helper/testdata/safe_download_binaries_v1.24.9.golden deleted file mode 100644 index 98f57b9f7..000000000 --- a/pkg/userdata/helper/testdata/safe_download_binaries_v1.24.9.golden +++ /dev/null @@ -1,65 +0,0 @@ -opt_bin=/opt/bin -usr_local_bin=/usr/local/bin -cni_bin_dir=/opt/cni/bin -mkdir -p /etc/cni/net.d /etc/kubernetes/manifests "$opt_bin" "$cni_bin_dir" -arch=${HOST_ARCH-} -if [ -z "$arch" ] -then -case $(uname -m) in -x86_64) - arch="amd64" - ;; -aarch64) - arch="arm64" - ;; -*) - echo "unsupported CPU architecture, exiting" - exit 1 - ;; -esac -fi -CNI_VERSION="${CNI_VERSION:-v1.2.0}" -cni_base_url="https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION" -cni_filename="cni-plugins-linux-$arch-$CNI_VERSION.tgz" -curl -Lfo "$cni_bin_dir/$cni_filename" "$cni_base_url/$cni_filename" -cni_sum=$(curl -Lf "$cni_base_url/$cni_filename.sha256") -cd "$cni_bin_dir" -sha256sum -c <<<"$cni_sum" -tar xvf "$cni_filename" -rm -f "$cni_filename" -cd - -CRI_TOOLS_RELEASE="${CRI_TOOLS_RELEASE:-v1.26.0}" -cri_tools_base_url="https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRI_TOOLS_RELEASE}" -cri_tools_filename="crictl-${CRI_TOOLS_RELEASE}-linux-${arch}.tar.gz" -curl -Lfo "$opt_bin/$cri_tools_filename" "$cri_tools_base_url/$cri_tools_filename" -cri_tools_sum_value=$(curl -Lf "$cri_tools_base_url/$cri_tools_filename.sha256") -cri_tools_sum="$cri_tools_sum_value $cri_tools_filename" -cd "$opt_bin" -sha256sum -c <<<"$cri_tools_sum" -tar xvf "$cri_tools_filename" -rm 
-f "$cri_tools_filename" -ln -sf "$opt_bin/crictl" "$usr_local_bin"/crictl || echo "symbolic link is skipped" -cd - -KUBE_VERSION="${KUBE_VERSION:-v1.24.9}" -kube_dir="$opt_bin/kubernetes-$KUBE_VERSION" -kube_base_url="https://storage.googleapis.com/kubernetes-release/release/$KUBE_VERSION/bin/linux/$arch" -kube_sum_file="$kube_dir/sha256" -mkdir -p "$kube_dir" -: >"$kube_sum_file" - -for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" -done -sha256sum -c "$kube_sum_file" - -for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin -done - -if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh -fi diff --git a/pkg/userdata/manager/manager.go b/pkg/userdata/manager/manager.go deleted file mode 100644 index 473899333..000000000 --- a/pkg/userdata/manager/manager.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// -// UserData plugin manager. -// - -// Package manager provides the instantiation and -// running of the plugins on machine controller side. 
-package manager - -import ( - "errors" - "flag" - - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" - - "k8s.io/klog" -) - -var ( - // ErrLocatingPlugins is returned when a new manager cannot locate - // the plugins for the supported operating systems. - ErrLocatingPlugins = errors.New("one or more user data plugins not found") - - // ErrPluginNotFound describes an invalid operating system for - // a user data plugin. Here directory has to be checked if - // correct ones are installed. - ErrPluginNotFound = errors.New("no user data plugin for the given operating system found") - - // supportedOS contains a list of operating systems the machine - // controller supports. - supportedOS = []providerconfigtypes.OperatingSystem{ - providerconfigtypes.OperatingSystemAmazonLinux2, - providerconfigtypes.OperatingSystemCentOS, - providerconfigtypes.OperatingSystemFlatcar, - providerconfigtypes.OperatingSystemRHEL, - providerconfigtypes.OperatingSystemUbuntu, - providerconfigtypes.OperatingSystemRockyLinux, - } -) - -// Manager inits and manages the userdata plugins. -type Manager struct { - debug bool - plugins map[providerconfigtypes.OperatingSystem]*Plugin -} - -// New returns an initialised plugin manager. -func New() (*Manager, error) { - m := &Manager{ - plugins: make(map[providerconfigtypes.OperatingSystem]*Plugin), - } - flag.BoolVar(&m.debug, "plugin-debug", false, "Switch for enabling the plugin debugging") - m.locatePlugins() - if len(m.plugins) < len(supportedOS) { - return nil, ErrLocatingPlugins - } - return m, nil -} - -// ForOS returns the plugin for the given operating system. -func (m *Manager) ForOS(os providerconfigtypes.OperatingSystem) (p *Plugin, err error) { - var found bool - if p, found = m.plugins[os]; !found { - return nil, ErrPluginNotFound - } - return p, nil -} - -// locatePlugins tries to find the plugins and inits their wrapper. 
-func (m *Manager) locatePlugins() { - for _, os := range supportedOS { - plugin, err := newPlugin(os, m.debug) - if err != nil { - klog.Errorf("cannot use plugin '%v': %v", os, err) - continue - } - m.plugins[os] = plugin - } -} diff --git a/pkg/userdata/manager/plugin.go b/pkg/userdata/manager/plugin.go deleted file mode 100644 index 69fdc7199..000000000 --- a/pkg/userdata/manager/plugin.go +++ /dev/null @@ -1,141 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// -// UserData plugin manager. -// - -package manager - -import ( - "encoding/json" - "fmt" - "os" - "os/exec" - "path/filepath" - "strings" - - "github.com/kubermatic/machine-controller/pkg/apis/plugin" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" - - "k8s.io/klog" -) - -const ( - // pluginPrefix has to be the prefix of all plugin filenames. - pluginPrefix = "machine-controller-userdata-" -) - -// Plugin looks for the plugin executable and calls it for -// each request. -type Plugin struct { - debug bool - command string -} - -// newPlugin creates a new plugin manager. It starts the named -// binary and connects to it via net/rpc. 
-func newPlugin(os providerconfigtypes.OperatingSystem, debug bool) (*Plugin, error) { - p := &Plugin{ - debug: debug, - } - if err := p.findPlugin(string(os)); err != nil { - return nil, err - } - return p, nil -} - -// UserData retrieves the user data of the given resource via -// plugin handling the communication. -func (p *Plugin) UserData(req plugin.UserDataRequest) (string, error) { - // Prepare command. - var argv []string - if p.debug { - argv = append(argv, "-debug") - } - cmd := exec.Command(p.command, argv...) - // Set environment. - reqj, err := json.Marshal(req) - if err != nil { - return "", err - } - cmd.Env = append(os.Environ(), fmt.Sprintf("%s=%s", plugin.EnvUserDataRequest, string(reqj))) - // Execute command. - out, err := cmd.CombinedOutput() - if err != nil { - return "", fmt.Errorf("failed to execute command %q: output: %q error: %w", p.command, string(out), err) - } - var resp plugin.UserDataResponse - err = json.Unmarshal(out, &resp) - if err != nil { - return "", err - } - if resp.Err != "" { - return "", fmt.Errorf("%s", resp.Err) - } - return resp.UserData, nil -} - -// findPlugin tries to find the executable of the plugin. -func (p *Plugin) findPlugin(name string) error { - filename := pluginPrefix + name - klog.Infof("looking for plugin %q", filename) - // Create list to search in. - var dirs []string - envDir := os.Getenv(plugin.EnvPluginDir) - if envDir != "" { - dirs = append(dirs, envDir) - } - executable, err := os.Executable() - if err != nil { - return err - } - ownDir, _ := filepath.Split(executable) - ownDir, err = filepath.Abs(ownDir) - if err != nil { - return err - } - dirs = append(dirs, ownDir) - workingDir, err := os.Getwd() - if err != nil { - return err - } - dirs = append(dirs, workingDir) - path := os.Getenv("PATH") - pathDirs := strings.Split(path, string(os.PathListSeparator)) - dirs = append(dirs, pathDirs...) - // Now take a look. 
- for _, dir := range dirs { - command := dir + string(os.PathSeparator) + filename - klog.V(3).Infof("checking %q", command) - fi, err := os.Stat(command) - if err != nil { - if os.IsNotExist(err) { - continue - } - return fmt.Errorf("error when looking for %q: %w", command, err) - } - if fi.IsDir() || (fi.Mode()&0111 == 0) { - klog.Infof("found '%s', but is no executable", command) - continue - } - p.command = command - klog.Infof("found '%s'", command) - return nil - } - klog.Errorf("did not find '%s'", filename) - return ErrPluginNotFound -} diff --git a/pkg/userdata/plugin/plugin.go b/pkg/userdata/plugin/plugin.go deleted file mode 100644 index 4ceb3a581..000000000 --- a/pkg/userdata/plugin/plugin.go +++ /dev/null @@ -1,88 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// -// Core UserData plugin. -// - -// Package plugin provides the plugin side of the plugin mechanism. -// Individual plugins have to implement the provider interface, -// pass it to a new plugin instance, and call run. -package plugin - -import ( - "encoding/json" - "fmt" - "os" - - "github.com/kubermatic/machine-controller/pkg/apis/plugin" -) - -// Provider defines the interface each plugin has to implement -// for the retrieval of the userdata based on the given arguments. 
-type Provider interface { - UserData(req plugin.UserDataRequest) (string, error) -} - -// Plugin implements a convenient helper to map the request to the given -// provider and return the response. -type Plugin struct { - provider Provider - debug bool -} - -// New creates a new plugin. -func New(provider Provider, debug bool) *Plugin { - return &Plugin{ - provider: provider, - debug: debug, - } -} - -// Run looks for the given request and executes it. -func (p *Plugin) Run() error { - reqEnv := os.Getenv(plugin.EnvUserDataRequest) - if reqEnv == "" { - resp := plugin.ErrorResponse{ - Err: fmt.Sprintf("environment variable '%s' not set", plugin.EnvUserDataRequest), - } - return p.printResponse(resp) - } - // Handle the request for user data. - var req plugin.UserDataRequest - err := json.Unmarshal([]byte(reqEnv), &req) - if err != nil { - return err - } - userData, err := p.provider.UserData(req) - var resp plugin.UserDataResponse - if err != nil { - resp.Err = err.Error() - } else { - resp.UserData = userData - } - return p.printResponse(resp) -} - -// printResponse marshals the response and prints it to stdout. -func (p *Plugin) printResponse(resp interface{}) error { - bs, err := json.Marshal(resp) - if err != nil { - return err - } - _, err = fmt.Printf("%s", string(bs)) - return err -} diff --git a/pkg/userdata/rhel/provider.go b/pkg/userdata/rhel/provider.go deleted file mode 100644 index 5eee08984..000000000 --- a/pkg/userdata/rhel/provider.go +++ /dev/null @@ -1,439 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// -// UserData plugin for RHEL. -// - -package rhel - -import ( - "errors" - "fmt" - "strings" - "text/template" - - "github.com/Masterminds/semver/v3" - - "github.com/kubermatic/machine-controller/pkg/apis/plugin" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" - userdatahelper "github.com/kubermatic/machine-controller/pkg/userdata/helper" -) - -// Provider is a pkg/userdata/plugin.Provider implementation. -type Provider struct{} - -// UserData renders user-data template to string. -func (p Provider) UserData(req plugin.UserDataRequest) (string, error) { - tmpl, err := template.New("user-data").Funcs(userdatahelper.TxtFuncMap()).Parse(userDataTemplate) - if err != nil { - return "", fmt.Errorf("failed to parse user-data template: %w", err) - } - - kubeletVersion, err := semver.NewVersion(req.MachineSpec.Versions.Kubelet) - if err != nil { - return "", fmt.Errorf("invalid kubelet version: %w", err) - } - - pconfig, err := providerconfigtypes.GetConfig(req.MachineSpec.ProviderSpec) - if err != nil { - return "", fmt.Errorf("failed to get provider config: %w", err) - } - - if pconfig.OverwriteCloudConfig != nil { - req.CloudConfig = *pconfig.OverwriteCloudConfig - } - - if pconfig.Network.IsStaticIPConfig() { - return "", errors.New("static IP config is not supported with RHEL") - } - - rhelConfig, err := LoadConfig(pconfig.OperatingSystemSpec) - if err != nil { - return "", fmt.Errorf("failed to parse OperatingSystemSpec: %w", err) - } - - kubeconfigString, err := userdatahelper.StringifyKubeconfig(req.Kubeconfig) - if err != nil { - return "", err - } - - kubernetesCACert, err := userdatahelper.GetCACert(req.Kubeconfig) - if err != nil { - return "", fmt.Errorf("error extracting cacert: %w", err) - } - - crEngine := req.ContainerRuntime.Engine(kubeletVersion) - crScript, err := 
crEngine.ScriptFor(providerconfigtypes.OperatingSystemRHEL) - if err != nil { - return "", fmt.Errorf("failed to generate container runtime install script: %w", err) - } - - crConfig, err := crEngine.Config() - if err != nil { - return "", fmt.Errorf("failed to generate container runtime config: %w", err) - } - - crAuthConfig, err := crEngine.AuthConfig() - if err != nil { - return "", fmt.Errorf("failed to generate container runtime auth config: %w", err) - } - - data := struct { - plugin.UserDataRequest - ProviderSpec *providerconfigtypes.Config - OSConfig *Config - KubeletVersion string - Kubeconfig string - KubernetesCACert string - NodeIPScript string - ExtraKubeletFlags []string - ContainerRuntimeScript string - ContainerRuntimeConfigFileName string - ContainerRuntimeConfig string - ContainerRuntimeAuthConfigFileName string - ContainerRuntimeAuthConfig string - ContainerRuntimeName string - }{ - UserDataRequest: req, - ProviderSpec: pconfig, - OSConfig: rhelConfig, - KubeletVersion: kubeletVersion.String(), - Kubeconfig: kubeconfigString, - KubernetesCACert: kubernetesCACert, - NodeIPScript: userdatahelper.SetupNodeIPEnvScript(pconfig.Network.GetIPFamily()), - ExtraKubeletFlags: crEngine.KubeletFlags(), - ContainerRuntimeScript: crScript, - ContainerRuntimeConfigFileName: crEngine.ConfigFileName(), - ContainerRuntimeConfig: crConfig, - ContainerRuntimeAuthConfigFileName: crEngine.AuthConfigFileName(), - ContainerRuntimeAuthConfig: crAuthConfig, - ContainerRuntimeName: crEngine.String(), - } - - var buf strings.Builder - if err = tmpl.Execute(&buf, data); err != nil { - return "", fmt.Errorf("failed to execute user-data template: %w", err) - } - - return userdatahelper.CleanupTemplateOutput(buf.String()) -} - -// UserData template. -const userDataTemplate = `#cloud-config -bootcmd: -- modprobe ip_tables -{{ if ne .CloudProviderName "aws" }} -hostname: {{ .MachineSpec.Name }} -fqdn: {{ .MachineSpec.Name }} -{{- /* Never set the hostname on AWS nodes. 
Kubernetes(kube-proxy) requires the hostname to be the private dns name */}} -{{ end }} - -{{- if .OSConfig.DistUpgradeOnBoot }} -package_upgrade: true -package_reboot_if_required: true -{{- end }} - -ssh_pwauth: false - -{{- if ne (len .ProviderSpec.SSHPublicKeys) 0 }} -ssh_authorized_keys: -{{- range .ProviderSpec.SSHPublicKeys }} - - "{{ . }}" -{{- end }} -{{- end }} - -write_files: -{{- if .HTTPProxy }} -- path: "/etc/environment" - content: | -{{ proxyEnvironment .HTTPProxy .NoProxy | indent 4 }} -{{- end }} - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | -{{ journalDConfig | indent 4 }} - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | -{{ kernelModulesScript | indent 4 }} - -- path: "/etc/sysctl.d/k8s.conf" - content: | -{{ kernelSettings | indent 4 }} - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - -{{- /* As we added some modules and don't want to reboot, restart the service */}} - systemctl restart systemd-modules-load.service - sysctl --system - {{ if ne .CloudProviderName "aws" }} -{{- /* The normal way of setting it via cloud-init is broken, see */}} -{{- /* https://bugs.launchpad.net/cloud-init/+bug/1662542 */}} - hostnamectl set-hostname {{ .MachineSpec.Name }} - {{ end }} - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - {{- if or (eq .CloudProviderName "vsphere") (eq .CloudProviderName "vmware-cloud-director") }} - open-vm-tools \ - {{- end }} - {{- if eq .CloudProviderName "nutanix" }} - iscsi-initiator-utils \ - {{- end }} - ipvsadm - - {{- /* iscsid service is required on Nutanix machines for CSI driver to attach volumes. 
*/}} - {{- if eq .CloudProviderName "nutanix" }} - systemctl enable --now iscsid - {{ end }} -{{ .ContainerRuntimeScript | indent 4 }} -{{ safeDownloadBinariesScript .KubeletVersion | indent 4 }} - DEFAULT_IFC_NAME=$(ip -o route get 1 | grep -oP "dev \K\S+") - IFC_CFG_FILE=/etc/sysconfig/network-scripts/ifcfg-$DEFAULT_IFC_NAME - # Enable IPv6 and DHCPv6 on the default interface - grep IPV6INIT $IFC_CFG_FILE && sed -i '/IPV6INIT*/c IPV6INIT=yes' $IFC_CFG_FILE || echo "IPV6INIT=yes" >> $IFC_CFG_FILE - grep DHCPV6C $IFC_CFG_FILE && sed -i '/DHCPV6C*/c DHCPV6C=yes' $IFC_CFG_FILE || echo "DHCPV6C=yes" >> $IFC_CFG_FILE - grep IPV6_AUTOCONF $IFC_CFG_FILE && sed -i '/IPV6_AUTOCONF*/c IPV6_AUTOCONF=yes' $IFC_CFG_FILE || echo "IPV6_AUTOCONF=yes" >> $IFC_CFG_FILE - - # Restart NetworkManager to apply for IPv6 configs - systemctl restart NetworkManager - # Let NetworkManager apply the DHCPv6 configs - sleep 3 - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - {{ if eq .CloudProviderName "vsphere" }} - systemctl enable --now vmtoolsd.service - {{ end -}} - - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - {{- if eq .CloudProviderName "kubevirt" }} - systemctl enable --now --no-block restart-kubelet.service - {{ end }} - systemctl disable setup.service - systemctl disable disable-nm-cloud-setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | -{{ kubeletSystemdUnit .ContainerRuntimeName .KubeletVersion .KubeletCloudProviderName .MachineSpec.Name .DNSIPs .ExternalCloudProvider .ProviderSpec.Network.GetIPFamily .PauseImage .MachineSpec.Taints .ExtraKubeletFlags true | indent 4 }} - -{{- if ne (len .CloudConfig) 0 }} -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | -{{ .CloudConfig | indent 4 }} -{{- end }} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | -{{ .NodeIPScript | indent 4 }} - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | -{{ .Kubeconfig | indent 4 }} - -- path: "/etc/kubernetes/kubelet.conf" - content: | -{{ kubeletConfiguration "cluster.local" .DNSIPs .KubeletFeatureGates .KubeletConfigs .ContainerRuntimeName | indent 4 }} - -- path: "/etc/kubernetes/pki/ca.crt" - content: | -{{ .KubernetesCACert | indent 4 }} - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: {{ .ContainerRuntimeConfigFileName }} - permissions: "0644" - content: | -{{ .ContainerRuntimeConfig | indent 4 }} - -{{- if and (eq .ContainerRuntimeName "docker") .ContainerRuntimeAuthConfig }} - -- path: {{ .ContainerRuntimeAuthConfigFileName }} - permissions: "0600" - content: | -{{ .ContainerRuntimeAuthConfig | indent 4 }} -{{- end }} - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | -{{ kubeletHealthCheckSystemdUnit | indent 4 }} - -{{- with .ProviderSpec.CAPublicKey }} - -- path: 
"/etc/ssh/trusted-user-ca-keys.pem" - content: | -{{ . | indent 4 }} - -- path: "/etc/ssh/sshd_config" - content: | -{{ sshConfigAddendum | indent 4 }} - append: true -{{- end }} - -- path: "/opt/bin/disable-nm-cloud-setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl status 'nm-cloud-setup.timer' 2> /dev/null | grep -Fq "Active:"; then - systemctl stop nm-cloud-setup.timer - systemctl disable nm-cloud-setup.service - systemctl disable nm-cloud-setup.timer - reboot - fi - -- path: "/etc/systemd/system/disable-nm-cloud-setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/disable-nm-cloud-setup - -{{- if eq .CloudProviderName "kubevirt" }} -- path: "/opt/bin/restart-kubelet.sh" - permissions: "0744" - content: | - #!/bin/bash - # Needed for Kubevirt provider because if the virt-launcher pod is deleted, - # the VM and DataVolume states are kept and VM is rebooted. We need to restart the kubelet - # with the new config (new IP) and run this at every boot. 
- set -xeuo pipefail - - # This helps us avoid an unnecessary restart for kubelet on the first boot - if [ -f /etc/kubelet_needs_restart ]; then - # restart kubelet since it's not the first boot - systemctl daemon-reload - systemctl restart kubelet.service - else - touch /etc/kubelet_needs_restart - fi - -- path: "/etc/systemd/system/restart-kubelet.service" - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - Description=Service responsible for restarting kubelet when the machine is rebooted - - [Service] - Type=oneshot - ExecStart=/opt/bin/restart-kubelet.sh - - [Install] - WantedBy=multi-user.target -{{- end }} - -rh_subscription: -{{- if .OSConfig.RHELUseSatelliteServer }} - org: "{{.OSConfig.RHELOrganizationName}}" - activation-key: "{{.OSConfig.RHELActivationKey}}" - server-hostname: {{ .OSConfig.RHELSatelliteServer }} - rhsm-baseurl: https://{{ .OSConfig.RHELSatelliteServer }}/pulp/repos -{{- else }} - username: "{{.OSConfig.RHELSubscriptionManagerUser}}" - password: "{{.OSConfig.RHELSubscriptionManagerPassword}}" - auto-attach: {{.OSConfig.AttachSubscription}} -{{- end }} - -runcmd: -- systemctl enable --now setup.service -- systemctl enable --now disable-nm-cloud-setup.service -` diff --git a/pkg/userdata/rhel/provider_test.go b/pkg/userdata/rhel/provider_test.go deleted file mode 100644 index ee0434e49..000000000 --- a/pkg/userdata/rhel/provider_test.go +++ /dev/null @@ -1,296 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// -// UserData plugin for RHEL. -// - -package rhel - -import ( - "flag" - "net" - "testing" - - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/apis/plugin" - "github.com/kubermatic/machine-controller/pkg/containerruntime" - testhelper "github.com/kubermatic/machine-controller/pkg/test" - "github.com/kubermatic/machine-controller/pkg/userdata/convert" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" -) - -var ( - update = flag.Bool("update", false, "update testdata files") - - pemCertificate = `-----BEGIN CERTIFICATE----- -MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV -BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG -A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 -DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 -NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG -cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv -c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B -AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS -R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT -ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk -JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 -mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW -caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G -A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt -hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB -MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES -MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv 
-bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h -U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao -eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 -UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD -58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n -sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF -kPe6XoSbiLm/kxk32T0= ------END CERTIFICATE-----` -) - -// fakeCloudConfigProvider simulates cloud config provider for test. -type fakeCloudConfigProvider struct { - config string - name string - err error -} - -func (p *fakeCloudConfigProvider) GetCloudConfig(spec clusterv1alpha1.MachineSpec) (config string, name string, err error) { - return p.config, p.name, p.err -} - -// userDataTestCase contains the data for a table-driven test. -type userDataTestCase struct { - name string - spec clusterv1alpha1.MachineSpec - clusterDNSIPs []net.IP - cloudProviderName *string - externalCloudProvider bool - httpProxy string - noProxy string - insecureRegistries string - registryMirrors string - pauseImage string - containerruntime string -} - -// TestUserDataGeneration runs the data generation for different -// environments. 
-func TestUserDataGeneration(t *testing.T) { - t.Parallel() - - tests := []userDataTestCase{ - { - name: "kubelet-v1.25-aws", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.25.0", - }, - }, - }, - { - name: "kubelet-v1.24.9-aws", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.9", - }, - }, - }, - { - name: "kubelet-v1.24.9-aws-external", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.9", - }, - }, - externalCloudProvider: true, - }, - { - name: "kubelet-v1.24.9-vsphere", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.9", - }, - }, - cloudProviderName: stringPtr("vsphere"), - }, - { - name: "kubelet-v1.24.9-vsphere-proxy", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.9", - }, - }, - cloudProviderName: stringPtr("vsphere"), - httpProxy: "http://192.168.100.100:3128", - noProxy: "192.168.1.0", - insecureRegistries: "192.168.100.100:5000, 10.0.0.1:5000", - pauseImage: "192.168.100.100:5000/kubernetes/pause:v3.1", - }, - { - name: "kubelet-v1.24.9-vsphere-mirrors", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.9", - }, - }, - cloudProviderName: stringPtr("vsphere"), - httpProxy: "http://192.168.100.100:3128", - noProxy: "192.168.1.0", - registryMirrors: "https://registry.docker-cn.com", - pauseImage: "192.168.100.100:5000/kubernetes/pause:v3.1", - }, - { - name: "kubelet-v1.24-aws", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: 
"node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.0", - }, - }, - }, - { - name: "kubelet-v1.24-aws-external", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.0", - }, - }, - externalCloudProvider: true, - }, - { - name: "kubelet-v1.25-nutanix", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.25.0", - }, - }, - cloudProviderName: stringPtr("nutanix"), - }, - { - name: "pod-cidr-azure-rhel", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.25.0", - }, - }, - cloudProviderName: stringPtr("azure"), - }, - } - - defaultCloudProvider := &fakeCloudConfigProvider{ - name: "aws", - config: "{aws-config:true}", - err: nil, - } - kubeconfig := &clientcmdapi.Config{ - Clusters: map[string]*clientcmdapi.Cluster{ - "": { - Server: "https://server:443", - CertificateAuthorityData: []byte(pemCertificate), - }, - }, - AuthInfos: map[string]*clientcmdapi.AuthInfo{ - "": { - Token: "my-token", - }, - }, - } - provider := Provider{} - - kubeletFeatureGates := map[string]bool{ - "RotateKubeletServerCertificate": true, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - emptyProviderSpec := clusterv1alpha1.ProviderSpec{ - Value: &runtime.RawExtension{}, - } - test.spec.ProviderSpec = emptyProviderSpec - var cloudProvider *fakeCloudConfigProvider - if test.cloudProviderName != nil { - cloudProvider = &fakeCloudConfigProvider{ - name: *test.cloudProviderName, - config: "{config:true}", - err: nil, - } - } else { - cloudProvider = defaultCloudProvider - } - cloudConfig, cloudProviderName, err := cloudProvider.GetCloudConfig(test.spec) - if err != nil { - t.Fatalf("failed to get cloud config: %v", err) - } - - containerRuntimeOpts := 
containerruntime.Opts{ - ContainerRuntime: test.containerruntime, - InsecureRegistries: test.insecureRegistries, - RegistryMirrors: test.registryMirrors, - } - containerRuntimeConfig, err := containerruntime.BuildConfig(containerRuntimeOpts) - if err != nil { - t.Fatalf("failed to generate container runtime config: %v", err) - } - - req := plugin.UserDataRequest{ - MachineSpec: test.spec, - Kubeconfig: kubeconfig, - CloudConfig: cloudConfig, - CloudProviderName: cloudProviderName, - KubeletCloudProviderName: cloudProviderName, - DNSIPs: test.clusterDNSIPs, - ExternalCloudProvider: test.externalCloudProvider, - HTTPProxy: test.httpProxy, - NoProxy: test.noProxy, - PauseImage: test.pauseImage, - KubeletFeatureGates: kubeletFeatureGates, - ContainerRuntime: containerRuntimeConfig, - } - s, err := provider.UserData(req) - if err != nil { - t.Errorf("error getting userdata: '%v'", err) - } - - // Check if we can gzip it. - if _, err := convert.GzipString(s); err != nil { - t.Fatal(err) - } - goldenName := test.name + ".yaml" - testhelper.CompareOutput(t, goldenName, s, *update) - }) - } -} - -// stringPtr returns pointer to given string. 
-func stringPtr(a string) *string { - return &a -} diff --git a/pkg/userdata/rhel/testdata/kubelet-v1.24-aws-external.yaml b/pkg/userdata/rhel/testdata/kubelet-v1.24-aws-external.yaml deleted file mode 100644 index 9bdf86c67..000000000 --- a/pkg/userdata/rhel/testdata/kubelet-v1.24-aws-external.yaml +++ /dev/null @@ -1,500 +0,0 @@ -#cloud-config -bootcmd: -- modprobe ip_tables - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - ipvsadm - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - DEFAULT_IFC_NAME=$(ip -o route get 1 | grep -oP "dev \K\S+") - IFC_CFG_FILE=/etc/sysconfig/network-scripts/ifcfg-$DEFAULT_IFC_NAME - # Enable IPv6 and DHCPv6 on the default interface - grep IPV6INIT $IFC_CFG_FILE && sed -i '/IPV6INIT*/c IPV6INIT=yes' $IFC_CFG_FILE || echo "IPV6INIT=yes" >> $IFC_CFG_FILE - grep DHCPV6C $IFC_CFG_FILE && sed -i '/DHCPV6C*/c DHCPV6C=yes' $IFC_CFG_FILE || echo "DHCPV6C=yes" >> $IFC_CFG_FILE - grep IPV6_AUTOCONF $IFC_CFG_FILE && sed -i '/IPV6_AUTOCONF*/c IPV6_AUTOCONF=yes' $IFC_CFG_FILE || echo "IPV6_AUTOCONF=yes" >> $IFC_CFG_FILE - - # Restart NetworkManager to apply for IPv6 configs - systemctl restart NetworkManager - # Let NetworkManager apply the DHCPv6 configs - sleep 3 - - # set kubelet nodeip environment variable - mkdir -p 
/etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - systemctl disable disable-nm-cloud-setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=external \ - --hostname-override=${KUBELET_HOSTNAME} \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - 
content: | - {aws-config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpF
NGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: 
/var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - 
EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -- path: "/opt/bin/disable-nm-cloud-setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl status 'nm-cloud-setup.timer' 2> /dev/null | grep -Fq "Active:"; then - systemctl stop nm-cloud-setup.timer - systemctl disable nm-cloud-setup.service - systemctl disable nm-cloud-setup.timer - reboot - fi - -- path: "/etc/systemd/system/disable-nm-cloud-setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/disable-nm-cloud-setup - -rh_subscription: - username: "" - password: "" - auto-attach: false - -runcmd: -- systemctl enable --now setup.service 
-- systemctl enable --now disable-nm-cloud-setup.service diff --git a/pkg/userdata/rhel/testdata/kubelet-v1.24-aws.yaml b/pkg/userdata/rhel/testdata/kubelet-v1.24-aws.yaml deleted file mode 100644 index d67ddaf1f..000000000 --- a/pkg/userdata/rhel/testdata/kubelet-v1.24-aws.yaml +++ /dev/null @@ -1,500 +0,0 @@ -#cloud-config -bootcmd: -- modprobe ip_tables - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - ipvsadm - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - DEFAULT_IFC_NAME=$(ip -o route get 1 | grep -oP "dev \K\S+") - IFC_CFG_FILE=/etc/sysconfig/network-scripts/ifcfg-$DEFAULT_IFC_NAME - # Enable IPv6 and DHCPv6 on the default interface - grep IPV6INIT $IFC_CFG_FILE && sed -i '/IPV6INIT*/c IPV6INIT=yes' $IFC_CFG_FILE || echo "IPV6INIT=yes" >> $IFC_CFG_FILE - grep DHCPV6C $IFC_CFG_FILE && sed -i '/DHCPV6C*/c DHCPV6C=yes' $IFC_CFG_FILE || echo "DHCPV6C=yes" >> $IFC_CFG_FILE - grep IPV6_AUTOCONF $IFC_CFG_FILE && sed -i '/IPV6_AUTOCONF*/c IPV6_AUTOCONF=yes' $IFC_CFG_FILE || echo "IPV6_AUTOCONF=yes" >> $IFC_CFG_FILE - - # Restart NetworkManager to apply for IPv6 configs - systemctl restart NetworkManager - # Let NetworkManager apply the DHCPv6 configs - sleep 3 - - # set kubelet nodeip environment variable - mkdir -p 
/etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - systemctl disable disable-nm-cloud-setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=aws \ - --cloud-config=/etc/kubernetes/cloud-config \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - 
content: | - {aws-config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpF
NGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: 
/var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - 
EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -- path: "/opt/bin/disable-nm-cloud-setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl status 'nm-cloud-setup.timer' 2> /dev/null | grep -Fq "Active:"; then - systemctl stop nm-cloud-setup.timer - systemctl disable nm-cloud-setup.service - systemctl disable nm-cloud-setup.timer - reboot - fi - -- path: "/etc/systemd/system/disable-nm-cloud-setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/disable-nm-cloud-setup - -rh_subscription: - username: "" - password: "" - auto-attach: false - -runcmd: -- systemctl enable --now setup.service 
-- systemctl enable --now disable-nm-cloud-setup.service diff --git a/pkg/userdata/rhel/testdata/kubelet-v1.24.9-aws-external.yaml b/pkg/userdata/rhel/testdata/kubelet-v1.24.9-aws-external.yaml deleted file mode 100644 index d77002e64..000000000 --- a/pkg/userdata/rhel/testdata/kubelet-v1.24.9-aws-external.yaml +++ /dev/null @@ -1,500 +0,0 @@ -#cloud-config -bootcmd: -- modprobe ip_tables - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - ipvsadm - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - DEFAULT_IFC_NAME=$(ip -o route get 1 | grep -oP "dev \K\S+") - IFC_CFG_FILE=/etc/sysconfig/network-scripts/ifcfg-$DEFAULT_IFC_NAME - # Enable IPv6 and DHCPv6 on the default interface - grep IPV6INIT $IFC_CFG_FILE && sed -i '/IPV6INIT*/c IPV6INIT=yes' $IFC_CFG_FILE || echo "IPV6INIT=yes" >> $IFC_CFG_FILE - grep DHCPV6C $IFC_CFG_FILE && sed -i '/DHCPV6C*/c DHCPV6C=yes' $IFC_CFG_FILE || echo "DHCPV6C=yes" >> $IFC_CFG_FILE - grep IPV6_AUTOCONF $IFC_CFG_FILE && sed -i '/IPV6_AUTOCONF*/c IPV6_AUTOCONF=yes' $IFC_CFG_FILE || echo "IPV6_AUTOCONF=yes" >> $IFC_CFG_FILE - - # Restart NetworkManager to apply for IPv6 configs - systemctl restart NetworkManager - # Let NetworkManager apply the DHCPv6 configs - sleep 3 - - # set kubelet nodeip environment variable - mkdir -p 
/etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - systemctl disable disable-nm-cloud-setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=external \ - --hostname-override=${KUBELET_HOSTNAME} \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - 
content: | - {aws-config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpF
NGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: 
/var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - 
EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -- path: "/opt/bin/disable-nm-cloud-setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl status 'nm-cloud-setup.timer' 2> /dev/null | grep -Fq "Active:"; then - systemctl stop nm-cloud-setup.timer - systemctl disable nm-cloud-setup.service - systemctl disable nm-cloud-setup.timer - reboot - fi - -- path: "/etc/systemd/system/disable-nm-cloud-setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/disable-nm-cloud-setup - -rh_subscription: - username: "" - password: "" - auto-attach: false - -runcmd: -- systemctl enable --now setup.service 
-- systemctl enable --now disable-nm-cloud-setup.service diff --git a/pkg/userdata/rhel/testdata/kubelet-v1.24.9-aws.yaml b/pkg/userdata/rhel/testdata/kubelet-v1.24.9-aws.yaml deleted file mode 100644 index ae510dc1d..000000000 --- a/pkg/userdata/rhel/testdata/kubelet-v1.24.9-aws.yaml +++ /dev/null @@ -1,500 +0,0 @@ -#cloud-config -bootcmd: -- modprobe ip_tables - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - ipvsadm - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - DEFAULT_IFC_NAME=$(ip -o route get 1 | grep -oP "dev \K\S+") - IFC_CFG_FILE=/etc/sysconfig/network-scripts/ifcfg-$DEFAULT_IFC_NAME - # Enable IPv6 and DHCPv6 on the default interface - grep IPV6INIT $IFC_CFG_FILE && sed -i '/IPV6INIT*/c IPV6INIT=yes' $IFC_CFG_FILE || echo "IPV6INIT=yes" >> $IFC_CFG_FILE - grep DHCPV6C $IFC_CFG_FILE && sed -i '/DHCPV6C*/c DHCPV6C=yes' $IFC_CFG_FILE || echo "DHCPV6C=yes" >> $IFC_CFG_FILE - grep IPV6_AUTOCONF $IFC_CFG_FILE && sed -i '/IPV6_AUTOCONF*/c IPV6_AUTOCONF=yes' $IFC_CFG_FILE || echo "IPV6_AUTOCONF=yes" >> $IFC_CFG_FILE - - # Restart NetworkManager to apply for IPv6 configs - systemctl restart NetworkManager - # Let NetworkManager apply the DHCPv6 configs - sleep 3 - - # set kubelet nodeip environment variable - mkdir -p 
/etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - systemctl disable disable-nm-cloud-setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=aws \ - --cloud-config=/etc/kubernetes/cloud-config \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - 
content: | - {aws-config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpF
NGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: 
/var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - 
EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -- path: "/opt/bin/disable-nm-cloud-setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl status 'nm-cloud-setup.timer' 2> /dev/null | grep -Fq "Active:"; then - systemctl stop nm-cloud-setup.timer - systemctl disable nm-cloud-setup.service - systemctl disable nm-cloud-setup.timer - reboot - fi - -- path: "/etc/systemd/system/disable-nm-cloud-setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/disable-nm-cloud-setup - -rh_subscription: - username: "" - password: "" - auto-attach: false - -runcmd: -- systemctl enable --now setup.service 
-- systemctl enable --now disable-nm-cloud-setup.service diff --git a/pkg/userdata/rhel/testdata/kubelet-v1.24.9-vsphere-mirrors.yaml b/pkg/userdata/rhel/testdata/kubelet-v1.24.9-vsphere-mirrors.yaml deleted file mode 100644 index a4341ba36..000000000 --- a/pkg/userdata/rhel/testdata/kubelet-v1.24.9-vsphere-mirrors.yaml +++ /dev/null @@ -1,518 +0,0 @@ -#cloud-config -bootcmd: -- modprobe ip_tables - -hostname: node1 -fqdn: node1 - - -ssh_pwauth: false - -write_files: -- path: "/etc/environment" - content: | - HTTP_PROXY=http://192.168.100.100:3128 - http_proxy=http://192.168.100.100:3128 - HTTPS_PROXY=http://192.168.100.100:3128 - https_proxy=http://192.168.100.100:3128 - NO_PROXY=192.168.1.0 - no_proxy=192.168.1.0 - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. 
- # mls - Multi Level Security protection. - SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - hostnamectl set-hostname node1 - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - open-vm-tools \ - ipvsadm - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! 
-x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - DEFAULT_IFC_NAME=$(ip -o route get 1 | grep -oP "dev \K\S+") - IFC_CFG_FILE=/etc/sysconfig/network-scripts/ifcfg-$DEFAULT_IFC_NAME - # Enable IPv6 and DHCPv6 on the default interface - grep IPV6INIT $IFC_CFG_FILE && sed -i '/IPV6INIT*/c IPV6INIT=yes' $IFC_CFG_FILE || echo "IPV6INIT=yes" >> $IFC_CFG_FILE - grep DHCPV6C $IFC_CFG_FILE && sed -i '/DHCPV6C*/c DHCPV6C=yes' $IFC_CFG_FILE || echo "DHCPV6C=yes" >> $IFC_CFG_FILE - grep IPV6_AUTOCONF $IFC_CFG_FILE && sed -i '/IPV6_AUTOCONF*/c IPV6_AUTOCONF=yes' $IFC_CFG_FILE || echo "IPV6_AUTOCONF=yes" >> $IFC_CFG_FILE - - # Restart NetworkManager to apply for IPv6 configs - systemctl restart NetworkManager - # Let NetworkManager apply the DHCPv6 configs - sleep 3 - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - - systemctl enable --now vmtoolsd.service - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - systemctl disable disable-nm-cloud-setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=vsphere \ - --cloud-config=/etc/kubernetes/cloud-config \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --pod-infra-container-image=192.168.100.100:5000/kubernetes/pause:v3.1 \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if 
grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnB
jMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - 
shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - 
U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry.docker-cn.com"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -- path: "/opt/bin/disable-nm-cloud-setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl status 'nm-cloud-setup.timer' 2> /dev/null | grep -Fq 
"Active:"; then - systemctl stop nm-cloud-setup.timer - systemctl disable nm-cloud-setup.service - systemctl disable nm-cloud-setup.timer - reboot - fi - -- path: "/etc/systemd/system/disable-nm-cloud-setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/disable-nm-cloud-setup - -rh_subscription: - username: "" - password: "" - auto-attach: false - -runcmd: -- systemctl enable --now setup.service -- systemctl enable --now disable-nm-cloud-setup.service diff --git a/pkg/userdata/rhel/testdata/kubelet-v1.24.9-vsphere-proxy.yaml b/pkg/userdata/rhel/testdata/kubelet-v1.24.9-vsphere-proxy.yaml deleted file mode 100644 index 9a3a0586d..000000000 --- a/pkg/userdata/rhel/testdata/kubelet-v1.24.9-vsphere-proxy.yaml +++ /dev/null @@ -1,525 +0,0 @@ -#cloud-config -bootcmd: -- modprobe ip_tables - -hostname: node1 -fqdn: node1 - - -ssh_pwauth: false - -write_files: -- path: "/etc/environment" - content: | - HTTP_PROXY=http://192.168.100.100:3128 - http_proxy=http://192.168.100.100:3128 - HTTPS_PROXY=http://192.168.100.100:3128 - https_proxy=http://192.168.100.100:3128 - NO_PROXY=192.168.1.0 - no_proxy=192.168.1.0 - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - 
vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. - SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - hostnamectl set-hostname node1 - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - open-vm-tools \ - ipvsadm - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! 
-x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - DEFAULT_IFC_NAME=$(ip -o route get 1 | grep -oP "dev \K\S+") - IFC_CFG_FILE=/etc/sysconfig/network-scripts/ifcfg-$DEFAULT_IFC_NAME - # Enable IPv6 and DHCPv6 on the default interface - grep IPV6INIT $IFC_CFG_FILE && sed -i '/IPV6INIT*/c IPV6INIT=yes' $IFC_CFG_FILE || echo "IPV6INIT=yes" >> $IFC_CFG_FILE - grep DHCPV6C $IFC_CFG_FILE && sed -i '/DHCPV6C*/c DHCPV6C=yes' $IFC_CFG_FILE || echo "DHCPV6C=yes" >> $IFC_CFG_FILE - grep IPV6_AUTOCONF $IFC_CFG_FILE && sed -i '/IPV6_AUTOCONF*/c IPV6_AUTOCONF=yes' $IFC_CFG_FILE || echo "IPV6_AUTOCONF=yes" >> $IFC_CFG_FILE - - # Restart NetworkManager to apply for IPv6 configs - systemctl restart NetworkManager - # Let NetworkManager apply the DHCPv6 configs - sleep 3 - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - - systemctl enable --now vmtoolsd.service - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - systemctl disable disable-nm-cloud-setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=vsphere \ - --cloud-config=/etc/kubernetes/cloud-config \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --pod-infra-container-image=192.168.100.100:5000/kubernetes/pause:v3.1 \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if 
grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnB
jMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - 
shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - 
U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - [plugins."io.containerd.grpc.v1.cri".registry.configs] - [plugins."io.containerd.grpc.v1.cri".registry.configs."10.0.0.1:5000"] - [plugins."io.containerd.grpc.v1.cri".registry.configs."10.0.0.1:5000".tls] - insecure_skip_verify = true - [plugins."io.containerd.grpc.v1.cri".registry.configs."192.168.100.100:5000"] - [plugins."io.containerd.grpc.v1.cri".registry.configs."192.168.100.100:5000".tls] - insecure_skip_verify = true - - -- path: 
/etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -- path: "/opt/bin/disable-nm-cloud-setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl status 'nm-cloud-setup.timer' 2> /dev/null | grep -Fq "Active:"; then - systemctl stop nm-cloud-setup.timer - systemctl disable nm-cloud-setup.service - systemctl disable nm-cloud-setup.timer - reboot - fi - -- path: "/etc/systemd/system/disable-nm-cloud-setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/disable-nm-cloud-setup - -rh_subscription: - username: "" - password: "" - auto-attach: false - -runcmd: -- systemctl enable --now setup.service -- systemctl enable --now disable-nm-cloud-setup.service diff --git a/pkg/userdata/rhel/testdata/kubelet-v1.24.9-vsphere.yaml b/pkg/userdata/rhel/testdata/kubelet-v1.24.9-vsphere.yaml deleted file mode 100644 index 854c91ec6..000000000 --- a/pkg/userdata/rhel/testdata/kubelet-v1.24.9-vsphere.yaml +++ /dev/null @@ -1,509 +0,0 @@ -#cloud-config -bootcmd: -- modprobe ip_tables - -hostname: node1 -fqdn: node1 - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - 
net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. - SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - hostnamectl set-hostname node1 - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - open-vm-tools \ - ipvsadm - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! 
-x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - DEFAULT_IFC_NAME=$(ip -o route get 1 | grep -oP "dev \K\S+") - IFC_CFG_FILE=/etc/sysconfig/network-scripts/ifcfg-$DEFAULT_IFC_NAME - # Enable IPv6 and DHCPv6 on the default interface - grep IPV6INIT $IFC_CFG_FILE && sed -i '/IPV6INIT*/c IPV6INIT=yes' $IFC_CFG_FILE || echo "IPV6INIT=yes" >> $IFC_CFG_FILE - grep DHCPV6C $IFC_CFG_FILE && sed -i '/DHCPV6C*/c DHCPV6C=yes' $IFC_CFG_FILE || echo "DHCPV6C=yes" >> $IFC_CFG_FILE - grep IPV6_AUTOCONF $IFC_CFG_FILE && sed -i '/IPV6_AUTOCONF*/c IPV6_AUTOCONF=yes' $IFC_CFG_FILE || echo "IPV6_AUTOCONF=yes" >> $IFC_CFG_FILE - - # Restart NetworkManager to apply for IPv6 configs - systemctl restart NetworkManager - # Let NetworkManager apply the DHCPv6 configs - sleep 3 - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - - systemctl enable --now vmtoolsd.service - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - systemctl disable disable-nm-cloud-setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=vsphere \ - --cloud-config=/etc/kubernetes/cloud-config \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e 
"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVM
KTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - 
shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - 
U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -- path: "/opt/bin/disable-nm-cloud-setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl status 'nm-cloud-setup.timer' 2> /dev/null | grep -Fq 
"Active:"; then - systemctl stop nm-cloud-setup.timer - systemctl disable nm-cloud-setup.service - systemctl disable nm-cloud-setup.timer - reboot - fi - -- path: "/etc/systemd/system/disable-nm-cloud-setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/disable-nm-cloud-setup - -rh_subscription: - username: "" - password: "" - auto-attach: false - -runcmd: -- systemctl enable --now setup.service -- systemctl enable --now disable-nm-cloud-setup.service diff --git a/pkg/userdata/rhel/testdata/kubelet-v1.25-aws.yaml b/pkg/userdata/rhel/testdata/kubelet-v1.25-aws.yaml deleted file mode 100644 index 599aa75eb..000000000 --- a/pkg/userdata/rhel/testdata/kubelet-v1.25-aws.yaml +++ /dev/null @@ -1,500 +0,0 @@ -#cloud-config -bootcmd: -- modprobe ip_tables - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. 
- # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. - SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - ipvsadm - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! 
-x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - DEFAULT_IFC_NAME=$(ip -o route get 1 | grep -oP "dev \K\S+") - IFC_CFG_FILE=/etc/sysconfig/network-scripts/ifcfg-$DEFAULT_IFC_NAME - # Enable IPv6 and DHCPv6 on the default interface - grep IPV6INIT $IFC_CFG_FILE && sed -i '/IPV6INIT*/c IPV6INIT=yes' $IFC_CFG_FILE || echo "IPV6INIT=yes" >> $IFC_CFG_FILE - grep DHCPV6C $IFC_CFG_FILE && sed -i '/DHCPV6C*/c DHCPV6C=yes' $IFC_CFG_FILE || echo "DHCPV6C=yes" >> $IFC_CFG_FILE - grep IPV6_AUTOCONF $IFC_CFG_FILE && sed -i '/IPV6_AUTOCONF*/c IPV6_AUTOCONF=yes' $IFC_CFG_FILE || echo "IPV6_AUTOCONF=yes" >> $IFC_CFG_FILE - - # Restart NetworkManager to apply for IPv6 configs - systemctl restart NetworkManager - # Let NetworkManager apply the DHCPv6 configs - sleep 3 - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - systemctl disable disable-nm-cloud-setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=aws \ - --cloud-config=/etc/kubernetes/cloud-config \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {aws-config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e 
"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVM
KTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - 
shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - 
U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -- path: "/opt/bin/disable-nm-cloud-setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl status 'nm-cloud-setup.timer' 2> /dev/null | grep -Fq 
"Active:"; then - systemctl stop nm-cloud-setup.timer - systemctl disable nm-cloud-setup.service - systemctl disable nm-cloud-setup.timer - reboot - fi - -- path: "/etc/systemd/system/disable-nm-cloud-setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/disable-nm-cloud-setup - -rh_subscription: - username: "" - password: "" - auto-attach: false - -runcmd: -- systemctl enable --now setup.service -- systemctl enable --now disable-nm-cloud-setup.service diff --git a/pkg/userdata/rhel/testdata/kubelet-v1.25-nutanix.yaml b/pkg/userdata/rhel/testdata/kubelet-v1.25-nutanix.yaml deleted file mode 100644 index 8e0a31352..000000000 --- a/pkg/userdata/rhel/testdata/kubelet-v1.25-nutanix.yaml +++ /dev/null @@ -1,509 +0,0 @@ -#cloud-config -bootcmd: -- modprobe ip_tables - -hostname: node1 -fqdn: node1 - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. 
- # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. - SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - hostnamectl set-hostname node1 - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - iscsi-initiator-utils \ - ipvsadm - systemctl enable --now iscsid - - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! 
-x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - DEFAULT_IFC_NAME=$(ip -o route get 1 | grep -oP "dev \K\S+") - IFC_CFG_FILE=/etc/sysconfig/network-scripts/ifcfg-$DEFAULT_IFC_NAME - # Enable IPv6 and DHCPv6 on the default interface - grep IPV6INIT $IFC_CFG_FILE && sed -i '/IPV6INIT*/c IPV6INIT=yes' $IFC_CFG_FILE || echo "IPV6INIT=yes" >> $IFC_CFG_FILE - grep DHCPV6C $IFC_CFG_FILE && sed -i '/DHCPV6C*/c DHCPV6C=yes' $IFC_CFG_FILE || echo "DHCPV6C=yes" >> $IFC_CFG_FILE - grep IPV6_AUTOCONF $IFC_CFG_FILE && sed -i '/IPV6_AUTOCONF*/c IPV6_AUTOCONF=yes' $IFC_CFG_FILE || echo "IPV6_AUTOCONF=yes" >> $IFC_CFG_FILE - - # Restart NetworkManager to apply for IPv6 configs - systemctl restart NetworkManager - # Let NetworkManager apply the DHCPv6 configs - sleep 3 - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - systemctl disable disable-nm-cloud-setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=nutanix \ - --cloud-config=/etc/kubernetes/cloud-config \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e 
"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVM
KTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - 
shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - 
U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -- path: "/opt/bin/disable-nm-cloud-setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl status 'nm-cloud-setup.timer' 2> /dev/null | grep -Fq 
"Active:"; then - systemctl stop nm-cloud-setup.timer - systemctl disable nm-cloud-setup.service - systemctl disable nm-cloud-setup.timer - reboot - fi - -- path: "/etc/systemd/system/disable-nm-cloud-setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/disable-nm-cloud-setup - -rh_subscription: - username: "" - password: "" - auto-attach: false - -runcmd: -- systemctl enable --now setup.service -- systemctl enable --now disable-nm-cloud-setup.service diff --git a/pkg/userdata/rhel/testdata/pod-cidr-azure-rhel.yaml b/pkg/userdata/rhel/testdata/pod-cidr-azure-rhel.yaml deleted file mode 100644 index f3bc71e64..000000000 --- a/pkg/userdata/rhel/testdata/pod-cidr-azure-rhel.yaml +++ /dev/null @@ -1,506 +0,0 @@ -#cloud-config -bootcmd: -- modprobe ip_tables - -hostname: node1 -fqdn: node1 - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. 
- # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. - SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - hostnamectl set-hostname node1 - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - ipvsadm - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! 
-x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - DEFAULT_IFC_NAME=$(ip -o route get 1 | grep -oP "dev \K\S+") - IFC_CFG_FILE=/etc/sysconfig/network-scripts/ifcfg-$DEFAULT_IFC_NAME - # Enable IPv6 and DHCPv6 on the default interface - grep IPV6INIT $IFC_CFG_FILE && sed -i '/IPV6INIT*/c IPV6INIT=yes' $IFC_CFG_FILE || echo "IPV6INIT=yes" >> $IFC_CFG_FILE - grep DHCPV6C $IFC_CFG_FILE && sed -i '/DHCPV6C*/c DHCPV6C=yes' $IFC_CFG_FILE || echo "DHCPV6C=yes" >> $IFC_CFG_FILE - grep IPV6_AUTOCONF $IFC_CFG_FILE && sed -i '/IPV6_AUTOCONF*/c IPV6_AUTOCONF=yes' $IFC_CFG_FILE || echo "IPV6_AUTOCONF=yes" >> $IFC_CFG_FILE - - # Restart NetworkManager to apply for IPv6 configs - systemctl restart NetworkManager - # Let NetworkManager apply the DHCPv6 configs - sleep 3 - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - systemctl disable disable-nm-cloud-setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=azure \ - --cloud-config=/etc/kubernetes/cloud-config \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e 
"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVM
KTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - 
shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - 
U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -- path: "/opt/bin/disable-nm-cloud-setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl status 'nm-cloud-setup.timer' 2> /dev/null | grep -Fq 
"Active:"; then - systemctl stop nm-cloud-setup.timer - systemctl disable nm-cloud-setup.service - systemctl disable nm-cloud-setup.timer - reboot - fi - -- path: "/etc/systemd/system/disable-nm-cloud-setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/disable-nm-cloud-setup - -rh_subscription: - username: "" - password: "" - auto-attach: false - -runcmd: -- systemctl enable --now setup.service -- systemctl enable --now disable-nm-cloud-setup.service diff --git a/pkg/userdata/rockylinux/provider.go b/pkg/userdata/rockylinux/provider.go deleted file mode 100644 index c19a5dcd1..000000000 --- a/pkg/userdata/rockylinux/provider.go +++ /dev/null @@ -1,359 +0,0 @@ -/* -Copyright 2022 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// -// UserData plugin for RockyLinux. -// - -package rockylinux - -import ( - "errors" - "fmt" - "strings" - "text/template" - - "github.com/Masterminds/semver/v3" - - "github.com/kubermatic/machine-controller/pkg/apis/plugin" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" - userdatahelper "github.com/kubermatic/machine-controller/pkg/userdata/helper" -) - -// Provider is a pkg/userdata/plugin.Provider implementation. 
-type Provider struct{} - -// UserData renders user-data template to string. -func (p Provider) UserData(req plugin.UserDataRequest) (string, error) { - tmpl, err := template.New("user-data").Funcs(userdatahelper.TxtFuncMap()).Parse(userDataTemplate) - if err != nil { - return "", fmt.Errorf("failed to parse user-data template: %w", err) - } - - kubeletVersion, err := semver.NewVersion(req.MachineSpec.Versions.Kubelet) - if err != nil { - return "", fmt.Errorf("invalid kubelet version: %w", err) - } - - pconfig, err := providerconfigtypes.GetConfig(req.MachineSpec.ProviderSpec) - if err != nil { - return "", fmt.Errorf("failed to get provider config: %w", err) - } - - if pconfig.OverwriteCloudConfig != nil { - req.CloudConfig = *pconfig.OverwriteCloudConfig - } - - if pconfig.Network.IsStaticIPConfig() { - return "", errors.New("static IP config is not supported with RockyLinux") - } - - rockyLinuxConfig, err := LoadConfig(pconfig.OperatingSystemSpec) - if err != nil { - return "", fmt.Errorf("failed to parse OperatingSystemSpec: %w", err) - } - - kubeconfigString, err := userdatahelper.StringifyKubeconfig(req.Kubeconfig) - if err != nil { - return "", err - } - - kubernetesCACert, err := userdatahelper.GetCACert(req.Kubeconfig) - if err != nil { - return "", fmt.Errorf("error extracting cacert: %w", err) - } - - crEngine := req.ContainerRuntime.Engine(kubeletVersion) - crScript, err := crEngine.ScriptFor(providerconfigtypes.OperatingSystemRockyLinux) - if err != nil { - return "", fmt.Errorf("failed to generate container runtime install script: %w", err) - } - - crConfig, err := crEngine.Config() - if err != nil { - return "", fmt.Errorf("failed to generate container runtime config: %w", err) - } - - crAuthConfig, err := crEngine.AuthConfig() - if err != nil { - return "", fmt.Errorf("failed to generate container runtime auth config: %w", err) - } - - data := struct { - plugin.UserDataRequest - ProviderSpec *providerconfigtypes.Config - OSConfig *Config - 
KubeletVersion string - Kubeconfig string - KubernetesCACert string - NodeIPScript string - ExtraKubeletFlags []string - ContainerRuntimeScript string - ContainerRuntimeConfigFileName string - ContainerRuntimeConfig string - ContainerRuntimeAuthConfigFileName string - ContainerRuntimeAuthConfig string - ContainerRuntimeName string - }{ - UserDataRequest: req, - ProviderSpec: pconfig, - OSConfig: rockyLinuxConfig, - KubeletVersion: kubeletVersion.String(), - Kubeconfig: kubeconfigString, - KubernetesCACert: kubernetesCACert, - NodeIPScript: userdatahelper.SetupNodeIPEnvScript(pconfig.Network.GetIPFamily()), - ExtraKubeletFlags: crEngine.KubeletFlags(), - ContainerRuntimeScript: crScript, - ContainerRuntimeConfigFileName: crEngine.ConfigFileName(), - ContainerRuntimeConfig: crConfig, - ContainerRuntimeAuthConfigFileName: crEngine.AuthConfigFileName(), - ContainerRuntimeAuthConfig: crAuthConfig, - ContainerRuntimeName: crEngine.String(), - } - - buf := strings.Builder{} - if err = tmpl.Execute(&buf, data); err != nil { - return "", fmt.Errorf("failed to execute user-data template: %w", err) - } - - return userdatahelper.CleanupTemplateOutput(buf.String()) -} - -// UserData template. -const userDataTemplate = `#cloud-config -bootcmd: -- modprobe ip_tables -{{ if ne .CloudProviderName "aws" }} -hostname: {{ .MachineSpec.Name }} -{{- /* Never set the hostname on AWS nodes. Kubernetes(kube-proxy) requires the hostname to be the private dns name */}} -{{ end }} - -{{- if .OSConfig.DistUpgradeOnBoot }} -package_upgrade: true -package_reboot_if_required: true -{{- end }} - -ssh_pwauth: false - -{{- if ne (len .ProviderSpec.SSHPublicKeys) 0 }} -ssh_authorized_keys: -{{- range .ProviderSpec.SSHPublicKeys }} - - "{{ . 
}}" -{{- end }} -{{- end }} - -write_files: -{{- if .HTTPProxy }} -- path: "/etc/environment" - content: | -{{ proxyEnvironment .HTTPProxy .NoProxy | indent 4 }} -{{- end }} - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | -{{ journalDConfig | indent 4 }} - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | -{{ kernelModulesScript | indent 4 }} - -- path: "/etc/sysctl.d/k8s.conf" - content: | -{{ kernelSettings | indent 4 }} - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - -{{- /* As we added some modules and don't want to reboot, restart the service */}} - systemctl restart systemd-modules-load.service - sysctl --system - - {{ if ne .CloudProviderName "aws" }} -{{- /* The normal way of setting it via cloud-init is broken, see */}} -{{- /* https://bugs.launchpad.net/cloud-init/+bug/1662542 */}} - hostnamectl set-hostname {{ .MachineSpec.Name }} - {{ end -}} - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - tar \ - {{- if or (eq .CloudProviderName "vsphere") (eq .CloudProviderName "vmware-cloud-director") }} - open-vm-tools \ - {{- end }} - {{- if eq .CloudProviderName "nutanix" }} - iscsi-initiator-utils \ - {{- end }} - ipvsadm - - {{- /* iscsid service is required on Nutanix machines for CSI driver to attach volumes. 
*/}} - {{- if eq .CloudProviderName "nutanix" }} - systemctl enable --now iscsid - {{ end }} -{{ .ContainerRuntimeScript | indent 4 }} -{{ safeDownloadBinariesScript .KubeletVersion | indent 4 }} - DEFAULT_IFC_NAME=$(ip -o route get 1 | grep -oP "dev \K\S+") - IFC_CFG_FILE=/etc/sysconfig/network-scripts/ifcfg-$DEFAULT_IFC_NAME - # Enable IPv6 and DHCPv6 on the default interface - grep IPV6INIT $IFC_CFG_FILE && sed -i '/IPV6INIT*/c IPV6INIT=yes' $IFC_CFG_FILE || echo "IPV6INIT=yes" >> $IFC_CFG_FILE - grep DHCPV6C $IFC_CFG_FILE && sed -i '/DHCPV6C*/c DHCPV6C=yes' $IFC_CFG_FILE || echo "DHCPV6C=yes" >> $IFC_CFG_FILE - grep IPV6_AUTOCONF $IFC_CFG_FILE && sed -i '/IPV6_AUTOCONF*/c IPV6_AUTOCONF=yes' $IFC_CFG_FILE || echo "IPV6_AUTOCONF=yes" >> $IFC_CFG_FILE - - # Restart NetworkManager to apply for IPv6 configs - systemctl restart NetworkManager - # Let NetworkManager apply the DHCPv6 configs - sleep 3 - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - {{ if eq .CloudProviderName "vsphere" }} - systemctl enable --now vmtoolsd.service - {{ end -}} - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | -{{ kubeletSystemdUnit .ContainerRuntimeName .KubeletVersion .KubeletCloudProviderName .MachineSpec.Name .DNSIPs .ExternalCloudProvider .ProviderSpec.Network.GetIPFamily .PauseImage .MachineSpec.Taints .ExtraKubeletFlags true | indent 4 }} - -{{- if ne (len .CloudConfig) 0 }} -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | -{{ .CloudConfig | indent 4 }} -{{- end }} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | -{{ .NodeIPScript | indent 4 }} - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | -{{ .Kubeconfig | indent 4 }} - -- path: "/etc/kubernetes/kubelet.conf" - content: | -{{ kubeletConfiguration "cluster.local" .DNSIPs .KubeletFeatureGates .KubeletConfigs .ContainerRuntimeName | indent 4 }} - -- path: "/etc/kubernetes/pki/ca.crt" - content: | -{{ .KubernetesCACert | indent 4 }} - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: {{ .ContainerRuntimeConfigFileName }} - permissions: "0644" - content: | -{{ .ContainerRuntimeConfig | indent 4 }} - -{{- if and (eq .ContainerRuntimeName "docker") .ContainerRuntimeAuthConfig }} - -- path: {{ .ContainerRuntimeAuthConfigFileName }} - permissions: "0600" - content: | -{{ .ContainerRuntimeAuthConfig | indent 4 }} -{{- end }} - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | -{{ kubeletHealthCheckSystemdUnit | indent 4 }} - -{{- with .ProviderSpec.CAPublicKey }} - -- path: 
"/etc/ssh/trusted-user-ca-keys.pem" - content: | -{{ . | indent 4 }} - -- path: "/etc/ssh/sshd_config" - content: | -{{ sshConfigAddendum | indent 4 }} - append: true -{{- end }} - -runcmd: -- systemctl enable --now setup.service -` diff --git a/pkg/userdata/rockylinux/provider_test.go b/pkg/userdata/rockylinux/provider_test.go deleted file mode 100644 index aacab4e07..000000000 --- a/pkg/userdata/rockylinux/provider_test.go +++ /dev/null @@ -1,277 +0,0 @@ -/* -Copyright 2022 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// -// UserData plugin for RockyLinux. 
-// - -package rockylinux - -import ( - "flag" - "net" - "testing" - - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/apis/plugin" - "github.com/kubermatic/machine-controller/pkg/containerruntime" - testhelper "github.com/kubermatic/machine-controller/pkg/test" - "github.com/kubermatic/machine-controller/pkg/userdata/convert" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" -) - -var ( - update = flag.Bool("update", false, "update testdata files") - - pemCertificate = `-----BEGIN CERTIFICATE----- -MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV -BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG -A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 -DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 -NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG -cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv -c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B -AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS -R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT -ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk -JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 -mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW -caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G -A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt -hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB -MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES -MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv -bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h -U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao -eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 
-UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD -58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n -sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF -kPe6XoSbiLm/kxk32T0= ------END CERTIFICATE-----` -) - -// fakeCloudConfigProvider simulates cloud config provider for test. -type fakeCloudConfigProvider struct { - config string - name string - err error -} - -func (p *fakeCloudConfigProvider) GetCloudConfig(spec clusterv1alpha1.MachineSpec) (config string, name string, err error) { - return p.config, p.name, p.err -} - -// userDataTestCase contains the data for a table-driven test. -type userDataTestCase struct { - name string - spec clusterv1alpha1.MachineSpec - clusterDNSIPs []net.IP - cloudProviderName *string - externalCloudProvider bool - httpProxy string - noProxy string - insecureRegistries string - registryMirrors string - pauseImage string - containerruntime string -} - -// TestUserDataGeneration runs the data generation for different -// environments. 
-func TestUserDataGeneration(t *testing.T) { - t.Parallel() - - tests := []userDataTestCase{ - { - name: "kubelet-v1.25-aws", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.25.0", - }, - }, - }, - { - name: "kubelet-v1.24.9-aws", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.9", - }, - }, - }, - { - name: "kubelet-v1.24.9-aws-external", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.9", - }, - }, - externalCloudProvider: true, - }, - { - name: "kubelet-v1.24.9-vsphere", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.9", - }, - }, - cloudProviderName: stringPtr("vsphere"), - }, - { - name: "kubelet-v1.24.9-vsphere-proxy", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.9", - }, - }, - cloudProviderName: stringPtr("vsphere"), - httpProxy: "http://192.168.100.100:3128", - noProxy: "192.168.1.0", - insecureRegistries: "192.168.100.100:5000, 10.0.0.1:5000", - pauseImage: "192.168.100.100:5000/kubernetes/pause:v3.1", - }, - { - name: "kubelet-v1.24.9-vsphere-mirrors", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.9", - }, - }, - cloudProviderName: stringPtr("vsphere"), - httpProxy: "http://192.168.100.100:3128", - noProxy: "192.168.1.0", - registryMirrors: "https://registry.docker-cn.com", - pauseImage: "192.168.100.100:5000/kubernetes/pause:v3.1", - }, - { - name: "kubelet-v1.24.9-nutanix", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: 
"node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.9", - }, - }, - cloudProviderName: stringPtr("nutanix"), - }, - { - name: "kubelet-v1.24-aws", - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{Name: "node1"}, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.0", - }, - }, - }, - } - - defaultCloudProvider := &fakeCloudConfigProvider{ - name: "aws", - config: "{aws-config:true}", - err: nil, - } - kubeconfig := &clientcmdapi.Config{ - Clusters: map[string]*clientcmdapi.Cluster{ - "": { - Server: "https://server:443", - CertificateAuthorityData: []byte(pemCertificate), - }, - }, - AuthInfos: map[string]*clientcmdapi.AuthInfo{ - "": { - Token: "my-token", - }, - }, - } - provider := Provider{} - - kubeletFeatureGates := map[string]bool{ - "RotateKubeletServerCertificate": true, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - emptyProviderSpec := clusterv1alpha1.ProviderSpec{ - Value: &runtime.RawExtension{}, - } - test.spec.ProviderSpec = emptyProviderSpec - var cloudProvider *fakeCloudConfigProvider - if test.cloudProviderName != nil { - cloudProvider = &fakeCloudConfigProvider{ - name: *test.cloudProviderName, - config: "{config:true}", - err: nil, - } - } else { - cloudProvider = defaultCloudProvider - } - cloudConfig, cloudProviderName, err := cloudProvider.GetCloudConfig(test.spec) - if err != nil { - t.Fatalf("failed to get cloud config: %v", err) - } - - containerRuntimeOpts := containerruntime.Opts{ - ContainerRuntime: test.containerruntime, - InsecureRegistries: test.insecureRegistries, - RegistryMirrors: test.registryMirrors, - } - containerRuntimeConfig, err := containerruntime.BuildConfig(containerRuntimeOpts) - if err != nil { - t.Fatalf("failed to generate container runtime config: %v", err) - } - - req := plugin.UserDataRequest{ - MachineSpec: test.spec, - Kubeconfig: kubeconfig, - CloudConfig: cloudConfig, - CloudProviderName: cloudProviderName, - 
KubeletCloudProviderName: cloudProviderName, - DNSIPs: test.clusterDNSIPs, - ExternalCloudProvider: test.externalCloudProvider, - HTTPProxy: test.httpProxy, - NoProxy: test.noProxy, - PauseImage: test.pauseImage, - KubeletFeatureGates: kubeletFeatureGates, - ContainerRuntime: containerRuntimeConfig, - } - - s, err := provider.UserData(req) - if err != nil { - t.Errorf("error getting userdata: '%v'", err) - } - - // Check if we can gzip it. - if _, err := convert.GzipString(s); err != nil { - t.Fatal(err) - } - goldenName := test.name + ".yaml" - testhelper.CompareOutput(t, goldenName, s, *update) - }) - } -} - -// stringPtr returns pointer to given string. -func stringPtr(a string) *string { - return &a -} diff --git a/pkg/userdata/rockylinux/testdata/kubelet-v1.24-aws.yaml b/pkg/userdata/rockylinux/testdata/kubelet-v1.24-aws.yaml deleted file mode 100644 index 910ed957c..000000000 --- a/pkg/userdata/rockylinux/testdata/kubelet-v1.24-aws.yaml +++ /dev/null @@ -1,466 +0,0 @@ -#cloud-config -bootcmd: -- modprobe ip_tables - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. 
- # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. - SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - tar \ - ipvsadm - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! 
-x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - DEFAULT_IFC_NAME=$(ip -o route get 1 | grep -oP "dev \K\S+") - IFC_CFG_FILE=/etc/sysconfig/network-scripts/ifcfg-$DEFAULT_IFC_NAME - # Enable IPv6 and DHCPv6 on the default interface - grep IPV6INIT $IFC_CFG_FILE && sed -i '/IPV6INIT*/c IPV6INIT=yes' $IFC_CFG_FILE || echo "IPV6INIT=yes" >> $IFC_CFG_FILE - grep DHCPV6C $IFC_CFG_FILE && sed -i '/DHCPV6C*/c DHCPV6C=yes' $IFC_CFG_FILE || echo "DHCPV6C=yes" >> $IFC_CFG_FILE - grep IPV6_AUTOCONF $IFC_CFG_FILE && sed -i '/IPV6_AUTOCONF*/c IPV6_AUTOCONF=yes' $IFC_CFG_FILE || echo "IPV6_AUTOCONF=yes" >> $IFC_CFG_FILE - - # Restart NetworkManager to apply for IPv6 configs - systemctl restart NetworkManager - # Let NetworkManager apply the DHCPv6 configs - sleep 3 - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=aws \ - --cloud-config=/etc/kubernetes/cloud-config \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {aws-config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e 
"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVM
KTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - 
shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - 
U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/rockylinux/testdata/kubelet-v1.24.9-aws-external.yaml 
b/pkg/userdata/rockylinux/testdata/kubelet-v1.24.9-aws-external.yaml deleted file mode 100644 index ea98aa141..000000000 --- a/pkg/userdata/rockylinux/testdata/kubelet-v1.24.9-aws-external.yaml +++ /dev/null @@ -1,466 +0,0 @@ -#cloud-config -bootcmd: -- modprobe ip_tables - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - tar \ - ipvsadm - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - DEFAULT_IFC_NAME=$(ip -o route get 1 | grep -oP "dev \K\S+") - IFC_CFG_FILE=/etc/sysconfig/network-scripts/ifcfg-$DEFAULT_IFC_NAME - # Enable IPv6 and DHCPv6 on the default interface - grep IPV6INIT $IFC_CFG_FILE && sed -i '/IPV6INIT*/c IPV6INIT=yes' $IFC_CFG_FILE || echo "IPV6INIT=yes" >> $IFC_CFG_FILE - grep DHCPV6C $IFC_CFG_FILE && sed -i '/DHCPV6C*/c DHCPV6C=yes' $IFC_CFG_FILE || echo "DHCPV6C=yes" >> $IFC_CFG_FILE - grep IPV6_AUTOCONF $IFC_CFG_FILE && sed -i '/IPV6_AUTOCONF*/c IPV6_AUTOCONF=yes' $IFC_CFG_FILE || echo "IPV6_AUTOCONF=yes" >> $IFC_CFG_FILE - - # Restart NetworkManager to apply for IPv6 configs - systemctl restart NetworkManager - # Let NetworkManager apply the DHCPv6 configs - sleep 3 - - # set kubelet nodeip environment variable - 
mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=external \ - --hostname-override=${KUBELET_HOSTNAME} \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {aws-config:true} - -- path: 
"/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpF
NGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: 
/var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - 
EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/rockylinux/testdata/kubelet-v1.24.9-aws.yaml b/pkg/userdata/rockylinux/testdata/kubelet-v1.24.9-aws.yaml deleted file mode 100644 index eb87f3395..000000000 --- a/pkg/userdata/rockylinux/testdata/kubelet-v1.24.9-aws.yaml +++ /dev/null @@ -1,466 +0,0 @@ -#cloud-config -bootcmd: -- modprobe ip_tables - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe 
nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. - SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - tar \ - ipvsadm - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! 
-x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - DEFAULT_IFC_NAME=$(ip -o route get 1 | grep -oP "dev \K\S+") - IFC_CFG_FILE=/etc/sysconfig/network-scripts/ifcfg-$DEFAULT_IFC_NAME - # Enable IPv6 and DHCPv6 on the default interface - grep IPV6INIT $IFC_CFG_FILE && sed -i '/IPV6INIT*/c IPV6INIT=yes' $IFC_CFG_FILE || echo "IPV6INIT=yes" >> $IFC_CFG_FILE - grep DHCPV6C $IFC_CFG_FILE && sed -i '/DHCPV6C*/c DHCPV6C=yes' $IFC_CFG_FILE || echo "DHCPV6C=yes" >> $IFC_CFG_FILE - grep IPV6_AUTOCONF $IFC_CFG_FILE && sed -i '/IPV6_AUTOCONF*/c IPV6_AUTOCONF=yes' $IFC_CFG_FILE || echo "IPV6_AUTOCONF=yes" >> $IFC_CFG_FILE - - # Restart NetworkManager to apply for IPv6 configs - systemctl restart NetworkManager - # Let NetworkManager apply the DHCPv6 configs - sleep 3 - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=aws \ - --cloud-config=/etc/kubernetes/cloud-config \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {aws-config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e 
"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVM
KTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - 
shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - 
U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/rockylinux/testdata/kubelet-v1.24.9-nutanix.yaml b/pkg/userdata/rockylinux/testdata/kubelet-v1.24.9-nutanix.yaml 
deleted file mode 100644 index 9cbbf5b50..000000000 --- a/pkg/userdata/rockylinux/testdata/kubelet-v1.24.9-nutanix.yaml +++ /dev/null @@ -1,474 +0,0 @@ -#cloud-config -bootcmd: -- modprobe ip_tables - -hostname: node1 - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - - hostnamectl set-hostname node1 - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - tar \ - iscsi-initiator-utils \ - ipvsadm - systemctl enable --now iscsid - - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! 
-x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - DEFAULT_IFC_NAME=$(ip -o route get 1 | grep -oP "dev \K\S+") - IFC_CFG_FILE=/etc/sysconfig/network-scripts/ifcfg-$DEFAULT_IFC_NAME - # Enable IPv6 and DHCPv6 on the default interface - grep IPV6INIT $IFC_CFG_FILE && sed -i '/IPV6INIT*/c IPV6INIT=yes' $IFC_CFG_FILE || echo "IPV6INIT=yes" >> $IFC_CFG_FILE - grep DHCPV6C $IFC_CFG_FILE && sed -i '/DHCPV6C*/c DHCPV6C=yes' $IFC_CFG_FILE || echo "DHCPV6C=yes" >> $IFC_CFG_FILE - grep IPV6_AUTOCONF $IFC_CFG_FILE && sed -i '/IPV6_AUTOCONF*/c IPV6_AUTOCONF=yes' $IFC_CFG_FILE || echo "IPV6_AUTOCONF=yes" >> $IFC_CFG_FILE - - # Restart NetworkManager to apply for IPv6 configs - systemctl restart NetworkManager - # Let NetworkManager apply the DHCPv6 configs - sleep 3 - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=nutanix \ - --cloud-config=/etc/kubernetes/cloud-config \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e 
"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVM
KTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - 
shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - 
U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/rockylinux/testdata/kubelet-v1.24.9-vsphere-mirrors.yaml 
b/pkg/userdata/rockylinux/testdata/kubelet-v1.24.9-vsphere-mirrors.yaml deleted file mode 100644 index b4452384f..000000000 --- a/pkg/userdata/rockylinux/testdata/kubelet-v1.24.9-vsphere-mirrors.yaml +++ /dev/null @@ -1,483 +0,0 @@ -#cloud-config -bootcmd: -- modprobe ip_tables - -hostname: node1 - - -ssh_pwauth: false - -write_files: -- path: "/etc/environment" - content: | - HTTP_PROXY=http://192.168.100.100:3128 - http_proxy=http://192.168.100.100:3128 - HTTPS_PROXY=http://192.168.100.100:3128 - https_proxy=http://192.168.100.100:3128 - NO_PROXY=192.168.1.0 - no_proxy=192.168.1.0 - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - - hostnamectl set-hostname node1 - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - tar \ - open-vm-tools \ - ipvsadm - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! 
-x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - DEFAULT_IFC_NAME=$(ip -o route get 1 | grep -oP "dev \K\S+") - IFC_CFG_FILE=/etc/sysconfig/network-scripts/ifcfg-$DEFAULT_IFC_NAME - # Enable IPv6 and DHCPv6 on the default interface - grep IPV6INIT $IFC_CFG_FILE && sed -i '/IPV6INIT*/c IPV6INIT=yes' $IFC_CFG_FILE || echo "IPV6INIT=yes" >> $IFC_CFG_FILE - grep DHCPV6C $IFC_CFG_FILE && sed -i '/DHCPV6C*/c DHCPV6C=yes' $IFC_CFG_FILE || echo "DHCPV6C=yes" >> $IFC_CFG_FILE - grep IPV6_AUTOCONF $IFC_CFG_FILE && sed -i '/IPV6_AUTOCONF*/c IPV6_AUTOCONF=yes' $IFC_CFG_FILE || echo "IPV6_AUTOCONF=yes" >> $IFC_CFG_FILE - - # Restart NetworkManager to apply for IPv6 configs - systemctl restart NetworkManager - # Let NetworkManager apply the DHCPv6 configs - sleep 3 - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - - systemctl enable --now vmtoolsd.service - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=vsphere \ - --cloud-config=/etc/kubernetes/cloud-config \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --pod-infra-container-image=192.168.100.100:5000/kubernetes/pause:v3.1 \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if 
grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnB
jMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - 
shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - 
U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry.docker-cn.com"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/rockylinux/testdata/kubelet-v1.24.9-vsphere-proxy.yaml 
b/pkg/userdata/rockylinux/testdata/kubelet-v1.24.9-vsphere-proxy.yaml deleted file mode 100644 index 1070b5fd3..000000000 --- a/pkg/userdata/rockylinux/testdata/kubelet-v1.24.9-vsphere-proxy.yaml +++ /dev/null @@ -1,490 +0,0 @@ -#cloud-config -bootcmd: -- modprobe ip_tables - -hostname: node1 - - -ssh_pwauth: false - -write_files: -- path: "/etc/environment" - content: | - HTTP_PROXY=http://192.168.100.100:3128 - http_proxy=http://192.168.100.100:3128 - HTTPS_PROXY=http://192.168.100.100:3128 - https_proxy=http://192.168.100.100:3128 - NO_PROXY=192.168.1.0 - no_proxy=192.168.1.0 - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - - hostnamectl set-hostname node1 - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - tar \ - open-vm-tools \ - ipvsadm - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! 
-x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - DEFAULT_IFC_NAME=$(ip -o route get 1 | grep -oP "dev \K\S+") - IFC_CFG_FILE=/etc/sysconfig/network-scripts/ifcfg-$DEFAULT_IFC_NAME - # Enable IPv6 and DHCPv6 on the default interface - grep IPV6INIT $IFC_CFG_FILE && sed -i '/IPV6INIT*/c IPV6INIT=yes' $IFC_CFG_FILE || echo "IPV6INIT=yes" >> $IFC_CFG_FILE - grep DHCPV6C $IFC_CFG_FILE && sed -i '/DHCPV6C*/c DHCPV6C=yes' $IFC_CFG_FILE || echo "DHCPV6C=yes" >> $IFC_CFG_FILE - grep IPV6_AUTOCONF $IFC_CFG_FILE && sed -i '/IPV6_AUTOCONF*/c IPV6_AUTOCONF=yes' $IFC_CFG_FILE || echo "IPV6_AUTOCONF=yes" >> $IFC_CFG_FILE - - # Restart NetworkManager to apply for IPv6 configs - systemctl restart NetworkManager - # Let NetworkManager apply the DHCPv6 configs - sleep 3 - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - - systemctl enable --now vmtoolsd.service - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=vsphere \ - --cloud-config=/etc/kubernetes/cloud-config \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --pod-infra-container-image=192.168.100.100:5000/kubernetes/pause:v3.1 \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if 
grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnB
jMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - 
shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - 
U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - [plugins."io.containerd.grpc.v1.cri".registry.configs] - [plugins."io.containerd.grpc.v1.cri".registry.configs."10.0.0.1:5000"] - [plugins."io.containerd.grpc.v1.cri".registry.configs."10.0.0.1:5000".tls] - insecure_skip_verify = true - [plugins."io.containerd.grpc.v1.cri".registry.configs."192.168.100.100:5000"] - [plugins."io.containerd.grpc.v1.cri".registry.configs."192.168.100.100:5000".tls] - insecure_skip_verify = true - - -- path: 
/etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/rockylinux/testdata/kubelet-v1.24.9-vsphere.yaml b/pkg/userdata/rockylinux/testdata/kubelet-v1.24.9-vsphere.yaml deleted file mode 100644 index 64521cbc6..000000000 --- a/pkg/userdata/rockylinux/testdata/kubelet-v1.24.9-vsphere.yaml +++ /dev/null @@ -1,474 +0,0 @@ -#cloud-config -bootcmd: -- modprobe ip_tables - -hostname: node1 - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - - hostnamectl set-hostname node1 - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - tar \ - open-vm-tools \ - ipvsadm - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! 
-x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - DEFAULT_IFC_NAME=$(ip -o route get 1 | grep -oP "dev \K\S+") - IFC_CFG_FILE=/etc/sysconfig/network-scripts/ifcfg-$DEFAULT_IFC_NAME - # Enable IPv6 and DHCPv6 on the default interface - grep IPV6INIT $IFC_CFG_FILE && sed -i '/IPV6INIT*/c IPV6INIT=yes' $IFC_CFG_FILE || echo "IPV6INIT=yes" >> $IFC_CFG_FILE - grep DHCPV6C $IFC_CFG_FILE && sed -i '/DHCPV6C*/c DHCPV6C=yes' $IFC_CFG_FILE || echo "DHCPV6C=yes" >> $IFC_CFG_FILE - grep IPV6_AUTOCONF $IFC_CFG_FILE && sed -i '/IPV6_AUTOCONF*/c IPV6_AUTOCONF=yes' $IFC_CFG_FILE || echo "IPV6_AUTOCONF=yes" >> $IFC_CFG_FILE - - # Restart NetworkManager to apply for IPv6 configs - systemctl restart NetworkManager - # Let NetworkManager apply the DHCPv6 configs - sleep 3 - - # set kubelet nodeip environment variable - mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - - systemctl enable --now vmtoolsd.service - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. 
- sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=vsphere \ - --cloud-config=/etc/kubernetes/cloud-config \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e 
"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVM
KTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - 
shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - 
U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/rockylinux/testdata/kubelet-v1.25-aws.yaml b/pkg/userdata/rockylinux/testdata/kubelet-v1.25-aws.yaml deleted file 
mode 100644 index ee32e7e03..000000000 --- a/pkg/userdata/rockylinux/testdata/kubelet-v1.25-aws.yaml +++ /dev/null @@ -1,466 +0,0 @@ -#cloud-config -bootcmd: -- modprobe ip_tables - - -ssh_pwauth: false - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: /etc/selinux/config - content: | - # This file controls the state of SELinux on the system. - # SELINUX= can take one of these three values: - # enforcing - SELinux security policy is enforced. - # permissive - SELinux prints warnings instead of enforcing. - # disabled - No SELinux policy is loaded. - SELINUX=permissive - # SELINUXTYPE= can take one of three two values: - # targeted - Targeted processes are protected, - # minimum - Modification of targeted policy. Only selected processes are protected. - # mls - Multi Level Security protection. 
- SELINUXTYPE=targeted - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - - setenforce 0 || true - systemctl restart systemd-modules-load.service - sysctl --system - - yum install -y \ - device-mapper-persistent-data \ - lvm2 \ - ebtables \ - ethtool \ - nfs-utils \ - bash-completion \ - sudo \ - socat \ - wget \ - curl \ - tar \ - ipvsadm - - yum install -y yum-utils - yum-config-manager --add-repo=https://download.docker.com/linux/centos/docker-ce.repo - yum-config-manager --save --setopt=docker-ce-stable.module_hotfixes=true - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - DEFAULT_IFC_NAME=$(ip -o route get 1 | grep -oP "dev \K\S+") - IFC_CFG_FILE=/etc/sysconfig/network-scripts/ifcfg-$DEFAULT_IFC_NAME - # Enable IPv6 and DHCPv6 on the default interface - grep IPV6INIT $IFC_CFG_FILE && sed -i '/IPV6INIT*/c IPV6INIT=yes' $IFC_CFG_FILE || echo "IPV6INIT=yes" >> $IFC_CFG_FILE - grep DHCPV6C $IFC_CFG_FILE && sed -i '/DHCPV6C*/c DHCPV6C=yes' $IFC_CFG_FILE || echo "DHCPV6C=yes" >> $IFC_CFG_FILE - grep IPV6_AUTOCONF $IFC_CFG_FILE && sed -i '/IPV6_AUTOCONF*/c IPV6_AUTOCONF=yes' $IFC_CFG_FILE || echo "IPV6_AUTOCONF=yes" >> $IFC_CFG_FILE - - # Restart NetworkManager to apply for IPv6 configs - systemctl restart NetworkManager - # Let NetworkManager apply the DHCPv6 configs - sleep 3 - - # set kubelet nodeip environment variable - 
mkdir -p /etc/systemd/system/kubelet.service.d/ - /opt/bin/setup_net_env.sh - - systemctl disable --now firewalld || true - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - # Make sure we always disable swap - Otherwise the kubelet won't start as for some cloud - # providers swap gets enabled on reboot or after the setup script has finished executing. - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=aws \ - --cloud-config=/etc/kubernetes/cloud-config \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {aws-config:true} - -- path: 
"/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpF
NGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: 
/var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - 
EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/scripts/health-monitor.sh b/pkg/userdata/scripts/health-monitor.sh deleted file mode 100644 index 515610346..000000000 --- a/pkg/userdata/scripts/health-monitor.sh +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env bash - -# Copyright 2016 The Kubernetes Authors. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. - -# This script is for master and node instance health monitoring, which is -# packed in kube-manifest tarball. It is executed through a systemd service -# in cluster/gce/gci/.yaml. The env variables come from an env -# file provided by the systemd service. - -# This script is a slightly adjusted version of -# https://github.com/kubernetes/kubernetes/blob/e1a1aa211224fcd9b213420b80b2ae680669683d/cluster/gce/gci/health-monitor.sh -# Adjustments are: -# * Kubelet health port is 10248 not 10255 -# * Removal of all all references to the KUBE_ENV file - -set -o nounset -set -o pipefail - -# We simply kill the process when there is a failure. Another systemd service will -# automatically restart the process. -function container_runtime_monitoring() { - local -r max_attempts=5 - local attempt=1 - local -r container_runtime_name="${CONTAINER_RUNTIME_NAME:-docker}" - # We still need to use 'docker ps' when container runtime is "docker". This is because - # dockershim is still part of kubelet today. When kubelet is down, crictl pods - # will also fail, and docker will be killed. This is undesirable especially when - # docker live restore is disabled. - local healthcheck_command="docker ps" - if [[ "${CONTAINER_RUNTIME:-docker}" != "docker" ]]; then - healthcheck_command="crictl pods" - fi - # Container runtime startup takes time. Make initial attempts before starting - # killing the container runtime. - until timeout 60 ${healthcheck_command} > /dev/null; do - if ((attempt == max_attempts)); then - echo "Max attempt ${max_attempts} reached! Proceeding to monitor container runtime healthiness." - break - fi - echo "$attempt initial attempt \"${healthcheck_command}\"! Trying again in $attempt seconds..." - sleep "$((2 ** attempt++))" - done - while true; do - if ! 
timeout 60 ${healthcheck_command} > /dev/null; then - echo "Container runtime ${container_runtime_name} failed!" - if [[ "$container_runtime_name" == "docker" ]]; then - # Dump stack of docker daemon for investigation. - # Log file name looks like goroutine-stacks-TIMESTAMP and will be saved to - # the exec root directory, which is /var/run/docker/ on Ubuntu and COS. - pkill -SIGUSR1 dockerd - fi - systemctl kill --kill-who=main "${container_runtime_name}" - # Wait for a while, as we don't want to kill it again before it is really up. - sleep 120 - else - sleep "${SLEEP_SECONDS}" - fi - done -} - -function kubelet_monitoring() { - echo "Wait for 2 minutes for kubelet to be functional" - # TODO(andyzheng0831): replace it with a more reliable method if possible. - sleep 120 - local -r max_seconds=10 - local output="" - while true; do - local failed=false - - if journalctl -u kubelet -n 1 | grep -q "use of closed network connection"; then - failed=true - echo "Kubelet stopped posting node status. Restarting" - elif ! output=$(curl -m "${max_seconds}" -f -s -S http://127.0.0.1:10248/healthz 2>&1); then - failed=true - # Print the response and/or errors. - echo "$output" - fi - - if [[ "$failed" == "true" ]]; then - echo "Kubelet is unhealthy!" - systemctl kill kubelet - # Wait for a while, as we don't want to kill it again before it is really up. - sleep 60 - else - sleep "${SLEEP_SECONDS}" - fi - done -} - -############## Main Function ################ -if [[ "$#" -ne 1 ]]; then - echo "Usage: health-monitor.sh " - exit 1 -fi - -SLEEP_SECONDS=10 -component=$1 -echo "Start kubernetes health monitoring for ${component}" -if [[ "${component}" == "container-runtime" ]]; then - container_runtime_monitoring -elif [[ "${component}" == "kubelet" ]]; then - kubelet_monitoring -else - echo "Health monitoring for component ${component} is not supported!" 
-fi diff --git a/pkg/userdata/ubuntu/provider.go b/pkg/userdata/ubuntu/provider.go deleted file mode 100644 index 2dfb8d3a7..000000000 --- a/pkg/userdata/ubuntu/provider.go +++ /dev/null @@ -1,387 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// -// UserData plugin for Ubuntu. -// - -package ubuntu - -import ( - "errors" - "fmt" - "strings" - "text/template" - - "github.com/Masterminds/semver/v3" - - "github.com/kubermatic/machine-controller/pkg/apis/plugin" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" - userdatahelper "github.com/kubermatic/machine-controller/pkg/userdata/helper" -) - -// Provider is a pkg/userdata/plugin.Provider implementation. -type Provider struct{} - -// UserData renders user-data template to string. 
-func (p Provider) UserData(req plugin.UserDataRequest) (string, error) { - tmpl, err := template.New("user-data").Funcs(userdatahelper.TxtFuncMap()).Parse(userDataTemplate) - if err != nil { - return "", fmt.Errorf("failed to parse user-data template: %w", err) - } - - kubeletVersion, err := semver.NewVersion(req.MachineSpec.Versions.Kubelet) - if err != nil { - return "", fmt.Errorf("invalid kubelet version: %w", err) - } - - pconfig, err := providerconfigtypes.GetConfig(req.MachineSpec.ProviderSpec) - if err != nil { - return "", fmt.Errorf("failed to get providerSpec: %w", err) - } - - if pconfig.OverwriteCloudConfig != nil { - req.CloudConfig = *pconfig.OverwriteCloudConfig - } - - if pconfig.Network.IsStaticIPConfig() { - return "", errors.New("static IP config is not supported with Ubuntu") - } - - ubuntuConfig, err := LoadConfig(pconfig.OperatingSystemSpec) - if err != nil { - return "", fmt.Errorf("failed to get ubuntu config from provider config: %w", err) - } - - kubeconfigString, err := userdatahelper.StringifyKubeconfig(req.Kubeconfig) - if err != nil { - return "", err - } - - kubernetesCACert, err := userdatahelper.GetCACert(req.Kubeconfig) - if err != nil { - return "", fmt.Errorf("error extracting cacert: %w", err) - } - - crEngine := req.ContainerRuntime.Engine(kubeletVersion) - crScript, err := crEngine.ScriptFor(providerconfigtypes.OperatingSystemUbuntu) - if err != nil { - return "", fmt.Errorf("failed to generate container runtime install script: %w", err) - } - - crConfig, err := crEngine.Config() - if err != nil { - return "", fmt.Errorf("failed to generate container runtime config: %w", err) - } - - crAuthConfig, err := crEngine.AuthConfig() - if err != nil { - return "", fmt.Errorf("failed to generate container runtime auth config: %w", err) - } - - data := struct { - plugin.UserDataRequest - ProviderSpec *providerconfigtypes.Config - OSConfig *Config - KubeletVersion string - Kubeconfig string - KubernetesCACert string - NodeIPScript 
string - ExtraKubeletFlags []string - ContainerRuntimeScript string - ContainerRuntimeConfigFileName string - ContainerRuntimeConfig string - ContainerRuntimeAuthConfigFileName string - ContainerRuntimeAuthConfig string - ContainerRuntimeName string - }{ - UserDataRequest: req, - ProviderSpec: pconfig, - OSConfig: ubuntuConfig, - KubeletVersion: kubeletVersion.String(), - Kubeconfig: kubeconfigString, - KubernetesCACert: kubernetesCACert, - NodeIPScript: userdatahelper.SetupNodeIPEnvScript(pconfig.Network.GetIPFamily()), - ExtraKubeletFlags: crEngine.KubeletFlags(), - ContainerRuntimeScript: crScript, - ContainerRuntimeConfigFileName: crEngine.ConfigFileName(), - ContainerRuntimeConfig: crConfig, - ContainerRuntimeAuthConfigFileName: crEngine.AuthConfigFileName(), - ContainerRuntimeAuthConfig: crAuthConfig, - ContainerRuntimeName: crEngine.String(), - } - - var buf strings.Builder - if err = tmpl.Execute(&buf, data); err != nil { - return "", fmt.Errorf("failed to execute user-data template: %w", err) - } - - return userdatahelper.CleanupTemplateOutput(buf.String()) -} - -// UserData template. -const userDataTemplate = `#cloud-config -{{ if ne .CloudProviderName "aws" }} -hostname: {{ .MachineSpec.Name }} -{{- /* Never set the hostname on AWS nodes. Kubernetes(kube-proxy) requires the hostname to be the private dns name */}} -{{ end }} - -{{- if .OSConfig.DistUpgradeOnBoot }} -package_upgrade: true -package_reboot_if_required: true -{{- end }} - -ssh_pwauth: false - -{{- if .ProviderSpec.SSHPublicKeys }} -ssh_authorized_keys: -{{- range .ProviderSpec.SSHPublicKeys }} -- "{{ . 
}}" -{{- end }} -{{- end }} - -write_files: -{{- if .HTTPProxy }} -- path: "/etc/environment" - content: | - PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games" -{{ proxyEnvironment .HTTPProxy .NoProxy | indent 4 }} -{{- end }} - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | -{{ journalDConfig | indent 4 }} - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | -{{ kernelModulesScript | indent 4 }} - -- path: "/etc/sysctl.d/k8s.conf" - content: | -{{ kernelSettings | indent 4 }} - -- path: "/etc/default/grub.d/60-swap-accounting.cfg" - content: | - # Added by kubermatic machine-controller - # Enable cgroups memory and swap accounting - GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl is-active ufw; then systemctl stop ufw; fi - systemctl mask ufw - -{{- /* As we added some modules and don't want to reboot, restart the service */}} - systemctl restart systemd-modules-load.service - sysctl --system - apt-get update - - DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install -y \ - curl \ - ca-certificates \ - ceph-common \ - cifs-utils \ - conntrack \ - e2fsprogs \ - ebtables \ - ethtool \ - glusterfs-client \ - iptables \ - jq \ - kmod \ - openssh-client \ - nfs-common \ - socat \ - util-linux \ - {{- if or (eq .CloudProviderName "vsphere") (eq .CloudProviderName "vmware-cloud-director") }} - open-vm-tools \ - {{- end }} - {{- if eq .CloudProviderName "nutanix" }} - open-iscsi \ - {{- end }} - ipvsadm - - {{- /* iscsid service is required on Nutanix machines for CSI driver to attach volumes. */}} - {{- if eq .CloudProviderName "nutanix" }} - systemctl enable --now iscsid - {{ end }} - - # Update grub to include kernel command options to enable swap accounting. 
- # Exclude alibaba cloud until this is fixed https://github.com/kubermatic/machine-controller/issues/682 - {{ if eq .CloudProviderName "alibaba" }} - if grep -v -q swapaccount=1 /proc/cmdline - then - echo "Reboot system if not alibaba cloud" - update-grub - touch /var/run/reboot-required - fi - {{ end }} -{{ .ContainerRuntimeScript | indent 4 }} - -{{ safeDownloadBinariesScript .KubeletVersion | indent 4 }} - # set kubelet nodeip environment variable - /opt/bin/setup_net_env.sh - - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - {{- if eq .CloudProviderName "kubevirt" }} - systemctl enable --now --no-block restart-kubelet.service - {{ end }} - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! "$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | -{{ kubeletSystemdUnit .ContainerRuntimeName .KubeletVersion .KubeletCloudProviderName .MachineSpec.Name .DNSIPs .ExternalCloudProvider .ProviderSpec.Network.GetIPFamily .PauseImage .MachineSpec.Taints .ExtraKubeletFlags true | indent 4 }} - -- path: "/etc/systemd/system/kubelet.service.d/extras.conf" - content: | - [Service] - Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" - -{{- if ne (len .CloudConfig) 0 }} -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | -{{ .CloudConfig | indent 4 }} -{{- end }} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | -{{ .NodeIPScript | indent 4 }} - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | -{{ .Kubeconfig | indent 4 }} - -- path: "/etc/kubernetes/pki/ca.crt" - content: | -{{ .KubernetesCACert | indent 4 }} - -- path: "/etc/systemd/system/setup.service" - permissions: 
"0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: {{ .ContainerRuntimeConfigFileName }} - permissions: "0644" - content: | -{{ .ContainerRuntimeConfig | indent 4 }} - -{{- if and (eq .ContainerRuntimeName "docker") .ContainerRuntimeAuthConfig }} - -- path: {{ .ContainerRuntimeAuthConfigFileName }} - permissions: "0600" - content: | -{{ .ContainerRuntimeAuthConfig | indent 4 }} -{{- end }} - -- path: "/etc/kubernetes/kubelet.conf" - content: | -{{ kubeletConfiguration "cluster.local" .DNSIPs .KubeletFeatureGates .KubeletConfigs .ContainerRuntimeName | indent 4 }} - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | -{{ kubeletHealthCheckSystemdUnit | indent 4 }} - -{{- with .ProviderSpec.CAPublicKey }} - -- path: "/etc/ssh/trusted-user-ca-keys.pem" - content: | -{{ . | indent 4 }} - -- path: "/etc/ssh/sshd_config" - content: | -{{ sshConfigAddendum | indent 4 }} - append: true -{{- end }} - -{{- if eq .CloudProviderName "kubevirt" }} -- path: "/opt/bin/restart-kubelet.sh" - permissions: "0744" - content: | - #!/bin/bash - # Needed for Kubevirt provider because if the virt-launcher pod is deleted, - # the VM and DataVolume states are kept and VM is rebooted. We need to restart the kubelet - # with the new config (new IP) and run this at every boot. 
- set -xeuo pipefail - - # This helps us avoid an unnecessary restart for kubelet on the first boot - if [ -f /etc/kubelet_needs_restart ]; then - # restart kubelet since it's not the first boot - systemctl daemon-reload - systemctl restart kubelet.service - else - touch /etc/kubelet_needs_restart - fi - -- path: "/etc/systemd/system/restart-kubelet.service" - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - Description=Service responsible for restarting kubelet when the machine is rebooted - - [Service] - Type=oneshot - ExecStart=/opt/bin/restart-kubelet.sh - - [Install] - WantedBy=multi-user.target -{{- end }} - -runcmd: -- systemctl enable --now setup.service -` diff --git a/pkg/userdata/ubuntu/provider_test.go b/pkg/userdata/ubuntu/provider_test.go deleted file mode 100644 index d30f210bb..000000000 --- a/pkg/userdata/ubuntu/provider_test.go +++ /dev/null @@ -1,691 +0,0 @@ -/* -Copyright 2019 The Machine Controller Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// -// UserData plugin for Ubuntu. 
-// - -package ubuntu - -import ( - "encoding/json" - "flag" - "fmt" - "net" - "testing" - - "github.com/Masterminds/semver/v3" - - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/apis/plugin" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/util" - "github.com/kubermatic/machine-controller/pkg/containerruntime" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" - testhelper "github.com/kubermatic/machine-controller/pkg/test" - "github.com/kubermatic/machine-controller/pkg/userdata/cloud" - "github.com/kubermatic/machine-controller/pkg/userdata/convert" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - clientcmdapi "k8s.io/client-go/tools/clientcmd/api" -) - -var ( - update = flag.Bool("update", false, "update testdata files") - - pemCertificate = `-----BEGIN CERTIFICATE----- -MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV -BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG -A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 -DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 -NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG -cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv -c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B -AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS -R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT -ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk -JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 -mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW -caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G -A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt -hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB 
-MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES -MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv -bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h -U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao -eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 -UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD -58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n -sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF -kPe6XoSbiLm/kxk32T0= ------END CERTIFICATE-----` - - kubeconfig = &clientcmdapi.Config{ - Clusters: map[string]*clientcmdapi.Cluster{ - "": { - Server: "https://server:443", - CertificateAuthorityData: []byte(pemCertificate), - }, - }, - AuthInfos: map[string]*clientcmdapi.AuthInfo{ - "": { - Token: "my-token", - }, - }, - } - - kubeletFeatureGates = map[string]bool{ - "RotateKubeletServerCertificate": true, - } -) - -const ( - defaultVersion = "1.25.5" -) - -type fakeCloudConfigProvider struct { - config string - name string - err error -} - -func (p *fakeCloudConfigProvider) GetCloudConfig(spec clusterv1alpha1.MachineSpec) (config string, name string, err error) { - return p.config, p.name, p.err -} - -// userDataTestCase contains the data for a table-driven test. 
-type userDataTestCase struct { - name string - spec clusterv1alpha1.MachineSpec - ccProvider cloud.ConfigProvider - osConfig *Config - providerSpec *providerconfigtypes.Config - DNSIPs []net.IP - kubernetesCACert string - externalCloudProvider bool - httpProxy string - noProxy string - insecureRegistries string - registryMirrors string - containerdRegistryMirrors containerruntime.RegistryMirrorsFlags - registryCredentials map[string]containerruntime.AuthConfig - pauseImage string - containerruntime string -} - -func simpleVersionTests() []userDataTestCase { - versions := []*semver.Version{ - semver.MustParse("v1.24.9"), - semver.MustParse("v1.25.5"), - semver.MustParse("v1.26.0"), - } - - var tests []userDataTestCase - for _, v := range versions { - tests = append(tests, userDataTestCase{ - name: fmt.Sprintf("version-%s", v.String()), - providerSpec: &providerconfigtypes.Config{ - CloudProvider: "", - SSHPublicKeys: []string{"ssh-rsa AAABBB"}, - }, - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: v.String(), - }, - }, - ccProvider: &fakeCloudConfigProvider{ - name: "", - config: "", - err: nil, - }, - DNSIPs: []net.IP{net.ParseIP("10.10.10.10")}, - kubernetesCACert: "CACert", - osConfig: &Config{ - DistUpgradeOnBoot: false, - }, - }) - } - - return tests -} - -// TestUserDataGeneration runs the data generation for different -// environments. 
-func TestUserDataGeneration(t *testing.T) { - t.Parallel() - - tests := simpleVersionTests() - tests = append(tests, []userDataTestCase{ - { - name: "dist-upgrade-on-boot", - providerSpec: &providerconfigtypes.Config{ - CloudProvider: "", - SSHPublicKeys: []string{"ssh-rsa AAABBB"}, - }, - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: defaultVersion, - }, - }, - ccProvider: &fakeCloudConfigProvider{ - name: "", - config: "", - err: nil, - }, - DNSIPs: []net.IP{net.ParseIP("10.10.10.10")}, - kubernetesCACert: "CACert", - osConfig: &Config{ - DistUpgradeOnBoot: true, - }, - }, - { - name: "multiple-dns-servers", - providerSpec: &providerconfigtypes.Config{ - CloudProvider: "", - SSHPublicKeys: []string{"ssh-rsa AAABBB"}, - }, - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: defaultVersion, - }, - }, - ccProvider: &fakeCloudConfigProvider{ - name: "", - config: "", - err: nil, - }, - DNSIPs: []net.IP{net.ParseIP("10.10.10.10"), net.ParseIP("10.10.10.11"), net.ParseIP("10.10.10.12")}, - kubernetesCACert: "CACert", - osConfig: &Config{ - DistUpgradeOnBoot: false, - }, - }, - { - name: "kubelet-version-without-v-prefix", - providerSpec: &providerconfigtypes.Config{ - CloudProvider: "", - SSHPublicKeys: []string{"ssh-rsa AAABBB"}, - }, - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.22.7", - }, - }, - ccProvider: &fakeCloudConfigProvider{ - name: "", - config: "", - err: nil, - }, - DNSIPs: []net.IP{net.ParseIP("10.10.10.10")}, - kubernetesCACert: "CACert", - osConfig: &Config{ - DistUpgradeOnBoot: false, - }, - }, - { - name: "multiple-ssh-keys", - providerSpec: &providerconfigtypes.Config{ - CloudProvider: "", - SSHPublicKeys: []string{"ssh-rsa AAABBB", "ssh-rsa 
CCCDDD", "ssh-rsa EEEFFF"}, - }, - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.22.7", - }, - }, - ccProvider: &fakeCloudConfigProvider{ - name: "", - config: "", - err: nil, - }, - DNSIPs: []net.IP{net.ParseIP("10.10.10.10")}, - kubernetesCACert: "CACert", - osConfig: &Config{ - DistUpgradeOnBoot: false, - }, - }, - { - name: "openstack", - providerSpec: &providerconfigtypes.Config{ - CloudProvider: "openstack", - SSHPublicKeys: []string{"ssh-rsa AAABBB"}, - }, - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: defaultVersion, - }, - }, - ccProvider: &fakeCloudConfigProvider{ - name: "openstack", - config: "{openstack-config:true}", - err: nil, - }, - DNSIPs: []net.IP{net.ParseIP("10.10.10.10"), net.ParseIP("10.10.10.11"), net.ParseIP("10.10.10.12")}, - kubernetesCACert: "CACert", - osConfig: &Config{ - DistUpgradeOnBoot: false, - }, - }, - { - name: "openstack-dualstack", - providerSpec: &providerconfigtypes.Config{ - CloudProvider: "openstack", - SSHPublicKeys: []string{"ssh-rsa AAABBB"}, - Network: &providerconfigtypes.NetworkConfig{ - IPFamily: util.IPFamilyIPv4IPv6, - }, - }, - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: defaultVersion, - }, - }, - ccProvider: &fakeCloudConfigProvider{ - name: "openstack", - config: "{openstack-config:true}", - err: nil, - }, - DNSIPs: []net.IP{net.ParseIP("10.10.10.10"), net.ParseIP("10.10.10.11"), net.ParseIP("10.10.10.12")}, - kubernetesCACert: "CACert", - osConfig: &Config{ - DistUpgradeOnBoot: false, - }, - externalCloudProvider: true, - }, - { - name: "digitalocean-dualstack", - providerSpec: &providerconfigtypes.Config{ - CloudProvider: "digitalocean", - SSHPublicKeys: []string{"ssh-rsa AAABBB"}, - Network: 
&providerconfigtypes.NetworkConfig{ - IPFamily: util.IPFamilyIPv4IPv6, - }, - }, - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: defaultVersion, - }, - }, - ccProvider: &fakeCloudConfigProvider{ - config: "{digitalocean-config:true}", - err: nil, - }, - DNSIPs: []net.IP{net.ParseIP("10.10.10.10"), net.ParseIP("10.10.10.11"), net.ParseIP("10.10.10.12")}, - kubernetesCACert: "CACert", - osConfig: &Config{ - DistUpgradeOnBoot: false, - }, - }, - { - name: "openstack-dualstack-IPv6+IPv4", - providerSpec: &providerconfigtypes.Config{ - CloudProvider: "openstack", - SSHPublicKeys: []string{"ssh-rsa AAABBB"}, - Network: &providerconfigtypes.NetworkConfig{ - IPFamily: util.IPFamilyIPv6IPv4, - }, - }, - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: defaultVersion, - }, - }, - ccProvider: &fakeCloudConfigProvider{ - name: "openstack", - config: "{openstack-config:true}", - err: nil, - }, - DNSIPs: []net.IP{net.ParseIP("10.10.10.10"), net.ParseIP("10.10.10.11"), net.ParseIP("10.10.10.12")}, - kubernetesCACert: "CACert", - osConfig: &Config{ - DistUpgradeOnBoot: false, - }, - externalCloudProvider: true, - }, - { - name: "digitalocean-dualstack-IPv6+IPv4", - providerSpec: &providerconfigtypes.Config{ - CloudProvider: "digitalocean", - SSHPublicKeys: []string{"ssh-rsa AAABBB"}, - Network: &providerconfigtypes.NetworkConfig{ - IPFamily: util.IPFamilyIPv6IPv4, - }, - }, - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: defaultVersion, - }, - }, - ccProvider: &fakeCloudConfigProvider{ - config: "{digitalocean-config:true}", - err: nil, - }, - DNSIPs: []net.IP{net.ParseIP("10.10.10.10"), net.ParseIP("10.10.10.11"), net.ParseIP("10.10.10.12")}, - kubernetesCACert: "CACert", - 
osConfig: &Config{ - DistUpgradeOnBoot: false, - }, - }, - { - name: "openstack-overwrite-cloud-config", - providerSpec: &providerconfigtypes.Config{ - CloudProvider: "openstack", - SSHPublicKeys: []string{"ssh-rsa AAABBB"}, - OverwriteCloudConfig: stringPtr("custom\ncloud\nconfig"), - }, - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.22.7", - }, - }, - ccProvider: &fakeCloudConfigProvider{ - name: "openstack", - config: "{openstack-config:true}", - err: nil, - }, - DNSIPs: []net.IP{net.ParseIP("10.10.10.10")}, - kubernetesCACert: "CACert", - osConfig: &Config{ - DistUpgradeOnBoot: false, - }, - }, - { - name: "vsphere", - providerSpec: &providerconfigtypes.Config{ - CloudProvider: "vsphere", - SSHPublicKeys: []string{"ssh-rsa AAABBB"}, - OverwriteCloudConfig: stringPtr("custom\ncloud\nconfig"), - }, - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.22.7", - }, - }, - ccProvider: &fakeCloudConfigProvider{ - name: "vsphere", - config: "{vsphere-config:true}", - err: nil, - }, - DNSIPs: []net.IP{net.ParseIP("10.10.10.10")}, - kubernetesCACert: "CACert", - osConfig: &Config{ - DistUpgradeOnBoot: false, - }, - }, - { - name: "vsphere-proxy", - providerSpec: &providerconfigtypes.Config{ - CloudProvider: "vsphere", - SSHPublicKeys: []string{"ssh-rsa AAABBB"}, - OverwriteCloudConfig: stringPtr("custom\ncloud\nconfig"), - }, - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.22.7", - }, - }, - ccProvider: &fakeCloudConfigProvider{ - name: "vsphere", - config: "{vsphere-config:true}", - err: nil, - }, - DNSIPs: []net.IP{net.ParseIP("10.10.10.10")}, - kubernetesCACert: "CACert", - osConfig: &Config{ - DistUpgradeOnBoot: false, - }, - httpProxy: 
"http://192.168.100.100:3128", - noProxy: "192.168.1.0", - insecureRegistries: "192.168.100.100:5000, 10.0.0.1:5000", - pauseImage: "192.168.100.100:5000/kubernetes/pause:v3.1", - }, - { - name: "vsphere-mirrors", - providerSpec: &providerconfigtypes.Config{ - CloudProvider: "vsphere", - SSHPublicKeys: []string{"ssh-rsa AAABBB"}, - OverwriteCloudConfig: stringPtr("custom\ncloud\nconfig"), - }, - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.22.7", - }, - }, - ccProvider: &fakeCloudConfigProvider{ - name: "vsphere", - config: "{vsphere-config:true}", - err: nil, - }, - DNSIPs: []net.IP{net.ParseIP("10.10.10.10")}, - kubernetesCACert: "CACert", - osConfig: &Config{ - DistUpgradeOnBoot: false, - }, - httpProxy: "http://192.168.100.100:3128", - noProxy: "192.168.1.0", - registryMirrors: "https://registry.docker-cn.com", - pauseImage: "192.168.100.100:5000/kubernetes/pause:v3.1", - }, - { - name: "containerd", - containerruntime: "containerd", - registryCredentials: map[string]containerruntime.AuthConfig{ - "docker.io": { - Username: "login1", - Password: "passwd1", - }, - }, - insecureRegistries: "k8s.gcr.io", - containerdRegistryMirrors: map[string][]string{ - "k8s.gcr.io": {"https://intranet.local"}, - }, - providerSpec: &providerconfigtypes.Config{ - CloudProvider: "", - SSHPublicKeys: []string{"ssh-rsa AAABBB"}, - }, - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: defaultVersion, - }, - }, - ccProvider: &fakeCloudConfigProvider{ - name: "", - config: "", - err: nil, - }, - DNSIPs: []net.IP{net.ParseIP("10.10.10.10")}, - kubernetesCACert: "CACert", - osConfig: &Config{ - DistUpgradeOnBoot: true, - }, - }, - { - name: "docker", - containerruntime: "docker", - registryCredentials: map[string]containerruntime.AuthConfig{ - "docker.io": { - Username: "login1", - 
Password: "passwd1", - }, - }, - providerSpec: &providerconfigtypes.Config{ - CloudProvider: "", - SSHPublicKeys: []string{"ssh-rsa AAABBB"}, - }, - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: defaultVersion, - }, - }, - ccProvider: &fakeCloudConfigProvider{ - name: "", - config: "", - err: nil, - }, - DNSIPs: []net.IP{net.ParseIP("10.10.10.10")}, - kubernetesCACert: "CACert", - osConfig: &Config{ - DistUpgradeOnBoot: true, - }, - }, - { - name: "nutanix", - providerSpec: &providerconfigtypes.Config{ - CloudProvider: "nutanix", - SSHPublicKeys: []string{"ssh-rsa AAABBB"}, - OverwriteCloudConfig: stringPtr("custom\ncloud\nconfig"), - }, - spec: clusterv1alpha1.MachineSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - }, - Versions: clusterv1alpha1.MachineVersionInfo{ - Kubelet: "1.24.9", - }, - }, - ccProvider: &fakeCloudConfigProvider{ - name: "nutanix", - config: "{nutanix-config:true}", - err: nil, - }, - DNSIPs: []net.IP{net.ParseIP("10.10.10.10")}, - kubernetesCACert: "CACert", - osConfig: &Config{ - DistUpgradeOnBoot: false, - }, - }, - }...) 
- - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - rProviderSpec := test.providerSpec - osConfigByte, err := json.Marshal(test.osConfig) - if err != nil { - t.Fatal(err) - } - rProviderSpec.OperatingSystemSpec = runtime.RawExtension{ - Raw: osConfigByte, - } - - providerSpecRaw, err := json.Marshal(rProviderSpec) - if err != nil { - t.Fatal(err) - } - test.spec.ProviderSpec = clusterv1alpha1.ProviderSpec{ - Value: &runtime.RawExtension{ - Raw: providerSpecRaw, - }, - } - provider := Provider{} - - cloudConfig, cloudProviderName, err := test.ccProvider.GetCloudConfig(test.spec) - if err != nil { - t.Fatalf("failed to get cloud config: %v", err) - } - - containerRuntimeOpts := containerruntime.Opts{ - ContainerRuntime: test.containerruntime, - InsecureRegistries: test.insecureRegistries, - RegistryMirrors: test.registryMirrors, - ContainerdRegistryMirrors: test.containerdRegistryMirrors, - } - containerRuntimeConfig, err := containerruntime.BuildConfig(containerRuntimeOpts) - if err != nil { - t.Fatalf("failed to generate container runtime config: %v", err) - } - containerRuntimeConfig.RegistryCredentials = test.registryCredentials - - req := plugin.UserDataRequest{ - MachineSpec: test.spec, - Kubeconfig: kubeconfig, - CloudConfig: cloudConfig, - CloudProviderName: cloudProviderName, - KubeletCloudProviderName: cloudProviderName, - DNSIPs: test.DNSIPs, - ExternalCloudProvider: test.externalCloudProvider, - HTTPProxy: test.httpProxy, - NoProxy: test.noProxy, - PauseImage: test.pauseImage, - KubeletFeatureGates: kubeletFeatureGates, - ContainerRuntime: containerRuntimeConfig, - } - s, err := provider.UserData(req) - if err != nil { - t.Fatal(err) - } - - // Check if we can gzip it. - if _, err := convert.GzipString(s); err != nil { - t.Fatal(err) - } - goldenName := test.name + ".yaml" - testhelper.CompareOutput(t, goldenName, s, *update) - }) - } -} - -// stringPtr returns pointer to given string. 
-func stringPtr(str string) *string { - return &str -} diff --git a/pkg/userdata/ubuntu/testdata/containerd.yaml b/pkg/userdata/ubuntu/testdata/containerd.yaml deleted file mode 100644 index dd7dd8d91..000000000 --- a/pkg/userdata/ubuntu/testdata/containerd.yaml +++ /dev/null @@ -1,470 +0,0 @@ -#cloud-config - -hostname: node1 - -package_upgrade: true -package_reboot_if_required: true - -ssh_pwauth: false -ssh_authorized_keys: -- "ssh-rsa AAABBB" - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: "/etc/default/grub.d/60-swap-accounting.cfg" - content: | - # Added by kubermatic machine-controller - # Enable cgroups memory and swap accounting - GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl is-active ufw; then systemctl stop ufw; fi - systemctl mask ufw - systemctl restart systemd-modules-load.service - sysctl --system - apt-get update - - DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install -y \ - curl \ - ca-certificates \ - ceph-common \ - cifs-utils \ - conntrack \ - e2fsprogs \ - ebtables \ - ethtool \ - glusterfs-client \ - iptables \ - jq \ - kmod \ - openssh-client \ - 
nfs-common \ - socat \ - util-linux \ - ipvsadm - - # Update grub to include kernel command options to enable swap accounting. - # Exclude alibaba cloud until this is fixed https://github.com/kubermatic/machine-controller/issues/682 - - - apt-get update - apt-get install -y apt-transport-https ca-certificates curl software-properties-common lsb-release - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - /opt/bin/setup_net_env.sh - - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! 
"$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target - -- path: "/etc/systemd/system/kubelet.service.d/extras.conf" - content: | - [Service] - Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string 
"coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlE
VlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - 
MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."k8s.gcr.io"] - endpoint = ["https://intranet.local"] - [plugins."io.containerd.grpc.v1.cri".registry.configs] - [plugins."io.containerd.grpc.v1.cri".registry.configs."docker.io"] - 
[plugins."io.containerd.grpc.v1.cri".registry.configs."docker.io".auth] - username = "login1" - password = "passwd1" - [plugins."io.containerd.grpc.v1.cri".registry.configs."k8s.gcr.io"] - [plugins."io.containerd.grpc.v1.cri".registry.configs."k8s.gcr.io".tls] - insecure_skip_verify = true - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDNS: - - 10.10.10.10 - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - 
TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/ubuntu/testdata/digitalocean-dualstack-IPv6+IPv4.yaml b/pkg/userdata/ubuntu/testdata/digitalocean-dualstack-IPv6+IPv4.yaml deleted file mode 100644 index 71c2a3f56..000000000 --- a/pkg/userdata/ubuntu/testdata/digitalocean-dualstack-IPv6+IPv4.yaml +++ /dev/null @@ -1,471 +0,0 @@ -#cloud-config - -hostname: node1 - - -ssh_pwauth: false -ssh_authorized_keys: -- "ssh-rsa AAABBB" - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: "/etc/default/grub.d/60-swap-accounting.cfg" - content: | - # Added by kubermatic machine-controller - # Enable cgroups memory and swap accounting - GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl is-active ufw; then systemctl stop ufw; fi - systemctl mask ufw - systemctl restart 
systemd-modules-load.service - sysctl --system - apt-get update - - DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install -y \ - curl \ - ca-certificates \ - ceph-common \ - cifs-utils \ - conntrack \ - e2fsprogs \ - ebtables \ - ethtool \ - glusterfs-client \ - iptables \ - jq \ - kmod \ - openssh-client \ - nfs-common \ - socat \ - util-linux \ - ipvsadm - - # Update grub to include kernel command options to enable swap accounting. - # Exclude alibaba cloud until this is fixed https://github.com/kubermatic/machine-controller/issues/682 - - - apt-get update - apt-get install -y apt-transport-https ca-certificates curl software-properties-common lsb-release - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - /opt/bin/setup_net_env.sh - - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! 
"$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target - -- path: "/etc/systemd/system/kubelet.service.d/extras.conf" - content: | - [Service] - Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {digitalocean-config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - DEFAULT_IFC_IP6=$(ip -o -6 route get 1:: | grep -oP "src \K\S+") - if [ -z "${DEFAULT_IFC_IP6}" ] - then - echodate "Failed to get IPv6 address for the default route interface" - 
exit 1 - fi - DEFAULT_IFC_IP=$DEFAULT_IFC_IP6,$DEFAULT_IFC_IP - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUE
vaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - 
ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - 
[plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDNS: - - 10.10.10.10 - - 10.10.10.11 - - 10.10.10.12 - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - 
volumeStatsAggPeriod: 0s - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/ubuntu/testdata/digitalocean-dualstack.yaml b/pkg/userdata/ubuntu/testdata/digitalocean-dualstack.yaml deleted file mode 100644 index e1bf0ec28..000000000 --- a/pkg/userdata/ubuntu/testdata/digitalocean-dualstack.yaml +++ /dev/null @@ -1,471 +0,0 @@ -#cloud-config - -hostname: node1 - - -ssh_pwauth: false -ssh_authorized_keys: -- "ssh-rsa AAABBB" - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: "/etc/default/grub.d/60-swap-accounting.cfg" - content: | - # Added by kubermatic machine-controller - # Enable cgroups memory and swap accounting - GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl is-active ufw; then systemctl stop ufw; fi - systemctl mask ufw - systemctl restart systemd-modules-load.service - sysctl --system - apt-get update - - DEBIAN_FRONTEND=noninteractive apt-get -o 
Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install -y \ - curl \ - ca-certificates \ - ceph-common \ - cifs-utils \ - conntrack \ - e2fsprogs \ - ebtables \ - ethtool \ - glusterfs-client \ - iptables \ - jq \ - kmod \ - openssh-client \ - nfs-common \ - socat \ - util-linux \ - ipvsadm - - # Update grub to include kernel command options to enable swap accounting. - # Exclude alibaba cloud until this is fixed https://github.com/kubermatic/machine-controller/issues/682 - - - apt-get update - apt-get install -y apt-transport-https ca-certificates curl software-properties-common lsb-release - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - /opt/bin/setup_net_env.sh - - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! 
"$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target - -- path: "/etc/systemd/system/kubelet.service.d/extras.conf" - content: | - [Service] - Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {digitalocean-config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - DEFAULT_IFC_IP6=$(ip -o -6 route get 1:: | grep -oP "src \K\S+") - if [ -z "${DEFAULT_IFC_IP6}" ] - then - echodate "Failed to get IPv6 address for the default route interface" - 
exit 1 - fi - DEFAULT_IFC_IP=$DEFAULT_IFC_IP,$DEFAULT_IFC_IP6 - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUE
vaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - 
ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - 
[plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDNS: - - 10.10.10.10 - - 10.10.10.11 - - 10.10.10.12 - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - 
volumeStatsAggPeriod: 0s - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/ubuntu/testdata/dist-upgrade-on-boot.yaml b/pkg/userdata/ubuntu/testdata/dist-upgrade-on-boot.yaml deleted file mode 100644 index 2e56201ab..000000000 --- a/pkg/userdata/ubuntu/testdata/dist-upgrade-on-boot.yaml +++ /dev/null @@ -1,460 +0,0 @@ -#cloud-config - -hostname: node1 - -package_upgrade: true -package_reboot_if_required: true - -ssh_pwauth: false -ssh_authorized_keys: -- "ssh-rsa AAABBB" - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: "/etc/default/grub.d/60-swap-accounting.cfg" - content: | - # Added by kubermatic machine-controller - # Enable cgroups memory and swap accounting - GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl is-active ufw; then systemctl stop ufw; fi - systemctl mask ufw - systemctl restart systemd-modules-load.service - sysctl --system - apt-get update - - 
DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install -y \ - curl \ - ca-certificates \ - ceph-common \ - cifs-utils \ - conntrack \ - e2fsprogs \ - ebtables \ - ethtool \ - glusterfs-client \ - iptables \ - jq \ - kmod \ - openssh-client \ - nfs-common \ - socat \ - util-linux \ - ipvsadm - - # Update grub to include kernel command options to enable swap accounting. - # Exclude alibaba cloud until this is fixed https://github.com/kubermatic/machine-controller/issues/682 - - - apt-get update - apt-get install -y apt-transport-https ca-certificates curl software-properties-common lsb-release - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - /opt/bin/setup_net_env.sh - - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! 
"$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target - -- path: "/etc/systemd/system/kubelet.service.d/extras.conf" - content: | - [Service] - Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string 
"coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlE
VlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - 
MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - 
authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDNS: - - 10.10.10.10 - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/ubuntu/testdata/docker.yaml b/pkg/userdata/ubuntu/testdata/docker.yaml deleted file 
mode 100644 index 311ae4731..000000000 --- a/pkg/userdata/ubuntu/testdata/docker.yaml +++ /dev/null @@ -1,465 +0,0 @@ -#cloud-config - -hostname: node1 - -package_upgrade: true -package_reboot_if_required: true - -ssh_pwauth: false -ssh_authorized_keys: -- "ssh-rsa AAABBB" - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: "/etc/default/grub.d/60-swap-accounting.cfg" - content: | - # Added by kubermatic machine-controller - # Enable cgroups memory and swap accounting - GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl is-active ufw; then systemctl stop ufw; fi - systemctl mask ufw - systemctl restart systemd-modules-load.service - sysctl --system - apt-get update - - DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install -y \ - curl \ - ca-certificates \ - ceph-common \ - cifs-utils \ - conntrack \ - e2fsprogs \ - ebtables \ - ethtool \ - glusterfs-client \ - iptables \ - jq \ - kmod \ - openssh-client \ - nfs-common \ - socat \ - util-linux \ - ipvsadm - - # Update grub to include kernel command options to enable swap accounting. 
- # Exclude alibaba cloud until this is fixed https://github.com/kubermatic/machine-controller/issues/682 - - - apt-get update - apt-get install -y apt-transport-https ca-certificates curl software-properties-common lsb-release - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - /opt/bin/setup_net_env.sh - - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! 
"$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target - -- path: "/etc/systemd/system/kubelet.service.d/extras.conf" - content: | - [Service] - Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string 
"coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlE
VlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - 
MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - [plugins."io.containerd.grpc.v1.cri".registry.configs] - [plugins."io.containerd.grpc.v1.cri".registry.configs."docker.io"] - [plugins."io.containerd.grpc.v1.cri".registry.configs."docker.io".auth] - username = "login1" - password = "passwd1" - - -- 
path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDNS: - - 10.10.10.10 - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - 
ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/ubuntu/testdata/kubelet-version-without-v-prefix.yaml b/pkg/userdata/ubuntu/testdata/kubelet-version-without-v-prefix.yaml deleted file mode 100644 index 99f32fe25..000000000 --- a/pkg/userdata/ubuntu/testdata/kubelet-version-without-v-prefix.yaml +++ /dev/null @@ -1,441 +0,0 @@ -#cloud-config - -hostname: node1 - - -ssh_pwauth: false -ssh_authorized_keys: -- "ssh-rsa AAABBB" - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: "/etc/default/grub.d/60-swap-accounting.cfg" - content: | - # Added by kubermatic machine-controller - # Enable cgroups memory and swap accounting - GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl is-active ufw; then systemctl stop ufw; fi - systemctl mask ufw - systemctl restart systemd-modules-load.service - sysctl --system - apt-get update - - DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install -y \ - curl \ - ca-certificates \ - ceph-common \ - cifs-utils \ - conntrack \ - e2fsprogs \ - 
ebtables \ - ethtool \ - glusterfs-client \ - iptables \ - jq \ - kmod \ - openssh-client \ - nfs-common \ - socat \ - util-linux \ - ipvsadm - - # Update grub to include kernel command options to enable swap accounting. - # Exclude alibaba cloud until this is fixed https://github.com/kubermatic/machine-controller/issues/682 - - - apt-get update - apt-get install -y apt-transport-https ca-certificates curl software-properties-common lsb-release - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" - - mkdir -p /etc/systemd/system/containerd.service.d /etc/systemd/system/docker.service.d - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - /opt/bin/setup_net_env.sh - - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! 
"$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=docker.service - Requires=docker.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=docker \ - --container-runtime-endpoint=unix:///var/run/dockershim.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target - -- path: "/etc/systemd/system/kubelet.service.d/extras.conf" - content: | - [Service] - Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that 
file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnU
m5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - 
MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/docker/daemon.json - permissions: "0644" - content: | - {"exec-opts":["native.cgroupdriver=systemd"],"storage-driver":"overlay2","log-driver":"json-file","log-opts":{"max-file":"5","max-size":"100m"}} - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDNS: - - 10.10.10.10 - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - 
imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/ubuntu/testdata/multiple-dns-servers.yaml b/pkg/userdata/ubuntu/testdata/multiple-dns-servers.yaml deleted file mode 100644 index 77949c18c..000000000 --- a/pkg/userdata/ubuntu/testdata/multiple-dns-servers.yaml +++ /dev/null @@ -1,460 +0,0 @@ -#cloud-config - -hostname: node1 - - -ssh_pwauth: false -ssh_authorized_keys: -- "ssh-rsa AAABBB" - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - 
modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: "/etc/default/grub.d/60-swap-accounting.cfg" - content: | - # Added by kubermatic machine-controller - # Enable cgroups memory and swap accounting - GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl is-active ufw; then systemctl stop ufw; fi - systemctl mask ufw - systemctl restart systemd-modules-load.service - sysctl --system - apt-get update - - DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install -y \ - curl \ - ca-certificates \ - ceph-common \ - cifs-utils \ - conntrack \ - e2fsprogs \ - ebtables \ - ethtool \ - glusterfs-client \ - iptables \ - jq \ - kmod \ - openssh-client \ - nfs-common \ - socat \ - util-linux \ - ipvsadm - - # Update grub to include kernel command options to enable swap accounting. 
- # Exclude alibaba cloud until this is fixed https://github.com/kubermatic/machine-controller/issues/682 - - - apt-get update - apt-get install -y apt-transport-https ca-certificates curl software-properties-common lsb-release - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - /opt/bin/setup_net_env.sh - - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! 
"$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target - -- path: "/etc/systemd/system/kubelet.service.d/extras.conf" - content: | - [Service] - Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string 
"coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlE
VlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - 
MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - 
authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDNS: - - 10.10.10.10 - - 10.10.10.11 - - 10.10.10.12 - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/ubuntu/testdata/multiple-ssh-keys.yaml 
b/pkg/userdata/ubuntu/testdata/multiple-ssh-keys.yaml deleted file mode 100644 index 38e8e3432..000000000 --- a/pkg/userdata/ubuntu/testdata/multiple-ssh-keys.yaml +++ /dev/null @@ -1,443 +0,0 @@ -#cloud-config - -hostname: node1 - - -ssh_pwauth: false -ssh_authorized_keys: -- "ssh-rsa AAABBB" -- "ssh-rsa CCCDDD" -- "ssh-rsa EEEFFF" - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: "/etc/default/grub.d/60-swap-accounting.cfg" - content: | - # Added by kubermatic machine-controller - # Enable cgroups memory and swap accounting - GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl is-active ufw; then systemctl stop ufw; fi - systemctl mask ufw - systemctl restart systemd-modules-load.service - sysctl --system - apt-get update - - DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install -y \ - curl \ - ca-certificates \ - ceph-common \ - cifs-utils \ - conntrack \ - e2fsprogs \ - ebtables \ - ethtool \ - glusterfs-client \ - iptables \ - jq \ - kmod \ - openssh-client \ - nfs-common \ - socat \ - util-linux \ - ipvsadm - - # Update grub to include kernel command options to enable swap 
accounting. - # Exclude alibaba cloud until this is fixed https://github.com/kubermatic/machine-controller/issues/682 - - - apt-get update - apt-get install -y apt-transport-https ca-certificates curl software-properties-common lsb-release - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" - - mkdir -p /etc/systemd/system/containerd.service.d /etc/systemd/system/docker.service.d - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - /opt/bin/setup_net_env.sh - - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! 
"$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=docker.service - Requires=docker.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=docker \ - --container-runtime-endpoint=unix:///var/run/dockershim.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target - -- path: "/etc/systemd/system/kubelet.service.d/extras.conf" - content: | - [Service] - Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that 
file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnU
m5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - 
MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/docker/daemon.json - permissions: "0644" - content: | - {"exec-opts":["native.cgroupdriver=systemd"],"storage-driver":"overlay2","log-driver":"json-file","log-opts":{"max-file":"5","max-size":"100m"}} - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDNS: - - 10.10.10.10 - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - 
imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/ubuntu/testdata/nutanix.yaml b/pkg/userdata/ubuntu/testdata/nutanix.yaml deleted file mode 100644 index 3b453a2b6..000000000 --- a/pkg/userdata/ubuntu/testdata/nutanix.yaml +++ /dev/null @@ -1,469 +0,0 @@ -#cloud-config - -hostname: node1 - - -ssh_pwauth: false -ssh_authorized_keys: -- "ssh-rsa AAABBB" - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - 
modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: "/etc/default/grub.d/60-swap-accounting.cfg" - content: | - # Added by kubermatic machine-controller - # Enable cgroups memory and swap accounting - GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl is-active ufw; then systemctl stop ufw; fi - systemctl mask ufw - systemctl restart systemd-modules-load.service - sysctl --system - apt-get update - - DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install -y \ - curl \ - ca-certificates \ - ceph-common \ - cifs-utils \ - conntrack \ - e2fsprogs \ - ebtables \ - ethtool \ - glusterfs-client \ - iptables \ - jq \ - kmod \ - openssh-client \ - nfs-common \ - socat \ - util-linux \ - open-iscsi \ - ipvsadm - systemctl enable --now iscsid - - - # Update grub to include kernel command options to enable swap accounting. 
- # Exclude alibaba cloud until this is fixed https://github.com/kubermatic/machine-controller/issues/682 - - - apt-get update - apt-get install -y apt-transport-https ca-certificates curl software-properties-common lsb-release - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - /opt/bin/setup_net_env.sh - - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! 
"$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=nutanix \ - --cloud-config=/etc/kubernetes/cloud-config \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target - -- path: "/etc/systemd/system/kubelet.service.d/extras.conf" - content: | - [Service] - Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - custom - cloud - config - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - 
echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZER
nUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - 
caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: "/etc/kubernetes/kubelet.conf" - 
content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDNS: - - 10.10.10.10 - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - 
[Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/ubuntu/testdata/openstack-dualstack-IPv6+IPv4.yaml b/pkg/userdata/ubuntu/testdata/openstack-dualstack-IPv6+IPv4.yaml deleted file mode 100644 index 331eea036..000000000 --- a/pkg/userdata/ubuntu/testdata/openstack-dualstack-IPv6+IPv4.yaml +++ /dev/null @@ -1,471 +0,0 @@ -#cloud-config - -hostname: node1 - - -ssh_pwauth: false -ssh_authorized_keys: -- "ssh-rsa AAABBB" - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: "/etc/default/grub.d/60-swap-accounting.cfg" - content: | - # Added by kubermatic machine-controller - # Enable cgroups memory and swap accounting - GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl is-active ufw; then systemctl stop ufw; fi - systemctl mask ufw - systemctl restart systemd-modules-load.service - sysctl --system - apt-get update - - DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install -y \ - curl \ - ca-certificates \ - ceph-common \ - cifs-utils \ - conntrack \ - e2fsprogs \ - ebtables \ - ethtool \ - glusterfs-client \ - iptables \ - jq \ 
- kmod \ - openssh-client \ - nfs-common \ - socat \ - util-linux \ - ipvsadm - - # Update grub to include kernel command options to enable swap accounting. - # Exclude alibaba cloud until this is fixed https://github.com/kubermatic/machine-controller/issues/682 - - - apt-get update - apt-get install -y apt-transport-https ca-certificates curl software-properties-common lsb-release - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - /opt/bin/setup_net_env.sh - - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! 
"$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=external \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - - [Install] - WantedBy=multi-user.target - -- path: "/etc/systemd/system/kubelet.service.d/extras.conf" - content: | - [Service] - Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {openstack-config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - DEFAULT_IFC_IP6=$(ip -o -6 route get 1:: | grep -oP "src \K\S+") - if [ -z "${DEFAULT_IFC_IP6}" ] - then - echodate "Failed to get IPv6 address for the default route interface" - 
exit 1 - fi - DEFAULT_IFC_IP=$DEFAULT_IFC_IP6,$DEFAULT_IFC_IP - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUE
vaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - 
ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - 
[plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDNS: - - 10.10.10.10 - - 10.10.10.11 - - 10.10.10.12 - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - 
volumeStatsAggPeriod: 0s - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/ubuntu/testdata/openstack-dualstack.yaml b/pkg/userdata/ubuntu/testdata/openstack-dualstack.yaml deleted file mode 100644 index 8cc558edf..000000000 --- a/pkg/userdata/ubuntu/testdata/openstack-dualstack.yaml +++ /dev/null @@ -1,471 +0,0 @@ -#cloud-config - -hostname: node1 - - -ssh_pwauth: false -ssh_authorized_keys: -- "ssh-rsa AAABBB" - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: "/etc/default/grub.d/60-swap-accounting.cfg" - content: | - # Added by kubermatic machine-controller - # Enable cgroups memory and swap accounting - GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl is-active ufw; then systemctl stop ufw; fi - systemctl mask ufw - systemctl restart systemd-modules-load.service - sysctl --system - apt-get update - - DEBIAN_FRONTEND=noninteractive apt-get -o 
Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install -y \ - curl \ - ca-certificates \ - ceph-common \ - cifs-utils \ - conntrack \ - e2fsprogs \ - ebtables \ - ethtool \ - glusterfs-client \ - iptables \ - jq \ - kmod \ - openssh-client \ - nfs-common \ - socat \ - util-linux \ - ipvsadm - - # Update grub to include kernel command options to enable swap accounting. - # Exclude alibaba cloud until this is fixed https://github.com/kubermatic/machine-controller/issues/682 - - - apt-get update - apt-get install -y apt-transport-https ca-certificates curl software-properties-common lsb-release - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - /opt/bin/setup_net_env.sh - - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! 
"$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=external \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - - [Install] - WantedBy=multi-user.target - -- path: "/etc/systemd/system/kubelet.service.d/extras.conf" - content: | - [Service] - Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {openstack-config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - DEFAULT_IFC_IP6=$(ip -o -6 route get 1:: | grep -oP "src \K\S+") - if [ -z "${DEFAULT_IFC_IP6}" ] - then - echodate "Failed to get IPv6 address for the default route interface" - 
exit 1 - fi - DEFAULT_IFC_IP=$DEFAULT_IFC_IP,$DEFAULT_IFC_IP6 - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUE
vaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - 
ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - 
[plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDNS: - - 10.10.10.10 - - 10.10.10.11 - - 10.10.10.12 - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - 
volumeStatsAggPeriod: 0s - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/ubuntu/testdata/openstack-overwrite-cloud-config.yaml b/pkg/userdata/ubuntu/testdata/openstack-overwrite-cloud-config.yaml deleted file mode 100644 index b8ada4586..000000000 --- a/pkg/userdata/ubuntu/testdata/openstack-overwrite-cloud-config.yaml +++ /dev/null @@ -1,449 +0,0 @@ -#cloud-config - -hostname: node1 - - -ssh_pwauth: false -ssh_authorized_keys: -- "ssh-rsa AAABBB" - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: "/etc/default/grub.d/60-swap-accounting.cfg" - content: | - # Added by kubermatic machine-controller - # Enable cgroups memory and swap accounting - GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl is-active ufw; then systemctl stop ufw; fi - systemctl mask ufw - systemctl restart systemd-modules-load.service - sysctl --system - apt-get update - - 
DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install -y \ - curl \ - ca-certificates \ - ceph-common \ - cifs-utils \ - conntrack \ - e2fsprogs \ - ebtables \ - ethtool \ - glusterfs-client \ - iptables \ - jq \ - kmod \ - openssh-client \ - nfs-common \ - socat \ - util-linux \ - ipvsadm - - # Update grub to include kernel command options to enable swap accounting. - # Exclude alibaba cloud until this is fixed https://github.com/kubermatic/machine-controller/issues/682 - - - apt-get update - apt-get install -y apt-transport-https ca-certificates curl software-properties-common lsb-release - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" - - mkdir -p /etc/systemd/system/containerd.service.d /etc/systemd/system/docker.service.d - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - /opt/bin/setup_net_env.sh - - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! 
"$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=docker.service - Requires=docker.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=openstack \ - --cloud-config=/etc/kubernetes/cloud-config \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=docker \ - --container-runtime-endpoint=unix:///var/run/dockershim.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target - -- path: "/etc/systemd/system/kubelet.service.d/extras.conf" - content: | - [Service] - Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - custom - cloud - config - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed 
to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhT
NFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - 
caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/docker/daemon.json - permissions: "0644" - content: | - {"exec-opts":["native.cgroupdriver=systemd"],"storage-driver":"overlay2","log-driver":"json-file","log-opts":{"max-file":"5","max-size":"100m"}} - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDNS: - - 10.10.10.10 - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - 
nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/ubuntu/testdata/openstack.yaml b/pkg/userdata/ubuntu/testdata/openstack.yaml deleted file mode 100644 index d35ee8ecd..000000000 --- a/pkg/userdata/ubuntu/testdata/openstack.yaml +++ /dev/null @@ -1,466 +0,0 @@ -#cloud-config - -hostname: node1 - - -ssh_pwauth: false -ssh_authorized_keys: -- "ssh-rsa AAABBB" - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - 
content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: "/etc/default/grub.d/60-swap-accounting.cfg" - content: | - # Added by kubermatic machine-controller - # Enable cgroups memory and swap accounting - GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl is-active ufw; then systemctl stop ufw; fi - systemctl mask ufw - systemctl restart systemd-modules-load.service - sysctl --system - apt-get update - - DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install -y \ - curl \ - ca-certificates \ - ceph-common \ - cifs-utils \ - conntrack \ - e2fsprogs \ - ebtables \ - ethtool \ - glusterfs-client \ - iptables \ - jq \ - kmod \ - openssh-client \ - nfs-common \ - socat \ - util-linux \ - ipvsadm - - # Update grub to include kernel command options to enable swap accounting. 
- # Exclude alibaba cloud until this is fixed https://github.com/kubermatic/machine-controller/issues/682 - - - apt-get update - apt-get install -y apt-transport-https ca-certificates curl software-properties-common lsb-release - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - /opt/bin/setup_net_env.sh - - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! 
"$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=openstack \ - --cloud-config=/etc/kubernetes/cloud-config \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target - -- path: "/etc/systemd/system/kubelet.service.d/extras.conf" - content: | - [Service] - Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - {openstack-config:true} - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - 
echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZER
nUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - 
caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: "/etc/kubernetes/kubelet.conf" - 
content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDNS: - - 10.10.10.10 - - 10.10.10.11 - - 10.10.10.12 - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - 
ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/ubuntu/testdata/version-1.24.9.yaml b/pkg/userdata/ubuntu/testdata/version-1.24.9.yaml deleted file mode 100644 index b38c94941..000000000 --- a/pkg/userdata/ubuntu/testdata/version-1.24.9.yaml +++ /dev/null @@ -1,458 +0,0 @@ -#cloud-config - -hostname: node1 - - -ssh_pwauth: false -ssh_authorized_keys: -- "ssh-rsa AAABBB" - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: "/etc/default/grub.d/60-swap-accounting.cfg" - content: | - # Added by kubermatic machine-controller - # Enable cgroups memory and swap accounting - GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl is-active ufw; then systemctl stop ufw; fi - systemctl mask ufw - systemctl restart systemd-modules-load.service - sysctl --system - apt-get update - - DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install -y \ - curl \ - ca-certificates \ - ceph-common \ - cifs-utils \ - conntrack \ - e2fsprogs \ - ebtables \ - ethtool \ - glusterfs-client \ - iptables \ - 
jq \ - kmod \ - openssh-client \ - nfs-common \ - socat \ - util-linux \ - ipvsadm - - # Update grub to include kernel command options to enable swap accounting. - # Exclude alibaba cloud until this is fixed https://github.com/kubermatic/machine-controller/issues/682 - - - apt-get update - apt-get install -y apt-transport-https ca-certificates curl software-properties-common lsb-release - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - /opt/bin/setup_net_env.sh - - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! 
"$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target - -- path: "/etc/systemd/system/kubelet.service.d/extras.conf" - content: | - [Service] - Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string 
"coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlE
VlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - 
MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - 
authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDNS: - - 10.10.10.10 - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/ubuntu/testdata/version-1.25.5.yaml 
b/pkg/userdata/ubuntu/testdata/version-1.25.5.yaml deleted file mode 100644 index 0c53f84ed..000000000 --- a/pkg/userdata/ubuntu/testdata/version-1.25.5.yaml +++ /dev/null @@ -1,458 +0,0 @@ -#cloud-config - -hostname: node1 - - -ssh_pwauth: false -ssh_authorized_keys: -- "ssh-rsa AAABBB" - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: "/etc/default/grub.d/60-swap-accounting.cfg" - content: | - # Added by kubermatic machine-controller - # Enable cgroups memory and swap accounting - GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl is-active ufw; then systemctl stop ufw; fi - systemctl mask ufw - systemctl restart systemd-modules-load.service - sysctl --system - apt-get update - - DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install -y \ - curl \ - ca-certificates \ - ceph-common \ - cifs-utils \ - conntrack \ - e2fsprogs \ - ebtables \ - ethtool \ - glusterfs-client \ - iptables \ - jq \ - kmod \ - openssh-client \ - nfs-common \ - socat \ - util-linux \ - ipvsadm - - # Update grub to include kernel command options to enable swap accounting. 
- # Exclude alibaba cloud until this is fixed https://github.com/kubermatic/machine-controller/issues/682 - - - apt-get update - apt-get install -y apt-transport-https ca-certificates curl software-properties-common lsb-release - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - /opt/bin/setup_net_env.sh - - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! 
"$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target - -- path: "/etc/systemd/system/kubelet.service.d/extras.conf" - content: | - [Service] - Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string 
"coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlE
VlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - 
MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - 
authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDNS: - - 10.10.10.10 - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/ubuntu/testdata/version-1.26.0.yaml 
b/pkg/userdata/ubuntu/testdata/version-1.26.0.yaml deleted file mode 100644 index 6545373b7..000000000 --- a/pkg/userdata/ubuntu/testdata/version-1.26.0.yaml +++ /dev/null @@ -1,458 +0,0 @@ -#cloud-config - -hostname: node1 - - -ssh_pwauth: false -ssh_authorized_keys: -- "ssh-rsa AAABBB" - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: "/etc/default/grub.d/60-swap-accounting.cfg" - content: | - # Added by kubermatic machine-controller - # Enable cgroups memory and swap accounting - GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl is-active ufw; then systemctl stop ufw; fi - systemctl mask ufw - systemctl restart systemd-modules-load.service - sysctl --system - apt-get update - - DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install -y \ - curl \ - ca-certificates \ - ceph-common \ - cifs-utils \ - conntrack \ - e2fsprogs \ - ebtables \ - ethtool \ - glusterfs-client \ - iptables \ - jq \ - kmod \ - openssh-client \ - nfs-common \ - socat \ - util-linux \ - ipvsadm - - # Update grub to include kernel command options to enable swap accounting. 
- # Exclude alibaba cloud until this is fixed https://github.com/kubermatic/machine-controller/issues/682 - - - apt-get update - apt-get install -y apt-transport-https ca-certificates curl software-properties-common lsb-release - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - /opt/bin/setup_net_env.sh - - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! 
"$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=containerd.service - Requires=containerd.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=remote \ - --container-runtime-endpoint=unix:///run/containerd/containerd.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target - -- path: "/etc/systemd/system/kubelet.service.d/extras.conf" - content: | - [Service] - Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string 
"coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlE
VlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - 
MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/containerd/config.toml - permissions: "0644" - content: | - version = 2 - - [metrics] - address = "127.0.0.1:1338" - - [plugins] - [plugins."io.containerd.grpc.v1.cri"] - [plugins."io.containerd.grpc.v1.cri".containerd] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes] - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc] - runtime_type = "io.containerd.runc.v2" - [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options] - SystemdCgroup = true - [plugins."io.containerd.grpc.v1.cri".registry] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors] - [plugins."io.containerd.grpc.v1.cri".registry.mirrors."docker.io"] - endpoint = ["https://registry-1.docker.io"] - - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - 
authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDNS: - - 10.10.10.10 - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/ubuntu/testdata/vsphere-mirrors.yaml 
b/pkg/userdata/ubuntu/testdata/vsphere-mirrors.yaml deleted file mode 100644 index c24a42c5d..000000000 --- a/pkg/userdata/ubuntu/testdata/vsphere-mirrors.yaml +++ /dev/null @@ -1,460 +0,0 @@ -#cloud-config - -hostname: node1 - - -ssh_pwauth: false -ssh_authorized_keys: -- "ssh-rsa AAABBB" - -write_files: -- path: "/etc/environment" - content: | - PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games" - HTTP_PROXY=http://192.168.100.100:3128 - http_proxy=http://192.168.100.100:3128 - HTTPS_PROXY=http://192.168.100.100:3128 - https_proxy=http://192.168.100.100:3128 - NO_PROXY=192.168.1.0 - no_proxy=192.168.1.0 - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: "/etc/default/grub.d/60-swap-accounting.cfg" - content: | - # Added by kubermatic machine-controller - # Enable cgroups memory and swap accounting - GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl is-active ufw; then systemctl stop ufw; fi - systemctl mask ufw - systemctl restart systemd-modules-load.service - sysctl --system - apt-get update - - DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install -y \ 
- curl \ - ca-certificates \ - ceph-common \ - cifs-utils \ - conntrack \ - e2fsprogs \ - ebtables \ - ethtool \ - glusterfs-client \ - iptables \ - jq \ - kmod \ - openssh-client \ - nfs-common \ - socat \ - util-linux \ - open-vm-tools \ - ipvsadm - - # Update grub to include kernel command options to enable swap accounting. - # Exclude alibaba cloud until this is fixed https://github.com/kubermatic/machine-controller/issues/682 - - - apt-get update - apt-get install -y apt-transport-https ca-certificates curl software-properties-common lsb-release - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" - - mkdir -p /etc/systemd/system/containerd.service.d /etc/systemd/system/docker.service.d - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - /opt/bin/setup_net_env.sh - - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! 
"$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=docker.service - Requires=docker.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=vsphere \ - --cloud-config=/etc/kubernetes/cloud-config \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --pod-infra-container-image=192.168.100.100:5000/kubernetes/pause:v3.1 \ - --container-runtime=docker \ - --container-runtime-endpoint=unix:///var/run/dockershim.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target - -- path: "/etc/systemd/system/kubelet.service.d/extras.conf" - content: | - [Service] - Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - custom - cloud - config - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - 
FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUSt
XVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - 
mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/docker/daemon.json - permissions: "0644" - content: | - {"exec-opts":["native.cgroupdriver=systemd"],"storage-driver":"overlay2","log-driver":"json-file","log-opts":{"max-file":"5","max-size":"100m"},"registry-mirrors":["https://registry.docker-cn.com"]} - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDNS: - - 10.10.10.10 - clusterDomain: cluster.local - 
containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/ubuntu/testdata/vsphere-proxy.yaml b/pkg/userdata/ubuntu/testdata/vsphere-proxy.yaml deleted file mode 100644 index eee9a28b5..000000000 --- a/pkg/userdata/ubuntu/testdata/vsphere-proxy.yaml +++ /dev/null @@ -1,460 +0,0 @@ -#cloud-config - -hostname: node1 - - 
-ssh_pwauth: false -ssh_authorized_keys: -- "ssh-rsa AAABBB" - -write_files: -- path: "/etc/environment" - content: | - PATH="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games" - HTTP_PROXY=http://192.168.100.100:3128 - http_proxy=http://192.168.100.100:3128 - HTTPS_PROXY=http://192.168.100.100:3128 - https_proxy=http://192.168.100.100:3128 - NO_PROXY=192.168.1.0 - no_proxy=192.168.1.0 - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: "/etc/default/grub.d/60-swap-accounting.cfg" - content: | - # Added by kubermatic machine-controller - # Enable cgroups memory and swap accounting - GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl is-active ufw; then systemctl stop ufw; fi - systemctl mask ufw - systemctl restart systemd-modules-load.service - sysctl --system - apt-get update - - DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install -y \ - curl \ - ca-certificates \ - ceph-common \ - cifs-utils \ - conntrack \ - e2fsprogs \ - ebtables \ - ethtool \ - glusterfs-client \ - iptables \ - jq \ - kmod \ - openssh-client \ - nfs-common \ - socat \ - util-linux \ - 
open-vm-tools \ - ipvsadm - - # Update grub to include kernel command options to enable swap accounting. - # Exclude alibaba cloud until this is fixed https://github.com/kubermatic/machine-controller/issues/682 - - - apt-get update - apt-get install -y apt-transport-https ca-certificates curl software-properties-common lsb-release - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" - - mkdir -p /etc/systemd/system/containerd.service.d /etc/systemd/system/docker.service.d - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - /opt/bin/setup_net_env.sh - - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! 
"$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=docker.service - Requires=docker.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=vsphere \ - --cloud-config=/etc/kubernetes/cloud-config \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --pod-infra-container-image=192.168.100.100:5000/kubernetes/pause:v3.1 \ - --container-runtime=docker \ - --container-runtime-endpoint=unix:///var/run/dockershim.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target - -- path: "/etc/systemd/system/kubelet.service.d/extras.conf" - content: | - [Service] - Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - custom - cloud - config - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - 
FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUSt
XVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - 
mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/docker/daemon.json - permissions: "0644" - content: | - {"exec-opts":["native.cgroupdriver=systemd"],"storage-driver":"overlay2","log-driver":"json-file","log-opts":{"max-file":"5","max-size":"100m"},"insecure-registries":["192.168.100.100:5000","10.0.0.1:5000"]} - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDNS: - - 10.10.10.10 - clusterDomain: cluster.local - 
containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/userdata/ubuntu/testdata/vsphere.yaml b/pkg/userdata/ubuntu/testdata/vsphere.yaml deleted file mode 100644 index c5341ba32..000000000 --- a/pkg/userdata/ubuntu/testdata/vsphere.yaml +++ /dev/null @@ -1,450 +0,0 @@ -#cloud-config - -hostname: node1 - - -ssh_pwauth: false 
-ssh_authorized_keys: -- "ssh-rsa AAABBB" - -write_files: - -- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" - content: | - [Journal] - SystemMaxUse=5G - - -- path: "/opt/load-kernel-modules.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - set -euo pipefail - - modprobe ip_vs - modprobe ip_vs_rr - modprobe ip_vs_wrr - modprobe ip_vs_sh - - if modinfo nf_conntrack_ipv4 &> /dev/null; then - modprobe nf_conntrack_ipv4 - else - modprobe nf_conntrack - fi - - -- path: "/etc/sysctl.d/k8s.conf" - content: | - net.bridge.bridge-nf-call-ip6tables = 1 - net.bridge.bridge-nf-call-iptables = 1 - kernel.panic_on_oops = 1 - kernel.panic = 10 - net.ipv4.ip_forward = 1 - vm.overcommit_memory = 1 - fs.inotify.max_user_watches = 1048576 - fs.inotify.max_user_instances = 8192 - - -- path: "/etc/default/grub.d/60-swap-accounting.cfg" - content: | - # Added by kubermatic machine-controller - # Enable cgroups memory and swap accounting - GRUB_CMDLINE_LINUX="cgroup_enable=memory swapaccount=1" - -- path: "/opt/bin/setup" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - if systemctl is-active ufw; then systemctl stop ufw; fi - systemctl mask ufw - systemctl restart systemd-modules-load.service - sysctl --system - apt-get update - - DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install -y \ - curl \ - ca-certificates \ - ceph-common \ - cifs-utils \ - conntrack \ - e2fsprogs \ - ebtables \ - ethtool \ - glusterfs-client \ - iptables \ - jq \ - kmod \ - openssh-client \ - nfs-common \ - socat \ - util-linux \ - open-vm-tools \ - ipvsadm - - # Update grub to include kernel command options to enable swap accounting. 
- # Exclude alibaba cloud until this is fixed https://github.com/kubermatic/machine-controller/issues/682 - - - apt-get update - apt-get install -y apt-transport-https ca-certificates curl software-properties-common lsb-release - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - add-apt-repository "deb https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" - - mkdir -p /etc/systemd/system/containerd.service.d /etc/systemd/system/docker.service.d - - cat <"$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - curl -Lfo "$kube_dir/$bin" "$kube_base_url/$bin" - chmod +x "$kube_dir/$bin" - sum=$(curl -Lf "$kube_base_url/$bin.sha256") - echo "$sum $kube_dir/$bin" >>"$kube_sum_file" - done - sha256sum -c "$kube_sum_file" - - for bin in kubelet kubeadm kubectl; do - ln -sf "$kube_dir/$bin" "$opt_bin"/$bin - done - - if [[ ! -x /opt/bin/health-monitor.sh ]]; then - curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/7967a0af2b75f29ad2ab227eeaa26ea7b0f2fbde/pkg/userdata/scripts/health-monitor.sh - chmod +x /opt/bin/health-monitor.sh - fi - - # set kubelet nodeip environment variable - /opt/bin/setup_net_env.sh - - systemctl enable --now kubelet - systemctl enable --now --no-block kubelet-healthcheck.service - systemctl disable setup.service - -- path: "/opt/bin/supervise.sh" - permissions: "0755" - content: | - #!/bin/bash - set -xeuo pipefail - while ! 
"$@"; do - sleep 1 - done - -- path: "/opt/disable-swap.sh" - permissions: "0755" - content: | - sed -i.orig '/.*swap.*/d' /etc/fstab - swapoff -a - -- path: "/etc/systemd/system/kubelet.service" - content: | - [Unit] - After=docker.service - Requires=docker.service - - Description=kubelet: The Kubernetes Node Agent - Documentation=https://kubernetes.io/docs/home/ - - [Service] - User=root - Restart=always - StartLimitInterval=0 - RestartSec=10 - CPUAccounting=true - MemoryAccounting=true - - Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" - EnvironmentFile=-/etc/environment - - ExecStartPre=/bin/bash /opt/load-kernel-modules.sh - - ExecStartPre=/bin/bash /opt/disable-swap.sh - - ExecStartPre=/bin/bash /opt/bin/setup_net_env.sh - ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ - --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ - --kubeconfig=/var/lib/kubelet/kubeconfig \ - --config=/etc/kubernetes/kubelet.conf \ - --cert-dir=/etc/kubernetes/pki \ - --cloud-provider=vsphere \ - --cloud-config=/etc/kubernetes/cloud-config \ - --hostname-override=node1 \ - --exit-on-lock-contention \ - --lock-file=/tmp/kubelet.lock \ - --container-runtime=docker \ - --container-runtime-endpoint=unix:///var/run/dockershim.sock \ - --node-ip ${KUBELET_NODE_IP} - - [Install] - WantedBy=multi-user.target - -- path: "/etc/systemd/system/kubelet.service.d/extras.conf" - content: | - [Service] - Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" -- path: "/etc/kubernetes/cloud-config" - permissions: "0600" - content: | - custom - cloud - config - -- path: "/opt/bin/setup_net_env.sh" - permissions: "0755" - content: | - #!/usr/bin/env bash - echodate() { - echo "[$(date -Is)]" "$@" - } - - # get the default interface IP address - DEFAULT_IFC_IP=$(ip -o route get 1 | grep -oP "src \K\S+") - - # get the full hostname - FULL_HOSTNAME=$(hostname -f) - - if [ -z "${DEFAULT_IFC_IP}" ] - then - echodate "Failed to 
get IP address for the default route interface" - exit 1 - fi - - # write the nodeip_env file - # we need the line below because flatcar has the same string "coreos" in that file - if grep -q coreos /etc/os-release - then - echo -e "KUBELET_NODE_IP=${DEFAULT_IFC_IP}\nKUBELET_HOSTNAME=${FULL_HOSTNAME}" > /etc/kubernetes/nodeip.conf - elif [ ! -d /etc/systemd/system/kubelet.service.d ] - then - echodate "Can't find kubelet service extras directory" - exit 1 - else - echo -e "[Service]\nEnvironment=\"KUBELET_NODE_IP=${DEFAULT_IFC_IP}\"\nEnvironment=\"KUBELET_HOSTNAME=${FULL_HOSTNAME}\"" > /etc/systemd/system/kubelet.service.d/nodeip.conf - fi - - -- path: "/etc/kubernetes/bootstrap-kubelet.conf" - permissions: "0600" - content: | - apiVersion: v1 - clusters: - - cluster: - certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA
0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t - server: https://server:443 - name: "" - contexts: null - current-context: "" - kind: Config - preferences: {} - users: - - name: "" - user: - token: my-token - - -- path: "/etc/kubernetes/pki/ca.crt" - content: | - -----BEGIN CERTIFICATE----- - MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV - BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG - A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 - DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 - NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG - cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv - c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B - AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS - R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT - ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk - JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 - mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW - 
caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G - A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt - hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB - MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES - MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv - bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h - U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao - eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 - UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD - 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n - sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF - kPe6XoSbiLm/kxk32T0= - -----END CERTIFICATE----- - -- path: "/etc/systemd/system/setup.service" - permissions: "0644" - content: | - [Install] - WantedBy=multi-user.target - - [Unit] - Requires=network-online.target - After=network-online.target - - [Service] - Type=oneshot - RemainAfterExit=true - EnvironmentFile=-/etc/environment - ExecStart=/opt/bin/supervise.sh /opt/bin/setup - -- path: "/etc/profile.d/opt-bin-path.sh" - permissions: "0644" - content: | - export PATH="/opt/bin:$PATH" - -- path: /etc/docker/daemon.json - permissions: "0644" - content: | - {"exec-opts":["native.cgroupdriver=systemd"],"storage-driver":"overlay2","log-driver":"json-file","log-opts":{"max-file":"5","max-size":"100m"}} - -- path: "/etc/kubernetes/kubelet.conf" - content: | - apiVersion: kubelet.config.k8s.io/v1beta1 - authentication: - anonymous: - enabled: false - webhook: - cacheTTL: 0s - enabled: true - x509: - clientCAFile: /etc/kubernetes/pki/ca.crt - authorization: - mode: Webhook - webhook: - cacheAuthorizedTTL: 0s - cacheUnauthorizedTTL: 0s - cgroupDriver: systemd - clusterDNS: - - 10.10.10.10 - clusterDomain: cluster.local - containerLogMaxSize: 100Mi - cpuManagerReconcilePeriod: 0s - evictionHard: - imagefs.available: 15% - memory.available: 100Mi - 
nodefs.available: 10% - nodefs.inodesFree: 5% - evictionPressureTransitionPeriod: 0s - featureGates: - RotateKubeletServerCertificate: true - fileCheckFrequency: 0s - httpCheckFrequency: 0s - imageMinimumGCAge: 0s - kind: KubeletConfiguration - kubeReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - logging: - flushFrequency: 0 - options: - json: - infoBufferSize: "0" - verbosity: 0 - memorySwap: {} - nodeStatusReportFrequency: 0s - nodeStatusUpdateFrequency: 0s - protectKernelDefaults: true - rotateCertificates: true - runtimeRequestTimeout: 0s - serverTLSBootstrap: true - shutdownGracePeriod: 0s - shutdownGracePeriodCriticalPods: 0s - staticPodPath: /etc/kubernetes/manifests - streamingConnectionIdleTimeout: 0s - syncFrequency: 0s - systemReserved: - cpu: 200m - ephemeral-storage: 1Gi - memory: 200Mi - tlsCipherSuites: - - TLS_AES_128_GCM_SHA256 - - TLS_AES_256_GCM_SHA384 - - TLS_CHACHA20_POLY1305_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 - - TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 - - TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 - - TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305 - volumePluginDir: /var/lib/kubelet/volumeplugins - volumeStatsAggPeriod: 0s - - -- path: /etc/systemd/system/kubelet-healthcheck.service - permissions: "0644" - content: | - [Unit] - Requires=kubelet.service - After=kubelet.service - - [Service] - ExecStart=/opt/bin/health-monitor.sh kubelet - - [Install] - WantedBy=multi-user.target - - -runcmd: -- systemctl enable --now setup.service diff --git a/pkg/version/version.go b/pkg/version/version.go new file mode 100644 index 000000000..e1959cc81 --- /dev/null +++ b/pkg/version/version.go @@ -0,0 +1,94 @@ +/* +Copyright 2026 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "runtime/debug" +) + +// BuildInfoReader is a function type for reading build info, allowing dependency injection for testing. +type BuildInfoReader func() (*debug.BuildInfo, bool) + +// Info holds version information extracted from build metadata. +type Info struct { + ModuleVersion string // Module version from build info + Revision string // Git commit hash + Dirty bool // Whether working directory had uncommitted changes + readBuildInfo BuildInfoReader +} + +type Option func(*Info) + +func WithReadBuildInfoFunc(f BuildInfoReader) Option { + return func(i *Info) { + i.readBuildInfo = f + } +} + +// Get retrieves version information from build metadata using the default debug.ReadBuildInfo. +func Get(opts ...Option) Info { + info := Info{ + Revision: "unknown", + readBuildInfo: debug.ReadBuildInfo, + } + + for _, opt := range opts { + opt(&info) + } + + bi, ok := info.readBuildInfo() + if !ok { + return info + } + + // Save the main module version + if bi.Main.Version != "" && bi.Main.Version != "(devel)" { + info.ModuleVersion = bi.Main.Version + } + + // Extract VCS info from build settings + for _, setting := range bi.Settings { + switch setting.Key { + case "vcs.revision": + info.Revision = setting.Value + case "vcs.modified": + info.Dirty = setting.Value == "true" + } + } + + return info +} + +// String returns a formatted version string based on the available build version information. 
+func (i Info) String() string { + // Use module version if available + if i.ModuleVersion != "" { + return i.ModuleVersion + } + + // Fall back to VCS revision + if i.Revision == "unknown" { + return "dev" + } + + version := i.Revision + if i.Dirty { + version += "-dirty" + } + + return version +} diff --git a/pkg/version/version_test.go b/pkg/version/version_test.go new file mode 100644 index 000000000..3eb08629a --- /dev/null +++ b/pkg/version/version_test.go @@ -0,0 +1,253 @@ +/* +Copyright 2026 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package version_test + +import ( + "runtime/debug" + "testing" + + "k8c.io/machine-controller/pkg/version" +) + +func TestGet(t *testing.T) { + tests := []struct { + name string + buildInfoFunc version.BuildInfoReader + wantRevision string + wantModuleVer string + wantDirty bool + }{ + { + name: "build info not available", + buildInfoFunc: func() (*debug.BuildInfo, bool) { + return nil, false + }, + wantRevision: "unknown", + wantModuleVer: "", + wantDirty: false, + }, + { + name: "build info with module version and vcs info", + buildInfoFunc: func() (*debug.BuildInfo, bool) { + return &debug.BuildInfo{ + Main: debug.Module{ + Version: "v1.2.3", + }, + Settings: []debug.BuildSetting{ + {Key: "vcs.revision", Value: "abc123def456"}, + {Key: "vcs.modified", Value: "false"}, + }, + }, true + }, + wantRevision: "abc123def456", + wantModuleVer: "v1.2.3", + wantDirty: false, + }, + { + name: "build info with dirty working directory", + buildInfoFunc: func() (*debug.BuildInfo, bool) { + return &debug.BuildInfo{ + Main: debug.Module{ + Version: "v0.5.0", + }, + Settings: []debug.BuildSetting{ + {Key: "vcs.revision", Value: "deadbeef"}, + {Key: "vcs.modified", Value: "true"}, + }, + }, true + }, + wantRevision: "deadbeef", + wantModuleVer: "v0.5.0", + wantDirty: true, + }, + { + name: "build info with devel version", + buildInfoFunc: func() (*debug.BuildInfo, bool) { + return &debug.BuildInfo{ + Main: debug.Module{ + Version: "(devel)", + }, + Settings: []debug.BuildSetting{ + {Key: "vcs.revision", Value: "cafe1234"}, + {Key: "vcs.modified", Value: "false"}, + }, + }, true + }, + wantRevision: "cafe1234", + wantModuleVer: "", + wantDirty: false, + }, + { + name: "build info with empty version", + buildInfoFunc: func() (*debug.BuildInfo, bool) { + return &debug.BuildInfo{ + Main: debug.Module{ + Version: "", + }, + Settings: []debug.BuildSetting{ + {Key: "vcs.revision", Value: "1a2b3c4d"}, + }, + }, true + }, + wantRevision: "1a2b3c4d", + wantModuleVer: "", + 
wantDirty: false, + }, + { + name: "build info without vcs settings", + buildInfoFunc: func() (*debug.BuildInfo, bool) { + return &debug.BuildInfo{ + Main: debug.Module{ + Version: "v2.0.0", + }, + Settings: []debug.BuildSetting{}, + }, true + }, + wantRevision: "unknown", + wantModuleVer: "v2.0.0", + wantDirty: false, + }, + { + name: "build info with only revision", + buildInfoFunc: func() (*debug.BuildInfo, bool) { + return &debug.BuildInfo{ + Main: debug.Module{}, + Settings: []debug.BuildSetting{ + {Key: "vcs.revision", Value: "abcdef123456"}, + }, + }, true + }, + wantRevision: "abcdef123456", + wantModuleVer: "", + wantDirty: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := version.Get(version.WithReadBuildInfoFunc(tt.buildInfoFunc)) + + if got.Revision != tt.wantRevision { + t.Errorf("Get().Revision = %v, want %v", got.Revision, tt.wantRevision) + } + if got.ModuleVersion != tt.wantModuleVer { + t.Errorf("Get().ModuleVersion = %v, want %v", got.ModuleVersion, tt.wantModuleVer) + } + if got.Dirty != tt.wantDirty { + t.Errorf("Get().Dirty = %v, want %v", got.Dirty, tt.wantDirty) + } + }) + } +} + +func TestInfo_String(t *testing.T) { + tests := []struct { + name string + info version.Info + want string + }{ + { + name: "module version takes precedence", + info: version.Info{ + ModuleVersion: "v1.2.3", + Revision: "abc123", + Dirty: false, + }, + want: "v1.2.3", + }, + { + name: "module version with dirty flag (dirty is ignored when module version present)", + info: version.Info{ + ModuleVersion: "v2.0.0", + Revision: "deadbeef", + Dirty: true, + }, + want: "v2.0.0", + }, + { + name: "revision without module version", + info: version.Info{ + ModuleVersion: "", + Revision: "cafe1234", + Dirty: false, + }, + want: "cafe1234", + }, + { + name: "revision with dirty flag", + info: version.Info{ + ModuleVersion: "", + Revision: "abc123def", + Dirty: true, + }, + want: "abc123def-dirty", + }, + { + name: "unknown revision 
returns dev", + info: version.Info{ + ModuleVersion: "", + Revision: "unknown", + Dirty: false, + }, + want: "dev", + }, + { + name: "unknown revision with dirty flag returns dev", + info: version.Info{ + ModuleVersion: "", + Revision: "unknown", + Dirty: true, + }, + want: "dev", + }, + { + name: "empty revision returns empty string", + info: version.Info{ + ModuleVersion: "", + Revision: "", + Dirty: false, + }, + want: "", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := tt.info.String() + if got != tt.want { + t.Errorf("Info.String() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestGet_WithoutOptions(t *testing.T) { + // Test calling Get without any options to ensure it doesn't panic + // and returns a valid Info struct + info := version.Get() + + // Basic validation - it should have some value for Revision + if info.Revision == "" { + t.Error("Get() without options returned empty Revision") + } + + // String() should not panic and return something + str := info.String() + if str == "" { + t.Error("Info.String() returned empty string") + } +} diff --git a/sdk/.golangci.yml b/sdk/.golangci.yml new file mode 100644 index 000000000..c678a6c2d --- /dev/null +++ b/sdk/.golangci.yml @@ -0,0 +1,53 @@ +# Copyright 2022 The Kubermatic Kubernetes Platform contributors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# This file contains *additional* linting rules that just apply to the SDK. 
+# When running `make lint`, the SDK is linted twice, once with the repository +# root's .golangci.yml and once with the SDK's config file. +# + +version: "2" +run: + modules-download-mode: readonly +linters: + default: none + enable: + - depguard + settings: + depguard: + rules: + noreverse: + deny: + - pkg: k8c.io/machine-controller/pkg + desc: SDK must not depend on the main module + exclusions: + generated: lax + presets: + - comments + - common-false-positives + - legacy + - std-error-handling + paths: + - zz_generated.*.go + - third_party$ + - builtin$ + - examples$ +formatters: + exclusions: + generated: lax + paths: + - third_party$ + - builtin$ + - examples$ diff --git a/sdk/LICENSE b/sdk/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/sdk/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/sdk/Makefile b/sdk/Makefile new file mode 100644 index 000000000..d49b0491f --- /dev/null +++ b/sdk/Makefile @@ -0,0 +1,18 @@ +# Copyright 2025 The Machine Controller Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +.PHONY: lint +lint: + golangci-lint run --verbose --print-resources-usage ./... + golangci-lint run --verbose --print-resources-usage --config ../.golangci.yml ./... diff --git a/sdk/README.md b/sdk/README.md new file mode 100644 index 000000000..19febcf65 --- /dev/null +++ b/sdk/README.md @@ -0,0 +1,41 @@ +# machine-controller SDK + +This directory contains the `k8c.io/machine-controller/sdk` Go module. If you're +looking at integrating the machine controller (MC) into your application, this +is where you should start. 
+ + ## Usage + + Simply `go get` the SDK to use it in your application: + + ```shell + go get k8c.io/machine-controller/sdk + ``` + + If necessary, you can also import the main MC module, but this comes with heavy +dependencies that might be too costly to maintain for you: + + ```shell + go get k8c.io/machine-controller + go get k8c.io/machine-controller/sdk + ``` + + In this case it's recommended to always keep both dependencies on the exact same +version. + + ## Development + + There are three main design criteria for the SDK: + + 1. The SDK should contain a minimal set of dependencies, in a perfect world it + would be only Kube dependencies. The idea behind the SDK is to make importing + the MC cheap and easy and to not force dependencies onto consumers. + + 1. The SDK should contain as few functions as possible. Functions always + represent application logic and usually that logic should not be hardcoded into + client apps. Every function in the SDK is therefore to be considered "eternal". + + 1. The SDK should truly follow the Go Modules idea of declaring the _minimum_ + compatible versions of every dependency and even of Go. The main machine + controller module can and should have the _latest_ dependencies, but the SDK + should not force consumers to be on the most recent Kube version, for example. 
diff --git a/pkg/apis/cluster/common/consts.go b/sdk/apis/cluster/common/consts.go similarity index 76% rename from pkg/apis/cluster/common/consts.go rename to sdk/apis/cluster/common/consts.go index ca58aafca..add594654 100644 --- a/pkg/apis/cluster/common/consts.go +++ b/sdk/apis/cluster/common/consts.go @@ -18,8 +18,6 @@ package common import ( "fmt" - "strconv" - "strings" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -134,6 +132,37 @@ const ( ExternalCloudProviderKubeletFlag KubeletFlags = "ExternalCloudProvider" ) +const ( + DefaultMachineDeploymentUniqueLabelKey = "machine-template-hash" + + // RevisionAnnotation is the revision annotation of a machine deployment's machine sets which records its rollout sequence. + RevisionAnnotation = "machinedeployment.clusters.k8s.io/revision" + // RevisionHistoryAnnotation maintains the history of all old revisions that a machine set has served for a machine deployment. + RevisionHistoryAnnotation = "machinedeployment.clusters.k8s.io/revision-history" + // DesiredReplicasAnnotation is the desired replicas for a machine deployment recorded as an annotation + // in its machine sets. Helps in separating scaling events from the rollout process and for + // determining if the new machine set for a deployment is really saturated. + DesiredReplicasAnnotation = "machinedeployment.clusters.k8s.io/desired-replicas" + // MaxReplicasAnnotation is the maximum replicas a deployment can have at a given point, which + // is machinedeployment.spec.replicas + maxSurge. Used by the underlying machine sets to estimate their + // proportions in case the deployment has surge replicas. + MaxReplicasAnnotation = "machinedeployment.clusters.k8s.io/max-replicas" + + // FailedMSCreateReason is added in a machine deployment when it cannot create a new machine set. + FailedMSCreateReason = "MachineSetCreateError" + // FoundNewMSReason is added in a machine deployment when it adopts an existing machine set. 
+ FoundNewMSReason = "FoundNewMachineSet" + // PausedDeployReason is added in a deployment when it is paused. Lack of progress shouldn't be + // estimated once a deployment is paused. + PausedDeployReason = "DeploymentPaused" + + // MinimumReplicasAvailable is added in a deployment when it has its minimum replicas required available. + MinimumReplicasAvailable = "MinimumReplicasAvailable" + // MinimumReplicasUnavailable is added in a deployment when it doesn't have the minimum required replicas + // available. + MinimumReplicasUnavailable = "MinimumReplicasUnavailable" +) + const ( SystemReservedKubeletConfig = "SystemReserved" KubeReservedKubeletConfig = "KubeReserved" @@ -141,6 +170,10 @@ const ( ContainerLogMaxSizeKubeletConfig = "ContainerLogMaxSize" ContainerLogMaxFilesKubeletConfig = "ContainerLogMaxFiles" MaxPodsKubeletConfig = "MaxPods" + ImageGCHighThresholdPercent = "ImageGCHighThresholdPercent" + ImageGCLowThresholdPercent = "ImageGCLowThresholdPercent" + ImageMinimumGCAge = "ImageMinimumGCAge" + ImageMaximumGCAge = "ImageMaximumGCAge" ) const ( @@ -175,49 +208,6 @@ func SetKubeletFlags(metaobj metav1.Object, flags map[KubeletFlags]string) { metaobj.SetAnnotations(annts) } -func GetKubeletConfigs(annotations map[string]string) map[string]string { - configs := map[string]string{} - for name, value := range annotations { - if strings.HasPrefix(name, KubeletConfigAnnotationPrefixV1) { - nameConfigValue := strings.SplitN(name, "/", 2) - if len(nameConfigValue) != 2 { - continue - } - configs[nameConfigValue[1]] = value - } - } - return configs -} - -func GetKubeletFeatureGates(annotations map[string]string) map[string]bool { - result := map[string]bool{} - for name, value := range annotations { - if strings.HasPrefix(name, KubeletFeatureGatesAnnotationPrefixV1) { - nameGateValue := strings.SplitN(name, "/", 2) - if len(nameGateValue) != 2 { - continue - } - realBool, _ := strconv.ParseBool(value) - result[nameGateValue[1]] = realBool - } - } - return result 
-} - -func GetKubeletFlags(annotations map[string]string) map[KubeletFlags]string { - result := map[KubeletFlags]string{} - for name, value := range annotations { - if strings.HasPrefix(name, KubeletFlagsGroupAnnotationPrefixV1) { - nameFlagValue := strings.SplitN(name, "/", 2) - if len(nameFlagValue) != 2 { - continue - } - result[KubeletFlags(nameFlagValue[1])] = value - } - } - return result -} - const OperatingSystemLabelV1 = "v1.machine-controller.kubermatic.io/operating-system" func SetOSLabel(metaobj metav1.Object, osName string) { diff --git a/pkg/apis/cluster/common/plugins.go b/sdk/apis/cluster/common/plugins.go similarity index 83% rename from pkg/apis/cluster/common/plugins.go rename to sdk/apis/cluster/common/plugins.go index a611863a1..7d5a7d774 100644 --- a/pkg/apis/cluster/common/plugins.go +++ b/sdk/apis/cluster/common/plugins.go @@ -17,11 +17,8 @@ limitations under the License. package common import ( + "fmt" "sync" - - "github.com/pkg/errors" - - "k8s.io/klog" ) var ( @@ -35,9 +32,8 @@ func RegisterClusterProvisioner(name string, provisioner interface{}) { providersMutex.Lock() defer providersMutex.Unlock() if _, found := providers[name]; found { - klog.Fatalf("Cluster provisioner %q was registered twice", name) + panic(fmt.Sprintf("Cluster provisioner %q was registered twice", name)) } - klog.V(1).Infof("Registered cluster provisioner %q", name) providers[name] = provisioner } @@ -46,7 +42,7 @@ func ClusterProvisioner(name string) (interface{}, error) { defer providersMutex.Unlock() provisioner, found := providers[name] if !found { - return nil, errors.Errorf("unable to find provisioner for %s", name) + return nil, fmt.Errorf("failed to find provisioner for %s", name) } return provisioner, nil } diff --git a/pkg/apis/cluster/v1alpha1/common_types.go b/sdk/apis/cluster/v1alpha1/common_types.go similarity index 100% rename from pkg/apis/cluster/v1alpha1/common_types.go rename to sdk/apis/cluster/v1alpha1/common_types.go diff --git 
a/pkg/apis/cluster/v1alpha1/conversions/conversions.go b/sdk/apis/cluster/v1alpha1/conversions/conversions.go similarity index 85% rename from pkg/apis/cluster/v1alpha1/conversions/conversions.go rename to sdk/apis/cluster/v1alpha1/conversions/conversions.go index d33350b8e..7a0f7a83b 100644 --- a/pkg/apis/cluster/v1alpha1/conversions/conversions.go +++ b/sdk/apis/cluster/v1alpha1/conversions/conversions.go @@ -20,8 +20,8 @@ import ( "encoding/json" "fmt" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - machinesv1alpha1 "github.com/kubermatic/machine-controller/pkg/machines/v1alpha1" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + machinesv1alpha1 "k8c.io/machine-controller/sdk/apis/machines/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -34,10 +34,10 @@ func Convert_MachinesV1alpha1Machine_To_ClusterV1alpha1Machine(in *machinesv1alp out.ResourceVersion = "" out.Generation = 0 out.CreationTimestamp = metav1.Time{} - out.ObjectMeta.Namespace = metav1.NamespaceSystem + out.Namespace = metav1.NamespaceSystem - // github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1.MachineStatus and - // pkg/machines/v1alpha1.MachineStatus are semantically identical, the former + // k8c.io/machine-controller/sdk/apis/cluster/v1alpha1.MachineStatus and + // sdk/apis/machines/v1alpha1.MachineStatus are semantically identical, the former // only has one additional field, so we cast by serializing and deserializing inStatusJSON, err := json.Marshal(in.Status) if err != nil { diff --git a/pkg/apis/cluster/v1alpha1/conversions/conversions_test.go b/sdk/apis/cluster/v1alpha1/conversions/conversions_test.go similarity index 92% rename from pkg/apis/cluster/v1alpha1/conversions/conversions_test.go rename to sdk/apis/cluster/v1alpha1/conversions/conversions_test.go index fe87ba1b9..52a57404a 100644 --- a/pkg/apis/cluster/v1alpha1/conversions/conversions_test.go 
+++ b/sdk/apis/cluster/v1alpha1/conversions/conversions_test.go @@ -23,12 +23,11 @@ import ( "os" "testing" - "github.com/ghodss/yaml" - - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - machinesv1alpha1 "github.com/kubermatic/machine-controller/pkg/machines/v1alpha1" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + machinesv1alpha1 "k8c.io/machine-controller/sdk/apis/machines/v1alpha1" kyaml "k8s.io/apimachinery/pkg/util/yaml" + "sigs.k8s.io/yaml" ) var update = flag.Bool("update", false, "update .testdata files") diff --git a/pkg/apis/cluster/v1alpha1/conversions/providerconfig_to_providerspec.go b/sdk/apis/cluster/v1alpha1/conversions/providerconfig_to_providerspec.go similarity index 98% rename from pkg/apis/cluster/v1alpha1/conversions/providerconfig_to_providerspec.go rename to sdk/apis/cluster/v1alpha1/conversions/providerconfig_to_providerspec.go index d48b19462..c7e26fbb0 100644 --- a/pkg/apis/cluster/v1alpha1/conversions/providerconfig_to_providerspec.go +++ b/sdk/apis/cluster/v1alpha1/conversions/providerconfig_to_providerspec.go @@ -20,7 +20,7 @@ import ( "encoding/json" "fmt" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/pkg/apis/cluster/v1alpha1/conversions/providerconfig_to_providerspec_test.go b/sdk/apis/cluster/v1alpha1/conversions/providerconfig_to_providerspec_test.go similarity index 98% rename from pkg/apis/cluster/v1alpha1/conversions/providerconfig_to_providerspec_test.go rename to sdk/apis/cluster/v1alpha1/conversions/providerconfig_to_providerspec_test.go index 7f290e02a..feb220858 100644 --- a/pkg/apis/cluster/v1alpha1/conversions/providerconfig_to_providerspec_test.go +++ b/sdk/apis/cluster/v1alpha1/conversions/providerconfig_to_providerspec_test.go @@ -22,9 +22,9 @@ import ( 
"os" "testing" - "github.com/ghodss/yaml" + testhelper "k8c.io/machine-controller/sdk/internal/test" - testhelper "github.com/kubermatic/machine-controller/pkg/test" + "sigs.k8s.io/yaml" ) func Test_Convert_MachineDeployment_ProviderConfig_To_ProviderSpec(t *testing.T) { diff --git a/pkg/apis/cluster/v1alpha1/conversions/testdata/clusterv1alpha1machineDeploymentWithProviderConfig/hetzner.yaml b/sdk/apis/cluster/v1alpha1/conversions/testdata/clusterv1alpha1machineDeploymentWithProviderConfig/hetzner.yaml similarity index 100% rename from pkg/apis/cluster/v1alpha1/conversions/testdata/clusterv1alpha1machineDeploymentWithProviderConfig/hetzner.yaml rename to sdk/apis/cluster/v1alpha1/conversions/testdata/clusterv1alpha1machineDeploymentWithProviderConfig/hetzner.yaml diff --git a/pkg/apis/cluster/v1alpha1/conversions/testdata/clusterv1alpha1machineSetWithProviderConfig/hetzner.yaml b/sdk/apis/cluster/v1alpha1/conversions/testdata/clusterv1alpha1machineSetWithProviderConfig/hetzner.yaml similarity index 100% rename from pkg/apis/cluster/v1alpha1/conversions/testdata/clusterv1alpha1machineSetWithProviderConfig/hetzner.yaml rename to sdk/apis/cluster/v1alpha1/conversions/testdata/clusterv1alpha1machineSetWithProviderConfig/hetzner.yaml diff --git a/pkg/apis/cluster/v1alpha1/conversions/testdata/clusterv1alpha1machineWithProviderConfig/aws.yaml b/sdk/apis/cluster/v1alpha1/conversions/testdata/clusterv1alpha1machineWithProviderConfig/aws.yaml similarity index 96% rename from pkg/apis/cluster/v1alpha1/conversions/testdata/clusterv1alpha1machineWithProviderConfig/aws.yaml rename to sdk/apis/cluster/v1alpha1/conversions/testdata/clusterv1alpha1machineWithProviderConfig/aws.yaml index 9e94d17e3..52c66c07e 100644 --- a/pkg/apis/cluster/v1alpha1/conversions/testdata/clusterv1alpha1machineWithProviderConfig/aws.yaml +++ b/sdk/apis/cluster/v1alpha1/conversions/testdata/clusterv1alpha1machineWithProviderConfig/aws.yaml @@ -27,7 +27,7 @@ spec: subnetId: subnet-2bff4f43 tags: 
KubernetesCluster: 6qsm86c2d - vpcId: vpc-819f62e9 + vpcId: vpc-079f7648481a11e77 operatingSystem: flatcar operatingSystemSpec: disableAutoUpdate: true diff --git a/pkg/apis/cluster/v1alpha1/conversions/testdata/clusterv1alpha1machineWithProviderConfig/hetzner.yaml b/sdk/apis/cluster/v1alpha1/conversions/testdata/clusterv1alpha1machineWithProviderConfig/hetzner.yaml similarity index 95% rename from pkg/apis/cluster/v1alpha1/conversions/testdata/clusterv1alpha1machineWithProviderConfig/hetzner.yaml rename to sdk/apis/cluster/v1alpha1/conversions/testdata/clusterv1alpha1machineWithProviderConfig/hetzner.yaml index 78e599569..ffb468466 100644 --- a/pkg/apis/cluster/v1alpha1/conversions/testdata/clusterv1alpha1machineWithProviderConfig/hetzner.yaml +++ b/sdk/apis/cluster/v1alpha1/conversions/testdata/clusterv1alpha1machineWithProviderConfig/hetzner.yaml @@ -10,7 +10,7 @@ spec: cloudProviderSpec: datacenter: '' location: fsn1 - serverType: cx11 + serverType: cx23 token: << HETZNER_TOKEN >> operatingSystem: << OS_NAME >> operatingSystemSpec: diff --git a/pkg/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/aws.yaml b/sdk/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/aws.yaml similarity index 94% rename from pkg/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/aws.yaml rename to sdk/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/aws.yaml index e9a7245d7..89b00babe 100644 --- a/pkg/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/aws.yaml +++ b/sdk/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/aws.yaml @@ -16,7 +16,7 @@ spec: secretAccessKey: "val" region: "eu-central-1" availabilityZone: "eu-central-1a" - vpcId: "vpc-819f62e9" + vpcId: "vpc-079f7648481a11e77" subnetId: "subnet-2bff4f43" instanceType: "t2.micro" diskSize: 50 diff --git a/pkg/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/azure.yaml 
b/sdk/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/azure.yaml similarity index 100% rename from pkg/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/azure.yaml rename to sdk/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/azure.yaml diff --git a/pkg/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/digitalocean.yaml b/sdk/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/digitalocean.yaml similarity index 100% rename from pkg/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/digitalocean.yaml rename to sdk/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/digitalocean.yaml diff --git a/pkg/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/hetzner.yaml b/sdk/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/hetzner.yaml similarity index 95% rename from pkg/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/hetzner.yaml rename to sdk/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/hetzner.yaml index 62fa47ccc..c9bc8ec5f 100644 --- a/pkg/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/hetzner.yaml +++ b/sdk/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/hetzner.yaml @@ -13,7 +13,7 @@ spec: namespace: kube-system name: machine-controller-hetzner key: token - serverType: "cx11" + serverType: "cx23" datacenter: "" location: "fsn1" operatingSystem: "ubuntu" diff --git a/pkg/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/linode.yaml b/sdk/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/linode.yaml similarity index 100% rename from pkg/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/linode.yaml rename to sdk/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/linode.yaml diff --git a/pkg/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/openstack.yaml 
b/sdk/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/openstack.yaml similarity index 90% rename from pkg/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/openstack.yaml rename to sdk/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/openstack.yaml index 157d535d4..bd5e73696 100644 --- a/pkg/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/openstack.yaml +++ b/sdk/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/openstack.yaml @@ -56,6 +56,10 @@ spec: region: "" # Only required if there is more than one network available network: "" + # If you want to use multiple networks, you can specify them here. The first network in the list will be used as the primary network (e.g. for floating IP's). + # networks: + # - "private-network" + # - "public-network" # Only required if the network has more than one subnet subnet: "" # Can be increased for slower OpenStack setups. No values below 1m (60s) possible as this makes no sense. 
@@ -65,7 +69,6 @@ spec: # the list of tags you would like to attach to the instance tags: tagKey: tagValue - # Can be 'ubuntu' or 'centos' operatingSystem: "ubuntu" operatingSystemSpec: distUpgradeOnBoot: true diff --git a/pkg/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/vsphere-static-ip.yaml b/sdk/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/vsphere-static-ip.yaml similarity index 100% rename from pkg/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/vsphere-static-ip.yaml rename to sdk/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/vsphere-static-ip.yaml diff --git a/pkg/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/vsphere.yaml b/sdk/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/vsphere.yaml similarity index 100% rename from pkg/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/vsphere.yaml rename to sdk/apis/cluster/v1alpha1/conversions/testdata/machinesv1alpha1machine/vsphere.yaml diff --git a/pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/aws.yaml b/sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/aws.yaml similarity index 95% rename from pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/aws.yaml rename to sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/aws.yaml index 77d13a080..9d89343b9 100644 --- a/pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/aws.yaml +++ b/sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/aws.yaml @@ -22,7 +22,7 @@ spec: subnetId: subnet-2bff4f43 tags: KubernetesCluster: 6qsm86c2d - vpcId: vpc-819f62e9 + vpcId: vpc-079f7648481a11e77 operatingSystem: flatcar operatingSystemSpec: disableAutoUpdate: true diff --git a/pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/azure.yaml 
b/sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/azure.yaml similarity index 100% rename from pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/azure.yaml rename to sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/azure.yaml diff --git a/pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/digitalocean.yaml b/sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/digitalocean.yaml similarity index 100% rename from pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/digitalocean.yaml rename to sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/digitalocean.yaml diff --git a/pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/hetzner.yaml b/sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/hetzner.yaml similarity index 95% rename from pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/hetzner.yaml rename to sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/hetzner.yaml index e5a817e69..dbb95e18e 100644 --- a/pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/hetzner.yaml +++ b/sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/hetzner.yaml @@ -11,7 +11,7 @@ spec: cloudProviderSpec: datacenter: "" location: fsn1 - serverType: cx11 + serverType: cx23 token: secretKeyRef: key: token diff --git a/pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/linode.yaml b/sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/linode.yaml similarity index 100% rename from pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/linode.yaml rename to sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/linode.yaml diff --git 
a/pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/openstack.yaml b/sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/openstack.yaml similarity index 100% rename from pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/openstack.yaml rename to sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/openstack.yaml diff --git a/pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/vsphere-static-ip.yaml b/sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/vsphere-static-ip.yaml similarity index 100% rename from pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/vsphere-static-ip.yaml rename to sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/vsphere-static-ip.yaml diff --git a/pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/vsphere.yaml b/sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/vsphere.yaml similarity index 100% rename from pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/vsphere.yaml rename to sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machine/vsphere.yaml diff --git a/pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machineDeploymentWithProviderConfig/hetzner.yaml b/sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machineDeploymentWithProviderConfig/hetzner.yaml similarity index 100% rename from pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machineDeploymentWithProviderConfig/hetzner.yaml rename to sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machineDeploymentWithProviderConfig/hetzner.yaml diff --git a/pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machineSetWithProviderConfig/hetzner.yaml 
b/sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machineSetWithProviderConfig/hetzner.yaml similarity index 100% rename from pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machineSetWithProviderConfig/hetzner.yaml rename to sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machineSetWithProviderConfig/hetzner.yaml diff --git a/pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machineWithProviderConfig/aws.yaml b/sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machineWithProviderConfig/aws.yaml similarity index 96% rename from pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machineWithProviderConfig/aws.yaml rename to sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machineWithProviderConfig/aws.yaml index ec50f95a3..470fc9632 100644 --- a/pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machineWithProviderConfig/aws.yaml +++ b/sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machineWithProviderConfig/aws.yaml @@ -30,7 +30,7 @@ spec: subnetId: subnet-2bff4f43 tags: KubernetesCluster: 6qsm86c2d - vpcId: vpc-819f62e9 + vpcId: vpc-079f7648481a11e77 operatingSystem: flatcar operatingSystemSpec: disableAutoUpdate: true diff --git a/pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machineWithProviderConfig/hetzner.yaml b/sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machineWithProviderConfig/hetzner.yaml similarity index 95% rename from pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machineWithProviderConfig/hetzner.yaml rename to sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machineWithProviderConfig/hetzner.yaml index e25b81872..1441a7965 100644 --- a/pkg/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machineWithProviderConfig/hetzner.yaml +++ 
b/sdk/apis/cluster/v1alpha1/conversions/testdata/migrated_clusterv1alpha1machineWithProviderConfig/hetzner.yaml @@ -13,7 +13,7 @@ spec: cloudProviderSpec: datacenter: "" location: fsn1 - serverType: cx11 + serverType: cx23 token: << HETZNER_TOKEN >> operatingSystem: << OS_NAME >> operatingSystemSpec: diff --git a/pkg/apis/cluster/v1alpha1/defaults.go b/sdk/apis/cluster/v1alpha1/defaults.go similarity index 97% rename from pkg/apis/cluster/v1alpha1/defaults.go rename to sdk/apis/cluster/v1alpha1/defaults.go index 33be49315..a022f33ce 100644 --- a/pkg/apis/cluster/v1alpha1/defaults.go +++ b/sdk/apis/cluster/v1alpha1/defaults.go @@ -17,7 +17,7 @@ limitations under the License. package v1alpha1 import ( - "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" + "k8c.io/machine-controller/sdk/apis/cluster/common" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" diff --git a/pkg/apis/cluster/v1alpha1/doc.go b/sdk/apis/cluster/v1alpha1/doc.go similarity index 90% rename from pkg/apis/cluster/v1alpha1/doc.go rename to sdk/apis/cluster/v1alpha1/doc.go index 1d1025365..3fe0390c2 100644 --- a/pkg/apis/cluster/v1alpha1/doc.go +++ b/sdk/apis/cluster/v1alpha1/doc.go @@ -17,7 +17,7 @@ limitations under the License. 
// Package v1alpha1 contains API Schema definitions for the cluster v1alpha1 API group // +k8s:openapi-gen=true // +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=github.com/kubermatic/machine-controller/pkg/apis/cluster +// +k8s:conversion-gen=k8c.io/machine-controller/sdk/apis/cluster // +k8s:defaulter-gen=TypeMeta // +groupName=cluster.k8s.io package v1alpha1 diff --git a/pkg/apis/cluster/v1alpha1/machine_types.go b/sdk/apis/cluster/v1alpha1/machine_types.go similarity index 98% rename from pkg/apis/cluster/v1alpha1/machine_types.go rename to sdk/apis/cluster/v1alpha1/machine_types.go index 4295c340e..6371ef270 100644 --- a/pkg/apis/cluster/v1alpha1/machine_types.go +++ b/sdk/apis/cluster/v1alpha1/machine_types.go @@ -17,7 +17,7 @@ limitations under the License. package v1alpha1 import ( - "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" + "k8c.io/machine-controller/sdk/apis/cluster/common" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -248,7 +248,3 @@ type MachineList struct { metav1.ListMeta `json:"metadata,omitempty"` Items []Machine `json:"items"` } - -func init() { - SchemeBuilder.Register(&Machine{}, &MachineList{}) -} diff --git a/pkg/apis/cluster/v1alpha1/machineclass_types.go b/sdk/apis/cluster/v1alpha1/machineclass_types.go similarity index 97% rename from pkg/apis/cluster/v1alpha1/machineclass_types.go rename to sdk/apis/cluster/v1alpha1/machineclass_types.go index b73553fbd..f1e30f07b 100644 --- a/pkg/apis/cluster/v1alpha1/machineclass_types.go +++ b/sdk/apis/cluster/v1alpha1/machineclass_types.go @@ -73,7 +73,3 @@ type MachineClassList struct { metav1.ListMeta `json:"metadata,omitempty"` Items []MachineClass `json:"items"` } - -func init() { - SchemeBuilder.Register(&MachineClass{}, &MachineClassList{}) -} diff --git a/pkg/apis/cluster/v1alpha1/machinedeployment_types.go b/sdk/apis/cluster/v1alpha1/machinedeployment_types.go similarity index 97% rename from 
pkg/apis/cluster/v1alpha1/machinedeployment_types.go rename to sdk/apis/cluster/v1alpha1/machinedeployment_types.go index 68aa5410d..8faad9d9a 100644 --- a/pkg/apis/cluster/v1alpha1/machinedeployment_types.go +++ b/sdk/apis/cluster/v1alpha1/machinedeployment_types.go @@ -17,7 +17,7 @@ limitations under the License. package v1alpha1 import ( - "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" + "k8c.io/machine-controller/sdk/apis/cluster/common" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" @@ -189,7 +189,3 @@ type MachineDeploymentList struct { metav1.ListMeta `json:"metadata,omitempty"` Items []MachineDeployment `json:"items"` } - -func init() { - SchemeBuilder.Register(&MachineDeployment{}, &MachineDeploymentList{}) -} diff --git a/pkg/apis/cluster/v1alpha1/machineset_types.go b/sdk/apis/cluster/v1alpha1/machineset_types.go similarity index 98% rename from pkg/apis/cluster/v1alpha1/machineset_types.go rename to sdk/apis/cluster/v1alpha1/machineset_types.go index dadf49d07..169c25f8a 100644 --- a/pkg/apis/cluster/v1alpha1/machineset_types.go +++ b/sdk/apis/cluster/v1alpha1/machineset_types.go @@ -19,7 +19,7 @@ package v1alpha1 import ( "log" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" + "k8c.io/machine-controller/sdk/apis/cluster/common" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" @@ -217,7 +217,3 @@ type MachineSetList struct { metav1.ListMeta `json:"metadata,omitempty"` Items []MachineSet `json:"items"` } - -func init() { - SchemeBuilder.Register(&MachineSet{}, &MachineSetList{}) -} diff --git a/pkg/apis/cluster/v1alpha1/register.go b/sdk/apis/cluster/v1alpha1/register.go similarity index 51% rename from pkg/apis/cluster/v1alpha1/register.go rename to sdk/apis/cluster/v1alpha1/register.go index ce0eeac3e..3e6941d2b 100644 --- a/pkg/apis/cluster/v1alpha1/register.go +++ b/sdk/apis/cluster/v1alpha1/register.go @@ 
-19,31 +19,47 @@ limitations under the License. // Package v1alpha1 contains API Schema definitions for the cluster v1alpha1 API group // +k8s:openapi-gen=true // +k8s:deepcopy-gen=package,register -// +k8s:conversion-gen=github.com/kubermatic/machine-controller/pkg/apis/cluster +// +k8s:conversion-gen=k8c.io/machine-controller/sdk/apis/cluster // +k8s:defaulter-gen=TypeMeta // +groupName=cluster.k8s.io package v1alpha1 import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/scheme" ) var ( - // SchemeGroupVersion is group version used to register these objects. - SchemeGroupVersion = schema.GroupVersion{Group: "cluster.k8s.io", Version: "v1alpha1"} + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) - // SchemeBuilder is used to add go types to the GroupVersionKind scheme. - SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} +// GroupName is the group name used in this package. +const GroupName = "cluster.k8s.io" +const GroupVersion = "v1alpha1" - // AddToScheme adds registered types to the builder. - // Required by pkg/client/... - // TODO(pwittrock): Remove this after removing pkg/client/... - AddToScheme = SchemeBuilder.AddToScheme -) +// SchemeGroupVersion is group version used to register these objects. +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: GroupVersion} -// Required by pkg/client/listers/... -// TODO(pwittrock): Remove this after removing pkg/client/... +// Resource takes an unqualified resource and returns a Group qualified GroupResource. func Resource(resource string) schema.GroupResource { return SchemeGroupVersion.WithResource(resource).GroupResource() } + +// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Machine{}, + &MachineList{}, + &MachineClass{}, + &MachineClassList{}, + &MachineDeployment{}, + &MachineDeploymentList{}, + &MachineSet{}, + &MachineSetList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/pkg/apis/cluster/v1alpha1/zz_generated.deepcopy.go b/sdk/apis/cluster/v1alpha1/zz_generated.deepcopy.go similarity index 98% rename from pkg/apis/cluster/v1alpha1/zz_generated.deepcopy.go rename to sdk/apis/cluster/v1alpha1/zz_generated.deepcopy.go index 9c64da837..c4d6068e2 100644 --- a/pkg/apis/cluster/v1alpha1/zz_generated.deepcopy.go +++ b/sdk/apis/cluster/v1alpha1/zz_generated.deepcopy.go @@ -22,8 +22,8 @@ limitations under the License. package v1alpha1 import ( - common "github.com/kubermatic/machine-controller/pkg/apis/cluster/common" - v1 "k8s.io/api/core/v1" + common "k8c.io/machine-controller/sdk/apis/cluster/common" + corev1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" ) @@ -156,7 +156,7 @@ func (in *MachineClassRef) DeepCopyInto(out *MachineClassRef) { *out = *in if in.ObjectReference != nil { in, out := &in.ObjectReference, &out.ObjectReference - *out = new(v1.ObjectReference) + *out = new(corev1.ObjectReference) **out = **in } return @@ -488,7 +488,7 @@ func (in *MachineSpec) DeepCopyInto(out *MachineSpec) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Taints != nil { in, out := &in.Taints, &out.Taints - *out = make([]v1.Taint, len(*in)) + *out = make([]corev1.Taint, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -497,7 +497,7 @@ func (in *MachineSpec) DeepCopyInto(out *MachineSpec) { out.Versions = in.Versions if in.ConfigSource != nil { in, out := &in.ConfigSource, &out.ConfigSource - *out = new(v1.NodeConfigSource) + *out = new(corev1.NodeConfigSource) (*in).DeepCopyInto(*out) } if in.ProviderID != nil { @@ -523,7 
+523,7 @@ func (in *MachineStatus) DeepCopyInto(out *MachineStatus) { *out = *in if in.NodeRef != nil { in, out := &in.NodeRef, &out.NodeRef - *out = new(v1.ObjectReference) + *out = new(corev1.ObjectReference) **out = **in } if in.LastUpdated != nil { @@ -552,12 +552,12 @@ func (in *MachineStatus) DeepCopyInto(out *MachineStatus) { } if in.Addresses != nil { in, out := &in.Addresses, &out.Addresses - *out = make([]v1.NodeAddress, len(*in)) + *out = make([]corev1.NodeAddress, len(*in)) copy(*out, *in) } if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions - *out = make([]v1.NodeCondition, len(*in)) + *out = make([]corev1.NodeCondition, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } diff --git a/pkg/machines/register.go b/sdk/apis/machines/register.go similarity index 92% rename from pkg/machines/register.go rename to sdk/apis/machines/register.go index 64bca258e..69e53518d 100644 --- a/pkg/machines/register.go +++ b/sdk/apis/machines/register.go @@ -19,7 +19,7 @@ package machines import ( "reflect" - "github.com/kubermatic/machine-controller/pkg/machines/v1alpha1" + "k8c.io/machine-controller/sdk/apis/machines/v1alpha1" ) type resource struct { diff --git a/pkg/machines/v1alpha1/defaults.go b/sdk/apis/machines/v1alpha1/defaults.go similarity index 100% rename from pkg/machines/v1alpha1/defaults.go rename to sdk/apis/machines/v1alpha1/defaults.go diff --git a/pkg/machines/v1alpha1/doc.go b/sdk/apis/machines/v1alpha1/doc.go similarity index 100% rename from pkg/machines/v1alpha1/doc.go rename to sdk/apis/machines/v1alpha1/doc.go diff --git a/pkg/machines/v1alpha1/register.go b/sdk/apis/machines/v1alpha1/register.go similarity index 100% rename from pkg/machines/v1alpha1/register.go rename to sdk/apis/machines/v1alpha1/register.go diff --git a/pkg/machines/v1alpha1/types.go b/sdk/apis/machines/v1alpha1/types.go similarity index 100% rename from pkg/machines/v1alpha1/types.go rename to sdk/apis/machines/v1alpha1/types.go diff --git 
a/pkg/machines/v1alpha1/zz_generated.deepcopy.go b/sdk/apis/machines/v1alpha1/zz_generated.deepcopy.go similarity index 97% rename from pkg/machines/v1alpha1/zz_generated.deepcopy.go rename to sdk/apis/machines/v1alpha1/zz_generated.deepcopy.go index 2510c81ce..8dcc314b6 100644 --- a/pkg/machines/v1alpha1/zz_generated.deepcopy.go +++ b/sdk/apis/machines/v1alpha1/zz_generated.deepcopy.go @@ -22,7 +22,7 @@ limitations under the License. package v1alpha1 import ( - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -93,7 +93,7 @@ func (in *MachineSpec) DeepCopyInto(out *MachineSpec) { in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Taints != nil { in, out := &in.Taints, &out.Taints - *out = make([]v1.Taint, len(*in)) + *out = make([]corev1.Taint, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -107,7 +107,7 @@ func (in *MachineSpec) DeepCopyInto(out *MachineSpec) { out.Versions = in.Versions if in.ConfigSource != nil { in, out := &in.ConfigSource, &out.ConfigSource - *out = new(v1.NodeConfigSource) + *out = new(corev1.NodeConfigSource) (*in).DeepCopyInto(*out) } return @@ -128,7 +128,7 @@ func (in *MachineStatus) DeepCopyInto(out *MachineStatus) { *out = *in if in.NodeRef != nil { in, out := &in.NodeRef, &out.NodeRef - *out = new(v1.ObjectReference) + *out = new(corev1.ObjectReference) **out = **in } in.LastUpdated.DeepCopyInto(&out.LastUpdated) diff --git a/pkg/bootstrap/doc.go b/sdk/bootstrap/doc.go similarity index 100% rename from pkg/bootstrap/doc.go rename to sdk/bootstrap/doc.go diff --git a/pkg/bootstrap/types.go b/sdk/bootstrap/types.go similarity index 100% rename from pkg/bootstrap/types.go rename to sdk/bootstrap/types.go diff --git a/sdk/cloudprovider/alibaba/types.go b/sdk/cloudprovider/alibaba/types.go new file mode 100644 index 000000000..1d4192538 --- /dev/null +++ b/sdk/cloudprovider/alibaba/types.go @@ -0,0 +1,42 @@ +/* +Copyright 2019 The Machine Controller Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package alibaba + +import ( + "k8c.io/machine-controller/sdk/jsonutil" + "k8c.io/machine-controller/sdk/providerconfig" +) + +type RawConfig struct { + AccessKeyID providerconfig.ConfigVarString `json:"accessKeyID,omitempty"` + AccessKeySecret providerconfig.ConfigVarString `json:"accessKeySecret,omitempty"` + RegionID providerconfig.ConfigVarString `json:"regionID,omitempty"` + InstanceName providerconfig.ConfigVarString `json:"instanceName,omitempty"` + InstanceType providerconfig.ConfigVarString `json:"instanceType,omitempty"` + VSwitchID providerconfig.ConfigVarString `json:"vSwitchID,omitempty"` + InternetMaxBandwidthOut providerconfig.ConfigVarString `json:"internetMaxBandwidthOut,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + ZoneID providerconfig.ConfigVarString `json:"zoneID,omitempty"` + DiskType providerconfig.ConfigVarString `json:"diskType,omitempty"` + DiskSize providerconfig.ConfigVarString `json:"diskSize,omitempty"` +} + +func GetConfig(pconfig providerconfig.Config) (*RawConfig, error) { + rawConfig := &RawConfig{} + + return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) +} diff --git a/sdk/cloudprovider/anexia/types.go b/sdk/cloudprovider/anexia/types.go new file mode 100644 index 000000000..008ccb7e2 --- /dev/null +++ b/sdk/cloudprovider/anexia/types.go @@ -0,0 +1,120 @@ +/* +Copyright 2020 The Machine Controller Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package anexia + +import ( + "errors" + "time" + + "k8c.io/machine-controller/sdk/jsonutil" + "k8c.io/machine-controller/sdk/providerconfig" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + AnxTokenEnv = "ANEXIA_TOKEN" + + CreateRequestTimeout = 15 * time.Minute + GetRequestTimeout = 1 * time.Minute + DeleteRequestTimeout = 1 * time.Minute + + IPStateBound = "Bound" + IPStateUnbound = "Unbound" + IPProvisioningExpires = 1800 * time.Second + + VmxNet3NIC = "vmxnet3" + MachinePoweredOn = "poweredOn" +) + +var ( + // ErrConfigDiskSizeAndDisks is returned when the config has both DiskSize and Disks set, which is unsupported. + ErrConfigDiskSizeAndDisks = errors.New("both the deprecated DiskSize and new Disks attribute are set") + + // ErrConfigVlanIDAndNetworks is returned when the config has both VlanID and Networks set, which is unsupported. + ErrConfigVlanIDAndNetworks = errors.New("both the deprecated VlanID and new Networks attribute are set") +) + +// RawDisk specifies a single disk, with some values maybe being fetched from secrets. +type RawDisk struct { + Size int `json:"size"` + PerformanceType providerconfig.ConfigVarString `json:"performanceType"` +} + +// RawNetwork specifies a single network interface. +type RawNetwork struct { + // Identifier of the VLAN to attach this network interface to. 
+ VlanID providerconfig.ConfigVarString `json:"vlan"` + + // IDs of prefixes to reserve IP addresses from for each Machine on network interface. + // + // Empty list means that no IPs will be reserved, but the interface will still be added. + PrefixIDs []providerconfig.ConfigVarString `json:"prefixes"` +} + +// RawConfig contains all the configuration values for VMs to create, with some values maybe being fetched from secrets. +type RawConfig struct { + Token providerconfig.ConfigVarString `json:"token,omitempty"` + LocationID providerconfig.ConfigVarString `json:"locationID"` + + TemplateID providerconfig.ConfigVarString `json:"templateID"` + Template providerconfig.ConfigVarString `json:"template"` + TemplateBuild providerconfig.ConfigVarString `json:"templateBuild"` + + CPUs int `json:"cpus"` + CPUPerformanceType string `json:"cpuPerformanceType"` + Memory int `json:"memory"` + + // Deprecated, use Disks instead. + DiskSize int `json:"diskSize"` + + Disks []RawDisk `json:"disks"` + + // Deprecated, use Networks instead. + VlanID providerconfig.ConfigVarString `json:"vlanID"` + + // Configuration of the network interfaces. At least one entry with at + // least one Prefix is required. 
+ Networks []RawNetwork `json:"networks"` +} + +type NetworkAddressStatus struct { + ReservedIP string `json:"reservedIP"` + IPState string `json:"ipState"` + IPProvisioningExpires time.Time `json:"ipProvisioningExpires"` +} + +type NetworkStatus struct { + // each entry belongs to a config.Networks.Prefix entry at the same index + Addresses []NetworkAddressStatus `json:"addresses"` +} + +type ProviderStatus struct { + InstanceID string `json:"instanceID"` + ProvisioningID string `json:"provisioningID"` + DeprovisioningID string `json:"deprovisioningID"` + Conditions []metav1.Condition `json:"conditions,omitempty"` + + // each entry belongs to the config.Networks entry at the same index + Networks []NetworkStatus `json:"networkStatus,omitempty"` +} + +func GetConfig(pconfig providerconfig.Config) (*RawConfig, error) { + rawConfig := &RawConfig{} + + return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) +} diff --git a/sdk/cloudprovider/aws/types.go b/sdk/cloudprovider/aws/types.go new file mode 100644 index 000000000..96b9820fb --- /dev/null +++ b/sdk/cloudprovider/aws/types.go @@ -0,0 +1,69 @@ +/* +Copyright 2019 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package aws + +import ( + "k8c.io/machine-controller/sdk/jsonutil" + "k8c.io/machine-controller/sdk/providerconfig" +) + +type RawConfig struct { + AccessKeyID providerconfig.ConfigVarString `json:"accessKeyId,omitempty"` + SecretAccessKey providerconfig.ConfigVarString `json:"secretAccessKey,omitempty"` + + AssumeRoleARN providerconfig.ConfigVarString `json:"assumeRoleARN,omitempty"` + AssumeRoleExternalID providerconfig.ConfigVarString `json:"assumeRoleExternalID,omitempty"` + + Region providerconfig.ConfigVarString `json:"region"` + AvailabilityZone providerconfig.ConfigVarString `json:"availabilityZone,omitempty"` + VpcID providerconfig.ConfigVarString `json:"vpcId"` + SubnetID providerconfig.ConfigVarString `json:"subnetId"` + SecurityGroupIDs []providerconfig.ConfigVarString `json:"securityGroupIDs,omitempty"` + InstanceProfile providerconfig.ConfigVarString `json:"instanceProfile,omitempty"` + InstanceType providerconfig.ConfigVarString `json:"instanceType,omitempty"` + AMI providerconfig.ConfigVarString `json:"ami,omitempty"` + DiskSize int32 `json:"diskSize"` + DiskType providerconfig.ConfigVarString `json:"diskType,omitempty"` + DiskIops *int32 `json:"diskIops,omitempty"` + EBSVolumeEncrypted providerconfig.ConfigVarBool `json:"ebsVolumeEncrypted"` + Tags map[string]string `json:"tags,omitempty"` + AssignPublicIP *bool `json:"assignPublicIP,omitempty"` + + IsSpotInstance *bool `json:"isSpotInstance,omitempty"` + SpotInstanceConfig *SpotInstanceConfig `json:"spotInstanceConfig,omitempty"` +} + +type SpotInstanceConfig struct { + MaxPrice providerconfig.ConfigVarString `json:"maxPrice,omitempty"` + PersistentRequest providerconfig.ConfigVarBool `json:"persistentRequest,omitempty"` + InterruptionBehavior providerconfig.ConfigVarString `json:"interruptionBehavior,omitempty"` +} + +// CPUArchitecture defines processor architectures returned by the AWS API. 
+type CPUArchitecture string + +const ( + CPUArchitectureARM64 CPUArchitecture = "arm64" + CPUArchitectureX86_64 CPUArchitecture = "x86_64" + CPUArchitectureI386 CPUArchitecture = "i386" +) + +func GetConfig(pconfig providerconfig.Config) (*RawConfig, error) { + rawConfig := &RawConfig{} + + return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) +} diff --git a/sdk/cloudprovider/azure/types.go b/sdk/cloudprovider/azure/types.go new file mode 100644 index 000000000..ae3266ab1 --- /dev/null +++ b/sdk/cloudprovider/azure/types.go @@ -0,0 +1,77 @@ +/* +Copyright 2019 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package azure + +import ( + "k8c.io/machine-controller/sdk/jsonutil" + "k8c.io/machine-controller/sdk/providerconfig" +) + +// RawConfig is a direct representation of an Azure machine object's configuration. 
+type RawConfig struct { + SubscriptionID providerconfig.ConfigVarString `json:"subscriptionID,omitempty"` + TenantID providerconfig.ConfigVarString `json:"tenantID,omitempty"` + ClientID providerconfig.ConfigVarString `json:"clientID,omitempty"` + ClientSecret providerconfig.ConfigVarString `json:"clientSecret,omitempty"` + + Location providerconfig.ConfigVarString `json:"location"` + ResourceGroup providerconfig.ConfigVarString `json:"resourceGroup"` + VNetResourceGroup providerconfig.ConfigVarString `json:"vnetResourceGroup"` + VMSize providerconfig.ConfigVarString `json:"vmSize"` + VNetName providerconfig.ConfigVarString `json:"vnetName"` + SubnetName providerconfig.ConfigVarString `json:"subnetName"` + LoadBalancerSku providerconfig.ConfigVarString `json:"loadBalancerSku"` + RouteTableName providerconfig.ConfigVarString `json:"routeTableName"` + AvailabilitySet providerconfig.ConfigVarString `json:"availabilitySet"` + AssignAvailabilitySet *bool `json:"assignAvailabilitySet"` + SecurityGroupName providerconfig.ConfigVarString `json:"securityGroupName"` + Zones []string `json:"zones"` + ImagePlan *ImagePlan `json:"imagePlan,omitempty"` + ImageReference *ImageReference `json:"imageReference,omitempty"` + EnableAcceleratedNetworking *bool `json:"enableAcceleratedNetworking"` + EnableBootDiagnostics *bool `json:"enableBootDiagnostics,omitempty"` + + ImageID providerconfig.ConfigVarString `json:"imageID"` + OSDiskSize int32 `json:"osDiskSize"` + OSDiskSKU *string `json:"osDiskSKU,omitempty"` + DataDiskSize int32 `json:"dataDiskSize"` + DataDiskSKU *string `json:"dataDiskSKU,omitempty"` + AssignPublicIP providerconfig.ConfigVarBool `json:"assignPublicIP"` + PublicIPSKU *string `json:"publicIPSKU,omitempty"` + Tags map[string]string `json:"tags,omitempty"` +} + +// ImagePlan contains azure OS Plan fields for the marketplace images. 
+type ImagePlan struct { + Name string `json:"name,omitempty"` + Publisher string `json:"publisher,omitempty"` + Product string `json:"product,omitempty"` +} + +// ImageReference specifies information about the image to use. +type ImageReference struct { + Publisher string `json:"publisher,omitempty"` + Offer string `json:"offer,omitempty"` + Sku string `json:"sku,omitempty"` + Version string `json:"version,omitempty"` +} + +func GetConfig(pconfig providerconfig.Config) (*RawConfig, error) { + rawConfig := &RawConfig{} + + return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) +} diff --git a/pkg/userdata/cloud/provider.go b/sdk/cloudprovider/baremetal/plugins/plugins.go similarity index 65% rename from pkg/userdata/cloud/provider.go rename to sdk/cloudprovider/baremetal/plugins/plugins.go index b9ffe3104..7be2b56bb 100644 --- a/pkg/userdata/cloud/provider.go +++ b/sdk/cloudprovider/baremetal/plugins/plugins.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Machine Controller Authors. +Copyright 2021 The Machine Controller Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,12 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -package cloud +package plugins -import ( - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" -) +type Driver string -type ConfigProvider interface { - GetCloudConfig(spec clusterv1alpha1.MachineSpec) (config string, name string, err error) -} +const Tinkerbell Driver = "tinkerbell" diff --git a/sdk/cloudprovider/baremetal/plugins/tinkerbell/types.go b/sdk/cloudprovider/baremetal/plugins/tinkerbell/types.go new file mode 100644 index 000000000..225887dbb --- /dev/null +++ b/sdk/cloudprovider/baremetal/plugins/tinkerbell/types.go @@ -0,0 +1,56 @@ +/* +Copyright 2024 The Machine Controller Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package tinkerbell + +import ( + providerconfigtypes "k8c.io/machine-controller/sdk/providerconfig" + + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" +) + +// TinkerbellPluginSpec defines the required information for the Tinkerbell plugin. +type TinkerbellPluginSpec struct { + // ClusterName specifies the name of the Tinkerbell cluster. This is used to identify + // the cluster within a larger infrastructure or across multiple clusters. + ClusterName providerconfigtypes.ConfigVarString `json:"clusterName"` + + // Auth contains the kubeconfig credentials needed to authenticate against the + // Tinkerbell cluster API. This field is optional and should be provided if authentication is required. + Auth Auth `json:"auth,omitempty"` + + // OSImageURL is the URL where the OS image for the Tinkerbell template is located. + // This URL is used to download and stream the OS image during the provisioning process. + OSImageURL providerconfigtypes.ConfigVarString `json:"osImageUrl"` + + // HardwareRef specifies the unique identifier of a single hardware object in the user-cluster + // that corresponds to the machine deployment. This ensures a one-to-one mapping between a deployment + // and a hardware object in the Tinkerbell cluster. + HardwareRef types.NamespacedName `json:"hardwareRef"` +} + +// Auth. 
+type Auth struct { + Kubeconfig providerconfigtypes.ConfigVarString `json:"kubeconfig,omitempty"` +} + +type Config struct { + Kubeconfig string + ClusterName string + RestConfig *rest.Config + OSImageURL string +} diff --git a/pkg/cloudprovider/provider/kubevirt/types/cloudconfig.go b/sdk/cloudprovider/baremetal/types.go similarity index 55% rename from pkg/cloudprovider/provider/kubevirt/types/cloudconfig.go rename to sdk/cloudprovider/baremetal/types.go index 8d41053e3..93e4e3cf4 100644 --- a/pkg/cloudprovider/provider/kubevirt/types/cloudconfig.go +++ b/sdk/cloudprovider/baremetal/types.go @@ -14,23 +14,22 @@ See the License for the specific language governing permissions and limitations under the License. */ -package types +package baremetal import ( - "gopkg.in/yaml.v3" + "k8c.io/machine-controller/sdk/jsonutil" + "k8c.io/machine-controller/sdk/providerconfig" + + "k8s.io/apimachinery/pkg/runtime" ) -type CloudConfig struct { - // Kubeconfig used to connect to the cluster that runs KubeVirt - Kubeconfig string `yaml:"kubeconfig"` - // Namespace used in KubeVirt cloud-controller-manager as infra cluster namespace. 
- Namespace string `yaml:"namespace"` +type RawConfig struct { + Driver providerconfig.ConfigVarString `json:"driver"` + DriverSpec runtime.RawExtension `json:"driverSpec"` } -func (c *CloudConfig) String() (string, error) { - out, err := yaml.Marshal(c) - if err != nil { - return "", err - } - return string(out), nil +func GetConfig(pconfig providerconfig.Config) (*RawConfig, error) { + rawConfig := &RawConfig{} + + return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) } diff --git a/pkg/cloudprovider/provider/equinixmetal/types/types.go b/sdk/cloudprovider/digitalocean/types.go similarity index 50% rename from pkg/cloudprovider/provider/equinixmetal/types/types.go rename to sdk/cloudprovider/digitalocean/types.go index b34625af0..2eac6101c 100644 --- a/pkg/cloudprovider/provider/equinixmetal/types/types.go +++ b/sdk/cloudprovider/digitalocean/types.go @@ -14,24 +14,25 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package types +package digitalocean import ( - "github.com/kubermatic/machine-controller/pkg/jsonutil" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + "k8c.io/machine-controller/sdk/jsonutil" + "k8c.io/machine-controller/sdk/providerconfig" ) type RawConfig struct { - Token providerconfigtypes.ConfigVarString `json:"token,omitempty"` - ProjectID providerconfigtypes.ConfigVarString `json:"projectID,omitempty"` - BillingCycle providerconfigtypes.ConfigVarString `json:"billingCycle"` - InstanceType providerconfigtypes.ConfigVarString `json:"instanceType"` - Metro providerconfigtypes.ConfigVarString `json:"metro,omitempty"` - Facilities []providerconfigtypes.ConfigVarString `json:"facilities,omitempty"` - Tags []providerconfigtypes.ConfigVarString `json:"tags,omitempty"` + Token providerconfig.ConfigVarString `json:"token,omitempty"` + Region providerconfig.ConfigVarString `json:"region"` + Size providerconfig.ConfigVarString `json:"size"` + Backups providerconfig.ConfigVarBool `json:"backups"` + IPv6 providerconfig.ConfigVarBool `json:"ipv6"` + PrivateNetworking providerconfig.ConfigVarBool `json:"private_networking"` + Monitoring providerconfig.ConfigVarBool `json:"monitoring"` + Tags []providerconfig.ConfigVarString `json:"tags,omitempty"` } -func GetConfig(pconfig providerconfigtypes.Config) (*RawConfig, error) { +func GetConfig(pconfig providerconfig.Config) (*RawConfig, error) { rawConfig := &RawConfig{} return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) diff --git a/pkg/cloudprovider/provider/linode/types/types.go b/sdk/cloudprovider/equinixmetal/types.go similarity index 51% rename from pkg/cloudprovider/provider/linode/types/types.go rename to sdk/cloudprovider/equinixmetal/types.go index f2f2b7ef3..a941b58a8 100644 --- a/pkg/cloudprovider/provider/linode/types/types.go +++ b/sdk/cloudprovider/equinixmetal/types.go @@ -14,23 +14,24 @@ See the License for the specific language 
governing permissions and limitations under the License. */ -package types +package equinixmetal import ( - "github.com/kubermatic/machine-controller/pkg/jsonutil" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + "k8c.io/machine-controller/sdk/jsonutil" + "k8c.io/machine-controller/sdk/providerconfig" ) type RawConfig struct { - Token providerconfigtypes.ConfigVarString `json:"token,omitempty"` - Region providerconfigtypes.ConfigVarString `json:"region"` - Type providerconfigtypes.ConfigVarString `json:"type"` - Backups providerconfigtypes.ConfigVarBool `json:"backups"` - PrivateNetworking providerconfigtypes.ConfigVarBool `json:"private_networking"` - Tags []providerconfigtypes.ConfigVarString `json:"tags,omitempty"` + Token providerconfig.ConfigVarString `json:"token,omitempty"` + ProjectID providerconfig.ConfigVarString `json:"projectID,omitempty"` + BillingCycle providerconfig.ConfigVarString `json:"billingCycle"` + InstanceType providerconfig.ConfigVarString `json:"instanceType"` + Metro providerconfig.ConfigVarString `json:"metro,omitempty"` + Facilities []providerconfig.ConfigVarString `json:"facilities,omitempty"` + Tags []providerconfig.ConfigVarString `json:"tags,omitempty"` } -func GetConfig(pconfig providerconfigtypes.Config) (*RawConfig, error) { +func GetConfig(pconfig providerconfig.Config) (*RawConfig, error) { rawConfig := &RawConfig{} return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) diff --git a/sdk/cloudprovider/gce/types.go b/sdk/cloudprovider/gce/types.go new file mode 100644 index 000000000..c4f05580c --- /dev/null +++ b/sdk/cloudprovider/gce/types.go @@ -0,0 +1,86 @@ +/* +Copyright 2019 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package gce + +import ( + "encoding/json" + "fmt" + + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + "k8c.io/machine-controller/sdk/jsonutil" + "k8c.io/machine-controller/sdk/providerconfig" + + "k8s.io/apimachinery/pkg/runtime" +) + +// CloudProviderSpec contains the specification of the cloud provider taken +// from the provider configuration. +type CloudProviderSpec struct { + // ServiceAccount must be base64-encoded. + ServiceAccount providerconfig.ConfigVarString `json:"serviceAccount,omitempty"` + Zone providerconfig.ConfigVarString `json:"zone"` + MachineType providerconfig.ConfigVarString `json:"machineType"` + DiskSize int64 `json:"diskSize"` + DiskType providerconfig.ConfigVarString `json:"diskType"` + Network providerconfig.ConfigVarString `json:"network"` + Subnetwork providerconfig.ConfigVarString `json:"subnetwork"` + Preemptible providerconfig.ConfigVarBool `json:"preemptible"` + AutomaticRestart *providerconfig.ConfigVarBool `json:"automaticRestart,omitempty"` + ProvisioningModel *providerconfig.ConfigVarString `json:"provisioningModel,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + Tags []string `json:"tags,omitempty"` + AssignPublicIPAddress *providerconfig.ConfigVarBool `json:"assignPublicIPAddress,omitempty"` + MultiZone providerconfig.ConfigVarBool `json:"multizone"` + Regional providerconfig.ConfigVarBool `json:"regional"` + CustomImage providerconfig.ConfigVarString `json:"customImage,omitempty"` + DisableMachineServiceAccount providerconfig.ConfigVarBool 
`json:"disableMachineServiceAccount,omitempty"` + EnableNestedVirtualization providerconfig.ConfigVarBool `json:"enableNestedVirtualization,omitempty"` + MinCPUPlatform providerconfig.ConfigVarString `json:"minCPUPlatform,omitempty"` + GuestOSFeatures []string `json:"guestOSFeatures,omitempty"` + ProjectID providerconfig.ConfigVarString `json:"projectID,omitempty"` +} + +// UpdateProviderSpec updates the given provider spec with changed +// configuration values. +func (cpSpec *CloudProviderSpec) UpdateProviderSpec(spec clusterv1alpha1.ProviderSpec) (*runtime.RawExtension, error) { + if spec.Value == nil { + return nil, fmt.Errorf("machine.spec.providerconfig.value is nil") + } + providerConfig := providerconfig.Config{} + err := json.Unmarshal(spec.Value.Raw, &providerConfig) + if err != nil { + return nil, err + } + rawCPSpec, err := json.Marshal(cpSpec) + if err != nil { + return nil, err + } + providerConfig.CloudProviderSpec = runtime.RawExtension{Raw: rawCPSpec} + rawProviderConfig, err := json.Marshal(providerConfig) + if err != nil { + return nil, err + } + return &runtime.RawExtension{Raw: rawProviderConfig}, nil +} + +type RawConfig = CloudProviderSpec + +func GetConfig(pconfig providerconfig.Config) (*RawConfig, error) { + rawConfig := &RawConfig{} + + return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) +} diff --git a/sdk/cloudprovider/hetzner/types.go b/sdk/cloudprovider/hetzner/types.go new file mode 100644 index 000000000..a9215c5df --- /dev/null +++ b/sdk/cloudprovider/hetzner/types.go @@ -0,0 +1,42 @@ +/* +Copyright 2019 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package hetzner + +import ( + "k8c.io/machine-controller/sdk/jsonutil" + "k8c.io/machine-controller/sdk/providerconfig" +) + +type RawConfig struct { + Token providerconfig.ConfigVarString `json:"token,omitempty"` + ServerType providerconfig.ConfigVarString `json:"serverType"` + Datacenter providerconfig.ConfigVarString `json:"datacenter"` + Image providerconfig.ConfigVarString `json:"image"` + Location providerconfig.ConfigVarString `json:"location"` + PlacementGroupPrefix providerconfig.ConfigVarString `json:"placementGroupPrefix"` + Networks []providerconfig.ConfigVarString `json:"networks"` + Firewalls []providerconfig.ConfigVarString `json:"firewalls"` + Labels map[string]string `json:"labels,omitempty"` + AssignPublicIPv4 providerconfig.ConfigVarBool `json:"assignPublicIPv4,omitempty"` + AssignPublicIPv6 providerconfig.ConfigVarBool `json:"assignPublicIPv6,omitempty"` +} + +func GetConfig(pconfig providerconfig.Config) (*RawConfig, error) { + rawConfig := &RawConfig{} + + return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) +} diff --git a/sdk/cloudprovider/kubevirt/types.go b/sdk/cloudprovider/kubevirt/types.go new file mode 100644 index 000000000..74eb6ad38 --- /dev/null +++ b/sdk/cloudprovider/kubevirt/types.go @@ -0,0 +1,179 @@ +/* +Copyright 2019 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubevirt + +import ( + kubevirtcorev1 "kubevirt.io/api/core/v1" + + "k8c.io/machine-controller/sdk/jsonutil" + "k8c.io/machine-controller/sdk/providerconfig" + + corev1 "k8s.io/api/core/v1" +) + +var SupportedOS = map[providerconfig.OperatingSystem]*struct{}{ + providerconfig.OperatingSystemUbuntu: nil, + providerconfig.OperatingSystemRHEL: nil, + providerconfig.OperatingSystemFlatcar: nil, + providerconfig.OperatingSystemRockyLinux: nil, +} + +type RawConfig struct { + ClusterName providerconfig.ConfigVarString `json:"clusterName"` + ProjectID providerconfig.ConfigVarString `json:"projectID,omitempty"` + Auth Auth `json:"auth,omitempty"` + VirtualMachine VirtualMachine `json:"virtualMachine,omitempty"` + Affinity Affinity `json:"affinity,omitempty"` + TopologySpreadConstraints []TopologySpreadConstraint `json:"topologySpreadConstraints"` +} + +// Auth. +type Auth struct { + Kubeconfig providerconfig.ConfigVarString `json:"kubeconfig,omitempty"` +} + +// VirtualMachine. +type VirtualMachine struct { + // Deprecated: use Instancetype/Preference instead. + Flavor Flavor `json:"flavor,omitempty"` + // Instancetype is optional. + Instancetype *kubevirtcorev1.InstancetypeMatcher `json:"instancetype,omitempty"` + // Preference is optional. 
+ Preference *kubevirtcorev1.PreferenceMatcher `json:"preference,omitempty"` + Template Template `json:"template,omitempty"` + DNSPolicy providerconfig.ConfigVarString `json:"dnsPolicy,omitempty"` + DNSConfig *corev1.PodDNSConfig `json:"dnsConfig,omitempty"` + Location *Location `json:"location,omitempty"` + ProviderNetwork *ProviderNetwork `json:"providerNetwork,omitempty"` + EnableNetworkMultiQueue providerconfig.ConfigVarBool `json:"enableNetworkMultiQueue,omitempty"` + EvictionStrategy string `json:"evictionStrategy,omitempty"` +} + +// Flavor. +type Flavor struct { + Name providerconfig.ConfigVarString `json:"name,omitempty"` + Profile providerconfig.ConfigVarString `json:"profile,omitempty"` +} + +// Template. +type Template struct { + // VCPUs is to configure vcpus used by a the virtual machine + // when using kubevirts cpuAllocationRatio feature this leads to auto assignment of the + // calculated ratio as resource cpu requests for the pod which launches the virtual machine + VCPUs VCPUs `json:"vcpus,omitempty"` + // CPUs is to configure cpu requests and limits directly for the pod which launches the virtual machine + // and is related to the underlying hardware + CPUs providerconfig.ConfigVarString `json:"cpus,omitempty"` + Memory providerconfig.ConfigVarString `json:"memory,omitempty"` + PrimaryDisk PrimaryDisk `json:"primaryDisk,omitempty"` + SecondaryDisks []SecondaryDisks `json:"secondaryDisks,omitempty"` +} + +// VCPUs. +type VCPUs struct { + Cores int `json:"cores,omitempty"` +} + +// PrimaryDisk. +type PrimaryDisk struct { + Disk + // DataVolumeSecretRef is the name of the secret that will be sent to the CDI data importer pod to read basic auth parameters. 
+ DataVolumeSecretRef providerconfig.ConfigVarString `json:"dataVolumeSecretRef,omitempty"` + // ExtraHeaders is a list of strings containing extra headers to include with HTTP transfer requests + // +optional + ExtraHeaders []string `json:"extraHeaders,omitempty"` + // ExtraHeadersSecretRef is a secret that contains a list of strings containing extra headers to include with HTTP transfer requests + // +optional + ExtraHeadersSecretRef providerconfig.ConfigVarString `json:"extraHeadersSecretRef,omitempty"` + // StorageTarget describes which VirtualMachine storage target will be used in the DataVolumeTemplate. + StorageTarget providerconfig.ConfigVarString `json:"storageTarget,omitempty"` + // OsImage describes the OS that will be installed on the VirtualMachine. + OsImage providerconfig.ConfigVarString `json:"osImage,omitempty"` + // Source describes the VM Disk Image source. + Source providerconfig.ConfigVarString `json:"source,omitempty"` + // PullMethod describes the VM Disk Image source optional pull method for registry source. Defaults to 'node'. + PullMethod providerconfig.ConfigVarString `json:"pullMethod,omitempty"` +} + +// SecondaryDisks. +type SecondaryDisks struct { + Disk +} + +// Disk. +type Disk struct { + Size providerconfig.ConfigVarString `json:"size,omitempty"` + StorageClassName providerconfig.ConfigVarString `json:"storageClassName,omitempty"` + StorageAccessType providerconfig.ConfigVarString `json:"storageAccessType,omitempty"` +} + +// Affinity. +type Affinity struct { + // Deprecated: Use TopologySpreadConstraint instead. + PodAffinityPreset providerconfig.ConfigVarString `json:"podAffinityPreset,omitempty"` + // Deprecated: Use TopologySpreadConstraint instead. + PodAntiAffinityPreset providerconfig.ConfigVarString `json:"podAntiAffinityPreset,omitempty"` + NodeAffinityPreset NodeAffinityPreset `json:"nodeAffinityPreset,omitempty"` +} + +// NodeAffinityPreset. 
+type NodeAffinityPreset struct { + Type providerconfig.ConfigVarString `json:"type,omitempty"` + Key providerconfig.ConfigVarString `json:"key,omitempty"` + Values []providerconfig.ConfigVarString `json:"values,omitempty"` +} + +// TopologySpreadConstraint describes topology spread constraints for VMs. +type TopologySpreadConstraint struct { + // MaxSkew describes the degree to which VMs may be unevenly distributed. + MaxSkew providerconfig.ConfigVarString `json:"maxSkew,omitempty"` + // TopologyKey is the key of infra-node labels. + TopologyKey providerconfig.ConfigVarString `json:"topologyKey,omitempty"` + // WhenUnsatisfiable indicates how to deal with a VM if it doesn't satisfy + // the spread constraint. + WhenUnsatisfiable providerconfig.ConfigVarString `json:"whenUnsatisfiable,omitempty"` +} + +// Location describes the region and zone where the machines are created at and where the deployed resources will reside. +type Location struct { + Region string `json:"region,omitempty"` + Zone string `json:"zone,omitempty"` +} + +// ProviderNetwork describes the infra cluster network fabric that is being used. +type ProviderNetwork struct { + Name string `json:"name"` + VPC VPC `json:"vpc"` +} + +// VPC is a virtual network dedicated to a single tenant within a KubeVirt, where the resources in the VPC +// is isolated from any other resources within the KubeVirt infra cluster. +type VPC struct { + Name string `json:"name"` + Subnet *Subnet `json:"subnet,omitempty"` +} + +// Subnet a smaller, segmented portion of a larger network, like a Virtual Private Cloud (VPC). 
+type Subnet struct { + Name string `json:"name"` +} + +func GetConfig(pconfig providerconfig.Config) (*RawConfig, error) { + rawConfig := &RawConfig{} + + return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) +} diff --git a/pkg/cloudprovider/provider/vultr/types/types.go b/sdk/cloudprovider/linode/types.go similarity index 51% rename from pkg/cloudprovider/provider/vultr/types/types.go rename to sdk/cloudprovider/linode/types.go index f4b61aee2..bde06e4a1 100644 --- a/pkg/cloudprovider/provider/vultr/types/types.go +++ b/sdk/cloudprovider/linode/types.go @@ -1,5 +1,5 @@ /* -Copyright 2023 The Machine Controller Authors. +Copyright 2019 The Machine Controller Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,22 +14,23 @@ See the License for the specific language governing permissions and limitations under the License. */ -package types +package linode import ( - "github.com/kubermatic/machine-controller/pkg/jsonutil" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + "k8c.io/machine-controller/sdk/jsonutil" + "k8c.io/machine-controller/sdk/providerconfig" ) type RawConfig struct { - APIKey providerconfigtypes.ConfigVarString `json:"apiKey,omitempty"` - Region providerconfigtypes.ConfigVarString `json:"region"` - Plan providerconfigtypes.ConfigVarString `json:"plan"` - OsID providerconfigtypes.ConfigVarString `json:"osId"` - Tags []string `json:"tags,omitempty"` + Token providerconfig.ConfigVarString `json:"token,omitempty"` + Region providerconfig.ConfigVarString `json:"region"` + Type providerconfig.ConfigVarString `json:"type"` + Backups providerconfig.ConfigVarBool `json:"backups"` + PrivateNetworking providerconfig.ConfigVarBool `json:"private_networking"` + Tags []providerconfig.ConfigVarString `json:"tags,omitempty"` } -func GetConfig(pconfig providerconfigtypes.Config) (*RawConfig, error) { 
+func GetConfig(pconfig providerconfig.Config) (*RawConfig, error) { rawConfig := &RawConfig{} return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) diff --git a/pkg/cloudprovider/provider/nutanix/types/types.go b/sdk/cloudprovider/nutanix/types.go similarity index 60% rename from pkg/cloudprovider/provider/nutanix/types/types.go rename to sdk/cloudprovider/nutanix/types.go index a2283b721..e46c34c71 100644 --- a/pkg/cloudprovider/provider/nutanix/types/types.go +++ b/sdk/cloudprovider/nutanix/types.go @@ -14,11 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -package types +package nutanix import ( - "github.com/kubermatic/machine-controller/pkg/jsonutil" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + "k8c.io/machine-controller/sdk/jsonutil" + "k8c.io/machine-controller/sdk/providerconfig" ) const ( @@ -31,18 +31,18 @@ const ( ) type RawConfig struct { - Endpoint providerconfigtypes.ConfigVarString `json:"endpoint"` - Port providerconfigtypes.ConfigVarString `json:"port"` - Username providerconfigtypes.ConfigVarString `json:"username"` - Password providerconfigtypes.ConfigVarString `json:"password"` - AllowInsecure providerconfigtypes.ConfigVarBool `json:"allowInsecure"` - ProxyURL providerconfigtypes.ConfigVarString `json:"proxyURL,omitempty"` + Endpoint providerconfig.ConfigVarString `json:"endpoint"` + Port providerconfig.ConfigVarString `json:"port"` + Username providerconfig.ConfigVarString `json:"username"` + Password providerconfig.ConfigVarString `json:"password"` + AllowInsecure providerconfig.ConfigVarBool `json:"allowInsecure"` + ProxyURL providerconfig.ConfigVarString `json:"proxyURL,omitempty"` - ClusterName providerconfigtypes.ConfigVarString `json:"clusterName"` - ProjectName *providerconfigtypes.ConfigVarString `json:"projectName,omitempty"` - SubnetName providerconfigtypes.ConfigVarString `json:"subnetName"` - 
AdditionalSubnetNames []string `json:"additionalSubnetNames,omitempty"` - ImageName providerconfigtypes.ConfigVarString `json:"imageName"` + ClusterName providerconfig.ConfigVarString `json:"clusterName"` + ProjectName *providerconfig.ConfigVarString `json:"projectName,omitempty"` + SubnetName providerconfig.ConfigVarString `json:"subnetName"` + AdditionalSubnetNames []string `json:"additionalSubnetNames,omitempty"` + ImageName providerconfig.ConfigVarString `json:"imageName"` // VM sizing configuration CPUs int64 `json:"cpus"` @@ -68,7 +68,7 @@ type ErrorResponseMsg struct { Reason string `json:"reason"` } -func GetConfig(pconfig providerconfigtypes.Config) (*RawConfig, error) { +func GetConfig(pconfig providerconfig.Config) (*RawConfig, error) { rawConfig := &RawConfig{} return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) diff --git a/sdk/cloudprovider/opennebula/types.go b/sdk/cloudprovider/opennebula/types.go new file mode 100644 index 000000000..017a95004 --- /dev/null +++ b/sdk/cloudprovider/opennebula/types.go @@ -0,0 +1,46 @@ +/* +Copyright 2022 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package opennebula + +import ( + "k8c.io/machine-controller/sdk/jsonutil" + "k8c.io/machine-controller/sdk/providerconfig" +) + +type RawConfig struct { + // Auth details + Username providerconfig.ConfigVarString `json:"username,omitempty"` + Password providerconfig.ConfigVarString `json:"password,omitempty"` + Endpoint providerconfig.ConfigVarString `json:"endpoint,omitempty"` + + // Machine details + CPU *float64 `json:"cpu"` + VCPU *int `json:"vcpu"` + Memory *int `json:"memory"` + Image providerconfig.ConfigVarString `json:"image"` + Datastore providerconfig.ConfigVarString `json:"datastore"` + DiskSize *int `json:"diskSize"` + Network providerconfig.ConfigVarString `json:"network"` + EnableVNC providerconfig.ConfigVarBool `json:"enableVNC"` + VMTemplateExtra map[string]string `json:"vmTemplateExtra,omitempty"` +} + +func GetConfig(pconfig providerconfig.Config) (*RawConfig, error) { + rawConfig := &RawConfig{} + + return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) +} diff --git a/sdk/cloudprovider/openstack/types.go b/sdk/cloudprovider/openstack/types.go new file mode 100644 index 000000000..e544a9d65 --- /dev/null +++ b/sdk/cloudprovider/openstack/types.go @@ -0,0 +1,65 @@ +/* +Copyright 2019 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package openstack + +import ( + "k8c.io/machine-controller/sdk/jsonutil" + "k8c.io/machine-controller/sdk/providerconfig" +) + +type RawConfig struct { + // Auth details + IdentityEndpoint providerconfig.ConfigVarString `json:"identityEndpoint,omitempty"` + Username providerconfig.ConfigVarString `json:"username,omitempty"` + Password providerconfig.ConfigVarString `json:"password,omitempty"` + ApplicationCredentialID providerconfig.ConfigVarString `json:"applicationCredentialID,omitempty"` + ApplicationCredentialSecret providerconfig.ConfigVarString `json:"applicationCredentialSecret,omitempty"` + DomainName providerconfig.ConfigVarString `json:"domainName,omitempty"` + ProjectName providerconfig.ConfigVarString `json:"projectName,omitempty"` + ProjectID providerconfig.ConfigVarString `json:"projectID,omitempty"` + TenantName providerconfig.ConfigVarString `json:"tenantName,omitempty"` + TenantID providerconfig.ConfigVarString `json:"tenantID,omitempty"` + TokenID providerconfig.ConfigVarString `json:"tokenId,omitempty"` + Region providerconfig.ConfigVarString `json:"region,omitempty"` + InstanceReadyCheckPeriod providerconfig.ConfigVarString `json:"instanceReadyCheckPeriod,omitempty"` + InstanceReadyCheckTimeout providerconfig.ConfigVarString `json:"instanceReadyCheckTimeout,omitempty"` + ComputeAPIVersion providerconfig.ConfigVarString `json:"computeAPIVersion,omitempty"` + + // Machine details + Image providerconfig.ConfigVarString `json:"image"` + Flavor providerconfig.ConfigVarString `json:"flavor"` + SecurityGroups []providerconfig.ConfigVarString `json:"securityGroups,omitempty"` + Network providerconfig.ConfigVarString `json:"network,omitempty"` + Networks []providerconfig.ConfigVarString `json:"networks,omitempty"` + Subnet providerconfig.ConfigVarString `json:"subnet,omitempty"` + FloatingIPPool providerconfig.ConfigVarString `json:"floatingIpPool,omitempty"` + AvailabilityZone providerconfig.ConfigVarString `json:"availabilityZone,omitempty"` + 
TrustDevicePath providerconfig.ConfigVarBool `json:"trustDevicePath"` + RootDiskSizeGB *int `json:"rootDiskSizeGB"` + RootDiskVolumeType providerconfig.ConfigVarString `json:"rootDiskVolumeType,omitempty"` + NodeVolumeAttachLimit *uint `json:"nodeVolumeAttachLimit"` + ServerGroup providerconfig.ConfigVarString `json:"serverGroup"` + ConfigDrive providerconfig.ConfigVarBool `json:"configDrive,omitempty"` + // This tag is related to server metadata, not compute server's tag + Tags map[string]string `json:"tags,omitempty"` +} + +func GetConfig(pconfig providerconfig.Config) (*RawConfig, error) { + rawConfig := &RawConfig{} + + return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) +} diff --git a/pkg/cloudprovider/provider/scaleway/types/types.go b/sdk/cloudprovider/scaleway/types.go similarity index 50% rename from pkg/cloudprovider/provider/scaleway/types/types.go rename to sdk/cloudprovider/scaleway/types.go index 8ecbd5cb1..22624f543 100644 --- a/pkg/cloudprovider/provider/scaleway/types/types.go +++ b/sdk/cloudprovider/scaleway/types.go @@ -14,24 +14,24 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package types +package scaleway import ( - "github.com/kubermatic/machine-controller/pkg/jsonutil" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + "k8c.io/machine-controller/sdk/jsonutil" + "k8c.io/machine-controller/sdk/providerconfig" ) type RawConfig struct { - AccessKey providerconfigtypes.ConfigVarString `json:"accessKey,omitempty"` - SecretKey providerconfigtypes.ConfigVarString `json:"secretKey,omitempty"` - ProjectID providerconfigtypes.ConfigVarString `json:"projectId,omitempty"` - Zone providerconfigtypes.ConfigVarString `json:"zone,omitempty"` - CommercialType providerconfigtypes.ConfigVarString `json:"commercialType"` - IPv6 providerconfigtypes.ConfigVarBool `json:"ipv6"` - Tags []string `json:"tags,omitempty"` + AccessKey providerconfig.ConfigVarString `json:"accessKey,omitempty"` + SecretKey providerconfig.ConfigVarString `json:"secretKey,omitempty"` + ProjectID providerconfig.ConfigVarString `json:"projectId,omitempty"` + Zone providerconfig.ConfigVarString `json:"zone,omitempty"` + CommercialType providerconfig.ConfigVarString `json:"commercialType"` + IPv6 providerconfig.ConfigVarBool `json:"ipv6"` + Tags []string `json:"tags,omitempty"` } -func GetConfig(pconfig providerconfigtypes.Config) (*RawConfig, error) { +func GetConfig(pconfig providerconfig.Config) (*RawConfig, error) { rawConfig := &RawConfig{} return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) diff --git a/pkg/cloudprovider/provider/vmwareclouddirector/types/types.go b/sdk/cloudprovider/vmwareclouddirector/types.go similarity index 56% rename from pkg/cloudprovider/provider/vmwareclouddirector/types/types.go rename to sdk/cloudprovider/vmwareclouddirector/types.go index 188e3c2bd..e53ea1cd5 100644 --- a/pkg/cloudprovider/provider/vmwareclouddirector/types/types.go +++ b/sdk/cloudprovider/vmwareclouddirector/types.go @@ -14,11 +14,11 @@ See the License for the specific language governing permissions and 
limitations under the License. */ -package types +package vmwareclouddirector import ( - "github.com/kubermatic/machine-controller/pkg/jsonutil" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + "k8c.io/machine-controller/sdk/jsonutil" + "k8c.io/machine-controller/sdk/providerconfig" ) type IPAllocationMode string @@ -31,22 +31,25 @@ const ( // RawConfig represents VMware Cloud Director specific configuration. type RawConfig struct { // Provider configuration. - Username providerconfigtypes.ConfigVarString `json:"username"` - Password providerconfigtypes.ConfigVarString `json:"password"` - Organization providerconfigtypes.ConfigVarString `json:"organization"` - URL providerconfigtypes.ConfigVarString `json:"url"` - VDC providerconfigtypes.ConfigVarString `json:"vdc"` - AllowInsecure providerconfigtypes.ConfigVarBool `json:"allowInsecure"` + Username providerconfig.ConfigVarString `json:"username"` + Password providerconfig.ConfigVarString `json:"password"` + APIToken providerconfig.ConfigVarString `json:"apiToken"` + Organization providerconfig.ConfigVarString `json:"organization"` + URL providerconfig.ConfigVarString `json:"url"` + VDC providerconfig.ConfigVarString `json:"vdc"` + AllowInsecure providerconfig.ConfigVarBool `json:"allowInsecure"` // VM configuration. - VApp providerconfigtypes.ConfigVarString `json:"vapp"` - Template providerconfigtypes.ConfigVarString `json:"template"` - Catalog providerconfigtypes.ConfigVarString `json:"catalog"` - PlacementPolicy *string `json:"placementPolicy,omitempty"` + VApp providerconfig.ConfigVarString `json:"vapp"` + Template providerconfig.ConfigVarString `json:"template"` + Catalog providerconfig.ConfigVarString `json:"catalog"` + PlacementPolicy *string `json:"placementPolicy,omitempty"` // Network configuration. 
- Network providerconfigtypes.ConfigVarString `json:"network"` - IPAllocationMode IPAllocationMode `json:"ipAllocationMode,omitempty"` + // Deprecated: Use networks instead. + Network providerconfig.ConfigVarString `json:"network,omitempty"` + Networks []providerconfig.ConfigVarString `json:"networks"` + IPAllocationMode IPAllocationMode `json:"ipAllocationMode,omitempty"` // Compute configuration. CPUs int64 `json:"cpus"` @@ -64,7 +67,7 @@ type RawConfig struct { Metadata *map[string]string `json:"metadata,omitempty"` } -func GetConfig(pconfig providerconfigtypes.Config) (*RawConfig, error) { +func GetConfig(pconfig providerconfig.Config) (*RawConfig, error) { rawConfig := &RawConfig{} return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) diff --git a/sdk/cloudprovider/vsphere/types.go b/sdk/cloudprovider/vsphere/types.go new file mode 100644 index 000000000..c7c1f5f0f --- /dev/null +++ b/sdk/cloudprovider/vsphere/types.go @@ -0,0 +1,69 @@ +/* +Copyright 2019 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vsphere + +import ( + "k8c.io/machine-controller/sdk/jsonutil" + "k8c.io/machine-controller/sdk/providerconfig" +) + +// RawConfig represents vsphere specific configuration. +type RawConfig struct { + TemplateVMName providerconfig.ConfigVarString `json:"templateVMName"` + // Deprecated: use networks instead. 
+ VMNetName providerconfig.ConfigVarString `json:"vmNetName"` + Networks []providerconfig.ConfigVarString `json:"networks"` + Username providerconfig.ConfigVarString `json:"username"` + Password providerconfig.ConfigVarString `json:"password"` + VSphereURL providerconfig.ConfigVarString `json:"vsphereURL"` + Datacenter providerconfig.ConfigVarString `json:"datacenter"` + + // Cluster defines the cluster to use in vcenter. + // Only needed for vm anti affinity. + Cluster providerconfig.ConfigVarString `json:"cluster"` + + Folder providerconfig.ConfigVarString `json:"folder"` + ResourcePool providerconfig.ConfigVarString `json:"resourcePool"` + + // Either Datastore or DatastoreCluster have to be provided. + DatastoreCluster providerconfig.ConfigVarString `json:"datastoreCluster"` + Datastore providerconfig.ConfigVarString `json:"datastore"` + + CPUs int32 `json:"cpus"` + MemoryMB int64 `json:"memoryMB"` + DiskSizeGB *int64 `json:"diskSizeGB,omitempty"` + Tags []Tag `json:"tags,omitempty"` + AllowInsecure providerconfig.ConfigVarBool `json:"allowInsecure"` + + // Placement rules + VMAntiAffinity providerconfig.ConfigVarBool `json:"vmAntiAffinity"` + VMGroup providerconfig.ConfigVarString `json:"vmGroup,omitempty"` +} + +// Tag represents vsphere tag. +type Tag struct { + Description string `json:"description,omitempty"` + ID string `json:"id,omitempty"` + Name string `json:"name,omitempty"` + CategoryID string `json:"categoryID"` +} + +func GetConfig(pconfig providerconfig.Config) (*RawConfig, error) { + rawConfig := &RawConfig{} + + return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) +} diff --git a/sdk/cloudprovider/vultr/types.go b/sdk/cloudprovider/vultr/types.go new file mode 100644 index 000000000..b108d99b0 --- /dev/null +++ b/sdk/cloudprovider/vultr/types.go @@ -0,0 +1,42 @@ +/* +Copyright 2023 The Machine Controller Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vultr + +import ( + "k8c.io/machine-controller/sdk/jsonutil" + "k8c.io/machine-controller/sdk/providerconfig" +) + +type RawConfig struct { + PhysicalMachine bool `json:"physicalMachine,omitempty"` + APIKey providerconfig.ConfigVarString `json:"apiKey,omitempty"` + Region providerconfig.ConfigVarString `json:"region"` + Plan providerconfig.ConfigVarString `json:"plan"` + OsID providerconfig.ConfigVarString `json:"osId"` + Tags []string `json:"tags,omitempty"` + VpcID []string `json:"vpcId,omitempty"` + Vpc2ID []string `json:"vpc2Id,omitempty"` + EnableVPC bool `json:"enableVPC,omitempty"` + EnableVPC2 bool `json:"enableVPC2,omitempty"` + EnableIPv6 bool `json:"enableIPv6,omitempty"` +} + +func GetConfig(pconfig providerconfig.Config) (*RawConfig, error) { + rawConfig := &RawConfig{} + + return rawConfig, jsonutil.StrictUnmarshal(pconfig.CloudProviderSpec.Raw, rawConfig) +} diff --git a/sdk/go.mod b/sdk/go.mod new file mode 100644 index 000000000..bd8687cac --- /dev/null +++ b/sdk/go.mod @@ -0,0 +1,64 @@ +module k8c.io/machine-controller/sdk + +go 1.22.3 + +toolchain go1.23.1 + +require ( + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 + k8s.io/api v0.31.1 + k8s.io/apimachinery v0.31.1 + k8s.io/client-go v0.31.1 + k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 + kubevirt.io/api v1.3.1 + sigs.k8s.io/controller-runtime v0.19.0 + sigs.k8s.io/yaml v1.4.0 +) + +require ( + github.com/davecgh/go-spew 
v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/emicklei/go-restful/v3 v3.12.1 // indirect + github.com/evanphx/json-patch/v5 v5.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/imdario/mergo v0.3.16 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.7.7 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/onsi/ginkgo/v2 v2.20.1 // indirect + github.com/onsi/gomega v1.34.1 // indirect + github.com/openshift/custom-resource-status v1.1.2 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/x448/float16 v0.8.4 // indirect + go.uber.org/zap v1.27.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/net v0.29.0 // indirect + golang.org/x/oauth2 v0.23.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect + golang.org/x/time v0.6.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/apiextensions-apiserver v0.31.1 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi 
v0.0.0-20240903163716-9e1beecbcb38 // indirect + kubevirt.io/containerized-data-importer-api v1.60.3 // indirect + kubevirt.io/controller-lifecycle-operator-sdk/api v0.2.4 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect +) diff --git a/sdk/go.sum b/sdk/go.sum new file mode 100644 index 000000000..bd0c6b0b2 --- /dev/null +++ b/sdk/go.sum @@ -0,0 +1,382 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= 
+github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful v2.15.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= +github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch/v5 v5.9.0 h1:kcBlZQbplgElYIlo/n1hJbls2z/1awpXxpRi0/FOJfg= +github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-logr/logr v0.1.0/go.mod 
h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= +github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg= +github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= 
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod 
h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= +github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/googleapis/gnostic 
v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= +github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod 
h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8/go.mod 
h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.20.1 h1:YlVIbqct+ZmnEph770q9Q7NVAz4wwIiVNahee6JyUzo= +github.com/onsi/ginkgo/v2 v2.20.1/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= +github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= +github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= +github.com/openshift/custom-resource-status v1.1.2 h1:C3DL44LEbvlbItfd8mT5jWrqPfHnSOQoQf/sypqA6A4= +github.com/openshift/custom-resource-status v1.1.2/go.mod h1:DB/Mf2oTeiAmVVX1gN+NEqweonAPY0TKUwADizj8+ZA= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= 
+github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod 
h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= 
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= +golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= +golang.org/x/oauth2 v0.23.0/go.mod 
h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= +golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM= +golang.org/x/tools v0.1.9/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU= +golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE= +golang.org/x/tools 
v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= 
+google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod 
h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +k8s.io/api v0.23.3/go.mod h1:w258XdGyvCmnBj/vGzQMj6kzdufJZVUwEM1U2fRJwSQ= +k8s.io/api v0.31.1 h1:Xe1hX/fPW3PXYYv8BlozYqw63ytA92snr96zMW9gWTU= +k8s.io/api v0.31.1/go.mod h1:sbN1g6eY6XVLeqNsZGLnI5FwVseTrZX7Fv3O26rhAaI= +k8s.io/apiextensions-apiserver v0.31.1 h1:L+hwULvXx+nvTYX/MKM3kKMZyei+UiSXQWciX/N6E40= +k8s.io/apiextensions-apiserver v0.31.1/go.mod h1:tWMPR3sgW+jsl2xm9v7lAyRF1rYEK71i9G5dRtkknoQ= +k8s.io/apimachinery v0.23.3/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM= +k8s.io/apimachinery v0.31.1 h1:mhcUBbj7KUjaVhyXILglcVjuS4nYXiwC+KKFBgIVy7U= +k8s.io/apimachinery v0.31.1/go.mod h1:rsPdaZJfTfLsNJSQzNHQvYoTmxhoOEofxtOsF3rtsMo= +k8s.io/client-go v0.31.1 
h1:f0ugtWSbWpxHR7sjVpQwuvw9a3ZKLXX0u0itkFXufb0= +k8s.io/client-go v0.31.1/go.mod h1:sKI8871MJN2OyeqRlmA4W4KM9KBdBUpDLu/43eGemCg= +k8s.io/code-generator v0.23.3/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk= +k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= +k8s.io/kube-openapi v0.0.0-20220124234850-424119656bbf/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= +k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38 h1:1dWzkmJrrprYvjGwh9kEUxmcUV/CtNU8QM7h1FLWQOo= +k8s.io/kube-openapi v0.0.0-20240903163716-9e1beecbcb38/go.mod h1:coRQXBK9NxO98XUv3ZD6AK3xzHCxV6+b7lrquKwaKzA= +k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3 h1:b2FmK8YH+QEwq/Sy2uAEhmqL5nPfGYbJOcaqjeYYZoA= +k8s.io/utils v0.0.0-20240902221715-702e33fdd3c3/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +kubevirt.io/api v1.3.1 h1:MoTNo/zvDlZ44c2ocXLPln8XTaQOeUodiYbEKrTCqv4= +kubevirt.io/api v1.3.1/go.mod h1:tCn7VAZktEvymk490iPSMPCmKM9UjbbfH2OsFR/IOLU= +kubevirt.io/containerized-data-importer-api v1.60.3 h1:kQEXi7scpzUa0RPf3/3MKk1Kmem0ZlqqiuK3kDF5L2I= 
+kubevirt.io/containerized-data-importer-api v1.60.3/go.mod h1:8mwrkZIdy8j/LmCyKt2wFXbiMavLUIqDaegaIF67CZs= +kubevirt.io/controller-lifecycle-operator-sdk/api v0.2.4 h1:fZYvD3/Vnitfkx6IJxjLAk8ugnZQ7CXVYcRfkSKmuZY= +kubevirt.io/controller-lifecycle-operator-sdk/api v0.2.4/go.mod h1:018lASpFYBsYN6XwmA2TIrPCx6e0gviTd/ZNtSitKgc= +sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC0ji/Q= +sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= +sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/pkg/test/helper.go b/sdk/internal/test/helper.go similarity index 100% rename from pkg/test/helper.go rename to sdk/internal/test/helper.go diff --git a/pkg/jsonutil/strict.go b/sdk/jsonutil/strict.go similarity index 100% rename from pkg/jsonutil/strict.go rename to sdk/jsonutil/strict.go diff --git a/sdk/net/net.go b/sdk/net/net.go new file mode 100644 index 000000000..de70a41c5 --- /dev/null +++ b/sdk/net/net.go @@ -0,0 +1,55 @@ +/* +Copyright 
2021 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + gonet "net" +) + +const ( + ErrIPv6OnlyUnsupported = "IPv6-only network family not supported yet" + ErrUnknownNetworkFamily = "unknown IP family %q, only IPv4,IPv6,IPv4+IPv6 are valid values" +) + +// IPFamily IPv4 | IPv6 | IPv4+IPv6. +type IPFamily string + +const ( + IPFamilyUnspecified IPFamily = "" // interpreted as IPv4 + IPFamilyIPv4 IPFamily = "IPv4" // IPv4 only + IPFamilyIPv6 IPFamily = "IPv6" // IPv6 only + IPFamilyIPv4IPv6 IPFamily = "IPv4+IPv6" // dualstack with IPv4 as primary + IPFamilyIPv6IPv4 IPFamily = "IPv6+IPv4" // dualstack with IPv6 as primary +) + +func (f IPFamily) HasIPv6() bool { + return f == IPFamilyIPv6 || f == IPFamilyIPv4IPv6 || f == IPFamilyIPv6IPv4 +} + +func (f IPFamily) HasIPv4() bool { + return f == IPFamilyUnspecified || f == IPFamilyIPv4 || f == IPFamilyIPv4IPv6 || f == IPFamilyIPv6IPv4 +} + +func (f IPFamily) IsDualstack() bool { + return f == IPFamilyIPv4IPv6 || f == IPFamilyIPv6IPv4 +} + +// IsLinkLocal checks if given ip address is link local.. 
+func IsLinkLocal(ipAddr string) bool { + addr := gonet.ParseIP(ipAddr) + return addr.IsLinkLocalMulticast() || addr.IsLinkLocalUnicast() +} diff --git a/pkg/node/eviction/types/types.go b/sdk/node/eviction.go similarity index 97% rename from pkg/node/eviction/types/types.go rename to sdk/node/eviction.go index d69a4aa39..9de6e49cc 100644 --- a/pkg/node/eviction/types/types.go +++ b/sdk/node/eviction.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package types +package node const ( SkipEvictionAnnotationKey = "kubermatic.io/skip-eviction" diff --git a/pkg/providerconfig/types.go b/sdk/providerconfig/configvar/resolver.go similarity index 61% rename from pkg/providerconfig/types.go rename to sdk/providerconfig/configvar/resolver.go index cf1520071..0e22e387c 100644 --- a/pkg/providerconfig/types.go +++ b/sdk/providerconfig/configvar/resolver.go @@ -14,37 +14,38 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package providerconfig +package configvar import ( "context" - "errors" "fmt" "os" "strconv" "time" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" - "github.com/kubermatic/machine-controller/pkg/userdata/amzn2" - "github.com/kubermatic/machine-controller/pkg/userdata/centos" - "github.com/kubermatic/machine-controller/pkg/userdata/flatcar" - "github.com/kubermatic/machine-controller/pkg/userdata/rhel" - "github.com/kubermatic/machine-controller/pkg/userdata/rockylinux" - "github.com/kubermatic/machine-controller/pkg/userdata/ubuntu" + "k8c.io/machine-controller/sdk/providerconfig" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" ctrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client" ) -type ConfigVarResolver struct { +type Resolver struct { ctx context.Context client ctrlruntimeclient.Client } -func (cvr *ConfigVarResolver) GetConfigVarDurationValue(configVar providerconfigtypes.ConfigVarString) (time.Duration, error) { - durStr, err := cvr.GetConfigVarStringValue(configVar) +func NewResolver(ctx context.Context, client ctrlruntimeclient.Client) *Resolver { + return &Resolver{ + ctx: ctx, + client: client, + } +} + +var _ providerconfig.ConfigVarResolver = &Resolver{} + +func (r *Resolver) GetDurationValue(configVar providerconfig.ConfigVarString) (time.Duration, error) { + durStr, err := r.GetStringValue(configVar) if err != nil { return 0, err } @@ -52,8 +53,8 @@ func (cvr *ConfigVarResolver) GetConfigVarDurationValue(configVar providerconfig return time.ParseDuration(durStr) } -func (cvr *ConfigVarResolver) GetConfigVarDurationValueOrDefault(configVar providerconfigtypes.ConfigVarString, defaultDuration time.Duration) (time.Duration, error) { - durStr, err := cvr.GetConfigVarStringValue(configVar) +func (r *Resolver) GetDurationValueOrDefault(configVar providerconfig.ConfigVarString, defaultDuration time.Duration) (time.Duration, error) { + durStr, err := 
r.GetStringValue(configVar) if err != nil { return 0, err } @@ -74,12 +75,12 @@ func (cvr *ConfigVarResolver) GetConfigVarDurationValueOrDefault(configVar provi return duration, nil } -func (cvr *ConfigVarResolver) GetConfigVarStringValue(configVar providerconfigtypes.ConfigVarString) (string, error) { +func (r *Resolver) GetStringValue(configVar providerconfig.ConfigVarString) (string, error) { // We need all three of these to fetch and use a secret if configVar.SecretKeyRef.Name != "" && configVar.SecretKeyRef.Namespace != "" && configVar.SecretKeyRef.Key != "" { secret := &corev1.Secret{} name := types.NamespacedName{Namespace: configVar.SecretKeyRef.Namespace, Name: configVar.SecretKeyRef.Name} - if err := cvr.client.Get(cvr.ctx, name, secret); err != nil { + if err := r.client.Get(r.ctx, name, secret); err != nil { return "", fmt.Errorf("error retrieving secret '%s' from namespace '%s': '%w'", configVar.SecretKeyRef.Name, configVar.SecretKeyRef.Namespace, err) } if val, ok := secret.Data[configVar.SecretKeyRef.Key]; ok { @@ -92,7 +93,7 @@ func (cvr *ConfigVarResolver) GetConfigVarStringValue(configVar providerconfigty if configVar.ConfigMapKeyRef.Name != "" && configVar.ConfigMapKeyRef.Namespace != "" && configVar.ConfigMapKeyRef.Key != "" { configMap := &corev1.ConfigMap{} name := types.NamespacedName{Namespace: configVar.ConfigMapKeyRef.Namespace, Name: configVar.ConfigMapKeyRef.Name} - if err := cvr.client.Get(cvr.ctx, name, configMap); err != nil { + if err := r.client.Get(r.ctx, name, configMap); err != nil { return "", fmt.Errorf("error retrieving configmap '%s' from namespace '%s': '%w'", configVar.ConfigMapKeyRef.Name, configVar.ConfigMapKeyRef.Namespace, err) } if val, ok := configMap.Data[configVar.ConfigMapKeyRef.Key]; ok { @@ -104,10 +105,10 @@ func (cvr *ConfigVarResolver) GetConfigVarStringValue(configVar providerconfigty return configVar.Value, nil } -// GetConfigVarStringValueOrEnv tries to get the value from ConfigVarString, when it fails, it 
falls back to +// GetStringValueOrEnv tries to get the value from ConfigVarString, when it fails, it falls back to // getting the value from an environment variable specified by envVarName parameter. -func (cvr *ConfigVarResolver) GetConfigVarStringValueOrEnv(configVar providerconfigtypes.ConfigVarString, envVarName string) (string, error) { - cfgVar, err := cvr.GetConfigVarStringValue(configVar) +func (r *Resolver) GetStringValueOrEnv(configVar providerconfig.ConfigVarString, envVarName string) (string, error) { + cfgVar, err := r.GetStringValue(configVar) if err == nil && len(cfgVar) > 0 { return cfgVar, err } @@ -116,14 +117,14 @@ func (cvr *ConfigVarResolver) GetConfigVarStringValueOrEnv(configVar providercon return envVal, nil } -// GetConfigVarBoolValue returns a boolean from a ConfigVarBool. If there is no valid source for the boolean, +// GetBoolValue returns a boolean from a ConfigVarBool. If there is no valid source for the boolean, // the second bool returned will be false (to be able to differentiate between "false" and "unset"). 
-func (cvr *ConfigVarResolver) GetConfigVarBoolValue(configVar providerconfigtypes.ConfigVarBool) (bool, bool, error) { +func (r *Resolver) GetBoolValue(configVar providerconfig.ConfigVarBool) (bool, bool, error) { // We need all three of these to fetch and use a secret if configVar.SecretKeyRef.Name != "" && configVar.SecretKeyRef.Namespace != "" && configVar.SecretKeyRef.Key != "" { secret := &corev1.Secret{} name := types.NamespacedName{Namespace: configVar.SecretKeyRef.Namespace, Name: configVar.SecretKeyRef.Name} - if err := cvr.client.Get(cvr.ctx, name, secret); err != nil { + if err := r.client.Get(r.ctx, name, secret); err != nil { return false, false, fmt.Errorf("error retrieving secret '%s' from namespace '%s': '%w'", configVar.SecretKeyRef.Name, configVar.SecretKeyRef.Namespace, err) } if val, ok := secret.Data[configVar.SecretKeyRef.Key]; ok { @@ -137,7 +138,7 @@ func (cvr *ConfigVarResolver) GetConfigVarBoolValue(configVar providerconfigtype if configVar.ConfigMapKeyRef.Name != "" && configVar.ConfigMapKeyRef.Namespace != "" && configVar.ConfigMapKeyRef.Key != "" { configMap := &corev1.ConfigMap{} name := types.NamespacedName{Namespace: configVar.ConfigMapKeyRef.Namespace, Name: configVar.ConfigMapKeyRef.Name} - if err := cvr.client.Get(cvr.ctx, name, configMap); err != nil { + if err := r.client.Get(r.ctx, name, configMap); err != nil { return false, false, fmt.Errorf("error retrieving configmap '%s' from namespace '%s': '%w'", configVar.ConfigMapKeyRef.Name, configVar.ConfigMapKeyRef.Namespace, err) } if val, ok := configMap.Data[configVar.ConfigMapKeyRef.Key]; ok { @@ -154,8 +155,8 @@ func (cvr *ConfigVarResolver) GetConfigVarBoolValue(configVar providerconfigtype return configVar.Value != nil && *configVar.Value, true, nil } -func (cvr *ConfigVarResolver) GetConfigVarBoolValueOrEnv(configVar providerconfigtypes.ConfigVarBool, envVarName string) (bool, error) { - boolVal, valid, err := cvr.GetConfigVarBoolValue(configVar) +func (r *Resolver) 
GetBoolValueOrEnv(configVar providerconfig.ConfigVarBool, envVarName string) (bool, error) { + boolVal, valid, err := r.GetBoolValue(configVar) if valid && err == nil { return boolVal, nil } @@ -171,34 +172,3 @@ func (cvr *ConfigVarResolver) GetConfigVarBoolValueOrEnv(configVar providerconfi return false, nil } - -func NewConfigVarResolver(ctx context.Context, client ctrlruntimeclient.Client) *ConfigVarResolver { - return &ConfigVarResolver{ - ctx: ctx, - client: client, - } -} - -func DefaultOperatingSystemSpec( - osys providerconfigtypes.OperatingSystem, - cloudProvider providerconfigtypes.CloudProvider, - operatingSystemSpec runtime.RawExtension, - externalBootstrapEnabled bool, -) (runtime.RawExtension, error) { - switch osys { - case providerconfigtypes.OperatingSystemAmazonLinux2: - return amzn2.DefaultConfig(operatingSystemSpec), nil - case providerconfigtypes.OperatingSystemCentOS: - return centos.DefaultConfig(operatingSystemSpec), nil - case providerconfigtypes.OperatingSystemFlatcar: - return flatcar.DefaultConfigForCloud(operatingSystemSpec, cloudProvider, externalBootstrapEnabled), nil - case providerconfigtypes.OperatingSystemRHEL: - return rhel.DefaultConfig(operatingSystemSpec), nil - case providerconfigtypes.OperatingSystemUbuntu: - return ubuntu.DefaultConfig(operatingSystemSpec), nil - case providerconfigtypes.OperatingSystemRockyLinux: - return rockylinux.DefaultConfig(operatingSystemSpec), nil - } - - return operatingSystemSpec, errors.New("unknown OperatingSystem") -} diff --git a/pkg/ini/duration.go b/sdk/providerconfig/resolver.go similarity index 54% rename from pkg/ini/duration.go rename to sdk/providerconfig/resolver.go index 0f3af1f83..7f07cb5c9 100644 --- a/pkg/ini/duration.go +++ b/sdk/providerconfig/resolver.go @@ -14,28 +14,17 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package ini +package providerconfig import ( "time" ) -// Duration is the encoding.TextUnmarshaler interface for time.Duration. -type Duration struct { - time.Duration -} - -// UnmarshalText is used to convert from text to Duration. -func (d *Duration) UnmarshalText(text []byte) error { - res, err := time.ParseDuration(string(text)) - if err != nil { - return err - } - d.Duration = res - return nil -} - -// MarshalText is used to convert from Duration to text. -func (d *Duration) MarshalText() []byte { - return []byte(d.Duration.String()) +type ConfigVarResolver interface { + GetDurationValue(configVar ConfigVarString) (time.Duration, error) + GetDurationValueOrDefault(configVar ConfigVarString, defaultDuration time.Duration) (time.Duration, error) + GetStringValue(configVar ConfigVarString) (string, error) + GetStringValueOrEnv(configVar ConfigVarString, envVarName string) (string, error) + GetBoolValue(configVar ConfigVarBool) (bool, bool, error) + GetBoolValueOrEnv(configVar ConfigVarBool, envVarName string) (bool, error) } diff --git a/pkg/providerconfig/types/types.go b/sdk/providerconfig/types.go similarity index 83% rename from pkg/providerconfig/types/types.go rename to sdk/providerconfig/types.go index 02a589fbb..b986a8a8b 100644 --- a/pkg/providerconfig/types/types.go +++ b/sdk/providerconfig/types.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package types +package providerconfig import ( "bytes" @@ -23,9 +23,9 @@ import ( "fmt" "strconv" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/cloudprovider/util" - "github.com/kubermatic/machine-controller/pkg/jsonutil" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + "k8c.io/machine-controller/sdk/jsonutil" + "k8c.io/machine-controller/sdk/net" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -36,13 +36,21 @@ type OperatingSystem string const ( OperatingSystemUbuntu OperatingSystem = "ubuntu" - OperatingSystemCentOS OperatingSystem = "centos" OperatingSystemAmazonLinux2 OperatingSystem = "amzn2" OperatingSystemRHEL OperatingSystem = "rhel" OperatingSystemFlatcar OperatingSystem = "flatcar" OperatingSystemRockyLinux OperatingSystem = "rockylinux" ) +func (os OperatingSystem) Validate() error { + for _, supportedOS := range AllOperatingSystems { + if os == supportedOS { + return nil + } + } + return ErrOSNotSupported +} + type CloudProvider string const ( @@ -61,11 +69,13 @@ const ( CloudProviderVultr CloudProvider = "vultr" CloudProviderVMwareCloudDirector CloudProvider = "vmware-cloud-director" CloudProviderFake CloudProvider = "fake" + CloudProviderEdge CloudProvider = "edge" CloudProviderAlibaba CloudProvider = "alibaba" CloudProviderAnexia CloudProvider = "anexia" CloudProviderScaleway CloudProvider = "scaleway" CloudProviderBaremetal CloudProvider = "baremetal" CloudProviderExternal CloudProvider = "external" + CloudProviderOpenNebula CloudProvider = "opennebula" ) var ( @@ -74,7 +84,6 @@ var ( // AllOperatingSystems is a slice containing all supported operating system identifiers. 
AllOperatingSystems = []OperatingSystem{ OperatingSystemUbuntu, - OperatingSystemCentOS, OperatingSystemAmazonLinux2, OperatingSystemRHEL, OperatingSystemFlatcar, @@ -97,14 +106,24 @@ var ( CloudProviderVsphere, CloudProviderVMwareCloudDirector, CloudProviderFake, + CloudProviderEdge, CloudProviderAlibaba, CloudProviderAnexia, CloudProviderScaleway, CloudProviderBaremetal, CloudProviderVultr, + CloudProviderOpenNebula, + CloudProviderExternal, } ) +func IntreeCloudProviderImplementationSupported(cloudProvider CloudProvider) (inTree bool) { + if cloudProvider == CloudProviderAzure || cloudProvider == CloudProviderVsphere || cloudProvider == CloudProviderGoogle { + return true + } + return false +} + // DNSConfig contains a machine's DNS configuration. type DNSConfig struct { Servers []string `json:"servers"` @@ -112,10 +131,10 @@ type DNSConfig struct { // NetworkConfig contains a machine's static network configuration. type NetworkConfig struct { - CIDR string `json:"cidr"` - Gateway string `json:"gateway"` - DNS DNSConfig `json:"dns"` - IPFamily util.IPFamily `json:"ipFamily,omitempty"` + CIDR string `json:"cidr"` + Gateway string `json:"gateway"` + DNS DNSConfig `json:"dns"` + IPFamily net.IPFamily `json:"ipFamily,omitempty"` } func (n *NetworkConfig) IsStaticIPConfig() bool { @@ -127,9 +146,9 @@ func (n *NetworkConfig) IsStaticIPConfig() bool { len(n.DNS.Servers) != 0 } -func (n *NetworkConfig) GetIPFamily() util.IPFamily { +func (n *NetworkConfig) GetIPFamily() net.IPFamily { if n == nil { - return util.IPFamilyUnspecified + return net.IPFamilyUnspecified } return n.IPFamily } @@ -178,14 +197,14 @@ type configVarStringWithoutUnmarshaller ConfigVarString // https://github.com/golang/go/issues/11939. 
func (configVarString ConfigVarString) MarshalJSON() ([]byte, error) { var secretKeyRefEmpty, configMapKeyRefEmpty bool - if configVarString.SecretKeyRef.ObjectReference.Namespace == "" && - configVarString.SecretKeyRef.ObjectReference.Name == "" && + if configVarString.SecretKeyRef.Namespace == "" && + configVarString.SecretKeyRef.Name == "" && configVarString.SecretKeyRef.Key == "" { secretKeyRefEmpty = true } - if configVarString.ConfigMapKeyRef.ObjectReference.Namespace == "" && - configVarString.ConfigMapKeyRef.ObjectReference.Name == "" && + if configVarString.ConfigMapKeyRef.Namespace == "" && + configVarString.ConfigMapKeyRef.Name == "" && configVarString.ConfigMapKeyRef.Key == "" { configMapKeyRefEmpty = true } @@ -200,7 +219,7 @@ func (configVarString ConfigVarString) MarshalJSON() ([]byte, error) { if err != nil { return nil, err } - buffer.WriteString(fmt.Sprintf(`"secretKeyRef":%s`, string(jsonVal))) + fmt.Fprintf(buffer, `"secretKeyRef":%s`, string(jsonVal)) } if !configMapKeyRefEmpty { @@ -212,11 +231,11 @@ func (configVarString ConfigVarString) MarshalJSON() ([]byte, error) { if err != nil { return nil, err } - buffer.WriteString(fmt.Sprintf(`%s"configMapKeyRef":%s`, leadingComma, jsonVal)) + fmt.Fprintf(buffer, `%s"configMapKeyRef":%s`, leadingComma, jsonVal) } if configVarString.Value != "" { - buffer.WriteString(fmt.Sprintf(`,"value":"%s"`, configVarString.Value)) + fmt.Fprintf(buffer, `,"value":"%s"`, configVarString.Value) } buffer.WriteString("}") @@ -266,14 +285,14 @@ type configVarBoolWithoutUnmarshaller ConfigVarBool // https://github.com/golang/go/issues/11939 func (configVarBool ConfigVarBool) MarshalJSON() ([]byte, error) { var secretKeyRefEmpty, configMapKeyRefEmpty bool - if configVarBool.SecretKeyRef.ObjectReference.Namespace == "" && - configVarBool.SecretKeyRef.ObjectReference.Name == "" && + if configVarBool.SecretKeyRef.Namespace == "" && + configVarBool.SecretKeyRef.Name == "" && configVarBool.SecretKeyRef.Key == "" { 
secretKeyRefEmpty = true } - if configVarBool.ConfigMapKeyRef.ObjectReference.Namespace == "" && - configVarBool.ConfigMapKeyRef.ObjectReference.Name == "" && + if configVarBool.ConfigMapKeyRef.Namespace == "" && + configVarBool.ConfigMapKeyRef.Name == "" && configVarBool.ConfigMapKeyRef.Key == "" { configMapKeyRefEmpty = true } @@ -292,7 +311,7 @@ func (configVarBool ConfigVarBool) MarshalJSON() ([]byte, error) { if err != nil { return nil, err } - buffer.WriteString(fmt.Sprintf(`"secretKeyRef":%s`, string(jsonVal))) + fmt.Fprintf(buffer, `"secretKeyRef":%s`, string(jsonVal)) } if !configMapKeyRefEmpty { @@ -304,7 +323,7 @@ func (configVarBool ConfigVarBool) MarshalJSON() ([]byte, error) { if err != nil { return nil, err } - buffer.WriteString(fmt.Sprintf(`%s"configMapKeyRef":%s`, leadingComma, jsonVal)) + fmt.Fprintf(buffer, `%s"configMapKeyRef":%s`, leadingComma, jsonVal) } if configVarBool.Value != nil { @@ -313,7 +332,7 @@ func (configVarBool ConfigVarBool) MarshalJSON() ([]byte, error) { return []byte{}, err } - buffer.WriteString(fmt.Sprintf(`,"value":%v`, string(jsonVal))) + fmt.Fprintf(buffer, `,"value":%v`, string(jsonVal)) } buffer.WriteString("}") @@ -325,7 +344,7 @@ func (configVarBool *ConfigVarBool) UnmarshalJSON(b []byte) error { if !bytes.HasPrefix(b, []byte("{")) { var val *bool if err := json.Unmarshal(b, &val); err != nil { - return fmt.Errorf("Error parsing value: '%w'", err) + return fmt.Errorf("error parsing value: '%w'", err) } configVarBool.Value = val diff --git a/pkg/providerconfig/types/types_test.go b/sdk/providerconfig/types_test.go similarity index 67% rename from pkg/providerconfig/types/types_test.go rename to sdk/providerconfig/types_test.go index d1d7252bd..52432ad2f 100644 --- a/pkg/providerconfig/types/types_test.go +++ b/sdk/providerconfig/types_test.go @@ -14,15 +14,15 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package types +package providerconfig import ( "encoding/json" "reflect" "testing" - v1 "k8s.io/api/core/v1" - "k8s.io/utils/pointer" + corev1 "k8s.io/api/core/v1" + "k8s.io/utils/ptr" ) func TestConfigVarStringUnmarshalling(t *testing.T) { @@ -52,11 +52,11 @@ func TestConfigVarBoolUnmarshalling(t *testing.T) { }{ { jsonString: "true", - expected: ConfigVarBool{Value: pointer.Bool(true)}, + expected: ConfigVarBool{Value: ptr.To(true)}, }, { jsonString: `{"value":true}`, - expected: ConfigVarBool{Value: pointer.Bool(true)}, + expected: ConfigVarBool{Value: ptr.To(true)}, }, { jsonString: "null", @@ -68,19 +68,19 @@ func TestConfigVarBoolUnmarshalling(t *testing.T) { }, { jsonString: `{"secretKeyRef":{"namespace":"ns","name":"name","key":"key"}}`, - expected: ConfigVarBool{Value: nil, SecretKeyRef: GlobalSecretKeySelector{ObjectReference: v1.ObjectReference{Namespace: "ns", Name: "name"}, Key: "key"}}, + expected: ConfigVarBool{Value: nil, SecretKeyRef: GlobalSecretKeySelector{ObjectReference: corev1.ObjectReference{Namespace: "ns", Name: "name"}, Key: "key"}}, }, { jsonString: `{"value": null, "secretKeyRef":{"namespace":"ns","name":"name","key":"key"}}`, - expected: ConfigVarBool{Value: nil, SecretKeyRef: GlobalSecretKeySelector{ObjectReference: v1.ObjectReference{Namespace: "ns", Name: "name"}, Key: "key"}}, + expected: ConfigVarBool{Value: nil, SecretKeyRef: GlobalSecretKeySelector{ObjectReference: corev1.ObjectReference{Namespace: "ns", Name: "name"}, Key: "key"}}, }, { jsonString: `{"value":false, "secretKeyRef":{"namespace":"ns","name":"name","key":"key"}}`, - expected: ConfigVarBool{Value: pointer.Bool(false), SecretKeyRef: GlobalSecretKeySelector{ObjectReference: v1.ObjectReference{Namespace: "ns", Name: "name"}, Key: "key"}}, + expected: ConfigVarBool{Value: ptr.To(false), SecretKeyRef: GlobalSecretKeySelector{ObjectReference: corev1.ObjectReference{Namespace: "ns", Name: "name"}, Key: "key"}}, }, { jsonString: `{"value":true, 
"secretKeyRef":{"namespace":"ns","name":"name","key":"key"}}`, - expected: ConfigVarBool{Value: pointer.Bool(true), SecretKeyRef: GlobalSecretKeySelector{ObjectReference: v1.ObjectReference{Namespace: "ns", Name: "name"}, Key: "key"}}, + expected: ConfigVarBool{Value: ptr.To(true), SecretKeyRef: GlobalSecretKeySelector{ObjectReference: corev1.ObjectReference{Namespace: "ns", Name: "name"}, Key: "key"}}, }, } @@ -103,7 +103,7 @@ func TestConfigVarStringMarshalling(t *testing.T) { expected: `"val"`, }, { - cvs: ConfigVarString{SecretKeyRef: GlobalSecretKeySelector{ObjectReference: v1.ObjectReference{Namespace: "ns", Name: "name"}, Key: "key"}}, + cvs: ConfigVarString{SecretKeyRef: GlobalSecretKeySelector{ObjectReference: corev1.ObjectReference{Namespace: "ns", Name: "name"}, Key: "key"}}, expected: `{"secretKeyRef":{"namespace":"ns","name":"name","key":"key"}}`, }, } @@ -129,19 +129,19 @@ func TestConfigVarBoolMarshalling(t *testing.T) { expected: `null`, }, { - cvb: ConfigVarBool{Value: pointer.Bool(true)}, + cvb: ConfigVarBool{Value: ptr.To(true)}, expected: `true`, }, { - cvb: ConfigVarBool{SecretKeyRef: GlobalSecretKeySelector{ObjectReference: v1.ObjectReference{Namespace: "ns", Name: "name"}, Key: "key"}}, + cvb: ConfigVarBool{SecretKeyRef: GlobalSecretKeySelector{ObjectReference: corev1.ObjectReference{Namespace: "ns", Name: "name"}, Key: "key"}}, expected: `{"secretKeyRef":{"namespace":"ns","name":"name","key":"key"}}`, }, { - cvb: ConfigVarBool{SecretKeyRef: GlobalSecretKeySelector{ObjectReference: v1.ObjectReference{Namespace: "ns", Name: "name"}, Key: "key"}, Value: pointer.Bool(true)}, + cvb: ConfigVarBool{SecretKeyRef: GlobalSecretKeySelector{ObjectReference: corev1.ObjectReference{Namespace: "ns", Name: "name"}, Key: "key"}, Value: ptr.To(true)}, expected: `{"secretKeyRef":{"namespace":"ns","name":"name","key":"key"},"value":true}`, }, { - cvb: ConfigVarBool{SecretKeyRef: GlobalSecretKeySelector{ObjectReference: v1.ObjectReference{Namespace: "ns", Name: 
"name"}, Key: "key"}, Value: pointer.Bool(false)}, + cvb: ConfigVarBool{SecretKeyRef: GlobalSecretKeySelector{ObjectReference: corev1.ObjectReference{Namespace: "ns", Name: "name"}, Key: "key"}, Value: ptr.To(false)}, expected: `{"secretKeyRef":{"namespace":"ns","name":"name","key":"key"},"value":false}`, }, } @@ -161,22 +161,22 @@ func TestConfigVarStringMarshallingAndUnmarshalling(t *testing.T) { testCases := []ConfigVarString{ {Value: "val"}, {Value: "spe>=%s", safeBase64Encoding(kubevirtKubeconfig)), } - runScenarios(t, selector, params, kubevirtManifest, fmt.Sprintf("kubevirt-%s", *testRunIdentifier)) + runScenarios(context.Background(), t, selector, params, kubevirtManifest, fmt.Sprintf("kubevirt-%s", *testRunIdentifier)) } // safeBase64Encoding takes a value and encodes it with base64 @@ -327,7 +334,7 @@ func TestOpenstackProvisioningE2E(t *testing.T) { osNetwork := os.Getenv("OS_NETWORK_NAME") if osAuthURL == "" || osUsername == "" || osPassword == "" || osDomain == "" || osRegion == "" || osTenant == "" { - t.Fatal("unable to run test suite, all of OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, OS_REGION, and OS_TENANT OS_DOMAIN must be set!") + t.Fatal("Unable to run test suite, all of OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, OS_REGION, and OS_TENANT OS_DOMAIN must be set!") } params := []string{ @@ -340,8 +347,9 @@ func TestOpenstackProvisioningE2E(t *testing.T) { fmt.Sprintf("<< NETWORK_NAME >>=%s", osNetwork), } + // In-tree cloud provider is not supported from Kubernetes v1.26. 
selector := Not(OsSelector("amzn2")) - runScenarios(t, selector, params, OSManifest, fmt.Sprintf("os-%s", *testRunIdentifier)) + runScenarios(context.Background(), t, selector, params, OSManifest, fmt.Sprintf("os-%s", *testRunIdentifier)) } func TestOpenstackProjectAuthProvisioningE2E(t *testing.T) { @@ -358,7 +366,7 @@ func TestOpenstackProjectAuthProvisioningE2E(t *testing.T) { osNetwork := os.Getenv("OS_NETWORK_NAME") if osAuthURL == "" || osUsername == "" || osPassword == "" || osDomain == "" || osRegion == "" || osProject == "" { - t.Fatal("unable to run test suite, all of OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, OS_REGION, and OS_TENANT OS_DOMAIN must be set!") + t.Fatal("Unable to run test suite, all of OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, OS_REGION, and OS_TENANT OS_DOMAIN must be set!") } params := []string{ @@ -378,7 +386,7 @@ func TestOpenstackProjectAuthProvisioningE2E(t *testing.T) { kubernetesVersion: defaultKubernetesVersion, executor: verifyCreateAndDelete, } - testScenario(t, scenario, *testRunIdentifier, params, OSManifestProjectAuth, false) + testScenario(context.Background(), t, scenario, *testRunIdentifier, params, OSManifestProjectAuth, false) } // TestDigitalOceanProvisioning - a test suite that exercises digital ocean provider @@ -391,14 +399,14 @@ func TestDigitalOceanProvisioningE2E(t *testing.T) { // test data doToken := os.Getenv("DO_E2E_TESTS_TOKEN") if len(doToken) == 0 { - t.Fatal("unable to run the test suite, DO_E2E_TESTS_TOKEN environment variable cannot be empty") + t.Fatal("Unable to run the test suite, DO_E2E_TESTS_TOKEN environment variable cannot be empty") } - selector := OsSelector("ubuntu", "centos", "rockylinux") + selector := OsSelector("ubuntu", "rockylinux") // act params := []string{fmt.Sprintf("<< DIGITALOCEAN_TOKEN >>=%s", doToken)} - runScenarios(t, selector, params, DOManifest, fmt.Sprintf("do-%s", *testRunIdentifier)) + runScenarios(context.Background(), t, selector, params, DOManifest, fmt.Sprintf("do-%s", 
*testRunIdentifier)) } // TestAWSProvisioning - a test suite that exercises AWS provider @@ -407,19 +415,16 @@ func TestAWSProvisioningE2E(t *testing.T) { t.Parallel() provisioningUtility := flatcar.Ignition - // `OPERATING_SYSTEM_MANAGER` will be false when legacy machine-controller userdata should be used for E2E tests. - if v := os.Getenv("OPERATING_SYSTEM_MANAGER"); v == "false" { - provisioningUtility = flatcar.CloudInit - } // test data awsKeyID := os.Getenv("AWS_E2E_TESTS_KEY_ID") awsSecret := os.Getenv("AWS_E2E_TESTS_SECRET") if len(awsKeyID) == 0 || len(awsSecret) == 0 { - t.Fatal("unable to run the test suite, AWS_E2E_TESTS_KEY_ID or AWS_E2E_TESTS_SECRET environment variables cannot be empty") + t.Fatal("Unable to run the test suite, AWS_E2E_TESTS_KEY_ID or AWS_E2E_TESTS_SECRET environment variables cannot be empty") } - selector := Not(OsSelector("sles")) + // In-tree cloud provider is not supported from Kubernetes v1.27. + selector := OsSelector("amzn2", "ubuntu", "rhel", "rockylinux", "flatcar") // act params := []string{fmt.Sprintf("<< AWS_ACCESS_KEY_ID >>=%s", awsKeyID), @@ -427,7 +432,7 @@ func TestAWSProvisioningE2E(t *testing.T) { fmt.Sprintf("<< PROVISIONING_UTILITY >>=%s", provisioningUtility), } - runScenarios(t, selector, params, AWSManifest, fmt.Sprintf("aws-%s", *testRunIdentifier)) + runScenarios(context.Background(), t, selector, params, AWSManifest, fmt.Sprintf("aws-%s", *testRunIdentifier)) } // TestAWSAssumeRoleProvisioning - a test suite that exercises AWS provider @@ -441,7 +446,7 @@ func TestAWSAssumeRoleProvisioningE2E(t *testing.T) { awsAssumeRoleARN := os.Getenv("AWS_ASSUME_ROLE_ARN") awsAssumeRoleExternalID := os.Getenv("AWS_ASSUME_ROLE_EXTERNAL_ID") if len(awsKeyID) == 0 || len(awsSecret) == 0 || len(awsAssumeRoleARN) == 0 || len(awsAssumeRoleExternalID) == 0 { - t.Fatal("unable to run the test suite, environment variables AWS_E2E_TESTS_KEY_ID, AWS_E2E_TESTS_SECRET, AWS_E2E_ASSUME_ROLE_ARN and AWS_E2E_ASSUME_ROLE_EXTERNAL_ID 
cannot be empty") + t.Fatal("Unable to run the test suite, environment variables AWS_E2E_TESTS_KEY_ID, AWS_E2E_TESTS_SECRET, AWS_E2E_ASSUME_ROLE_ARN and AWS_E2E_ASSUME_ROLE_EXTERNAL_ID cannot be empty") } // act @@ -457,7 +462,7 @@ func TestAWSAssumeRoleProvisioningE2E(t *testing.T) { kubernetesVersion: defaultKubernetesVersion, executor: verifyCreateAndDelete, } - testScenario(t, scenario, *testRunIdentifier, params, AWSManifest, false) + testScenario(context.Background(), t, scenario, *testRunIdentifier, params, AWSManifest, false) } // TestAWSSpotInstanceProvisioning - a test suite that exercises AWS provider @@ -469,16 +474,18 @@ func TestAWSSpotInstanceProvisioningE2E(t *testing.T) { awsKeyID := os.Getenv("AWS_E2E_TESTS_KEY_ID") awsSecret := os.Getenv("AWS_E2E_TESTS_SECRET") if len(awsKeyID) == 0 || len(awsSecret) == 0 { - t.Fatal("unable to run the test suite, AWS_E2E_TESTS_KEY_ID or AWS_E2E_TESTS_SECRET environment variables cannot be empty") + t.Fatal("Unable to run the test suite, AWS_E2E_TESTS_KEY_ID or AWS_E2E_TESTS_SECRET environment variables cannot be empty") } // Since we are only testing the spot instance functionality, testing it against a single OS is sufficient. + // In-tree cloud provider is not supported from Kubernetes v1.27. 
selector := OsSelector("ubuntu") + // act params := []string{fmt.Sprintf("<< AWS_ACCESS_KEY_ID >>=%s", awsKeyID), fmt.Sprintf("<< AWS_SECRET_ACCESS_KEY >>=%s", awsSecret), fmt.Sprintf("<< PROVISIONING_UTILITY >>=%s", flatcar.Ignition), } - runScenarios(t, selector, params, AWSSpotInstanceManifest, fmt.Sprintf("aws-%s", *testRunIdentifier)) + runScenarios(context.Background(), t, selector, params, AWSSpotInstanceManifest, fmt.Sprintf("aws-%s", *testRunIdentifier)) } // TestAWSARMProvisioningE2E - a test suite that exercises AWS provider for arm machines @@ -490,15 +497,17 @@ func TestAWSARMProvisioningE2E(t *testing.T) { awsKeyID := os.Getenv("AWS_E2E_TESTS_KEY_ID") awsSecret := os.Getenv("AWS_E2E_TESTS_SECRET") if len(awsKeyID) == 0 || len(awsSecret) == 0 { - t.Fatal("unable to run the test suite, AWS_E2E_TESTS_KEY_ID or AWS_E2E_TESTS_SECRET environment variables cannot be empty") + t.Fatal("Unable to run the test suite, AWS_E2E_TESTS_KEY_ID or AWS_E2E_TESTS_SECRET environment variables cannot be empty") } + // In-tree cloud provider is not supported from Kubernetes v1.27. 
selector := OsSelector("ubuntu") + // act params := []string{fmt.Sprintf("<< AWS_ACCESS_KEY_ID >>=%s", awsKeyID), fmt.Sprintf("<< AWS_SECRET_ACCESS_KEY >>=%s", awsSecret), fmt.Sprintf("<< PROVISIONING_UTILITY >>=%s", flatcar.Ignition), } - runScenarios(t, selector, params, AWSManifestARM, fmt.Sprintf("aws-%s", *testRunIdentifier)) + runScenarios(context.Background(), t, selector, params, AWSManifestARM, fmt.Sprintf("aws-%s", *testRunIdentifier)) } func TestAWSFlatcarCoreOSCloudInit8ProvisioningE2E(t *testing.T) { @@ -508,7 +517,7 @@ func TestAWSFlatcarCoreOSCloudInit8ProvisioningE2E(t *testing.T) { awsKeyID := os.Getenv("AWS_E2E_TESTS_KEY_ID") awsSecret := os.Getenv("AWS_E2E_TESTS_SECRET") if len(awsKeyID) == 0 || len(awsSecret) == 0 { - t.Fatal("unable to run the test suite, AWS_E2E_TESTS_KEY_ID or AWS_E2E_TESTS_SECRET environment variables cannot be empty") + t.Fatal("Unable to run the test suite, AWS_E2E_TESTS_KEY_ID or AWS_E2E_TESTS_SECRET environment variables cannot be empty") } params := []string{ @@ -519,56 +528,7 @@ func TestAWSFlatcarCoreOSCloudInit8ProvisioningE2E(t *testing.T) { // We would like to test flatcar with CoreOS-cloud-init selector := OsSelector("flatcar") - runScenarios(t, selector, params, AWSManifest, fmt.Sprintf("aws-%s", *testRunIdentifier)) -} - -func TestAWSFlatcarContainerdProvisioningE2E(t *testing.T) { - t.Parallel() - - // test data - awsKeyID := os.Getenv("AWS_E2E_TESTS_KEY_ID") - awsSecret := os.Getenv("AWS_E2E_TESTS_SECRET") - if len(awsKeyID) == 0 || len(awsSecret) == 0 { - t.Fatal("unable to run the test suite, AWS_E2E_TESTS_KEY_ID or AWS_E2E_TESTS_SECRET environment variables cannot be empty") - } - - params := []string{ - fmt.Sprintf("<< AWS_ACCESS_KEY_ID >>=%s", awsKeyID), - fmt.Sprintf("<< AWS_SECRET_ACCESS_KEY >>=%s", awsSecret), - fmt.Sprintf("<< PROVISIONING_UTILITY >>=%s", flatcar.Ignition), - } - - scenario := scenario{ - name: "flatcar with containerd in AWS", - osName: "flatcar", - containerRuntime: 
defaultContainerRuntime, - kubernetesVersion: defaultKubernetesVersion, - executor: verifyCreateAndDelete, - } - testScenario(t, scenario, *testRunIdentifier, params, AWSManifest, false) -} - -func TestAWSCentOS8ProvisioningE2E(t *testing.T) { - t.Parallel() - - // test data - awsKeyID := os.Getenv("AWS_E2E_TESTS_KEY_ID") - awsSecret := os.Getenv("AWS_E2E_TESTS_SECRET") - if len(awsKeyID) == 0 || len(awsSecret) == 0 { - t.Fatal("unable to run the test suite, AWS_E2E_TESTS_KEY_ID or AWS_E2E_TESTS_SECRET environment variables cannot be empty") - } - - amiID := "ami-032025b3afcbb6b34" // official "CentOS 8.2.2004 x86_64" - - params := []string{ - fmt.Sprintf("<< AWS_ACCESS_KEY_ID >>=%s", awsKeyID), - fmt.Sprintf("<< AWS_SECRET_ACCESS_KEY >>=%s", awsSecret), - fmt.Sprintf("<< AMI >>=%s", amiID), - } - - // We would like to test CentOS8 image only in this test as the other images are tested in TestAWSProvisioningE2E - selector := OsSelector("centos") - runScenarios(t, selector, params, AWSManifest, fmt.Sprintf("aws-%s", *testRunIdentifier)) + runScenarios(context.Background(), t, selector, params, AWSManifest, fmt.Sprintf("aws-%s", *testRunIdentifier)) } // TestAWSEbsEncryptionEnabledProvisioningE2E - a test suite that exercises AWS provider with ebs encryption enabled @@ -580,7 +540,7 @@ func TestAWSEbsEncryptionEnabledProvisioningE2E(t *testing.T) { awsKeyID := os.Getenv("AWS_E2E_TESTS_KEY_ID") awsSecret := os.Getenv("AWS_E2E_TESTS_SECRET") if len(awsKeyID) == 0 || len(awsSecret) == 0 { - t.Fatal("unable to run the test suite, AWS_E2E_TESTS_KEY_ID or AWS_E2E_TESTS_SECRET environment variables cannot be empty") + t.Fatal("Unable to run the test suite, AWS_E2E_TESTS_KEY_ID or AWS_E2E_TESTS_SECRET environment variables cannot be empty") } // act @@ -592,10 +552,10 @@ func TestAWSEbsEncryptionEnabledProvisioningE2E(t *testing.T) { name: "AWS with ebs encryption enabled", osName: "ubuntu", containerRuntime: defaultContainerRuntime, - kubernetesVersion: 
defaultKubernetesVersion, + kubernetesVersion: awsDefaultKubernetesVersion, executor: verifyCreateAndDelete, } - testScenario(t, scenario, fmt.Sprintf("aws-%s", *testRunIdentifier), params, AWSEBSEncryptedManifest, false) + testScenario(context.Background(), t, scenario, fmt.Sprintf("aws-%s", *testRunIdentifier), params, AWSEBSEncryptedManifest, false) } // TestAzureProvisioningE2E - a test suite that exercises Azure provider @@ -609,9 +569,10 @@ func TestAzureProvisioningE2E(t *testing.T) { azureClientID := os.Getenv("AZURE_E2E_TESTS_CLIENT_ID") azureClientSecret := os.Getenv("AZURE_E2E_TESTS_CLIENT_SECRET") if len(azureTenantID) == 0 || len(azureSubscriptionID) == 0 || len(azureClientID) == 0 || len(azureClientSecret) == 0 { - t.Fatal("unable to run the test suite, AZURE_TENANT_ID, AZURE_SUBSCRIPTION_ID, AZURE_CLIENT_ID and AZURE_CLIENT_SECRET environment variables cannot be empty") + t.Fatal("Unable to run the test suite, AZURE_TENANT_ID, AZURE_SUBSCRIPTION_ID, AZURE_CLIENT_ID and AZURE_CLIENT_SECRET environment variables cannot be empty") } + // In-tree cloud provider is not supported from Kubernetes v1.30. 
selector := Not(OsSelector("amzn2")) // act @@ -623,7 +584,7 @@ func TestAzureProvisioningE2E(t *testing.T) { fmt.Sprintf("<< AZURE_OS_DISK_SKU >>=%s", "Standard_LRS"), fmt.Sprintf("<< AZURE_DATA_DISK_SKU >>=%s", "Standard_LRS"), } - runScenarios(t, selector, params, AzureManifest, fmt.Sprintf("azure-%s", *testRunIdentifier)) + runScenarios(context.Background(), t, selector, params, AzureManifest, fmt.Sprintf("azure-%s", *testRunIdentifier)) } // TestAzureCustomImageReferenceProvisioningE2E - a test suite that exercises Azure provider @@ -637,9 +598,10 @@ func TestAzureCustomImageReferenceProvisioningE2E(t *testing.T) { azureClientID := os.Getenv("AZURE_E2E_TESTS_CLIENT_ID") azureClientSecret := os.Getenv("AZURE_E2E_TESTS_CLIENT_SECRET") if len(azureTenantID) == 0 || len(azureSubscriptionID) == 0 || len(azureClientID) == 0 || len(azureClientSecret) == 0 { - t.Fatal("unable to run the test suite, AZURE_TENANT_ID, AZURE_SUBSCRIPTION_ID, AZURE_CLIENT_ID and AZURE_CLIENT_SECRET environment variables cannot be empty") + t.Fatal("Unable to run the test suite, AZURE_TENANT_ID, AZURE_SUBSCRIPTION_ID, AZURE_CLIENT_ID and AZURE_CLIENT_SECRET environment variables cannot be empty") } + // In-tree cloud provider is not supported from Kubernetes v1.30. 
selector := OsSelector("ubuntu") // act params := []string{ @@ -650,7 +612,7 @@ func TestAzureCustomImageReferenceProvisioningE2E(t *testing.T) { fmt.Sprintf("<< AZURE_OS_DISK_SKU >>=%s", "Standard_LRS"), fmt.Sprintf("<< AZURE_DATA_DISK_SKU >>=%s", "Standard_LRS"), } - runScenarios(t, selector, params, AzureCustomImageReferenceManifest, fmt.Sprintf("azure-%s", *testRunIdentifier)) + runScenarios(context.Background(), t, selector, params, AzureCustomImageReferenceManifest, fmt.Sprintf("azure-%s", *testRunIdentifier)) } // TestAzureRedhatSatelliteProvisioningE2E - a test suite that exercises Azure provider @@ -665,7 +627,7 @@ func TestAzureRedhatSatelliteProvisioningE2E(t *testing.T) { azureClientID := os.Getenv("AZURE_E2E_TESTS_CLIENT_ID") azureClientSecret := os.Getenv("AZURE_E2E_TESTS_CLIENT_SECRET") if len(azureTenantID) == 0 || len(azureSubscriptionID) == 0 || len(azureClientID) == 0 || len(azureClientSecret) == 0 { - t.Fatal("unable to run the test suite, AZURE_TENANT_ID, AZURE_SUBSCRIPTION_ID, AZURE_CLIENT_ID and AZURE_CLIENT_SECRET environment variables cannot be empty") + t.Fatal("Unable to run the test suite, AZURE_TENANT_ID, AZURE_SUBSCRIPTION_ID, AZURE_CLIENT_ID and AZURE_CLIENT_SECRET environment variables cannot be empty") } // act @@ -686,7 +648,7 @@ func TestAzureRedhatSatelliteProvisioningE2E(t *testing.T) { executor: verifyCreateAndDelete, } - testScenario(t, scenario, *testRunIdentifier, params, AzureRedhatSatelliteManifest, false) + testScenario(context.Background(), t, scenario, *testRunIdentifier, params, AzureRedhatSatelliteManifest, false) } // TestGCEProvisioningE2E - a test suite that exercises Google Cloud provider @@ -698,16 +660,15 @@ func TestGCEProvisioningE2E(t *testing.T) { // Test data. 
googleServiceAccount := os.Getenv("GOOGLE_SERVICE_ACCOUNT") if len(googleServiceAccount) == 0 { - t.Fatal("unable to run the test suite, GOOGLE_SERVICE_ACCOUNT environment variable cannot be empty") + t.Fatal("Unable to run the test suite, GOOGLE_SERVICE_ACCOUNT environment variable cannot be empty") } - // Act. GCE does not support CentOS. - selector := OsSelector("ubuntu") + selector := OsSelector("ubuntu", "flatcar") params := []string{ fmt.Sprintf("<< GOOGLE_SERVICE_ACCOUNT_BASE64 >>=%s", safeBase64Encoding(googleServiceAccount)), } - runScenarios(t, selector, params, GCEManifest, fmt.Sprintf("gce-%s", *testRunIdentifier)) + runScenarios(context.Background(), t, selector, params, GCEManifest, fmt.Sprintf("gce-%s", *testRunIdentifier)) } // TestHetznerProvisioning - a test suite that exercises Hetzner provider @@ -718,14 +679,14 @@ func TestHetznerProvisioningE2E(t *testing.T) { // test data hzToken := os.Getenv("HZ_E2E_TOKEN") if len(hzToken) == 0 { - t.Fatal("unable to run the test suite, HZ_E2E_TOKEN environment variable cannot be empty") + t.Fatal("Unable to run the test suite, HZ_E2E_TOKEN environment variable cannot be empty") } - selector := OsSelector("ubuntu", "centos", "rockylinux") + selector := OsSelector("ubuntu", "rockylinux") // act params := []string{fmt.Sprintf("<< HETZNER_TOKEN >>=%s", hzToken)} - runScenarios(t, selector, params, HZManifest, fmt.Sprintf("hz-%s", *testRunIdentifier)) + runScenarios(context.Background(), t, selector, params, HZManifest, fmt.Sprintf("hz-%s", *testRunIdentifier)) } // TestEquinixMetalProvisioningE2E - a test suite that exercises Equinix Metal provider @@ -736,22 +697,22 @@ func TestEquinixMetalProvisioningE2E(t *testing.T) { // test data token := os.Getenv("METAL_AUTH_TOKEN") if len(token) == 0 { - t.Fatal("unable to run the test suite, METAL_AUTH_TOKEN environment variable cannot be empty") + t.Fatal("Unable to run the test suite, METAL_AUTH_TOKEN environment variable cannot be empty") } projectID := 
os.Getenv("METAL_PROJECT_ID") if len(projectID) == 0 { - t.Fatal("unable to run the test suite, METAL_PROJECT_ID environment variable cannot be empty") + t.Fatal("Unable to run the test suite, METAL_PROJECT_ID environment variable cannot be empty") } - selector := And(OsSelector("ubuntu", "centos", "rockylinux", "flatcar"), Not(NameSelector("migrateUID"))) + selector := And(OsSelector("ubuntu", "rockylinux", "flatcar"), Not(NameSelector("migrateUID"))) // act params := []string{ fmt.Sprintf("<< METAL_AUTH_TOKEN >>=%s", token), fmt.Sprintf("<< METAL_PROJECT_ID >>=%s", projectID), } - runScenarios(t, selector, params, EquinixMetalManifest, fmt.Sprintf("equinixmetal-%s", *testRunIdentifier)) + runScenarios(context.Background(), t, selector, params, EquinixMetalManifest, fmt.Sprintf("equinixmetal-%s", *testRunIdentifier)) } func TestAlibabaProvisioningE2E(t *testing.T) { @@ -760,12 +721,12 @@ func TestAlibabaProvisioningE2E(t *testing.T) { // test data accessKeyID := os.Getenv("ALIBABA_ACCESS_KEY_ID") if len(accessKeyID) == 0 { - t.Fatal("unable to run the test suite, ALIBABA_ACCESS_KEY_ID environment variable cannot be empty") + t.Fatal("Unable to run the test suite, ALIBABA_ACCESS_KEY_ID environment variable cannot be empty") } accessKeySecret := os.Getenv("ALIBABA_ACCESS_KEY_SECRET") if len(accessKeySecret) == 0 { - t.Fatal("unable to run the test suite, ALIBABA_ACCESS_KEY_SECRET environment variable cannot be empty") + t.Fatal("Unable to run the test suite, ALIBABA_ACCESS_KEY_SECRET environment variable cannot be empty") } selector := OsSelector("ubuntu") @@ -775,7 +736,7 @@ func TestAlibabaProvisioningE2E(t *testing.T) { fmt.Sprintf("<< ALIBABA_ACCESS_KEY_ID >>=%s", accessKeyID), fmt.Sprintf("<< ALIBABA_ACCESS_KEY_SECRET >>=%s", accessKeySecret), } - runScenarios(t, selector, params, alibabaManifest, fmt.Sprintf("alibaba-%s", *testRunIdentifier)) + runScenarios(context.Background(), t, selector, params, alibabaManifest, fmt.Sprintf("alibaba-%s", 
*testRunIdentifier)) } // TestLinodeProvisioning - a test suite that exercises Linode provider @@ -788,15 +749,14 @@ func TestLinodeProvisioningE2E(t *testing.T) { // test data linodeToken := os.Getenv("LINODE_E2E_TESTS_TOKEN") if len(linodeToken) == 0 { - t.Fatal("unable to run the test suite, LINODE_E2E_TESTS_TOKEN environment variable cannot be empty") + t.Fatal("Unable to run the test suite, LINODE_E2E_TESTS_TOKEN environment variable cannot be empty") } - // we're shimming userdata through Linode stackscripts and the stackscript hasn't been verified for use with centos selector := OsSelector("ubuntu") // act params := []string{fmt.Sprintf("<< LINODE_TOKEN >>=%s", linodeToken)} - runScenarios(t, selector, params, LinodeManifest, fmt.Sprintf("linode-%s", *testRunIdentifier)) + runScenarios(context.Background(), t, selector, params, LinodeManifest, fmt.Sprintf("linode-%s", *testRunIdentifier)) } func getVMwareCloudDirectorTestParams(t *testing.T) []string { @@ -808,7 +768,7 @@ func getVMwareCloudDirectorTestParams(t *testing.T) []string { vdc := os.Getenv("VCD_VDC") if password == "" || username == "" || organization == "" || url == "" || vdc == "" { - t.Fatal("unable to run the test suite, VCD_PASSWORD, VCD_USER, VCD_ORG, " + + t.Fatal("Unable to run the test suite, VCD_PASSWORD, VCD_USER, VCD_ORG, " + "VCD_URL, or VCD_VDC environment variables cannot be empty") } @@ -828,7 +788,7 @@ func TestVMwareCloudDirectorProvisioningE2E(t *testing.T) { selector := OsSelector("ubuntu") params := getVMwareCloudDirectorTestParams(t) - runScenarios(t, selector, params, VMwareCloudDirectorManifest, fmt.Sprintf("vcd-%s", *testRunIdentifier)) + runScenarios(context.Background(), t, selector, params, VMwareCloudDirectorManifest, fmt.Sprintf("vcd-%s", *testRunIdentifier)) } func getVSphereTestParams(t *testing.T) []string { @@ -838,7 +798,7 @@ func getVSphereTestParams(t *testing.T) []string { vsAddress := os.Getenv("VSPHERE_E2E_ADDRESS") if vsPassword == "" || vsUsername == "" || 
vsAddress == "" { - t.Fatal("unable to run the test suite, VSPHERE_E2E_PASSWORD, VSPHERE_E2E_USERNAME" + + t.Fatal("Unable to run the test suite, VSPHERE_E2E_PASSWORD, VSPHERE_E2E_USERNAME" + "or VSPHERE_E2E_ADDRESS environment variables cannot be empty") } @@ -855,10 +815,40 @@ func getVSphereTestParams(t *testing.T) []string { func TestVsphereProvisioningE2E(t *testing.T) { t.Parallel() - selector := Not(OsSelector("amzn2", "centos")) + // In-tree cloud provider is not supported from Kubernetes v1.30. + selector := OsSelector("ubuntu") params := getVSphereTestParams(t) - runScenarios(t, selector, params, VSPhereManifest, fmt.Sprintf("vs-%s", *testRunIdentifier)) + runScenarios(context.Background(), t, selector, params, VSPhereManifest, fmt.Sprintf("vs-%s", *testRunIdentifier)) +} + +// TestVsphereMultipleNICProvisioning - is the same as the TestVsphereProvisioning suit but has multiple networks attached to the VMs. +// by requesting nodes with different combination of container runtime type, container runtime version and the OS flavour. +func TestVsphereMultipleNICProvisioningE2E(t *testing.T) { + t.Parallel() + + // In-tree cloud provider is not supported from Kubernetes v1.30. + selector := OsSelector("ubuntu") + params := getVSphereTestParams(t) + + runScenarios(context.Background(), t, selector, params, VSPhereMultipleNICManifest, fmt.Sprintf("vs-%s", *testRunIdentifier)) +} + +// TestVsphereAntiAffinityProvisioningE2E - is the same as the TestVsphereProvisioning suit but has anti-affinity rules applied to the VMs. 
+func TestVsphereAntiAffinityProvisioningE2E(t *testing.T) { + t.Parallel() + + params := getVSphereTestParams(t) + + scenario := scenario{ + name: "VSphere Anti-Affinity provisioning", + osName: "ubuntu", + containerRuntime: defaultContainerRuntime, + kubernetesVersion: defaultKubernetesVersion, + executor: verifyCreateAndDelete, + } + + testScenario(context.Background(), t, scenario, *testRunIdentifier, params, VSPhereAntiAffinityManifest, false) } // TestVsphereDatastoreClusterProvisioning - is the same as the TestVsphereProvisioning suite but specifies a DatastoreCluster @@ -866,10 +856,11 @@ func TestVsphereProvisioningE2E(t *testing.T) { func TestVsphereDatastoreClusterProvisioningE2E(t *testing.T) { t.Parallel() - selector := OsSelector("ubuntu", "centos", "rhel", "flatcar") + // In-tree cloud provider is not supported from Kubernetes v1.30. + selector := OsSelector("ubuntu", "rhel", "flatcar") params := getVSphereTestParams(t) - runScenarios(t, selector, params, VSPhereDSCManifest, fmt.Sprintf("vs-dsc-%s", *testRunIdentifier)) + runScenarios(context.Background(), t, selector, params, VSPhereDSCManifest, fmt.Sprintf("vs-dsc-%s", *testRunIdentifier)) } // TestVsphereResourcePoolProvisioning - creates a machine deployment using a @@ -887,7 +878,7 @@ func TestVsphereResourcePoolProvisioningE2E(t *testing.T) { executor: verifyCreateAndDelete, } - testScenario(t, scenario, *testRunIdentifier, params, VSPhereResourcePoolManifest, false) + testScenario(context.Background(), t, scenario, *testRunIdentifier, params, VSPhereResourcePoolManifest, false) } // TestScalewayProvisioning - a test suite that exercises scaleway provider @@ -903,17 +894,17 @@ func TestScalewayProvisioningE2E(t *testing.T) { // test data scwAccessKey := os.Getenv("SCW_ACCESS_KEY") if len(scwAccessKey) == 0 { - t.Fatal("unable to run the test suite, SCW_E2E_TEST_ACCESS_KEY environment variable cannot be empty") + t.Fatal("Unable to run the test suite, SCW_E2E_TEST_ACCESS_KEY environment variable 
cannot be empty") } scwSecretKey := os.Getenv("SCW_SECRET_KEY") if len(scwSecretKey) == 0 { - t.Fatal("unable to run the test suite, SCW_E2E_TEST_SECRET_KEY environment variable cannot be empty") + t.Fatal("Unable to run the test suite, SCW_E2E_TEST_SECRET_KEY environment variable cannot be empty") } scwProjectID := os.Getenv("SCW_DEFAULT_PROJECT_ID") if len(scwProjectID) == 0 { - t.Fatal("unable to run the test suite, SCW_E2E_TEST_PROJECT_ID environment variable cannot be empty") + t.Fatal("Unable to run the test suite, SCW_E2E_TEST_PROJECT_ID environment variable cannot be empty") } selector := Not(OsSelector("rhel", "flatcar", "rockylinux")) @@ -923,7 +914,7 @@ func TestScalewayProvisioningE2E(t *testing.T) { fmt.Sprintf("<< SCW_SECRET_KEY >>=%s", scwSecretKey), fmt.Sprintf("<< SCW_DEFAULT_PROJECT_ID >>=%s", scwProjectID), } - runScenarios(t, selector, params, ScalewayManifest, fmt.Sprintf("scw-%s", *testRunIdentifier)) + runScenarios(context.Background(), t, selector, params, ScalewayManifest, fmt.Sprintf("scw-%s", *testRunIdentifier)) } func getNutanixTestParams(t *testing.T) []string { @@ -937,7 +928,7 @@ func getNutanixTestParams(t *testing.T) []string { endpoint := os.Getenv("NUTANIX_E2E_ENDPOINT") if password == "" || username == "" || endpoint == "" || cluster == "" || project == "" || subnet == "" { - t.Fatal("unable to run the test suite, NUTANIX_E2E_PASSWORD, NUTANIX_E2E_USERNAME, NUTANIX_E2E_CLUSTER_NAME, " + + t.Fatal("Unable to run the test suite, NUTANIX_E2E_PASSWORD, NUTANIX_E2E_USERNAME, NUTANIX_E2E_CLUSTER_NAME, " + "NUTANIX_E2E_ENDPOINT, NUTANIX_E2E_PROJECT_NAME or NUTANIX_E2E_SUBNET_NAME environment variables cannot be empty") } @@ -959,9 +950,46 @@ func TestNutanixProvisioningE2E(t *testing.T) { // exclude migrateUID test case because it's a no-op for Nutanix and runs from a different // location, thus possibly blocking access a HTTP proxy if it is configured. 
- selector := And(OsSelector("ubuntu", "centos"), Not(NameSelector("migrateUID"))) + selector := And(OsSelector("ubuntu"), Not(NameSelector("migrateUID"))) params := getNutanixTestParams(t) - runScenarios(t, selector, params, nutanixManifest, fmt.Sprintf("nx-%s", *testRunIdentifier)) + runScenarios(context.Background(), t, selector, params, nutanixManifest, fmt.Sprintf("nx-%s", *testRunIdentifier)) +} + +func TestOpenNebulaProvisioningE2E(t *testing.T) { + t.Parallel() + + oneEndpoint := os.Getenv("ONE_ENDPOINT") + oneUsername := os.Getenv("ONE_USERNAME") + onePassword := os.Getenv("ONE_PASSWORD") + + // required parameters + if oneEndpoint == "" || oneUsername == "" || onePassword == "" { + t.Fatal("unable to run test suite, all of ONE_ENDPOINT, ONE_USERNAME, and ONE_PASSWORD must be set!") + } + + // optional parameters + oneDatastore := os.Getenv("ONE_DATASTORE") + oneNetwork := os.Getenv("ONE_NETWORK") + + // set defaults for minione deployments + if oneDatastore == "" { + oneDatastore = "default" + } + + if oneNetwork == "" { + oneNetwork = "vnet" + } + + params := []string{ + fmt.Sprintf("<< ONE_ENDPOINT >>=%s", oneEndpoint), + fmt.Sprintf("<< ONE_USERNAME >>=%s", oneUsername), + fmt.Sprintf("<< ONE_PASSWORD >>=%s", onePassword), + fmt.Sprintf("<< ONE_DATASTORE_NAME >>=%s", oneDatastore), + fmt.Sprintf("<< ONE_NETWORK_NAME >>=%s", oneNetwork), + } + + selector := OsSelector("rockylinux", "flatcar") + runScenarios(context.Background(), t, selector, params, openNebulaManifest, fmt.Sprintf("one-%s", *testRunIdentifier)) } // TestUbuntuProvisioningWithUpgradeE2E will create an instance from an old Ubuntu 1604 @@ -978,7 +1006,7 @@ func TestUbuntuProvisioningWithUpgradeE2E(t *testing.T) { osNetwork := os.Getenv("OS_NETWORK_NAME") if osAuthURL == "" || osUsername == "" || osPassword == "" || osDomain == "" || osRegion == "" || osTenant == "" { - t.Fatal("unable to run test suite, all of OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, OS_REGION, and OS_TENANT OS_DOMAIN must 
be set!") + t.Fatal("Unable to run test suite, all of OS_AUTH_URL, OS_USERNAME, OS_PASSWORD, OS_REGION, and OS_TENANT OS_DOMAIN must be set!") } params := []string{ @@ -999,7 +1027,7 @@ func TestUbuntuProvisioningWithUpgradeE2E(t *testing.T) { executor: verifyCreateAndDelete, } - testScenario(t, scenario, *testRunIdentifier, params, OSUpgradeManifest, false) + testScenario(context.Background(), t, scenario, *testRunIdentifier, params, OSUpgradeManifest, false) } // TestDeploymentControllerUpgradesMachineE2E verifies the machineDeployment controller correctly @@ -1010,7 +1038,7 @@ func TestDeploymentControllerUpgradesMachineE2E(t *testing.T) { // test data hzToken := os.Getenv("HZ_E2E_TOKEN") if len(hzToken) == 0 { - t.Fatal("unable to run the test suite, HZ_E2E_TOKEN environment variable cannot be empty") + t.Fatal("Unable to run the test suite, HZ_E2E_TOKEN environment variable cannot be empty") } // act @@ -1023,7 +1051,7 @@ func TestDeploymentControllerUpgradesMachineE2E(t *testing.T) { kubernetesVersion: defaultKubernetesVersion, executor: verifyCreateUpdateAndDelete, } - testScenario(t, scenario, *testRunIdentifier, params, HZManifest, false) + testScenario(context.Background(), t, scenario, *testRunIdentifier, params, HZManifest, false) } func TestAnexiaProvisioningE2E(t *testing.T) { @@ -1035,7 +1063,7 @@ func TestAnexiaProvisioningE2E(t *testing.T) { locationID := os.Getenv("ANEXIA_LOCATION_ID") if token == "" || vlanID == "" || templateID == "" || locationID == "" { - t.Fatal("unable to run test suite, all of ANEXIA_TOKEN, ANEXIA_VLAN_ID, ANEXIA_TEMPLATE_ID, and ANEXIA_LOCATION_ID must be set!") + t.Fatal("Unable to run test suite, all of ANEXIA_TOKEN, ANEXIA_VLAN_ID, ANEXIA_TEMPLATE_ID, and ANEXIA_LOCATION_ID must be set!") } selector := OsSelector("flatcar") @@ -1046,7 +1074,7 @@ func TestAnexiaProvisioningE2E(t *testing.T) { fmt.Sprintf("<< ANEXIA_LOCATION_ID >>=%s", locationID), } - runScenarios(t, selector, params, anexiaManifest, 
fmt.Sprintf("anexia-%s", *testRunIdentifier)) + runScenarios(context.Background(), t, selector, params, anexiaManifest, fmt.Sprintf("anexia-%s", *testRunIdentifier)) } // TestVultrProvisioning - a test suite that exercises Vultr provider @@ -1057,12 +1085,12 @@ func TestVultrProvisioningE2E(t *testing.T) { // test data apiKey := os.Getenv("VULTR_API_KEY") if len(apiKey) == 0 { - t.Fatal("unable to run the test suite, VULTR_API_KEY environment variable cannot be empty") + t.Fatal("Unable to run the test suite, VULTR_API_KEY environment variable cannot be empty") } - selector := OsSelector("ubuntu", "centos", "rockylinux") + selector := OsSelector("ubuntu", "rockylinux") // act params := []string{fmt.Sprintf("<< VULTR_API_KEY >>=%s", apiKey)} - runScenarios(t, selector, params, vultrManifest, fmt.Sprintf("vlt-%s", *testRunIdentifier)) + runScenarios(context.Background(), t, selector, params, vultrManifest, fmt.Sprintf("vlt-%s", *testRunIdentifier)) } diff --git a/test/e2e/provisioning/deploymentscenario.go b/test/e2e/provisioning/deploymentscenario.go index 825f81f15..a2646bf3b 100644 --- a/test/e2e/provisioning/deploymentscenario.go +++ b/test/e2e/provisioning/deploymentscenario.go @@ -21,28 +21,29 @@ import ( "fmt" "time" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" - kerrors "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/klog" ) -func verifyCreateUpdateAndDelete(kubeConfig, manifestPath string, parameters []string, timeout time.Duration) error { +func verifyCreateUpdateAndDelete(ctx context.Context, kubeConfig, manifestPath string, parameters []string, timeout time.Duration) error { client, machineDeployment, err := prepareMachineDeployment(kubeConfig, manifestPath, parameters) if err != nil { return err } + // This test inherently relies 
on replicas being one so we enforce that machineDeployment.Spec.Replicas = getInt32Ptr(1) - machineDeployment, err = createAndAssure(machineDeployment, client, timeout) + machineDeployment, err = createAndAssure(ctx, machineDeployment, client, timeout) if err != nil { return fmt.Errorf("failed to verify creation of node for MachineDeployment: %w", err) } - if err := updateMachineDeployment(machineDeployment, client, func(md *clusterv1alpha1.MachineDeployment) { + if err := updateMachineDeployment(ctx, machineDeployment, client, func(md *clusterv1alpha1.MachineDeployment) { md.Spec.Template.Labels["testUpdate"] = "true" }); err != nil { return fmt.Errorf("failed to update MachineDeployment %s after modifying it: %w", machineDeployment.Name, err) @@ -50,8 +51,8 @@ func verifyCreateUpdateAndDelete(kubeConfig, manifestPath string, parameters []s klog.Infof("Waiting for second MachineSet to appear after updating MachineDeployment %s", machineDeployment.Name) var machineSets []clusterv1alpha1.MachineSet - if err := wait.Poll(5*time.Second, timeout, func() (bool, error) { - machineSets, err = getMatchingMachineSets(machineDeployment, client) + if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, timeout, false, func(ctx context.Context) (bool, error) { + machineSets, err = getMatchingMachineSets(ctx, machineDeployment, client) if err != nil { return false, err } @@ -79,8 +80,8 @@ func verifyCreateUpdateAndDelete(kubeConfig, manifestPath string, parameters []s oldMachineSet = machineSets[1] } var machines []clusterv1alpha1.Machine - if err := wait.Poll(5*time.Second, timeout, func() (bool, error) { - machines, err = getMatchingMachinesForMachineset(&newestMachineSet, client) + if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, timeout, false, func(ctx context.Context) (bool, error) { + machines, err = getMatchingMachinesForMachineset(ctx, &newestMachineSet, client) if err != nil { return false, err } @@ -94,8 +95,8 @@ func 
verifyCreateUpdateAndDelete(kubeConfig, manifestPath string, parameters []s klog.Infof("New MachineSet %s appeared with %v machines", newestMachineSet.Name, len(machines)) klog.Infof("Waiting for new MachineSet %s to get a ready node", newestMachineSet.Name) - if err := wait.Poll(5*time.Second, timeout, func() (bool, error) { - return hasMachineReadyNode(&machines[0], client) + if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, timeout, false, func(ctx context.Context) (bool, error) { + return hasMachineReadyNode(ctx, &machines[0], client) }); err != nil { return err } @@ -103,15 +104,15 @@ func verifyCreateUpdateAndDelete(kubeConfig, manifestPath string, parameters []s klog.Infof("Waiting for old MachineSet %s to be scaled down and have no associated machines", oldMachineSet.Name) - if err := wait.Poll(5*time.Second, timeout, func() (bool, error) { + if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, timeout, false, func(ctx context.Context) (bool, error) { machineSet := &clusterv1alpha1.MachineSet{} - if err := client.Get(context.Background(), types.NamespacedName{Namespace: oldMachineSet.Namespace, Name: oldMachineSet.Name}, machineSet); err != nil { + if err := client.Get(ctx, types.NamespacedName{Namespace: oldMachineSet.Namespace, Name: oldMachineSet.Name}, machineSet); err != nil { return false, err } if *machineSet.Spec.Replicas != int32(0) { return false, nil } - machines, err := getMatchingMachinesForMachineset(machineSet, client) + machines, err := getMatchingMachinesForMachineset(ctx, machineSet, client) if err != nil { return false, err } @@ -122,7 +123,7 @@ func verifyCreateUpdateAndDelete(kubeConfig, manifestPath string, parameters []s klog.Infof("Old MachineSet %s got scaled down and has no associated machines anymore", oldMachineSet.Name) klog.Infof("Setting replicas of MachineDeployment %s to 0 and waiting until it has no associated machines", machineDeployment.Name) - if err := updateMachineDeployment(machineDeployment, client, 
func(md *clusterv1alpha1.MachineDeployment) { + if err := updateMachineDeployment(ctx, machineDeployment, client, func(md *clusterv1alpha1.MachineDeployment) { md.Spec.Replicas = getInt32Ptr(0) }); err != nil { return fmt.Errorf("failed to update replicas of MachineDeployment %s: %w", machineDeployment.Name, err) @@ -130,8 +131,8 @@ func verifyCreateUpdateAndDelete(kubeConfig, manifestPath string, parameters []s klog.Infof("Successfully set replicas of MachineDeployment %s to 0", machineDeployment.Name) klog.Infof("Waiting for MachineDeployment %s to not have any associated machines", machineDeployment.Name) - if err := wait.Poll(5*time.Second, timeout, func() (bool, error) { - machines, err := getMatchingMachines(machineDeployment, client) + if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, timeout, false, func(ctx context.Context) (bool, error) { + machines, err := getMatchingMachines(ctx, machineDeployment, client) return len(machines) == 0, err }); err != nil { return err @@ -139,12 +140,12 @@ func verifyCreateUpdateAndDelete(kubeConfig, manifestPath string, parameters []s klog.Infof("Successfully waited for MachineDeployment %s to not have any associated machines", machineDeployment.Name) klog.Infof("Deleting MachineDeployment %s and waiting for it to disappear", machineDeployment.Name) - if err := client.Delete(context.Background(), machineDeployment); err != nil { + if err := client.Delete(ctx, machineDeployment); err != nil { return fmt.Errorf("failed to delete MachineDeployment %s: %w", machineDeployment.Name, err) } - if err := wait.Poll(5*time.Second, timeout, func() (bool, error) { - err := client.Get(context.Background(), types.NamespacedName{Namespace: machineDeployment.Namespace, Name: machineDeployment.Name}, &clusterv1alpha1.MachineDeployment{}) - if kerrors.IsNotFound(err) { + if err := wait.PollUntilContextTimeout(ctx, 5*time.Second, timeout, false, func(ctx context.Context) (bool, error) { + err = client.Get(ctx, 
types.NamespacedName{Namespace: machineDeployment.Namespace, Name: machineDeployment.Name}, &clusterv1alpha1.MachineDeployment{}) + if apierrors.IsNotFound(err) { return true, nil } return false, err diff --git a/test/e2e/provisioning/helper.go b/test/e2e/provisioning/helper.go index 5fed62df6..a07d3fa45 100644 --- a/test/e2e/provisioning/helper.go +++ b/test/e2e/provisioning/helper.go @@ -17,6 +17,7 @@ limitations under the License. package provisioning import ( + "context" "fmt" "os" "path/filepath" @@ -26,21 +27,20 @@ import ( "github.com/Masterminds/semver/v3" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + providerconfigtypes "k8c.io/machine-controller/sdk/providerconfig" ) var ( scenarios = buildScenarios() versions = []*semver.Version{ - semver.MustParse("v1.24.10"), - semver.MustParse("v1.25.6"), - semver.MustParse("v1.26.1"), + semver.MustParse("v1.32.9"), + semver.MustParse("v1.33.5"), + semver.MustParse("v1.34.1"), } operatingSystems = []providerconfigtypes.OperatingSystem{ providerconfigtypes.OperatingSystemUbuntu, - providerconfigtypes.OperatingSystemCentOS, providerconfigtypes.OperatingSystemAmazonLinux2, providerconfigtypes.OperatingSystemRHEL, providerconfigtypes.OperatingSystemFlatcar, @@ -49,22 +49,24 @@ var ( openStackImages = map[string]string{ string(providerconfigtypes.OperatingSystemUbuntu): "kubermatic-ubuntu", - string(providerconfigtypes.OperatingSystemCentOS): "machine-controller-e2e-centos", - string(providerconfigtypes.OperatingSystemRHEL): "machine-controller-e2e-rhel-8-5", - string(providerconfigtypes.OperatingSystemFlatcar): "machine-controller-e2e-flatcar-stable-2983", + string(providerconfigtypes.OperatingSystemRHEL): "machine-controller-e2e-rhel-9-6", + string(providerconfigtypes.OperatingSystemFlatcar): "kubermatic-e2e-flatcar", + string(providerconfigtypes.OperatingSystemRockyLinux): "machine-controller-e2e-rockylinux-9-6", + } + + openNebulaImages = map[string]string{ + 
string(providerconfigtypes.OperatingSystemFlatcar): "machine-controller-e2e-flatcar", string(providerconfigtypes.OperatingSystemRockyLinux): "machine-controller-e2e-rockylinux", } vSphereOSImageTemplates = map[string]string{ - string(providerconfigtypes.OperatingSystemCentOS): "kkp-centos-7", string(providerconfigtypes.OperatingSystemFlatcar): "kkp-flatcar-3139.2.0", - string(providerconfigtypes.OperatingSystemRHEL): "kkp-rhel-8.6", - string(providerconfigtypes.OperatingSystemRockyLinux): "kkp-rockylinux-8.5", - string(providerconfigtypes.OperatingSystemUbuntu): "kkp-ubuntu-22.04", + string(providerconfigtypes.OperatingSystemRHEL): "kkp-rhel-9.6", + string(providerconfigtypes.OperatingSystemRockyLinux): "kkp-rockylinux-9.6", + string(providerconfigtypes.OperatingSystemUbuntu): "kkp-ubuntu-24.04", } kubevirtImages = map[string]string{ - string(providerconfigtypes.OperatingSystemCentOS): "centos", string(providerconfigtypes.OperatingSystemFlatcar): "flatcar", string(providerconfigtypes.OperatingSystemRHEL): "rhel", string(providerconfigtypes.OperatingSystemRockyLinux): "rockylinux", @@ -156,30 +158,51 @@ func (n *name) Match(tc scenario) bool { return tc.name == n.name } -func runScenarios(st *testing.T, selector Selector, testParams []string, manifestPath string, cloudProvider string) { +// VersionSelector is used to match against the kubernetes version used for a test case. 
+func VersionSelector(v ...string) Selector { + return &version{v} +} + +type version struct { + versions []string +} + +var _ Selector = &version{} + +func (v *version) Match(testCase scenario) bool { + for _, version := range v.versions { + if testCase.kubernetesVersion == version { + return true + } + } + return false +} + +func runScenarios(ctx context.Context, st *testing.T, selector Selector, testParams []string, manifestPath string, cloudProvider string) { for _, testCase := range scenarios { if selector != nil && !selector.Match(testCase) { + fmt.Printf("Skipping test %s\n", testCase.name) continue } st.Run(testCase.name, func(it *testing.T) { - testScenario(it, testCase, cloudProvider, testParams, manifestPath, true) + testScenario(ctx, it, testCase, cloudProvider, testParams, manifestPath, true) }) } } // scenarioExecutor represents an executor for a given scenario // args: kubeConfig, maifestPath, scenarioParams, timeout -type scenarioExecutor func(string, string, []string, time.Duration) error +type scenarioExecutor func(context.Context, string, string, []string, time.Duration) error -func testScenario(t *testing.T, testCase scenario, cloudProvider string, testParams []string, manifestPath string, parallelize bool) { +func testScenario(ctx context.Context, t *testing.T, testCase scenario, cloudProvider string, testParams []string, manifestPath string, parallelize bool) { if parallelize { t.Parallel() } kubernetesCompliantName := fmt.Sprintf("%s-%s", testCase.name, cloudProvider) - kubernetesCompliantName = strings.Replace(kubernetesCompliantName, " ", "-", -1) - kubernetesCompliantName = strings.Replace(kubernetesCompliantName, ".", "-", -1) + kubernetesCompliantName = strings.ReplaceAll(kubernetesCompliantName, " ", "-") + kubernetesCompliantName = strings.ReplaceAll(kubernetesCompliantName, ".", "-") kubernetesCompliantName = strings.ToLower(kubernetesCompliantName) scenarioParams := append([]string(nil), testParams...) 
@@ -195,7 +218,7 @@ func testScenario(t *testing.T, testCase scenario, cloudProvider string, testPar rhsmOfflineToken := os.Getenv("REDHAT_SUBSCRIPTIONS_OFFLINE_TOKEN") if rhelSubscriptionManagerUser == "" || rhelSubscriptionManagerPassword == "" || rhsmOfflineToken == "" { - t.Fatalf("Unable to run e2e tests, RHEL_SUBSCRIPTION_MANAGER_USER, RHEL_SUBSCRIPTION_MANAGER_PASSWORD, and " + + t.Fatal("Unable to run e2e tests, RHEL_SUBSCRIPTION_MANAGER_USER, RHEL_SUBSCRIPTION_MANAGER_PASSWORD, and " + "REDHAT_SUBSCRIPTIONS_OFFLINE_TOKEN must be set when rhel is used as an os") } @@ -206,22 +229,17 @@ func testScenario(t *testing.T, testCase scenario, cloudProvider string, testPar scenarioParams = append(scenarioParams, fmt.Sprintf("<< OS_DISK_SIZE >>=%v", 0)) scenarioParams = append(scenarioParams, fmt.Sprintf("<< DATA_DISK_SIZE >>=%v", 0)) scenarioParams = append(scenarioParams, fmt.Sprintf("<< CUSTOM-IMAGE >>=%v", "rhel-8-1-custom")) - scenarioParams = append(scenarioParams, fmt.Sprintf("<< AMI >>=%s", "ami-08c04369895785ac4")) scenarioParams = append(scenarioParams, fmt.Sprintf("<< MAX_PRICE >>=%s", "0.08")) } else { scenarioParams = append(scenarioParams, fmt.Sprintf("<< OS_DISK_SIZE >>=%v", 30)) scenarioParams = append(scenarioParams, fmt.Sprintf("<< DATA_DISK_SIZE >>=%v", 30)) - scenarioParams = append(scenarioParams, fmt.Sprintf("<< AMI >>=%s", "")) scenarioParams = append(scenarioParams, fmt.Sprintf("<< DISK_SIZE >>=%v", 25)) scenarioParams = append(scenarioParams, fmt.Sprintf("<< CUSTOM-IMAGE >>=%v", "")) - scenarioParams = append(scenarioParams, fmt.Sprintf("<< MAX_PRICE >>=%s", "0.03")) + scenarioParams = append(scenarioParams, fmt.Sprintf("<< MAX_PRICE >>=%s", "0.023")) } if strings.Contains(cloudProvider, string(providerconfigtypes.CloudProviderEquinixMetal)) { switch testCase.osName { - case string(providerconfigtypes.OperatingSystemCentOS): - scenarioParams = append(scenarioParams, fmt.Sprintf("<< INSTANCE_TYPE >>=%s", "m3.small.x86")) - scenarioParams = 
append(scenarioParams, fmt.Sprintf("<< METRO_CODE >>=%s", "AM")) case string(providerconfigtypes.OperatingSystemFlatcar): scenarioParams = append(scenarioParams, fmt.Sprintf("<< INSTANCE_TYPE >>=%s", "c3.small.x86")) scenarioParams = append(scenarioParams, fmt.Sprintf("<< METRO_CODE >>=%s", "NY")) @@ -241,6 +259,9 @@ func testScenario(t *testing.T, testCase scenario, cloudProvider string, testPar // only used by OpenStack scenarios scenarioParams = append(scenarioParams, fmt.Sprintf("<< OS_IMAGE >>=%s", openStackImages[testCase.osName])) + // only used by OpenNebula scenarios + scenarioParams = append(scenarioParams, fmt.Sprintf("<< ONE_IMAGE >>=%s", openNebulaImages[testCase.osName])) + // only use by vSphere scenarios scenarioParams = append(scenarioParams, fmt.Sprintf("<< OS_Image_Template >>=%s", vSphereOSImageTemplates[testCase.osName])) @@ -251,14 +272,14 @@ func testScenario(t *testing.T, testCase scenario, cloudProvider string, testPar gopath := os.Getenv("GOPATH") projectDir := filepath.Join(gopath, "src/github.com/kubermatic/machine-controller") kubeConfig := filepath.Join(projectDir, ".kubeconfig") - - if _, err := os.Stat(kubeConfig); err == nil { - // it exists at hardcoded path - } else if os.IsNotExist(err) { - // it doesn't exist, fall back to $KUBECONFIG - kubeConfig = os.Getenv("KUBECONFIG") - } else { - t.Fatal(err) + _, err := os.Stat(kubeConfig) + if err != nil { + if os.IsNotExist(err) { + // it doesn't exist, fall back to $KUBECONFIG + kubeConfig = os.Getenv("KUBECONFIG") + } else { + t.Fatal(err) + } } // the golang test runtime waits for individual subtests to complete before reporting the status. @@ -267,7 +288,7 @@ func testScenario(t *testing.T, testCase scenario, cloudProvider string, testPar // we decided to keep this time lower that the global timeout to prevent the following: // the global timeout is set to 20 minutes and the verify tool waits up to 60 hours for a machine to show up. 
// thus one faulty scenario prevents from showing the results for the whole group, which is confusing because it looks like all tests are broken. - if err := testCase.executor(kubeConfig, manifestPath, scenarioParams, 35*time.Minute); err != nil { + if err := testCase.executor(ctx, kubeConfig, manifestPath, scenarioParams, 35*time.Minute); err != nil { t.Errorf("verify failed due to error=%v", err) } } diff --git a/test/e2e/provisioning/migrateuidscenario.go b/test/e2e/provisioning/migrateuidscenario.go index 66e3537ad..556ca65d6 100644 --- a/test/e2e/provisioning/migrateuidscenario.go +++ b/test/e2e/provisioning/migrateuidscenario.go @@ -23,12 +23,14 @@ import ( "strings" "time" - "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - "github.com/kubermatic/machine-controller/pkg/cloudprovider" - cloudprovidererrors "github.com/kubermatic/machine-controller/pkg/cloudprovider/errors" - cloudprovidertypes "github.com/kubermatic/machine-controller/pkg/cloudprovider/types" - "github.com/kubermatic/machine-controller/pkg/providerconfig" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + "go.uber.org/zap" + + "k8c.io/machine-controller/pkg/cloudprovider" + cloudprovidererrors "k8c.io/machine-controller/pkg/cloudprovider/errors" + cloudprovidertypes "k8c.io/machine-controller/pkg/cloudprovider/types" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + "k8c.io/machine-controller/sdk/providerconfig" + "k8c.io/machine-controller/sdk/providerconfig/configvar" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" @@ -38,20 +40,22 @@ import ( fakectrlruntimeclient "sigs.k8s.io/controller-runtime/pkg/client/fake" ) -func verifyMigrateUID(kubeConfig, manifestPath string, parameters []string, timeout time.Duration) error { +func verifyMigrateUID(ctx context.Context, _, manifestPath string, parameters []string, _ time.Duration) error { + log := zap.NewNop().Sugar() + // prepare 
the manifest manifest, err := readAndModifyManifest(manifestPath, parameters) if err != nil { return fmt.Errorf("failed to prepare the manifest, due to: %w", err) } - machineDeployment := &v1alpha1.MachineDeployment{} + machineDeployment := &clusterv1alpha1.MachineDeployment{} manifestReader := strings.NewReader(manifest) manifestDecoder := yaml.NewYAMLToJSONDecoder(manifestReader) if err := manifestDecoder.Decode(machineDeployment); err != nil { return fmt.Errorf("failed to decode manifest into MachineDeployment: %w", err) } - machine := &v1alpha1.Machine{ + machine := &clusterv1alpha1.Machine{ ObjectMeta: machineDeployment.Spec.Template.ObjectMeta, Spec: machineDeployment.Spec.Template.Spec, } @@ -68,31 +72,29 @@ func verifyMigrateUID(kubeConfig, manifestPath string, parameters []string, time Build() providerData := &cloudprovidertypes.ProviderData{ - Update: cloudprovidertypes.GetMachineUpdater(context.Background(), fakeClient), + Update: cloudprovidertypes.GetMachineUpdater(ctx, fakeClient), Client: fakeClient, } - providerSpec, err := providerconfigtypes.GetConfig(machine.Spec.ProviderSpec) + providerSpec, err := providerconfig.GetConfig(machine.Spec.ProviderSpec) if err != nil { return fmt.Errorf("failed to get provideSpec: %w", err) } - skg := providerconfig.NewConfigVarResolver(context.Background(), fakeClient) + skg := configvar.NewResolver(ctx, fakeClient) prov, err := cloudprovider.ForProvider(providerSpec.CloudProvider, skg) if err != nil { return fmt.Errorf("failed to get cloud provider %q: %w", providerSpec.CloudProvider, err) } - defaultedSpec, err := prov.AddDefaults(machine.Spec) + defaultedSpec, err := prov.AddDefaults(log, machine.Spec) if err != nil { return fmt.Errorf("failed to add defaults: %w", err) } machine.Spec = defaultedSpec - ctx := context.Background() - // Step 0: Create instance with old UID maxTries := 15 for i := 0; i < maxTries; i++ { - _, err := prov.Get(ctx, machine, providerData) + _, err := prov.Get(ctx, log, machine, 
providerData) if err != nil { if !errors.Is(err, cloudprovidererrors.ErrInstanceNotFound) { if i < maxTries-1 { @@ -102,7 +104,7 @@ func verifyMigrateUID(kubeConfig, manifestPath string, parameters []string, time } return fmt.Errorf("failed to get machine %s before creating it: %w", machine.Name, err) } - _, err := prov.Create(ctx, machine, providerData, "#cloud-config\n") + _, err := prov.Create(ctx, log, machine, providerData, "#cloud-config\n") if err != nil { if i < maxTries-1 { time.Sleep(10 * time.Second) @@ -117,7 +119,7 @@ func verifyMigrateUID(kubeConfig, manifestPath string, parameters []string, time // Step 1: Verify we can successfully get the instance for i := 0; i < maxTries; i++ { - if _, err := prov.Get(ctx, machine, providerData); err != nil { + if _, err := prov.Get(ctx, log, machine, providerData); err != nil { if i < maxTries-1 { klog.V(4).Infof("failed to get instance for machine %s before migrating on try %v with err=%v, will retry", machine.Name, i, err) time.Sleep(10 * time.Second) @@ -130,7 +132,7 @@ func verifyMigrateUID(kubeConfig, manifestPath string, parameters []string, time // Step 2: Migrate UID for i := 0; i < maxTries; i++ { - if err := prov.MigrateUID(ctx, machine, newUID); err != nil { + if err := prov.MigrateUID(ctx, log, machine, newUID); err != nil { if i < maxTries-1 { time.Sleep(10 * time.Second) klog.V(4).Infof("failed to migrate UID for machine %s on try %v with err=%v, will retry", machine.Name, i, err) @@ -144,7 +146,7 @@ func verifyMigrateUID(kubeConfig, manifestPath string, parameters []string, time // Step 3: Verify we can successfully get the instance with the new UID for i := 0; i < maxTries; i++ { - if _, err := prov.Get(ctx, machine, providerData); err != nil { + if _, err := prov.Get(ctx, log, machine, providerData); err != nil { if i < maxTries-1 { time.Sleep(10 * time.Second) klog.V(4).Infof("failed to get instance for machine %s after migrating on try %v with err=%v, will retry", machine.Name, i, err) @@ 
-158,7 +160,7 @@ func verifyMigrateUID(kubeConfig, manifestPath string, parameters []string, time // Step 4: Delete the instance and then verify instance is gone for i := 0; i < maxTries; i++ { // Deletion part 0: Delete and continue on err if there are tries left - done, err := prov.Cleanup(ctx, machine, providerData) + done, err := prov.Cleanup(ctx, log, machine, providerData) if err != nil { if i < maxTries-1 { klog.V(4).Infof("Failed to delete machine %s on try %v with err=%v, will retry", machine.Name, i, err) @@ -174,7 +176,7 @@ func verifyMigrateUID(kubeConfig, manifestPath string, parameters []string, time } // Deletion part 1: Get and continue if err != cloudprovidererrors.ErrInstanceNotFound if there are tries left - _, err = prov.Get(ctx, machine, providerData) + _, err = prov.Get(ctx, log, machine, providerData) if err != nil && errors.Is(err, cloudprovidererrors.ErrInstanceNotFound) { break } diff --git a/test/e2e/provisioning/testdata/machine-invalid.yaml b/test/e2e/provisioning/testdata/machine-invalid.yaml index 17a100569..10258720c 100644 --- a/test/e2e/provisioning/testdata/machine-invalid.yaml +++ b/test/e2e/provisioning/testdata/machine-invalid.yaml @@ -12,7 +12,7 @@ spec: cloudProvider: "hetzner" cloudProviderSpec: token: << HETZNER_TOKEN >> - serverType: "cx11" + serverType: "cx23" datacenter: "" location: "fsn1" operatingSystem: "<< OS_NAME >>" diff --git a/test/e2e/provisioning/testdata/machinedeployment-anexia.yaml b/test/e2e/provisioning/testdata/machinedeployment-anexia.yaml index 87e539fd8..2507cb58d 100644 --- a/test/e2e/provisioning/testdata/machinedeployment-anexia.yaml +++ b/test/e2e/provisioning/testdata/machinedeployment-anexia.yaml @@ -4,7 +4,7 @@ metadata: name: << MACHINE_NAME >> namespace: kube-system annotations: - k8c.io/operating-system-profile: osp-<< OS_NAME >> + k8c.io/operating-system-profile: osp-flatcar-cloud-init spec: replicas: 1 strategy: diff --git 
a/test/e2e/provisioning/testdata/machinedeployment-aws-arm-machines.yaml b/test/e2e/provisioning/testdata/machinedeployment-aws-arm-machines.yaml index 793231b7f..9c489ed27 100644 --- a/test/e2e/provisioning/testdata/machinedeployment-aws-arm-machines.yaml +++ b/test/e2e/provisioning/testdata/machinedeployment-aws-arm-machines.yaml @@ -29,16 +29,15 @@ spec: accessKeyId: << AWS_ACCESS_KEY_ID >> secretAccessKey: << AWS_SECRET_ACCESS_KEY >> region: "eu-central-1" - availabilityZone: "eu-central-1a" - vpcId: "vpc-819f62e9" + availabilityZone: "eu-central-1b" + vpcId: "vpc-079f7648481a11e77" instanceType: "a1.medium" instanceProfile: "kubernetes-v1" diskSize: 50 diskType: "gp2" ebsVolumeEncrypted: false - ami: "<< AMI >>" securityGroupIDs: - - "sg-a2c195ca" + - "sg-0f1f62df28fb378b7" tags: # you have to set this flag to real clusterID when running against our dev or prod # otherwise you might have issues with your nodes not joining the cluster diff --git a/test/e2e/provisioning/testdata/machinedeployment-aws-ebs-encryption-enabled.yaml b/test/e2e/provisioning/testdata/machinedeployment-aws-ebs-encryption-enabled.yaml index 0130744c6..9b5653b92 100644 --- a/test/e2e/provisioning/testdata/machinedeployment-aws-ebs-encryption-enabled.yaml +++ b/test/e2e/provisioning/testdata/machinedeployment-aws-ebs-encryption-enabled.yaml @@ -29,15 +29,15 @@ spec: accessKeyId: << AWS_ACCESS_KEY_ID >> secretAccessKey: << AWS_SECRET_ACCESS_KEY >> region: "eu-central-1" - availabilityZone: "eu-central-1a" - vpcId: "vpc-819f62e9" + availabilityZone: "eu-central-1b" + vpcId: "vpc-079f7648481a11e77" instanceType: "t2.medium" instanceProfile: "kubernetes-v1" diskSize: 50 diskType: "gp2" ebsVolumeEncrypted: true securityGroupIDs: - - "sg-a2c195ca" + - "sg-0f1f62df28fb378b7" tags: # you have to set this flag to real clusterID when running against our dev or prod # otherwise you might have issues with your nodes not joining the cluster diff --git 
a/test/e2e/provisioning/testdata/machinedeployment-aws-spot-instances.yaml b/test/e2e/provisioning/testdata/machinedeployment-aws-spot-instances.yaml index e7febdc8a..bce0b19c5 100644 --- a/test/e2e/provisioning/testdata/machinedeployment-aws-spot-instances.yaml +++ b/test/e2e/provisioning/testdata/machinedeployment-aws-spot-instances.yaml @@ -29,20 +29,19 @@ spec: accessKeyId: << AWS_ACCESS_KEY_ID >> secretAccessKey: << AWS_SECRET_ACCESS_KEY >> region: "eu-central-1" - availabilityZone: "eu-central-1a" - vpcId: "vpc-819f62e9" + availabilityZone: "eu-central-1b" + vpcId: "vpc-079f7648481a11e77" instanceType: "t2.medium" instanceProfile: "kubernetes-v1" diskSize: 50 diskType: "gp2" ebsVolumeEncrypted: false - ami: "<< AMI >>" isSpotInstance: true spotInstanceConfig: maxPrice: "<< MAX_PRICE >>" persistentRequest: false securityGroupIDs: - - "sg-a2c195ca" + - "sg-0f1f62df28fb378b7" tags: # you have to set this flag to real clusterID when running against our dev or prod # otherwise you might have issues with your nodes not joining the cluster diff --git a/test/e2e/provisioning/testdata/machinedeployment-aws.yaml b/test/e2e/provisioning/testdata/machinedeployment-aws.yaml index 915f71254..6f7a7c2df 100644 --- a/test/e2e/provisioning/testdata/machinedeployment-aws.yaml +++ b/test/e2e/provisioning/testdata/machinedeployment-aws.yaml @@ -31,16 +31,15 @@ spec: assumeRoleARN: "<< AWS_ASSUME_ROLE_ARN >>" assumeRoleExternalID: "<< AWS_ASSUME_ROLE_EXTERNAL_ID >>" region: "eu-central-1" - availabilityZone: "eu-central-1a" - vpcId: "vpc-819f62e9" + availabilityZone: "eu-central-1b" + vpcId: "vpc-079f7648481a11e77" instanceType: "t2.medium" instanceProfile: "kubernetes-v1" diskSize: 50 diskType: "gp2" ebsVolumeEncrypted: false - ami: "<< AMI >>" securityGroupIDs: - - "sg-a2c195ca" + - "sg-0f1f62df28fb378b7" tags: # you have to set this flag to real clusterID when running against our dev or prod # otherwise you might have issues with your nodes not joining the cluster diff --git 
a/test/e2e/provisioning/testdata/machinedeployment-azure-redhat-satellite.yaml b/test/e2e/provisioning/testdata/machinedeployment-azure-redhat-satellite.yaml index 0af6c42a0..03c47442a 100644 --- a/test/e2e/provisioning/testdata/machinedeployment-azure-redhat-satellite.yaml +++ b/test/e2e/provisioning/testdata/machinedeployment-azure-redhat-satellite.yaml @@ -55,4 +55,4 @@ spec: rhelOrganizationName: "" rhelActivationKey: "" versions: - kubelet: 1.24.9 + kubelet: 1.29.9 diff --git a/test/e2e/provisioning/testdata/machinedeployment-azure.yaml b/test/e2e/provisioning/testdata/machinedeployment-azure.yaml index 3b6ed09d4..a25a725a3 100644 --- a/test/e2e/provisioning/testdata/machinedeployment-azure.yaml +++ b/test/e2e/provisioning/testdata/machinedeployment-azure.yaml @@ -33,7 +33,7 @@ spec: location: "westeurope" resourceGroup: "machine-controller-e2e" vnetResourceGroup: "" - vmSize: "Standard_F2" + vmSize: "Standard_F2s_v2" # optional disk size values in GB. If not set, the defaults for the vmSize will be used. 
osDiskSize: << OS_DISK_SIZE >> osDiskSKU: << AZURE_OS_DISK_SKU >> diff --git a/test/e2e/provisioning/testdata/machinedeployment-digitalocean.yaml b/test/e2e/provisioning/testdata/machinedeployment-digitalocean.yaml index 19479c97d..6710de4c1 100644 --- a/test/e2e/provisioning/testdata/machinedeployment-digitalocean.yaml +++ b/test/e2e/provisioning/testdata/machinedeployment-digitalocean.yaml @@ -35,7 +35,6 @@ spec: monitoring: false tags: - "machine-controller" - # Can be 'ubuntu' or 'centos' operatingSystem: "<< OS_NAME >>" operatingSystemSpec: distUpgradeOnBoot: false diff --git a/test/e2e/provisioning/testdata/machinedeployment-hetzner.yaml b/test/e2e/provisioning/testdata/machinedeployment-hetzner.yaml index 66a5cc2ee..28fea6820 100644 --- a/test/e2e/provisioning/testdata/machinedeployment-hetzner.yaml +++ b/test/e2e/provisioning/testdata/machinedeployment-hetzner.yaml @@ -27,7 +27,7 @@ spec: cloudProvider: "hetzner" cloudProviderSpec: token: << HETZNER_TOKEN >> - serverType: "cx11" + serverType: "cx23" datacenter: "" location: "nbg1" networks: diff --git a/test/e2e/provisioning/testdata/machinedeployment-kubevirt.yaml b/test/e2e/provisioning/testdata/machinedeployment-kubevirt.yaml index 90a46bfc1..e903f266f 100644 --- a/test/e2e/provisioning/testdata/machinedeployment-kubevirt.yaml +++ b/test/e2e/provisioning/testdata/machinedeployment-kubevirt.yaml @@ -38,7 +38,7 @@ spec: primaryDisk: osImage: http://image-repo.kube-system.svc/images/<< KUBEVIRT_OS_IMAGE >>.img size: "25Gi" - storageClassName: px-csi-db + storageClassName: local-path dnsPolicy: "None" dnsConfig: nameservers: diff --git a/test/e2e/provisioning/testdata/machinedeployment-opennebula.yaml b/test/e2e/provisioning/testdata/machinedeployment-opennebula.yaml new file mode 100644 index 000000000..ba1a73d84 --- /dev/null +++ b/test/e2e/provisioning/testdata/machinedeployment-opennebula.yaml @@ -0,0 +1,55 @@ +apiVersion: "cluster.k8s.io/v1alpha1" +kind: MachineDeployment +metadata: + name: << 
MACHINE_NAME >> + namespace: kube-system +spec: + replicas: 1 + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + selector: + matchLabels: + name: << MACHINE_NAME >> + template: + metadata: + labels: + name: << MACHINE_NAME >> + spec: + providerSpec: + value: + sshPublicKeys: + - "<< YOUR_PUBLIC_KEY >>" + cloudProvider: "opennebula" + cloudProviderSpec: + endpoint: "<< ONE_ENDPOINT >>" + username: "<< ONE_USERNAME >>" + password: "<< ONE_PASSWORD >>" + + cpu: 1 + vcpu: 2 + memory: 1024 + + image: "<< ONE_IMAGE >>" + datastore: "<< ONE_DATASTORE_NAME >>" + diskSize: 51200 # MB + + network: "<< ONE_NETWORK_NAME >>" + + enableVNC: true + operatingSystem: "<< OS_NAME >>" + operatingSystemSpec: + distUpgradeOnBoot: false + disableAutoUpdate: true + # 'rhelSubscriptionManagerUser' is only used for rhel os and can be set via env var `RHEL_SUBSCRIPTION_MANAGER_USER` + rhelSubscriptionManagerUser: "<< RHEL_SUBSCRIPTION_MANAGER_USER >>" + # 'rhelSubscriptionManagerPassword' is only used for rhel os and can be set via env var `RHEL_SUBSCRIPTION_MANAGER_PASSWORD` + rhelSubscriptionManagerPassword: "<< RHEL_SUBSCRIPTION_MANAGER_PASSWORD >>" + rhsmOfflineToken: "<< REDHAT_SUBSCRIPTIONS_OFFLINE_TOKEN >>" + + # use cloud-init for flatcar as ignition doesn't know anything about OpenNebula yet + provisioningUtility: "cloud-init" + versions: + kubelet: "<< KUBERNETES_VERSION >>" diff --git a/test/e2e/provisioning/testdata/machinedeployment-openstack-multiple-networks.yaml b/test/e2e/provisioning/testdata/machinedeployment-openstack-multiple-networks.yaml new file mode 100644 index 000000000..32f2a9e3b --- /dev/null +++ b/test/e2e/provisioning/testdata/machinedeployment-openstack-multiple-networks.yaml @@ -0,0 +1,53 @@ +apiVersion: "cluster.k8s.io/v1alpha1" +kind: MachineDeployment +metadata: + name: << MACHINE_NAME >> + namespace: kube-system + annotations: + k8c.io/operating-system-profile: osp-<< OS_NAME >> +spec: + replicas: 1 + strategy: + type: 
RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + selector: + matchLabels: + name: << MACHINE_NAME >> + template: + metadata: + labels: + name: << MACHINE_NAME >> + spec: + providerSpec: + value: + sshPublicKeys: + - "<< YOUR_PUBLIC_KEY >>" + cloudProvider: "openstack" + cloudProviderSpec: + identityEndpoint: "<< IDENTITY_ENDPOINT >>" + username: "<< USERNAME >>" + password: "<< PASSWORD >>" + tenantName: "<< TENANT_NAME >>" + image: "<< OS_IMAGE >>" + flavor: "m1.tiny" + floatingIpPool: "" + domainName: "<< DOMAIN_NAME >>" + region: "<< REGION >>" + networks: + - "<< NETWORK_NAME >>" + - "test-network-2" + instanceReadyCheckPeriod: 5s + instanceReadyCheckTimeout: 2m + operatingSystem: "<< OS_NAME >>" + operatingSystemSpec: + distUpgradeOnBoot: false + disableAutoUpdate: true + # 'rhelSubscriptionManagerUser' is only used for rhel os and can be set via env var `RHEL_SUBSCRIPTION_MANAGER_USER` + rhelSubscriptionManagerUser: "<< RHEL_SUBSCRIPTION_MANAGER_USER >>" + # 'rhelSubscriptionManagerPassword' is only used for rhel os and can be set via env var `RHEL_SUBSCRIPTION_MANAGER_PASSWORD` + rhelSubscriptionManagerPassword: "<< RHEL_SUBSCRIPTION_MANAGER_PASSWORD >>" + rhsmOfflineToken: "<< REDHAT_SUBSCRIPTIONS_OFFLINE_TOKEN >>" + versions: + kubelet: "<< KUBERNETES_VERSION >>" diff --git a/test/e2e/provisioning/testdata/machinedeployment-vmware-cloud-director.yaml b/test/e2e/provisioning/testdata/machinedeployment-vmware-cloud-director.yaml index c696987e0..2cce44033 100644 --- a/test/e2e/provisioning/testdata/machinedeployment-vmware-cloud-director.yaml +++ b/test/e2e/provisioning/testdata/machinedeployment-vmware-cloud-director.yaml @@ -33,10 +33,11 @@ spec: organization: "<< VCD_ORG >>" vdc: "<< VCD_VDC >>" allowInsecure: false - vapp: "machine-controller-e2e" + vapp: "kubermatic-e2e" catalog: "kubermatic" template: "machine-controller-<< OS_NAME >>" - network: "machine-controller-e2e" + networks: + - "kubermatic-e2e-routed-network" 
ipAllocationMode: "DHCP" cpus: 2 cpuCores: 1 diff --git a/test/e2e/provisioning/testdata/machinedeployment-vsphere-anti-affinity.yaml b/test/e2e/provisioning/testdata/machinedeployment-vsphere-anti-affinity.yaml new file mode 100644 index 000000000..8f74c8465 --- /dev/null +++ b/test/e2e/provisioning/testdata/machinedeployment-vsphere-anti-affinity.yaml @@ -0,0 +1,54 @@ +apiVersion: "cluster.k8s.io/v1alpha1" +kind: MachineDeployment +metadata: + name: << MACHINE_NAME >> + namespace: kube-system + annotations: + k8c.io/operating-system-profile: osp-<< OS_NAME >> +spec: + replicas: 3 + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + selector: + matchLabels: + name: << MACHINE_NAME >> + template: + metadata: + labels: + name: << MACHINE_NAME >> + spec: + providerSpec: + value: + sshPublicKeys: + - "<< YOUR_PUBLIC_KEY >>" + cloudProvider: "vsphere" + cloudProviderSpec: + templateVMName: '<< OS_Image_Template >>' + username: '<< VSPHERE_USERNAME >>' + vsphereURL: '<< VSPHERE_ADDRESS >>' + datacenter: 'Hamburg' + folder: '/Hamburg/vm/Kubermatic-ci' + password: << VSPHERE_PASSWORD >> + # example: 'https://your-vcenter:8443'. 
'/sdk' gets appended automatically + cluster: Kubermatic + vmAntiAffinity: true + datastore: vsan + cpus: 2 + MemoryMB: 4096 + diskSizeGB: << DISK_SIZE >> + allowInsecure: true + operatingSystem: "<< OS_NAME >>" + operatingSystemSpec: + distUpgradeOnBoot: false + disableAutoUpdate: true + attachSubscription: false + # 'rhelSubscriptionManagerUser' is only used for rhel os and can be set via env var `RHEL_SUBSCRIPTION_MANAGER_USER` + rhelSubscriptionManagerUser: "<< RHEL_SUBSCRIPTION_MANAGER_USER >>" + # 'rhelSubscriptionManagerPassword' is only used for rhel os and can be set via env var `RHEL_SUBSCRIPTION_MANAGER_PASSWORD` + rhelSubscriptionManagerPassword: "<< RHEL_SUBSCRIPTION_MANAGER_PASSWORD >>" + rhsmOfflineToken: "<< REDHAT_SUBSCRIPTIONS_OFFLINE_TOKEN >>" + versions: + kubelet: "<< KUBERNETES_VERSION >>" diff --git a/test/e2e/provisioning/testdata/machinedeployment-vsphere-datastore-cluster.yaml b/test/e2e/provisioning/testdata/machinedeployment-vsphere-datastore-cluster.yaml index b87ea6000..b85102700 100644 --- a/test/e2e/provisioning/testdata/machinedeployment-vsphere-datastore-cluster.yaml +++ b/test/e2e/provisioning/testdata/machinedeployment-vsphere-datastore-cluster.yaml @@ -33,6 +33,8 @@ spec: folder: '/Hamburg/vm/Kubermatic-ci' password: << VSPHERE_PASSWORD >> # example: 'https://your-vcenter:8443'. 
'/sdk' gets appended automatically + cluster: Kubermatic + vmAntiAffinity: true datastoreCluster: 'dsc-1' cpus: 2 MemoryMB: 2048 diff --git a/test/e2e/provisioning/testdata/machinedeployment-vsphere-multiple-nic.yaml b/test/e2e/provisioning/testdata/machinedeployment-vsphere-multiple-nic.yaml new file mode 100644 index 000000000..4be08e39c --- /dev/null +++ b/test/e2e/provisioning/testdata/machinedeployment-vsphere-multiple-nic.yaml @@ -0,0 +1,57 @@ +apiVersion: "cluster.k8s.io/v1alpha1" +kind: MachineDeployment +metadata: + name: << MACHINE_NAME >> + namespace: kube-system + annotations: + k8c.io/operating-system-profile: osp-<< OS_NAME >> +spec: + replicas: 1 + strategy: + type: RollingUpdate + rollingUpdate: + maxSurge: 1 + maxUnavailable: 0 + selector: + matchLabels: + name: << MACHINE_NAME >> + template: + metadata: + labels: + name: << MACHINE_NAME >> + spec: + providerSpec: + value: + sshPublicKeys: + - "<< YOUR_PUBLIC_KEY >>" + cloudProvider: "vsphere" + cloudProviderSpec: + templateVMName: "<< OS_Image_Template >>" + username: "<< VSPHERE_USERNAME >>" + vsphereURL: "<< VSPHERE_ADDRESS >>" + datacenter: "Hamburg" + folder: "/Hamburg/vm/Kubermatic-ci" + password: << VSPHERE_PASSWORD >> + # example: 'https://your-vcenter:8443'. 
'/sdk' gets appended automatically + cluster: Kubermatic + vmAntiAffinity: true + networks: + - /Hamburg/network/Default Network + - /Hamburg/network/Management + datastore: vsan + cpus: 2 + MemoryMB: 4096 + diskSizeGB: << DISK_SIZE >> + allowInsecure: true + operatingSystem: "<< OS_NAME >>" + operatingSystemSpec: + distUpgradeOnBoot: false + disableAutoUpdate: true + attachSubscription: false + # 'rhelSubscriptionManagerUser' is only used for rhel os and can be set via env var `RHEL_SUBSCRIPTION_MANAGER_USER` + rhelSubscriptionManagerUser: "<< RHEL_SUBSCRIPTION_MANAGER_USER >>" + # 'rhelSubscriptionManagerPassword' is only used for rhel os and can be set via env var `RHEL_SUBSCRIPTION_MANAGER_PASSWORD` + rhelSubscriptionManagerPassword: "<< RHEL_SUBSCRIPTION_MANAGER_PASSWORD >>" + rhsmOfflineToken: "<< REDHAT_SUBSCRIPTIONS_OFFLINE_TOKEN >>" + versions: + kubelet: "<< KUBERNETES_VERSION >>" diff --git a/test/e2e/provisioning/testdata/machinedeployment-vsphere-resource-pool.yaml b/test/e2e/provisioning/testdata/machinedeployment-vsphere-resource-pool.yaml index ca48b60fb..54a7a345c 100644 --- a/test/e2e/provisioning/testdata/machinedeployment-vsphere-resource-pool.yaml +++ b/test/e2e/provisioning/testdata/machinedeployment-vsphere-resource-pool.yaml @@ -34,6 +34,8 @@ spec: password: << VSPHERE_PASSWORD >> datastore: 'vsan' resourcePool: 'e2e-resource-pool' + cluster: Kubermatic + vmAntiAffinity: true cpus: 2 MemoryMB: 2048 diskSizeGB: << DISK_SIZE >> diff --git a/test/e2e/provisioning/testdata/machinedeployment-vsphere-static-ip.yaml b/test/e2e/provisioning/testdata/machinedeployment-vsphere-static-ip.yaml index 2114d717a..ad4dcfda1 100644 --- a/test/e2e/provisioning/testdata/machinedeployment-vsphere-static-ip.yaml +++ b/test/e2e/provisioning/testdata/machinedeployment-vsphere-static-ip.yaml @@ -33,6 +33,8 @@ spec: folder: '/Hamburg/vm/Kubermatic-ci' password: << VSPHERE_PASSWORD >> # example: 'https://your-vcenter:8443'. 
'/sdk' gets appended automatically + cluster: Kubermatic + vmAntiAffinity: true datastore: vsan cpus: 2 MemoryMB: 2048 diff --git a/test/e2e/provisioning/testdata/machinedeployment-vsphere.yaml b/test/e2e/provisioning/testdata/machinedeployment-vsphere.yaml index f81548e04..377d9e8f5 100644 --- a/test/e2e/provisioning/testdata/machinedeployment-vsphere.yaml +++ b/test/e2e/provisioning/testdata/machinedeployment-vsphere.yaml @@ -30,10 +30,12 @@ spec: username: '<< VSPHERE_USERNAME >>' vsphereURL: '<< VSPHERE_ADDRESS >>' datacenter: 'Hamburg' - folder: '/Hamburg/vm/Kubermatic-ci' + folder: '/Hamburg/vm/Kubermatic-dev' password: << VSPHERE_PASSWORD >> # example: 'https://your-vcenter:8443'. '/sdk' gets appended automatically - datastore: vsan + cluster: 'vSAN Cluster' + vmAntiAffinity: true + datastore: Datastore0-truenas cpus: 2 MemoryMB: 4096 diskSizeGB: << DISK_SIZE >> diff --git a/test/e2e/provisioning/verify.go b/test/e2e/provisioning/verify.go index 2dc7233db..92e8ceed9 100644 --- a/test/e2e/provisioning/verify.go +++ b/test/e2e/provisioning/verify.go @@ -23,13 +23,13 @@ import ( "strings" "time" - clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" - machinecontroller "github.com/kubermatic/machine-controller/pkg/controller/machine" - evictiontypes "github.com/kubermatic/machine-controller/pkg/node/eviction/types" - providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + machinecontroller "k8c.io/machine-controller/pkg/controller/machine" + clusterv1alpha1 "k8c.io/machine-controller/sdk/apis/cluster/v1alpha1" + nodetypes "k8c.io/machine-controller/sdk/node" + providerconfigtypes "k8c.io/machine-controller/sdk/providerconfig" corev1 "k8s.io/api/core/v1" - kerrors "k8s.io/apimachinery/pkg/api/errors" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/wait" @@ -44,30 +44,30 @@ const ( 
machineReadyCheckPeriod = 15 * time.Second ) -func verifyCreateMachineFails(kubeConfig, manifestPath string, parameters []string, _ time.Duration) error { +func verifyCreateMachineFails(ctx context.Context, kubeConfig, manifestPath string, parameters []string, _ time.Duration) error { client, machine, err := prepareMachine(kubeConfig, manifestPath, parameters) if err != nil { return err } - if err := client.Create(context.Background(), machine); err != nil { + if err := client.Create(ctx, machine); err != nil { return nil } return fmt.Errorf("expected create of Machine %s to fail but succeeded", machine.Name) } -func verifyCreateAndDelete(kubeConfig, manifestPath string, parameters []string, timeout time.Duration) error { +func verifyCreateAndDelete(ctx context.Context, kubeConfig, manifestPath string, parameters []string, timeout time.Duration) error { client, machineDeployment, err := prepareMachineDeployment(kubeConfig, manifestPath, parameters) if err != nil { return err } - machineDeployment, err = createAndAssure(machineDeployment, client, timeout) + machineDeployment, err = createAndAssure(ctx, machineDeployment, client, timeout) if err != nil { return fmt.Errorf("failed to verify creation of node for MachineDeployment: %w", err) } - if err := deleteAndAssure(machineDeployment, client, timeout); err != nil { - return fmt.Errorf("Failed to verify if a machine/node has been created/deleted, due to: \n%w", err) + if err := deleteAndAssure(ctx, machineDeployment, client, timeout); err != nil { + return fmt.Errorf("failed to verify if a machine/node has been created/deleted, due to: \n%w", err) } klog.Infof("Successfully finished test for MachineDeployment %s", machineDeployment.Name) @@ -89,7 +89,7 @@ func prepareMachineDeployment(kubeConfig, manifestPath string, parameters []stri // Enforce the kube-system namespace, otherwise cleanup won't work newMachineDeployment.Namespace = metav1.NamespaceSystem // Dont evict during testing - 
newMachineDeployment.Spec.Template.Spec.Annotations = map[string]string{evictiontypes.SkipEvictionAnnotationKey: "true"} + newMachineDeployment.Spec.Template.Spec.Annotations = map[string]string{nodetypes.SkipEvictionAnnotationKey: "true"} return client, newMachineDeployment, nil } @@ -109,7 +109,7 @@ func prepareMachine(kubeConfig, manifestPath string, parameters []string) (ctrlr // Enforce the kube-system namespace, otherwise cleanup won't work newMachine.Namespace = metav1.NamespaceSystem // Dont evict during testing - newMachine.Spec.Annotations = map[string]string{evictiontypes.SkipEvictionAnnotationKey: "true"} + newMachine.Spec.Annotations = map[string]string{nodetypes.SkipEvictionAnnotationKey: "true"} return client, newMachine, nil } @@ -122,7 +122,7 @@ func prepare(kubeConfig, manifestPath string, parameters []string) (ctrlruntimec // init kube related stuff cfg, err := clientcmd.BuildConfigFromFlags("", kubeConfig) if err != nil { - return nil, "", fmt.Errorf("Error building kubeconfig: %w", err) + return nil, "", fmt.Errorf("error building kubeconfig: %w", err) } client, err := ctrlruntimeclient.New(cfg, ctrlruntimeclient.Options{}) if err != nil { @@ -138,11 +138,11 @@ func prepare(kubeConfig, manifestPath string, parameters []string) (ctrlruntimec return client, manifest, nil } -func createAndAssure(machineDeployment *clusterv1alpha1.MachineDeployment, client ctrlruntimeclient.Client, timeout time.Duration) (*clusterv1alpha1.MachineDeployment, error) { +func createAndAssure(ctx context.Context, machineDeployment *clusterv1alpha1.MachineDeployment, client ctrlruntimeclient.Client, timeout time.Duration) (*clusterv1alpha1.MachineDeployment, error) { // we expect that no node for machine exists in the cluster - err := assureNodeForMachineDeployment(machineDeployment, client, false) + err := assureNodeForMachineDeployment(ctx, machineDeployment, client, false) if err != nil { - return nil, fmt.Errorf("unable to perform the verification, incorrect cluster 
state detected %w", err) + return nil, fmt.Errorf("failed to perform the verification, incorrect cluster state detected %w", err) } klog.Infof("Creating a new %q MachineDeployment", machineDeployment.Name) @@ -151,8 +151,8 @@ func createAndAssure(machineDeployment *clusterv1alpha1.MachineDeployment, clien // needs longer to validate a MachineDeployment than the kube-apiserver is willing to wait. // In real world scenarios this is not that critical, but for tests we need to pay closer // attention and retry the creation a few times. - err = wait.PollImmediate(3*time.Second, 180*time.Second, func() (bool, error) { - err := client.Create(context.Background(), machineDeployment) + err = wait.PollUntilContextTimeout(ctx, 3*time.Second, 180*time.Second, false, func(ctx context.Context) (bool, error) { + err := client.Create(ctx, machineDeployment) if err != nil { klog.Warningf("Creation of %q failed, retrying: %v", machineDeployment.Name, err) return false, nil @@ -167,26 +167,26 @@ func createAndAssure(machineDeployment *clusterv1alpha1.MachineDeployment, clien klog.Infof("MachineDeployment %q created", machineDeployment.Name) var pollErr error - err = wait.Poll(machineReadyCheckPeriod, timeout, func() (bool, error) { - pollErr = assureNodeForMachineDeployment(machineDeployment, client, true) + err = wait.PollUntilContextTimeout(ctx, machineReadyCheckPeriod, timeout, false, func(ctx context.Context) (bool, error) { + pollErr = assureNodeForMachineDeployment(ctx, machineDeployment, client, true) if pollErr == nil { return true, nil } return false, nil }) if err != nil { - return nil, fmt.Errorf("failed waiting for MachineDeployment %s to get a node: %w (%v)", machineDeployment.Name, err, pollErr) + return nil, fmt.Errorf("failed waiting for MachineDeployment %s to get a node: %w (%w)", machineDeployment.Name, err, pollErr) } klog.Infof("Found a node for MachineDeployment %s", machineDeployment.Name) klog.Infof("Waiting for node of MachineDeployment %s to become ready", 
machineDeployment.Name) - err = wait.Poll(machineReadyCheckPeriod, timeout, func() (bool, error) { - machines, pollErr := getMatchingMachines(machineDeployment, client) + err = wait.PollUntilContextTimeout(ctx, machineReadyCheckPeriod, timeout, false, func(ctx context.Context) (bool, error) { + machines, pollErr := getMatchingMachines(ctx, machineDeployment, client) if pollErr != nil || len(machines) < 1 { return false, nil } for _, machine := range machines { - hasReadyNode, pollErr := hasMachineReadyNode(&machine, client) + hasReadyNode, pollErr := hasMachineReadyNode(ctx, &machine, client) if err != nil { return false, pollErr } @@ -202,9 +202,9 @@ func createAndAssure(machineDeployment *clusterv1alpha1.MachineDeployment, clien return machineDeployment, nil } -func hasMachineReadyNode(machine *clusterv1alpha1.Machine, client ctrlruntimeclient.Client) (bool, error) { +func hasMachineReadyNode(ctx context.Context, machine *clusterv1alpha1.Machine, client ctrlruntimeclient.Client) (bool, error) { nodes := &corev1.NodeList{} - if err := client.List(context.Background(), nodes); err != nil { + if err := client.List(ctx, nodes); err != nil { return false, fmt.Errorf("failed to list nodes: %w", err) } for _, node := range nodes.Items { @@ -228,21 +228,21 @@ func hasMachineReadyNode(machine *clusterv1alpha1.Machine, client ctrlruntimecli return false, nil } -func deleteAndAssure(machineDeployment *clusterv1alpha1.MachineDeployment, client ctrlruntimeclient.Client, timeout time.Duration) error { +func deleteAndAssure(ctx context.Context, machineDeployment *clusterv1alpha1.MachineDeployment, client ctrlruntimeclient.Client, timeout time.Duration) error { klog.Infof("Starting to clean up MachineDeployment %s", machineDeployment.Name) // We first scale down to 0, because once the machineSets are deleted we can not // match machines anymore and we do want to verify not only the node is gone but also // the instance at the cloud provider - if err := 
updateMachineDeployment(machineDeployment, client, func(md *clusterv1alpha1.MachineDeployment) { + if err := updateMachineDeployment(ctx, machineDeployment, client, func(md *clusterv1alpha1.MachineDeployment) { md.Spec.Replicas = getInt32Ptr(0) }); err != nil { return fmt.Errorf("failed to update replicas of MachineDeployment %s: %w", machineDeployment.Name, err) } // Ensure machines are gone - if err := wait.Poll(machineReadyCheckPeriod, timeout, func() (bool, error) { - ownedMachines, err := getMatchingMachines(machineDeployment, client) + if err := wait.PollUntilContextTimeout(ctx, machineReadyCheckPeriod, timeout, false, func(ctx context.Context) (bool, error) { + ownedMachines, err := getMatchingMachines(ctx, machineDeployment, client) if err != nil { return false, err } @@ -255,12 +255,12 @@ func deleteAndAssure(machineDeployment *clusterv1alpha1.MachineDeployment, clien } klog.V(2).Infof("Deleting MachineDeployment %s", machineDeployment.Name) - if err := client.Delete(context.Background(), machineDeployment); err != nil { - return fmt.Errorf("unable to remove MachineDeployment %s, due to %w", machineDeployment.Name, err) + if err := client.Delete(ctx, machineDeployment); err != nil { + return fmt.Errorf("failed to remove MachineDeployment %s, due to %w", machineDeployment.Name, err) } - return wait.Poll(machineReadyCheckPeriod, timeout, func() (bool, error) { - err := client.Get(context.Background(), types.NamespacedName{Namespace: machineDeployment.Namespace, Name: machineDeployment.Name}, &clusterv1alpha1.MachineDeployment{}) - if kerrors.IsNotFound(err) { + return wait.PollUntilContextTimeout(ctx, machineReadyCheckPeriod, timeout, false, func(ctx context.Context) (bool, error) { + err := client.Get(ctx, types.NamespacedName{Namespace: machineDeployment.Namespace, Name: machineDeployment.Name}, &clusterv1alpha1.MachineDeployment{}) + if apierrors.IsNotFound(err) { return true, nil } return false, err @@ -269,8 +269,8 @@ func 
deleteAndAssure(machineDeployment *clusterv1alpha1.MachineDeployment, clien // assureNodeForMachineDeployment according to shouldExists parameter check if a node for machine exists in the system or not // this method examines OwnerReference of each node. -func assureNodeForMachineDeployment(machineDeployment *clusterv1alpha1.MachineDeployment, client ctrlruntimeclient.Client, shouldExist bool) error { - machines, err := getMatchingMachines(machineDeployment, client) +func assureNodeForMachineDeployment(ctx context.Context, machineDeployment *clusterv1alpha1.MachineDeployment, client ctrlruntimeclient.Client, shouldExist bool) error { + machines, err := getMatchingMachines(ctx, machineDeployment, client) if err != nil { return fmt.Errorf("failed to list Machines: %w", err) } @@ -297,7 +297,7 @@ func assureNodeForMachineDeployment(machineDeployment *clusterv1alpha1.MachineDe } nodes := &corev1.NodeList{} - if err := client.List(context.Background(), nodes); err != nil { + if err := client.List(ctx, nodes); err != nil { return fmt.Errorf("failed to list Nodes: %w", err) } @@ -339,22 +339,22 @@ func readAndModifyManifest(pathToManifest string, keyValuePairs []string) (strin if len(kv) != 2 { return "", fmt.Errorf("the given key value pair = %v is incorrect, the correct form is key=value", keyValuePair) } - content = strings.Replace(content, kv[0], kv[1], -1) + content = strings.ReplaceAll(content, kv[0], kv[1]) } return content, nil } // getMatchingMachines returns all machines that are owned by the passed machineDeployment. 
-func getMatchingMachines(machineDeployment *clusterv1alpha1.MachineDeployment, client ctrlruntimeclient.Client) ([]clusterv1alpha1.Machine, error) { - matchingMachineSets, err := getMatchingMachineSets(machineDeployment, client) +func getMatchingMachines(ctx context.Context, machineDeployment *clusterv1alpha1.MachineDeployment, client ctrlruntimeclient.Client) ([]clusterv1alpha1.Machine, error) { + matchingMachineSets, err := getMatchingMachineSets(ctx, machineDeployment, client) if err != nil { return nil, err } klog.V(2).Infof("Found %v matching MachineSets for %s", len(matchingMachineSets), machineDeployment.Name) var matchingMachines []clusterv1alpha1.Machine for _, machineSet := range matchingMachineSets { - machinesForMachineSet, err := getMatchingMachinesForMachineset(&machineSet, client) + machinesForMachineSet, err := getMatchingMachinesForMachineset(ctx, &machineSet, client) if err != nil { return nil, fmt.Errorf("failed to get matching Machines for MachineSet %s: %w", machineSet.Name, err) } @@ -364,9 +364,9 @@ func getMatchingMachines(machineDeployment *clusterv1alpha1.MachineDeployment, c return matchingMachines, nil } -func getMatchingMachinesForMachineset(machineSet *clusterv1alpha1.MachineSet, client ctrlruntimeclient.Client) ([]clusterv1alpha1.Machine, error) { +func getMatchingMachinesForMachineset(ctx context.Context, machineSet *clusterv1alpha1.MachineSet, client ctrlruntimeclient.Client) ([]clusterv1alpha1.Machine, error) { allMachines := &clusterv1alpha1.MachineList{} - if err := client.List(context.Background(), allMachines, &ctrlruntimeclient.ListOptions{Namespace: machineSet.Namespace}); err != nil { + if err := client.List(ctx, allMachines, &ctrlruntimeclient.ListOptions{Namespace: machineSet.Namespace}); err != nil { return nil, fmt.Errorf("failed to list Machines: %w", err) } var matchingMachines []clusterv1alpha1.Machine @@ -379,20 +379,20 @@ func getMatchingMachinesForMachineset(machineSet *clusterv1alpha1.MachineSet, cl } // 
getMatchingMachineSets returns all machineSets that are owned by the passed machineDeployment. -func getMatchingMachineSets(machineDeployment *clusterv1alpha1.MachineDeployment, client ctrlruntimeclient.Reader) ([]clusterv1alpha1.MachineSet, error) { +func getMatchingMachineSets(ctx context.Context, machineDeployment *clusterv1alpha1.MachineDeployment, client ctrlruntimeclient.Reader) ([]clusterv1alpha1.MachineSet, error) { // Ensure we actually have an object from the KubeAPI and not just the result of the yaml parsing, as the latter // can not be the owner of anything due to missing UID. if machineDeployment.ResourceVersion == "" { nn := types.NamespacedName{Namespace: machineDeployment.Namespace, Name: machineDeployment.Name} - if err := client.Get(context.Background(), nn, machineDeployment); err != nil { - if !kerrors.IsNotFound(err) { + if err := client.Get(ctx, nn, machineDeployment); err != nil { + if !apierrors.IsNotFound(err) { return nil, fmt.Errorf("failed to get MachineDeployment %s: %w", nn.Name, err) } return nil, nil } } allMachineSets := &clusterv1alpha1.MachineSetList{} - if err := client.List(context.Background(), allMachineSets, &ctrlruntimeclient.ListOptions{Namespace: machineDeployment.Namespace}); err != nil { + if err := client.List(ctx, allMachineSets, &ctrlruntimeclient.ListOptions{Namespace: machineDeployment.Namespace}); err != nil { return nil, fmt.Errorf("failed to list MachineSets: %w", err) } var matchingMachineSets []clusterv1alpha1.MachineSet @@ -408,17 +408,17 @@ func getInt32Ptr(i int32) *int32 { return &i } -func updateMachineDeployment(md *clusterv1alpha1.MachineDeployment, client ctrlruntimeclient.Client, modify func(*clusterv1alpha1.MachineDeployment)) error { +func updateMachineDeployment(ctx context.Context, md *clusterv1alpha1.MachineDeployment, client ctrlruntimeclient.Client, modify func(*clusterv1alpha1.MachineDeployment)) error { // Store Namespace and Name here because after an error md will be nil name := md.Name 
namespace := md.Namespace return retry.RetryOnConflict(retry.DefaultBackoff, func() error { md := &clusterv1alpha1.MachineDeployment{} - if err := client.Get(context.Background(), types.NamespacedName{Namespace: namespace, Name: name}, md); err != nil { + if err := client.Get(ctx, types.NamespacedName{Namespace: namespace, Name: name}, md); err != nil { return err } modify(md) - return client.Update(context.Background(), md) + return client.Update(ctx, md) }) }