ref: pre v2.15 cleanup (#2871)
elchead authored Jan 29, 2024
1 parent 3799525 commit 489e076
Showing 12 changed files with 35 additions and 215 deletions.
8 changes: 0 additions & 8 deletions cli/internal/cloudcmd/tfvars.go
@@ -64,14 +64,6 @@ func TerraformIAMUpgradeVars(conf *config.Config, fileHandler file.Handler) (ter
if err := terraform.VariablesFromBytes(oldVarBytes, &oldVars); err != nil {
return nil, fmt.Errorf("parsing existing IAM workspace: %w", err)
}

// Migration from the "region" to the "location" field name.
// TODO(msanft): Remove after v2.14.0 is released.
if oldVars.Region != nil && *oldVars.Region != "" && oldVars.Location == "" {
oldVars.Location = *oldVars.Region
oldVars.Region = nil
}

vars = azureTerraformIAMVars(conf, oldVars)
case cloudprovider.GCP:
var oldVars terraform.GCPIAMVariables
5 changes: 1 addition & 4 deletions cli/internal/terraform/variables.go
@@ -245,11 +245,8 @@ type AzureNodeGroup struct {

// AzureIAMVariables is user configuration for creating the IAM configuration with Terraform on Microsoft Azure.
type AzureIAMVariables struct {
// Region is the Azure location to use. (e.g. westus).
// THIS FIELD IS DEPRECATED AND ONLY KEPT FOR MIGRATION PURPOSES. DO NOT USE.
Region *string `hcl:"region" cty:"region"` // TODO(msanft): Remove this field once v2.14.0 is released.
// Location is the Azure location to use. (e.g. westus)
Location string `hcl:"location,optional" cty:"location"` // TODO(msanft): Make this required once v2.14.0 is released.
Location string `hcl:"location" cty:"location"`
// ServicePrincipal is the name of the service principal to use.
ServicePrincipal string `hcl:"service_principal_name" cty:"service_principal_name"`
// ResourceGroup is the name of the resource group to use.
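For illustration, code that builds these variables now sets Location directly instead of the removed Region field. A minimal sketch, assuming the terraform package from the file above is imported; the values are placeholders, not taken from the repository:

// Sketch only: field names match the AzureIAMVariables struct shown above,
// the concrete values are invented.
iamVars := terraform.AzureIAMVariables{
	Location:         "westus",               // now a plain required field
	ServicePrincipal: "my-service-principal", // maps to hcl:"service_principal_name"
}
_ = iamVars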
3 changes: 1 addition & 2 deletions internal/constellation/helm/actionfactory.go
@@ -120,8 +120,7 @@ func (a actionFactory) appendNewAction(
} else {
// This may break for external chart dependencies if we decide to upgrade more than one minor version at a time.
if err := newVersion.IsUpgradeTo(currentVersion); err != nil {
// TODO(3u13r): Remove when Constellation v2.14 is released.
// We need to ignore that we jump from Cilium v1.12 to v1.15-pre. We have verified that this works.
// Allow bigger Cilium and Cert-Manager version jumps.
if !(errors.Is(err, compatibility.ErrMinorDrift) && (release.releaseName == "cilium" || release.releaseName == "cert-manager")) {
return fmt.Errorf("invalid upgrade for %s: %w", release.releaseName, err)
}
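To make the remaining exception concrete: a jump such as Cilium v1.12 to v1.15 (the example named in the comment this commit removes) fails the strict minor-version check with compatibility.ErrMinorDrift, and the branch above now tolerates that error for the cilium and cert-manager releases only. A minimal sketch mirroring the call shapes visible in the hunk; the version numbers are illustrative:

// Sketch only: mirrors the check in appendNewAction above; versions are
// examples, not pinned by this commit.
err := newVersion.IsUpgradeTo(currentVersion) // e.g. comparing Cilium v1.15.x against v1.12.x
allowedDrift := release.releaseName == "cilium" || release.releaseName == "cert-manager"
if err != nil && !(errors.Is(err, compatibility.ErrMinorDrift) && allowedDrift) {
	return fmt.Errorf("invalid upgrade for %s: %w", release.releaseName, err)
}
// Otherwise the multi-minor jump proceeds for these two charts.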
1 change: 0 additions & 1 deletion internal/constellation/kubecmd/BUILD.bazel
@@ -66,7 +66,6 @@ go_test(
"@io_k8s_apimachinery//pkg/apis/meta/v1/unstructured",
"@io_k8s_apimachinery//pkg/runtime",
"@io_k8s_apimachinery//pkg/runtime/schema",
"@io_k8s_kubernetes//cmd/kubeadm/app/apis/kubeadm/v1beta3",
"@io_k8s_sigs_yaml//:yaml",
],
)
44 changes: 0 additions & 44 deletions internal/constellation/kubecmd/kubecmd.go
@@ -21,7 +21,6 @@ import (
"encoding/json"
"errors"
"fmt"
"slices"
"sort"
"strings"
"time"
@@ -104,11 +103,6 @@ func (k *KubeCmd) UpgradeNodeImage(ctx context.Context, imageVersion semver.Semv
return fmt.Errorf("updating image version: %w", err)
}

// TODO(3u13r): remove `reconcileKubeadmConfigMap` after v2.14.0 has been released.
if err := k.reconcileKubeadmConfigMap(ctx); err != nil {
return fmt.Errorf("reconciling kubeadm config: %w", err)
}

k.log.Debugf("Updating local copy of nodeVersion image version from %s to %s", nodeVersion.Spec.ImageVersion, imageVersion.String())
nodeVersion.Spec.ImageReference = imageReference
nodeVersion.Spec.ImageVersion = imageVersion.String()
@@ -383,44 +377,6 @@ func (k *KubeCmd) applyNodeVersion(ctx context.Context, nodeVersion updatev1alph
return updatedNodeVersion, err
}

func (k *KubeCmd) reconcileKubeadmConfigMap(ctx context.Context) error {
clusterConfiguration, kubeadmConfig, err := k.getClusterConfiguration(ctx)
if err != nil {
return fmt.Errorf("getting ClusterConfig: %w", err)
}

for i, v := range clusterConfiguration.APIServer.ExtraVolumes {
if v.Name == "konnectivity-uds" {
clusterConfiguration.APIServer.ExtraVolumes = slices.Delete(clusterConfiguration.APIServer.ExtraVolumes, i, i+1)
}
}
for i, v := range clusterConfiguration.APIServer.ExtraVolumes {
if v.Name == "egress-config" {
clusterConfiguration.APIServer.ExtraVolumes = slices.Delete(clusterConfiguration.APIServer.ExtraVolumes, i, i+1)
}
}
delete(clusterConfiguration.APIServer.ExtraArgs, "egress-selector-config-file")

newConfigYAML, err := yaml.Marshal(clusterConfiguration)
if err != nil {
return fmt.Errorf("marshaling ClusterConfiguration: %w", err)
}

if kubeadmConfig.Data[constants.ClusterConfigurationKey] == string(newConfigYAML) {
k.log.Debugf("No changes to kubeadm config required")
return nil
}

kubeadmConfig.Data[constants.ClusterConfigurationKey] = string(newConfigYAML)
k.log.Debugf("Triggering kubeadm config update now")
if _, err = k.kubectl.UpdateConfigMap(ctx, kubeadmConfig); err != nil {
return fmt.Errorf("setting new kubeadm config: %w", err)
}

k.log.Debugf("Successfully reconciled the cluster's kubeadm config")
return nil
}

// isValidImageUpdate checks if the new image version is a valid upgrade, and there is no upgrade already running.
func (k *KubeCmd) isValidImageUpgrade(nodeVersion updatev1alpha1.NodeVersion, newImageVersion string, force bool) error {
if !force {
71 changes: 1 addition & 70 deletions internal/constellation/kubecmd/kubecmd_test.go
@@ -11,7 +11,6 @@ import (
"encoding/json"
"errors"
"fmt"
"strings"
"testing"
"time"

@@ -34,80 +33,21 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
kubeadmv1beta3 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
)

func TestUpgradeNodeImage(t *testing.T) {
clusterConf := kubeadmv1beta3.ClusterConfiguration{
APIServer: kubeadmv1beta3.APIServer{
ControlPlaneComponent: kubeadmv1beta3.ControlPlaneComponent{
ExtraArgs: map[string]string{},
ExtraVolumes: []kubeadmv1beta3.HostPathMount{},
},
},
}

clusterConfBytes, err := json.Marshal(clusterConf)
require.NoError(t, err)
validKubeadmConfig := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: constants.KubeadmConfigMap,
},
Data: map[string]string{
constants.ClusterConfigurationKey: string(clusterConfBytes),
},
}

clusterConfWithKonnectivity := kubeadmv1beta3.ClusterConfiguration{
APIServer: kubeadmv1beta3.APIServer{
ControlPlaneComponent: kubeadmv1beta3.ControlPlaneComponent{
ExtraArgs: map[string]string{
"egress-selector-config-file": "/etc/kubernetes/egress-selector-config-file.yaml",
},
ExtraVolumes: []kubeadmv1beta3.HostPathMount{
{
Name: "egress-config",
HostPath: "/etc/kubernetes/egress-selector-config-file.yaml",
},
{
Name: "konnectivity-uds",
HostPath: "/some/path/to/konnectivity-uds",
},
},
},
},
}

clusterConfBytesWithKonnectivity, err := json.Marshal(clusterConfWithKonnectivity)
require.NoError(t, err)
validKubeadmConfigWithKonnectivity := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: constants.KubeadmConfigMap,
},
Data: map[string]string{
constants.ClusterConfigurationKey: string(clusterConfBytesWithKonnectivity),
},
}

testCases := map[string]struct {
conditions []metav1.Condition
currentImageVersion semver.Semver
newImageVersion semver.Semver
badImageVersion string
force bool
customKubeadmConfig *corev1.ConfigMap
getCRErr error
wantErr bool
wantUpdate bool
assertCorrectError func(t *testing.T, err error) bool
customClientFn func(nodeVersion updatev1alpha1.NodeVersion) unstructuredInterface
}{
"success with konnectivity migration": {
currentImageVersion: semver.NewFromInt(1, 2, 2, ""),
newImageVersion: semver.NewFromInt(1, 2, 3, ""),
customKubeadmConfig: validKubeadmConfigWithKonnectivity,
wantUpdate: true,
},
"success": {
currentImageVersion: semver.NewFromInt(1, 2, 2, ""),
newImageVersion: semver.NewFromInt(1, 2, 3, ""),
@@ -226,15 +166,12 @@ func TestUpgradeNodeImage(t *testing.T) {
kubectl := &stubKubectl{
unstructuredInterface: unstructuredClient,
configMaps: map[string]*corev1.ConfigMap{
constants.KubeadmConfigMap: validKubeadmConfig,
constants.JoinConfigMap: newJoinConfigMap(`{"0":{"expected":"AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA","warnOnly":false}}`),
},
}
if tc.customClientFn != nil {
kubectl.unstructuredInterface = tc.customClientFn(nodeVersion)
}
if tc.customKubeadmConfig != nil {
kubectl.configMaps[constants.KubeadmConfigMap] = tc.customKubeadmConfig
}

upgrader := KubeCmd{
kubectl: kubectl,
@@ -255,12 +192,6 @@ func TestUpgradeNodeImage(t *testing.T) {
return
}
assert.NoError(err)
// If the ConfigMap only exists in the updatedConfigMaps map, the Konnectivity values should have been removed
if strings.Contains(kubectl.configMaps[constants.KubeadmConfigMap].Data[constants.ClusterConfigurationKey], "konnectivity-uds") {
assert.NotContains(kubectl.updatedConfigMaps[constants.KubeadmConfigMap].Data[constants.ClusterConfigurationKey], "konnectivity-uds")
assert.NotContains(kubectl.updatedConfigMaps[constants.KubeadmConfigMap].Data[constants.ClusterConfigurationKey], "egress-config")
assert.NotContains(kubectl.updatedConfigMaps[constants.KubeadmConfigMap].Data[constants.ClusterConfigurationKey], "egress-selector-config-file")
}
})
}
}
3 changes: 0 additions & 3 deletions internal/constellation/state/state.go
@@ -76,9 +76,6 @@ type State struct {
// description: |
// Schema version of this state file.
Version string `yaml:"version"`

// TODO(msanft): Add link to self-managed infrastructure docs once existing.

// description: |
// State of the cluster's cloud resources. These values are retrieved during
// cluster creation. In the case of self-managed infrastructure, the marked
14 changes: 0 additions & 14 deletions operators/constellation-node-operator/internal/upgrade/upgrade.go
@@ -44,22 +44,8 @@ func (c *Client) Upgrade(ctx context.Context, kubernetesComponents components.Co
}
defer conn.Close()

// While we're transitioning between version 2.13 and 2.14, we need to
// expect an upgrade-agent that does not yet understand the
// KubernetesComponents proto field. Therefore, we pass the kubeadm
// component twice: once via KubeadmUrl/KubeadmHash, once as part of the
// kubernetesComponents argument.
kubeadm, err := kubernetesComponents.GetKubeadmComponent()
if err != nil {
return fmt.Errorf("expected a kubeadm Component: %w", err)
}
protoClient := upgradeproto.NewUpdateClient(conn)
_, err = protoClient.ExecuteUpdate(ctx, &upgradeproto.ExecuteUpdateRequest{
// TODO(burgerdev): remove these fields after releasing 2.14.
// %< ---------------------------------
KubeadmUrl: kubeadm.Url,
KubeadmHash: kubeadm.Hash,
// %< ---------------------------------
WantedKubernetesVersion: WantedKubernetesVersion,
KubernetesComponents: kubernetesComponents,
})
14 changes: 1 addition & 13 deletions upgrade-agent/internal/server/server.go
@@ -131,20 +131,8 @@ func prepareUpdate(ctx context.Context, installer osInstaller, updateRequest *up
if err != nil {
return err
}

var cs components.Components
if len(updateRequest.KubeadmUrl) > 0 {
cs = append(cs, &components.Component{
Url: updateRequest.KubeadmUrl,
Hash: updateRequest.KubeadmHash,
InstallPath: constants.KubeadmPath,
Extract: false,
})
}
cs = append(cs, updateRequest.KubernetesComponents...)

// Download & install the Kubernetes components.
for _, c := range cs {
for _, c := range updateRequest.KubernetesComponents {
if err := installer.Install(ctx, c); err != nil {
return fmt.Errorf("installing Kubernetes component %q: %w", c.Url, err)
}
15 changes: 4 additions & 11 deletions upgrade-agent/internal/server/server_test.go
@@ -62,12 +62,7 @@ func TestPrepareUpdate(t *testing.T) {
slimUpdateRequest := &upgradeproto.ExecuteUpdateRequest{
WantedKubernetesVersion: "v1.1.1",
}
oldStyleUpdateRequest := &upgradeproto.ExecuteUpdateRequest{
WantedKubernetesVersion: "v1.1.1",
KubeadmUrl: "http://example.com/kubeadm",
KubeadmHash: "sha256:foo",
}
newStyleUpdateRequest := &upgradeproto.ExecuteUpdateRequest{
updateRequest := &upgradeproto.ExecuteUpdateRequest{
WantedKubernetesVersion: "v1.1.1",
KubernetesComponents: []*components.Component{
{
@@ -79,8 +74,6 @@
}
combinedStyleUpdateRequest := &upgradeproto.ExecuteUpdateRequest{
WantedKubernetesVersion: "v1.1.1",
KubeadmUrl: "http://example.com/kubeadm",
KubeadmHash: "sha256:foo",
KubernetesComponents: []*components.Component{
{
Url: "data:application/octet-stream,foo",
@@ -104,16 +97,16 @@ func TestPrepareUpdate(t *testing.T) {
},
"install error": {
installer: stubOsInstaller{InstallErr: fmt.Errorf("install error")},
updateRequest: oldStyleUpdateRequest,
updateRequest: updateRequest,
wantErr: true,
},
"new style works": {
installer: stubOsInstaller{},
updateRequest: newStyleUpdateRequest,
updateRequest: updateRequest,
},
"new style install error": {
installer: stubOsInstaller{InstallErr: fmt.Errorf("install error")},
updateRequest: newStyleUpdateRequest,
updateRequest: updateRequest,
wantErr: true,
},
"combined style works": {