From ac8b360b73d00281c4d2ca48da5304daa9824ab1 Mon Sep 17 00:00:00 2001
From: Jason DeTiberus
Date: Wed, 19 Feb 2020 19:51:02 -0500
Subject: [PATCH] Cleanup KubeadmControlPlane machine filters

---
 .../kubeadm_control_plane_controller.go       | 28 +++++--------
 .../kubeadm_control_plane_controller_test.go  | 16 ++++----
 controlplane/kubeadm/internal/cluster.go      | 40 ++++---------------
 controlplane/kubeadm/internal/cluster_test.go |  3 +-
 .../kubeadm/internal/etcd/util/set.go         | 14 +++----
 .../kubeadm/internal/failure_domain.go        |  6 +--
 .../kubeadm/internal/failure_domain_test.go   | 19 +++------
 .../kubeadm/internal/machine_filters.go       |  6 ---
 .../kubeadm/internal/machine_filters_test.go  | 19 +++++----
 9 files changed, 51 insertions(+), 100 deletions(-)

diff --git a/controlplane/kubeadm/controllers/kubeadm_control_plane_controller.go b/controlplane/kubeadm/controllers/kubeadm_control_plane_controller.go
index 19d58806181f..8acb643672f7 100644
--- a/controlplane/kubeadm/controllers/kubeadm_control_plane_controller.go
+++ b/controlplane/kubeadm/controllers/kubeadm_control_plane_controller.go
@@ -19,7 +19,6 @@ package controllers
 import (
 	"context"
 	"fmt"
-	"sort"
 	"strings"
 	"time"
 
@@ -67,7 +66,7 @@ const (
 )
 
 type managementCluster interface {
-	GetMachinesForCluster(ctx context.Context, cluster types.NamespacedName, filters ...func(machine *clusterv1.Machine) bool) ([]*clusterv1.Machine, error)
+	GetMachinesForCluster(ctx context.Context, cluster types.NamespacedName, filters ...internal.MachineFilter) (internal.FilterableMachineCollection, error)
 	TargetClusterControlPlaneIsHealthy(ctx context.Context, clusterKey types.NamespacedName, controlPlaneName string) error
 	TargetClusterEtcdIsHealthy(ctx context.Context, clusterKey types.NamespacedName, controlPlaneName string) error
 }
@@ -235,8 +234,7 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *
 	}
 
 	currentConfigurationHash := hash.Compute(&kcp.Spec)
-	requireUpgrade := internal.FilterMachines(
-		ownedMachines,
+	requireUpgrade := ownedMachines.AnyFilter(
 		internal.Not(internal.MatchesConfigurationHash(currentConfigurationHash)),
 		internal.OlderThan(kcp.Spec.UpgradeAfter),
 	)
@@ -254,7 +252,7 @@ func (r *KubeadmControlPlaneReconciler) reconcile(ctx context.Context, cluster *
 	}
 
 	// If we've made it this far, we don't need to worry about Machines that are older than kcp.Spec.UpgradeAfter
-	currentMachines := internal.FilterMachines(ownedMachines, internal.MatchesConfigurationHash(currentConfigurationHash))
+	currentMachines := ownedMachines.Filter(internal.MatchesConfigurationHash(currentConfigurationHash))
 	numMachines := len(currentMachines)
 	desiredReplicas := int(*kcp.Spec.Replicas)
 
@@ -312,7 +310,7 @@ func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, kcp *c
 		return errors.Wrap(err, "failed to get list of owned machines")
 	}
 
-	currentMachines := internal.FilterMachines(ownedMachines, internal.MatchesConfigurationHash(hash.Compute(&kcp.Spec)))
+	currentMachines := ownedMachines.Filter(internal.MatchesConfigurationHash(hash.Compute(&kcp.Spec)))
 	kcp.Status.UpdatedReplicas = int32(len(currentMachines))
 
 	replicas := int32(len(ownedMachines))
@@ -347,7 +345,7 @@ func (r *KubeadmControlPlaneReconciler) updateStatus(ctx context.Context, kcp *c
 	return nil
 }
 
-func (r *KubeadmControlPlaneReconciler) upgradeControlPlane(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, requireUpgrade []*clusterv1.Machine) error {
+func (r *KubeadmControlPlaneReconciler) upgradeControlPlane(ctx context.Context, cluster *clusterv1.Cluster, kcp *controlplanev1.KubeadmControlPlane, requireUpgrade internal.FilterableMachineCollection) error {
 	// TODO: verify health for each existing replica
 	// TODO: mark an old Machine via the label kubeadm.controlplane.cluster.x-k8s.io/selected-for-upgrade
 
@@ -412,13 +410,13 @@ func (r *KubeadmControlPlaneReconciler) scaleDownControlPlane(ctx context.Contex
 	}
 
 	// Wait for any delete in progress to complete before deleting another Machine
-	if len(internal.FilterMachines(ownedMachines, internal.HasDeletionTimestamp)) > 0 {
+	if len(ownedMachines.Filter(internal.HasDeletionTimestamp)) > 0 {
 		return ctrl.Result{RequeueAfter: DeleteRequeueAfter}, nil
 	}
 
-	machineToDelete, err := oldestMachine(ownedMachines)
-	if err != nil {
-		return ctrl.Result{}, errors.Wrap(err, "failed to pick control plane Machine to delete")
+	machineToDelete := ownedMachines.Oldest()
+	if machineToDelete == nil {
+		return ctrl.Result{}, errors.New("failed to pick control plane Machine to delete")
 	}
 
 	if err := r.Client.Delete(ctx, machineToDelete); err != nil && !apierrors.IsNotFound(err) {
@@ -719,11 +717,3 @@ func getMachineNode(ctx context.Context, crClient client.Client, machine *cluste
 
 	return node, nil
 }
-
-func oldestMachine(machines []*clusterv1.Machine) (*clusterv1.Machine, error) {
-	if len(machines) == 0 {
-		return &clusterv1.Machine{}, errors.New("no machines given")
-	}
-	sort.Sort(util.MachinesByCreationTimestamp(machines))
-	return machines[0], nil
-}
diff --git a/controlplane/kubeadm/controllers/kubeadm_control_plane_controller_test.go b/controlplane/kubeadm/controllers/kubeadm_control_plane_controller_test.go
index a87407191726..180e02dc0c0b 100644
--- a/controlplane/kubeadm/controllers/kubeadm_control_plane_controller_test.go
+++ b/controlplane/kubeadm/controllers/kubeadm_control_plane_controller_test.go
@@ -1319,10 +1319,10 @@ func TestKubeadmControlPlaneReconciler_reconcileDelete(t *testing.T) {
 type fakeManagementCluster struct {
 	ControlPlaneHealthy bool
 	EtcdHealthy         bool
-	Machines            []*clusterv1.Machine
+	Machines            internal.FilterableMachineCollection
 }
 
-func (f *fakeManagementCluster) GetMachinesForCluster(ctx context.Context, cluster types.NamespacedName, filters ...func(machine *clusterv1.Machine) bool) ([]*clusterv1.Machine, error) {
+func (f *fakeManagementCluster) GetMachinesForCluster(ctx context.Context, cluster types.NamespacedName, filters ...internal.MachineFilter) (internal.FilterableMachineCollection, error) {
 	return f.Machines, nil
 }
 
@@ -1351,7 +1351,7 @@ func TestKubeadmControlPlaneReconciler_scaleUpControlPlane(t *testing.T) {
 	g.Expect(fakeClient.Create(context.Background(), genericMachineTemplate)).To(Succeed())
 
 	fmc := &fakeManagementCluster{
-		Machines:            []*clusterv1.Machine{},
+		Machines:            internal.NewFilterableMachineCollection(),
 		ControlPlaneHealthy: true,
 		EtcdHealthy:         true,
 	}
@@ -1359,7 +1359,7 @@ func TestKubeadmControlPlaneReconciler_scaleUpControlPlane(t *testing.T) {
 	for i := 0; i < 2; i++ {
 		m, _ := createMachineNodePair(fmt.Sprintf("test-%d", i), cluster, kcp, true)
 		g.Expect(fakeClient.Create(context.Background(), m)).To(Succeed())
-		fmc.Machines = append(fmc.Machines, m)
+		fmc.Machines.Insert(m.DeepCopy())
 	}
 
 	r := &KubeadmControlPlaneReconciler{
@@ -1410,7 +1410,7 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane(t *testing.T) {
 		g.Expect(fakeClient.Create(context.Background(), genericMachineTemplate)).To(Succeed())
 
 		fmc := &fakeManagementCluster{
-			Machines:            []*clusterv1.Machine{},
+			Machines:            internal.NewFilterableMachineCollection(),
 			ControlPlaneHealthy: true,
 			EtcdHealthy:         true,
 		}
@@ -1418,7 +1418,7 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane(t *testing.T) {
 		for i := 0; i < 2; i++ {
 			m, _ := createMachineNodePair(fmt.Sprintf("test-%d", i), cluster, kcp, true)
 			g.Expect(fakeClient.Create(context.Background(), m)).To(Succeed())
-			fmc.Machines = append(fmc.Machines, m)
+			fmc.Machines.Insert(m.DeepCopy())
 		}
 
 		r := &KubeadmControlPlaneReconciler{
@@ -1446,7 +1446,7 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane(t *testing.T) {
 		g.Expect(fakeClient.Create(context.Background(), genericMachineTemplate)).To(Succeed())
 
 		fmc := &fakeManagementCluster{
-			Machines:            []*clusterv1.Machine{},
+			Machines:            internal.NewFilterableMachineCollection(),
 			ControlPlaneHealthy: true,
 			EtcdHealthy:         true,
 		}
@@ -1454,7 +1454,7 @@ func TestKubeadmControlPlaneReconciler_scaleDownControlPlane(t *testing.T) {
 		for i := 0; i < 2; i++ {
 			m, _ := createMachineNodePair(fmt.Sprintf("test-%d", i), cluster, kcp, true)
 			g.Expect(fakeClient.Create(context.Background(), m)).To(Succeed())
-			fmc.Machines = append(fmc.Machines, m)
+			fmc.Machines.Insert(m.DeepCopy())
 		}
 
 		r := &KubeadmControlPlaneReconciler{
diff --git a/controlplane/kubeadm/internal/cluster.go b/controlplane/kubeadm/internal/cluster.go
index 2b27712a53ec..49e4a8db8071 100644
--- a/controlplane/kubeadm/internal/cluster.go
+++ b/controlplane/kubeadm/internal/cluster.go
@@ -50,31 +50,9 @@ type ManagementCluster struct {
 	Client ctrlclient.Client
 }
 
-// FilterMachines returns a filtered list of machines
-func FilterMachines(machines []*clusterv1.Machine, filters ...func(machine *clusterv1.Machine) bool) []*clusterv1.Machine {
-	if len(filters) == 0 {
-		return machines
-	}
-
-	filteredMachines := make([]*clusterv1.Machine, 0, len(machines))
-	for _, machine := range machines {
-		add := true
-		for _, filter := range filters {
-			if !filter(machine) {
-				add = false
-				break
-			}
-		}
-		if add {
-			filteredMachines = append(filteredMachines, machine)
-		}
-	}
-	return filteredMachines
-}
-
 // GetMachinesForCluster returns a list of machines that can be filtered or not.
 // If no filter is supplied then all machines associated with the target cluster are returned.
-func (m *ManagementCluster) GetMachinesForCluster(ctx context.Context, cluster types.NamespacedName, filters ...func(machine *clusterv1.Machine) bool) ([]*clusterv1.Machine, error) {
+func (m *ManagementCluster) GetMachinesForCluster(ctx context.Context, cluster types.NamespacedName, filters ...MachineFilter) (FilterableMachineCollection, error) {
 	selector := map[string]string{
 		clusterv1.ClusterLabelName: cluster.Name,
 	}
@@ -83,12 +61,8 @@ func (m *ManagementCluster) GetMachinesForCluster(ctx context.Context, cluster t
 		return nil, errors.Wrap(err, "failed to list machines")
 	}
 
-	machines := make([]*clusterv1.Machine, 0, len(ml.Items))
-	for i := range ml.Items {
-		machines = append(machines, &ml.Items[i])
-	}
-
-	return FilterMachines(machines, filters...), nil
+	machines := NewFilterableMachineCollectionFromMachineList(ml)
+	return machines.Filter(filters...), nil
 }
 
 // getCluster builds a cluster object.
@@ -114,7 +88,7 @@ func (m *ManagementCluster) getCluster(ctx context.Context, clusterKey types.Nam
 		client:     c,
 		restConfig: restConfig,
 		etcdCACert: etcdCACert,
-		etcdCAkey:  etcdCAKey,
+		etcdCAKey:  etcdCAKey,
 	}, nil
 }
 
@@ -204,12 +178,12 @@ type cluster struct {
 	client ctrlclient.Client
 	// restConfig is required for the proxy.
 	restConfig *rest.Config
-	etcdCACert, etcdCAkey []byte
+	etcdCACert, etcdCAKey []byte
 }
 
 // generateEtcdTLSClientBundle builds an etcd client TLS bundle from the Etcd CA for this cluster.
 func (c *cluster) generateEtcdTLSClientBundle() (*tls.Config, error) {
-	clientCert, err := generateClientCert(c.etcdCACert, c.etcdCAkey)
+	clientCert, err := generateClientCert(c.etcdCACert, c.etcdCAKey)
 	if err != nil {
 		return nil, err
 	}
@@ -361,7 +335,7 @@ func (c *cluster) getEtcdClientForNode(nodeName string, tlsConfig *tls.Config) (
 	// This does not support external etcd.
 	p := proxy.Proxy{
 		Kind:         "pods",
-		Namespace:    "kube-system", // TODO, can etcd ever run in a different namespace?
+		Namespace:    metav1.NamespaceSystem, // TODO, can etcd ever run in a different namespace?
 		ResourceName: staticPodName("etcd", nodeName),
 		KubeConfig:   c.restConfig,
 		TLSConfig:    tlsConfig,
diff --git a/controlplane/kubeadm/internal/cluster_test.go b/controlplane/kubeadm/internal/cluster_test.go
index f747fc209948..1ff4383767dd 100644
--- a/controlplane/kubeadm/internal/cluster_test.go
+++ b/controlplane/kubeadm/internal/cluster_test.go
@@ -25,8 +25,9 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
-	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
 	"sigs.k8s.io/controller-runtime/pkg/client"
+
+	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
 )
 
 func podReady(isReady corev1.ConditionStatus) corev1.PodCondition {
diff --git a/controlplane/kubeadm/internal/etcd/util/set.go b/controlplane/kubeadm/internal/etcd/util/set.go
index 17c61a58072d..c5d941e93153 100644
--- a/controlplane/kubeadm/internal/etcd/util/set.go
+++ b/controlplane/kubeadm/internal/etcd/util/set.go
@@ -27,7 +27,7 @@ import (
 
 type empty struct{}
 
-// util.UInt64Set is a set of int64s, implemented via map[uint64]struct{} for minimal memory consumption.
+// util.UInt64Set is a set of uint64s, implemented via map[uint64]struct{} for minimal memory consumption.
 type UInt64Set map[uint64]empty
 
 // NewUInt64Set creates a UInt64Set from a list of values.
@@ -156,15 +156,15 @@ func (s UInt64Set) Equal(s2 UInt64Set) bool {
 	return len(s1) == len(s2) && s1.IsSuperset(s2)
 }
 
-type sortableSliceOfInt64 []uint64
+type sortableSliceOfUInt64 []uint64
 
-func (s sortableSliceOfInt64) Len() int           { return len(s) }
-func (s sortableSliceOfInt64) Less(i, j int) bool { return lessInt64(s[i], s[j]) }
-func (s sortableSliceOfInt64) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s sortableSliceOfUInt64) Len() int           { return len(s) }
+func (s sortableSliceOfUInt64) Less(i, j int) bool { return lessUInt64(s[i], s[j]) }
+func (s sortableSliceOfUInt64) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
 
 // List returns the contents as a sorted uint64 slice.
 func (s UInt64Set) List() []uint64 {
-	res := make(sortableSliceOfInt64, 0, len(s))
+	res := make(sortableSliceOfUInt64, 0, len(s))
 	for key := range s {
 		res = append(res, key)
 	}
@@ -196,6 +196,6 @@ func (s UInt64Set) Len() int {
 	return len(s)
 }
 
-func lessInt64(lhs, rhs uint64) bool {
+func lessUInt64(lhs, rhs uint64) bool {
 	return lhs < rhs
 }
diff --git a/controlplane/kubeadm/internal/failure_domain.go b/controlplane/kubeadm/internal/failure_domain.go
index 14257b6dc6de..c8a8ba0de05e 100644
--- a/controlplane/kubeadm/internal/failure_domain.go
+++ b/controlplane/kubeadm/internal/failure_domain.go
@@ -49,7 +49,7 @@ func (f failureDomainAggregations) Swap(i, j int) {
 }
 
 // PickMost returns the failure domain with the most number of machines.
-func PickMost(failureDomains clusterv1.FailureDomains, machines []*clusterv1.Machine) string {
+func PickMost(failureDomains clusterv1.FailureDomains, machines FilterableMachineCollection) string {
 	aggregations := pick(failureDomains, machines)
 	if len(aggregations) == 0 {
 		return ""
@@ -60,7 +60,7 @@ func PickMost(failureDomains clusterv1.FailureDomains, machines []*clusterv1.Mac
 }
 
 // PickFewest returns the failure domain with the fewest number of machines.
-func PickFewest(failureDomains clusterv1.FailureDomains, machines []*clusterv1.Machine) string {
+func PickFewest(failureDomains clusterv1.FailureDomains, machines FilterableMachineCollection) string {
 	aggregations := pick(failureDomains, machines)
 	if len(aggregations) == 0 {
 		return ""
@@ -69,7 +69,7 @@ func PickFewest(failureDomains clusterv1.FailureDomains, machines []*clusterv1.M
 	return aggregations[0].id
 }
 
-func pick(failureDomains clusterv1.FailureDomains, machines []*clusterv1.Machine) failureDomainAggregations {
+func pick(failureDomains clusterv1.FailureDomains, machines FilterableMachineCollection) failureDomainAggregations {
 	if len(failureDomains) == 0 {
 		return failureDomainAggregations{}
 	}
diff --git a/controlplane/kubeadm/internal/failure_domain_test.go b/controlplane/kubeadm/internal/failure_domain_test.go
index 86877eb893fa..2d6b2a304b80 100644
--- a/controlplane/kubeadm/internal/failure_domain_test.go
+++ b/controlplane/kubeadm/internal/failure_domain_test.go
@@ -37,7 +37,7 @@ func TestNewFailureDomainPicker(t *testing.T) {
 	testcases := []struct {
 		name     string
 		fds      clusterv1.FailureDomains
-		machines []*clusterv1.Machine
+		machines FilterableMachineCollection
 		expected []string
 	}{
 		{
@@ -52,11 +52,9 @@ func TestNewFailureDomainPicker(t *testing.T) {
 			expected: []string{a},
 		},
 		{
-			name: "one machine in a failure domain",
-			fds:  fds,
-			machines: []*clusterv1.Machine{
-				machinea.DeepCopy(),
-			},
+			name:     "one machine in a failure domain",
+			fds:      fds,
+			machines: NewFilterableMachineCollection(machinea.DeepCopy()),
 			expected: []string{b},
 		},
 		{
@@ -64,9 +62,7 @@ func TestNewFailureDomainPicker(t *testing.T) {
 			fds: clusterv1.FailureDomains{
 				a: clusterv1.FailureDomainSpec{},
 			},
-			machines: []*clusterv1.Machine{
-				machinenil.DeepCopy(),
-			},
+			machines: NewFilterableMachineCollection(machinenil.DeepCopy()),
 			expected: []string{a, b},
 		},
 		{
@@ -74,15 +70,12 @@ func TestNewFailureDomainPicker(t *testing.T) {
 			fds: clusterv1.FailureDomains{
 				a: clusterv1.FailureDomainSpec{},
 			},
-			machines: []*clusterv1.Machine{
-				machineb.DeepCopy(),
-			},
+			machines: NewFilterableMachineCollection(machineb.DeepCopy()),
 			expected: []string{a},
 		},
 		{
 			name:     "failure domains and no machines should return a valid failure domain",
 			fds:      fds,
-			machines: []*clusterv1.Machine{},
 			expected: []string{a, b},
 		},
 	}
diff --git a/controlplane/kubeadm/internal/machine_filters.go b/controlplane/kubeadm/internal/machine_filters.go
index 2960891bbe98..da9c299fd4ba 100644
--- a/controlplane/kubeadm/internal/machine_filters.go
+++ b/controlplane/kubeadm/internal/machine_filters.go
@@ -131,12 +131,6 @@ func OlderThan(t *metav1.Time) MachineFilter {
 	}
 }
 
-// SelectedForUpgrade is a MachineFilter to find all machines that have the
-// controlplanev1.SelectedForUpgradeAnnotation set.
-func SelectedForUpgrade(machine *clusterv1.Machine) bool {
-	return HasAnnotationKey(controlplanev1.SelectedForUpgradeAnnotation)(machine)
-}
-
 // HasAnnotationKey returns a MachineFilter function to find all machines that have the
 // specified Annotation key present
 func HasAnnotationKey(key string) MachineFilter {
diff --git a/controlplane/kubeadm/internal/machine_filters_test.go b/controlplane/kubeadm/internal/machine_filters_test.go
index 0abb45661d65..2535b2c97e0a 100644
--- a/controlplane/kubeadm/internal/machine_filters_test.go
+++ b/controlplane/kubeadm/internal/machine_filters_test.go
@@ -25,7 +25,6 @@ import (
 	"k8s.io/utils/pointer"
 
 	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
-	controlplanev1 "sigs.k8s.io/cluster-api/controlplane/kubeadm/api/v1alpha3"
 )
 
 func TestNot(t *testing.T) {
@@ -117,23 +116,23 @@ func TestOlderThan(t *testing.T) {
 	})
 }
 
-func TestSelectedForUpgrade(t *testing.T) {
-	t.Run("machine with selected for upgrade label returns true", func(t *testing.T) {
+func TestHasAnnotationKey(t *testing.T) {
+	t.Run("machine with specified annotation returns true", func(t *testing.T) {
 		g := gomega.NewWithT(t)
 		m := &clusterv1.Machine{}
-		m.SetAnnotations(map[string]string{controlplanev1.SelectedForUpgradeAnnotation: ""})
-		g.Expect(SelectedForUpgrade(m)).To(gomega.BeTrue())
+		m.SetAnnotations(map[string]string{"test": ""})
+		g.Expect(HasAnnotationKey("test")(m)).To(gomega.BeTrue())
 	})
-	t.Run("machine with selected for upgrade label with non-empty value returns true", func(t *testing.T) {
+	t.Run("machine with specified annotation with non-empty value returns true", func(t *testing.T) {
 		g := gomega.NewWithT(t)
 		m := &clusterv1.Machine{}
-		m.SetAnnotations(map[string]string{controlplanev1.SelectedForUpgradeAnnotation: "blue"})
-		g.Expect(SelectedForUpgrade(m)).To(gomega.BeTrue())
+		m.SetAnnotations(map[string]string{"test": "blue"})
+		g.Expect(HasAnnotationKey("test")(m)).To(gomega.BeTrue())
 	})
-	t.Run("machine without selected for upgrade label returns false", func(t *testing.T) {
+	t.Run("machine without specified annotation returns false", func(t *testing.T) {
 		g := gomega.NewWithT(t)
 		m := &clusterv1.Machine{}
-		g.Expect(SelectedForUpgrade(m)).To(gomega.BeFalse())
+		g.Expect(HasAnnotationKey("foo")(m)).To(gomega.BeFalse())
 	})
 }
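Reviewer note: this patch calls FilterableMachineCollection and MachineFilter from controlplane/kubeadm/internal without defining them; the type itself lands in a separate machine_collection.go that is not part of this diff. For context, below is a minimal sketch of the API surface the diff assumes. The names (NewFilterableMachineCollection, NewFilterableMachineCollectionFromMachineList, Insert, Filter, AnyFilter, Oldest) come from the call sites above, but the map-based representation and the method bodies are illustrative assumptions, not the project's actual implementation:

package internal

import (
	clusterv1 "sigs.k8s.io/cluster-api/api/v1alpha3"
)

// MachineFilter is a predicate applied to a single Machine.
type MachineFilter func(machine *clusterv1.Machine) bool

// FilterableMachineCollection is a set of Machines keyed by name.
// NOTE: sketch only; the real type lives in machine_collection.go and may differ.
type FilterableMachineCollection map[string]*clusterv1.Machine

// NewFilterableMachineCollection builds a collection from the given Machines.
func NewFilterableMachineCollection(machines ...*clusterv1.Machine) FilterableMachineCollection {
	s := make(FilterableMachineCollection, len(machines))
	for _, m := range machines {
		s.Insert(m)
	}
	return s
}

// NewFilterableMachineCollectionFromMachineList builds a collection from the items of a MachineList.
func NewFilterableMachineCollectionFromMachineList(ml *clusterv1.MachineList) FilterableMachineCollection {
	s := make(FilterableMachineCollection, len(ml.Items))
	for i := range ml.Items {
		s.Insert(&ml.Items[i])
	}
	return s
}

// Insert adds a Machine to the collection; nil Machines are ignored.
func (s FilterableMachineCollection) Insert(m *clusterv1.Machine) {
	if m != nil {
		s[m.Name] = m
	}
}

// Filter returns the Machines that match every given filter
// (all Machines when no filters are given, matching the old FilterMachines behavior).
func (s FilterableMachineCollection) Filter(filters ...MachineFilter) FilterableMachineCollection {
	res := make(FilterableMachineCollection, len(s))
	for _, m := range s {
		matchesAll := true
		for _, f := range filters {
			if !f(m) {
				matchesAll = false
				break
			}
		}
		if matchesAll {
			res.Insert(m)
		}
	}
	return res
}

// AnyFilter returns the Machines that match at least one of the given filters,
// as used by the requireUpgrade computation in reconcile above.
func (s FilterableMachineCollection) AnyFilter(filters ...MachineFilter) FilterableMachineCollection {
	res := make(FilterableMachineCollection, len(s))
	for _, m := range s {
		for _, f := range filters {
			if f(m) {
				res.Insert(m)
				break
			}
		}
	}
	return res
}

// Oldest returns the Machine with the earliest creation timestamp,
// or nil for an empty collection; this replaces the deleted oldestMachine helper.
func (s FilterableMachineCollection) Oldest() *clusterv1.Machine {
	var oldest *clusterv1.Machine
	for _, m := range s {
		if oldest == nil || m.CreationTimestamp.Before(&oldest.CreationTimestamp) {
			oldest = m
		}
	}
	return oldest
}

Keying the collection by Machine name would make Insert idempotent, and len() keeps working on the map type, which is why call sites such as int32(len(ownedMachines)) survive the type change untouched.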