Skip to content

Commit

Permalink
fix: Remove correct set when host is removed
Browse files Browse the repository at this point in the history
  • Loading branch information
Callisto13 committed Dec 7, 2022
1 parent 73cd189 commit 117c7b8
Show file tree
Hide file tree
Showing 6 changed files with 98 additions and 27 deletions.
21 changes: 21 additions & 0 deletions api/v1alpha1/zz_generated.deepcopy.go

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

Original file line number Diff line number Diff line change
Expand Up @@ -344,8 +344,8 @@ spec:
description: Ready is true when all Replicas report ready
type: boolean
readyReplicas:
description: ReadyReplicas is the number of pods targeted by this
ReplicaSet with a Ready Condition.
description: ReadyReplicas is the number of microvms controlled by
this Deployment with a Ready Condition.
format: int32
type: integer
replicas:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -342,7 +342,7 @@ spec:
description: Ready is true when Replicas is Equal to ReadyReplicas.
type: boolean
readyReplicas:
description: ReadyReplicas is the number of pods targeted by this
description: ReadyReplicas is the number of microvms targeted by this
ReplicaSet with a Ready Condition.
format: int32
type: integer
Expand Down
53 changes: 30 additions & 23 deletions controllers/microvmdeployment_controller.go
Original file line number Diff line number Diff line change
Expand Up @@ -178,21 +178,25 @@ func (r *MicrovmDeploymentReconciler) reconcileNormal(
ready int32 = 0
created int32 = 0

hostMap = v1alpha1.HostMap{}
activeHosts = v1alpha1.HostMap{}
deadHosts = v1alpha1.HostMap{}
)

for _, rs := range rsList {
created += rs.Status.Replicas
ready += rs.Status.ReadyReplicas

hostMap[rs.Spec.Host.Endpoint] = struct{}{}
activeHosts[rs.Spec.Host.Endpoint] = struct{}{}
deadHosts[rs.Spec.Host.Endpoint] = struct{}{}
}

mvmDeploymentScope.SetCreatedReplicas(created)
mvmDeploymentScope.SetReadyReplicas(ready)

// get a count of the replicasets created
createdSets := len(hostMap)
createdSets := len(activeHosts)
// check whether any hosts have been removed
deadHosts = mvmDeploymentScope.ExpiredHosts(deadHosts)

switch {
// if all desired microvms are ready, mark the deployment ready.
Expand All @@ -202,12 +206,34 @@ func (r *MicrovmDeploymentReconciler) reconcileNormal(
mvmDeploymentScope.SetReady()

return reconcile.Result{}, nil
// if we are here then a host has been removed.
// we delete the set associated with that host.
case len(deadHosts) > 0:
mvmDeploymentScope.Info("MicrovmDeployment updating: delete microvmreplicaset")
mvmDeploymentScope.SetNotReady(infrav1.MicrovmDeploymentUpdatingReason, "Info", "")

for _, rs := range rsList {
if _, ok := deadHosts[rs.Spec.Host.Endpoint]; !ok {
continue
}

if !rs.DeletionTimestamp.IsZero() {
return ctrl.Result{}, nil
}

if err := r.Delete(ctx, &rs); err != nil {
mvmDeploymentScope.Error(err, "failed deleting microvmreplicaset")
mvmDeploymentScope.SetNotReady(infrav1.MicrovmDeploymentUpdateFailedReason, "Error", "")

return ctrl.Result{}, err
}
}
// if we are in this branch then not all desired replicasets have been created.
// create a new one and set the ownerref to this controller.
case createdSets < mvmDeploymentScope.RequiredSets():
mvmDeploymentScope.Info("MicrovmDeployment creating: create new microvmreplicaset")

host, err := mvmDeploymentScope.DetermineHost(hostMap)
host, err := mvmDeploymentScope.DetermineHost(activeHosts)
if err != nil {
mvmDeploymentScope.Error(err, "failed creating owned microvmreplicaset")
mvmDeploymentScope.SetNotReady(infrav1.MicrovmDeploymentProvisionFailedReason, "Error", "")
Expand All @@ -223,25 +249,6 @@ func (r *MicrovmDeploymentReconciler) reconcileNormal(
}

mvmDeploymentScope.SetNotReady(infrav1.MicrovmDeploymentIncompleteReason, "Info", "")
// if we are here then a scale down has been requested.
// we delete the first found until the numbers balance out.
// TODO the way this works is very naive and often ends up deleting everything
// if the timing is wrong/right, find a better way https://github.com/weaveworks-liquidmetal/microvm-operator/issues/17
case createdSets > mvmDeploymentScope.RequiredSets():
mvmDeploymentScope.Info("MicrovmDeployment updating: delete microvmreplicaset")
mvmDeploymentScope.SetNotReady(infrav1.MicrovmDeploymentUpdatingReason, "Info", "")

rs := rsList[0]
if !rs.DeletionTimestamp.IsZero() {
return ctrl.Result{}, nil
}

if err := r.Delete(ctx, &rs); err != nil {
mvmDeploymentScope.Error(err, "failed deleting microvmreplicaset")
mvmDeploymentScope.SetNotReady(infrav1.MicrovmDeploymentUpdateFailedReason, "Error", "")

return ctrl.Result{}, err
}
// if all desired objects have been created, but are not quite ready yet,
// set the condition and requeue
default:
Expand Down
9 changes: 9 additions & 0 deletions internal/scope/mvmd.go
Original file line number Diff line number Diff line change
Expand Up @@ -125,6 +125,15 @@ func (m *MicrovmDeploymentScope) DetermineHost(setHosts infrav1.HostMap) (microv
return microvm.Host{}, errors.New("could not find free host")
}

// ExpiredHosts strips every host still declared in the deployment spec
// from setHosts and returns the remainder: endpoints present in the given
// map but no longer listed in the spec (i.e. hosts that were removed).
// Note: setHosts is mutated in place; pass a copy if the original map
// must be preserved.
func (m *MicrovmDeploymentScope) ExpiredHosts(setHosts infrav1.HostMap) infrav1.HostMap {
	specHosts := m.Hosts()
	for i := range specHosts {
		delete(setHosts, specHosts[i].Endpoint)
	}

	return setHosts
}

// SetCreatedReplicas records the number of microvms which have been created
// this does not give information about whether the microvms are ready
func (m *MicrovmDeploymentScope) SetCreatedReplicas(count int32) {
Expand Down
36 changes: 35 additions & 1 deletion internal/scope/mvmd_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@ import (
)

func TestDetermineHost(t *testing.T) {
// RegisterTestingT(t)
g := NewWithT(t)

scheme, err := setupScheme()
Expand Down Expand Up @@ -76,6 +75,41 @@ func TestDetermineHost(t *testing.T) {
}
}

// TestExpiredHosts verifies that ExpiredHosts prunes endpoints that are
// still declared in the deployment spec and leaves only the removed ones.
func TestExpiredHosts(t *testing.T) {
	g := NewWithT(t)

	scheme, err := setupScheme()
	g.Expect(err).NotTo(HaveOccurred())

	// Build a deployment whose spec declares hosts "1" and "2".
	deployment := newDeployment("md-1", 0)
	deployment.Spec.Hosts = []microvm.Host{
		{Endpoint: "1"},
		{Endpoint: "2"},
	}

	fakeClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(deployment).Build()

	deployScope, err := scope.NewMicrovmDeploymentScope(scope.MicrovmDeploymentScopeParams{
		Client:            fakeClient,
		MicrovmDeployment: deployment,
	})
	g.Expect(err).NotTo(HaveOccurred())

	// Seed the map with two endpoints from the spec and two that are not.
	knownHosts := infrav1.HostMap{
		"1": struct{}{},
		"2": struct{}{},
		"3": struct{}{},
		"4": struct{}{},
	}

	// Only the endpoints absent from the spec should survive the prune.
	expired := deployScope.ExpiredHosts(knownHosts)
	g.Expect(len(expired)).To(Equal(2))
	g.Expect(knownHosts).ToNot(HaveKey("1"))
	g.Expect(knownHosts).ToNot(HaveKey("2"))
	g.Expect(knownHosts).To(HaveKey("3"))
	g.Expect(knownHosts).To(HaveKey("4"))
}

func newHostMap(hostCount int) infrav1.HostMap {
hostMap := infrav1.HostMap{}
for i := 0; i < hostCount; i++ {
Expand Down

0 comments on commit 117c7b8

Please sign in to comment.