acpq: distribute minquota overwrites proportionally to available capacity
Varsius committed Feb 4, 2025
1 parent d350846 commit 056f4fa
Showing 2 changed files with 153 additions and 7 deletions.
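In short: when a project's MinQuota override exceeds what could be allocated so far, the shortfall was previously spread according to desire alone, which could place quota into AZs that have no capacity at all. With this change, AZs without capacity are skipped, each AZ's grant is capped at its capacity, and any remainder is distributed in proportion to capacity. Worked through on the numbers of the second new test case below (capacities az-one = 0, az-two = 1, az-three = 2; MinQuota = 6): phase 1 grants at most 1 + 2 = 3 before hitting the capacity caps, leaving 3 missing; phase 2 splits those 3 in the capacity ratio 1:2, so az-two ends at 2 and az-three at 4.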
43 changes: 36 additions & 7 deletions internal/datamodel/apply_computed_project_quota.go
@@ -364,7 +364,7 @@ func acpqComputeQuotas(stats map[limes.AvailabilityZone]clusterAZAllocationStats
}
}
}
target.EnforceConstraints(constraints, allAZsInOrder, isProjectResourceID, isAZAware)
target.EnforceConstraints(stats, constraints, allAZsInOrder, isProjectResourceID, isAZAware)
target.TryFulfillDesired(stats, cfg, allowsQuotaOvercommit)

// phase 3: try granting desired_quota
@@ -382,7 +382,7 @@ func acpqComputeQuotas(stats map[limes.AvailabilityZone]clusterAZAllocationStats
target[az][resourceID].Desired = desiredQuota
}
}
target.EnforceConstraints(constraints, allAZsInOrder, isProjectResourceID, isAZAware)
target.EnforceConstraints(stats, constraints, allAZsInOrder, isProjectResourceID, isAZAware)
target.TryFulfillDesired(stats, cfg, allowsQuotaOvercommit)

// phase 4: try granting additional "any" quota until sum of all quotas is ProjectBaseQuota
@@ -408,7 +408,7 @@ func acpqComputeQuotas(stats map[limes.AvailabilityZone]clusterAZAllocationStats
if resInfo.Topology != liquid.AZSeparatedResourceTopology && !slices.Contains(allAZsInOrder, limes.AvailabilityZoneAny) {
allAZsInOrder = append(allAZsInOrder, limes.AvailabilityZoneAny)
}
target.EnforceConstraints(constraints, allAZsInOrder, isProjectResourceID, isAZAware)
target.EnforceConstraints(stats, constraints, allAZsInOrder, isProjectResourceID, isAZAware)
target.TryFulfillDesired(stats, cfg, allowsQuotaOvercommit)
}

@@ -417,10 +417,14 @@ func acpqComputeQuotas(stats map[limes.AvailabilityZone]clusterAZAllocationStats

// After increasing Desired, but before increasing Allocated, this decreases
// Desired in order to fit into project-local quota constraints.
func (target acpqGlobalTarget) EnforceConstraints(constraints map[db.ProjectResourceID]projectLocalQuotaConstraints, allAZs []limes.AvailabilityZone, isProjectResourceID map[db.ProjectResourceID]struct{}, isAZAware bool) {
func (target acpqGlobalTarget) EnforceConstraints(stats map[limes.AvailabilityZone]clusterAZAllocationStats, constraints map[db.ProjectResourceID]projectLocalQuotaConstraints, allAZs []limes.AvailabilityZone, isProjectResourceID map[db.ProjectResourceID]struct{}, isAZAware bool) {
// TODO: quota is distributed for all resources without updating capacity in between
// how to handle the case that the first resource uses all capacity in some AZ?
for resourceID, c := range constraints {
// raise Allocated as necessary to fulfil minimum quota
if c.MinQuota != nil && *c.MinQuota > 0 {
// phase 1: distribute quota proportionally to desire in AZs that have capacity
// if there is sufficient capacity, all additionally required quota will be assigned in this phase
totalAllocated := uint64(0)
desireScalePerAZ := make(map[limes.AvailabilityZone]uint64)
for _, az := range allAZs {
@@ -430,14 +434,39 @@ func (target acpqGlobalTarget) EnforceConstraints(constraints map[db.ProjectReso
}
t := target[az][resourceID]
totalAllocated += t.Allocated
desireScalePerAZ[az] = *c.MinQuota * max(1, subtractOrZero(t.Desired, t.Allocated))
if stats[az].Capacity > 0 {
desireScalePerAZ[az] = *c.MinQuota * max(1, subtractOrZero(t.Desired, t.Allocated))
}
}
extraAllocatedPerAZ := liquidapi.DistributeFairly(subtractOrZero(*c.MinQuota, totalAllocated), desireScalePerAZ)
missingQuota := subtractOrZero(*c.MinQuota, totalAllocated)
extraAllocatedPerAZ := liquidapi.DistributeFairly(missingQuota, desireScalePerAZ)
for _, az := range allAZs {
if az == limes.AvailabilityZoneAny && isAZAware {
continue
}
target[az][resourceID].Allocated += extraAllocatedPerAZ[az]
extraAllocated := min(extraAllocatedPerAZ[az], stats[az].Capacity)
target[az][resourceID].Allocated += extraAllocated
missingQuota -= extraAllocated
}

// phase 2: if not enough quota could be assigned due to capacity constraints,
// initiate a second distribution round that distributes proportionally to the available capacity.
// Since min quota must be enforced, more quota than available capacity may be distributed
if missingQuota > 0 {
capacityScalePerAZ := make(map[limes.AvailabilityZone]uint64)
for _, az := range allAZs {
if az == limes.AvailabilityZoneAny && isAZAware {
continue
}
capacityScalePerAZ[az] = *c.MinQuota * stats[az].Capacity // TODO: Possible overflow?
}
extraAllocatedPerAZ := liquidapi.DistributeFairly(missingQuota, capacityScalePerAZ)
for _, az := range allAZs {
if az == limes.AvailabilityZoneAny && isAZAware {
continue
}
target[az][resourceID].Allocated += extraAllocatedPerAZ[az]
}
}
}

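For readers who want to poke at the algorithm outside the test suite, here is a minimal, self-contained sketch of the two-phase distribution above. It assumes desire is zero (as in the new tests) and omits the "any" pseudo-AZ; distributeFairly is a hypothetical stand-in for liquidapi.DistributeFairly, written here as largest-remainder proportional rounding — the library's actual rounding and tie-breaking may differ.

package main

import (
	"fmt"
	"sort"
)

// Illustrative sketch only — not the actual limes implementation.
// distributeFairly splits `total` across keys in proportion to their
// weights, using largest-remainder rounding so the shares sum to `total`.
func distributeFairly(total uint64, weights map[string]uint64) map[string]uint64 {
	result := make(map[string]uint64, len(weights))
	var weightSum uint64
	for _, w := range weights {
		weightSum += w
	}
	if weightSum == 0 {
		return result
	}
	type entry struct {
		key string
		rem uint64
	}
	var entries []entry
	var assigned uint64
	for key, w := range weights {
		result[key] = total * w / weightSum // floor of the exact share
		assigned += result[key]
		entries = append(entries, entry{key, total * w % weightSum})
	}
	// hand the rounding leftovers to the largest remainders first
	sort.Slice(entries, func(i, j int) bool {
		if entries[i].rem != entries[j].rem {
			return entries[i].rem > entries[j].rem
		}
		return entries[i].key < entries[j].key // deterministic tie-break
	})
	for i := uint64(0); i < total-assigned; i++ {
		result[entries[i].key]++
	}
	return result
}

func main() {
	// numbers from the second new test case: resource 402, MinQuota = 6
	capacity := map[string]uint64{"az-one": 0, "az-two": 1, "az-three": 2}
	allocated := map[string]uint64{"az-one": 0, "az-two": 0, "az-three": 0}
	minQuota := uint64(6)

	// phase 1: distribute proportionally to desire, but only into AZs that
	// have capacity, and cap each grant at the AZ's capacity
	weights := make(map[string]uint64)
	for az, c := range capacity {
		if c > 0 {
			weights[az] = minQuota // desire is zero here, so max(1, ...) == 1
		}
	}
	missing := minQuota
	for az, extra := range distributeFairly(missing, weights) {
		grant := min(extra, capacity[az])
		allocated[az] += grant
		missing -= grant
	}

	// phase 2: whatever is still missing is distributed proportionally to
	// capacity, even beyond the capacity limit, because MinQuota must hold
	if missing > 0 {
		weights = make(map[string]uint64)
		for az, c := range capacity {
			weights[az] = minQuota * c
		}
		for az, extra := range distributeFairly(missing, weights) {
			allocated[az] += extra
		}
	}

	fmt.Println(allocated) // map[az-one:0 az-three:4 az-two:2]
}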
117 changes: 117 additions & 0 deletions internal/datamodel/apply_computed_project_quota_test.go
@@ -715,6 +715,123 @@ func TestMinQuotaConstraintRespectsAZAwareCapacityDistribution(t *testing.T) {
402: {Allocated: 0},
},
}, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology})

// Multiple AZs with capacity.
// Sufficient total capacity for quota demand.
// Distribute quota w.r.t. available capacity.
input = map[limes.AvailabilityZone]clusterAZAllocationStats{
"az-one": {
Capacity: 0,
ProjectStats: map[db.ProjectResourceID]projectAZAllocationStats{
401: {},
402: {},
},
},
"az-two": {
Capacity: 1, // Capacity available here as well
ProjectStats: map[db.ProjectResourceID]projectAZAllocationStats{
401: {},
402: {},
},
},
"az-three": {
Capacity: 10,
ProjectStats: map[db.ProjectResourceID]projectAZAllocationStats{
401: {},
402: {},
},
},
"any": {
Capacity: 0,
ProjectStats: map[db.ProjectResourceID]projectAZAllocationStats{
401: {},
402: {},
},
},
}

constraints = map[db.ProjectResourceID]projectLocalQuotaConstraints{
401: {MinQuota: p2u64(3)},
402: {MinQuota: p2u64(5)},
}

expectACPQResult(t, input, cfg, constraints, acpqGlobalTarget{
"az-one": {
401: {Allocated: 0},
402: {Allocated: 0},
},
"az-two": {
401: {Allocated: 1},
402: {Allocated: 1},
},
"az-three": {
401: {Allocated: 2},
402: {Allocated: 4},
},
"any": {
401: {Allocated: 0},
402: {Allocated: 0},
},
}, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology})

// Multiple AZs with capacity.
// Total capacity can not fully satisfy quota demand.
// Distribute quota to fulfill the min quota constraint, ignoring capacity limits.
// Distribute proportionally to available capacity.
input = map[limes.AvailabilityZone]clusterAZAllocationStats{
"az-one": {
Capacity: 0,
ProjectStats: map[db.ProjectResourceID]projectAZAllocationStats{
401: {},
402: {},
},
},
"az-two": {
Capacity: 1,
ProjectStats: map[db.ProjectResourceID]projectAZAllocationStats{
401: {},
402: {},
},
},
"az-three": {
Capacity: 2,
ProjectStats: map[db.ProjectResourceID]projectAZAllocationStats{
401: {},
402: {},
},
},
"any": {
Capacity: 0,
ProjectStats: map[db.ProjectResourceID]projectAZAllocationStats{
401: {},
402: {},
},
},
}

constraints = map[db.ProjectResourceID]projectLocalQuotaConstraints{
401: {MinQuota: p2u64(3)},
402: {MinQuota: p2u64(6)},
}

expectACPQResult(t, input, cfg, constraints, acpqGlobalTarget{
"az-one": {
401: {Allocated: 0},
402: {Allocated: 0},
},
"az-two": {
401: {Allocated: 1},
402: {Allocated: 2},
},
"az-three": {
401: {Allocated: 2},
402: {Allocated: 4},
},
"any": {
401: {Allocated: 0},
402: {Allocated: 0},
},
}, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology})
}

// Shortcut to avoid repetition in projectAZAllocationStats literals.
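Reading the expected values of the last test case against the two phases: for resource 402 (MinQuota = 6), phase 1 can grant at most 1 in az-two and 2 in az-three before hitting capacity, and the remaining 3 are spread capacity-proportionally (1:2) in phase 2, giving 2 and 4. For resource 401 (MinQuota = 3), the result is 1 and 2 either way: depending on DistributeFairly's tie-breaking, phase 1 either lands exactly on 1 and 2, or allots 2 to az-two, where it is capped at the capacity of 1 and the surplus then flows to az-three in phase 2.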
