fix 'enforce-err-cuddling' wsl config for golangci-lint
* The 'enforce-err-cuddling' property under 'linters-settings.wsl' is not allowed by the latest JSON schema for golangci-lint. Replace it with 'force-err-cuddling'.
* Switching to 'force-err-cuddling' revealed existing lint errors: an if statement that checks an error must be cuddled with the statement that assigned that error. Fixed all of the revealed lint issues.

Signed-off-by: Parikshith <[email protected]>
parikshithb authored and raghavendra-talur committed Feb 18, 2025
1 parent e2d4809 commit a7ca2eb
Showing 8 changed files with 6 additions and 12 deletions.
2 changes: 1 addition & 1 deletion .golangci.yaml
@@ -34,7 +34,7 @@ linters-settings:
strict: true
wsl:
allow-trailing-comment: true
-enforce-err-cuddling: true
+force-err-cuddling: true
revive:
ignore-generated-header: false
severity: error
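For context, here is a minimal, self-contained sketch of the pattern that wsl reports once force-err-cuddling is enabled. The names doSomething, flaggedByWsl, and acceptedByWsl are hypothetical and only illustrate the rule described in the commit message; they are not code from this repository.

package main

import (
    "errors"
    "fmt"
)

// doSomething is a hypothetical helper used only to illustrate the lint rule.
func doSomething() error { return errors.New("boom") }

// flaggedByWsl shows the shape force-err-cuddling reports: the if statement
// that checks err is separated from the assignment that produced it
// (here by a blank line; an intervening comment or statement triggers
// the same report).
func flaggedByWsl() error {
    err := doSomething()

    if err != nil {
        return fmt.Errorf("flagged: %w", err)
    }

    return nil
}

// acceptedByWsl keeps the if statement cuddled with the error assignment,
// which is the change applied throughout the files below.
func acceptedByWsl() error {
    err := doSomething()
    if err != nil {
        return fmt.Errorf("accepted: %w", err)
    }

    return nil
}

func main() {
    fmt.Println(flaggedByWsl(), acceptedByWsl())
}

This is also why several hunks below move the assignment down past a comment rather than simply deleting a blank line: the assignment must sit directly above the if that checks the error.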
2 changes: 1 addition & 1 deletion internal/controller/drcluster_controller.go
@@ -1004,8 +1004,8 @@ func (u *drclusterInstance) cleanClusters(clusters []ramen.DRCluster) (bool, err
cleanedCount := 0

for _, cluster := range clusters {
-requeue, err := u.removeFencingCR(cluster)
// Can just error alone be checked?
+requeue, err := u.removeFencingCR(cluster)
if err != nil {
needRequeue = true
} else {
4 changes: 2 additions & 2 deletions internal/controller/drplacementcontrol_controller.go
@@ -784,14 +784,14 @@ func (r *DRPlacementControlReconciler) deleteAllManagedClusterViews(
) error {
// Only after the VRGs have been deleted, we delete the MCVs for the VRGs and the NS
for _, drClusterName := range clusterNames {
-err := r.MCVGetter.DeleteVRGManagedClusterView(drpc.Name, drpc.Namespace, drClusterName, rmnutil.MWTypeVRG)
// Delete MCV for the VRG
+err := r.MCVGetter.DeleteVRGManagedClusterView(drpc.Name, drpc.Namespace, drClusterName, rmnutil.MWTypeVRG)
if err != nil {
return fmt.Errorf("failed to delete VRG MCV %w", err)
}

-err = r.MCVGetter.DeleteNamespaceManagedClusterView(drpc.Name, drpc.Namespace, drClusterName, rmnutil.MWTypeNS)
// Delete MCV for Namespace
+err = r.MCVGetter.DeleteNamespaceManagedClusterView(drpc.Name, drpc.Namespace, drClusterName, rmnutil.MWTypeNS)
if err != nil {
return fmt.Errorf("failed to delete namespace MCV %w", err)
}
2 changes: 0 additions & 2 deletions internal/controller/util/cephfs_cg.go
@@ -70,7 +70,6 @@ func DeleteReplicationGroupSource(
}

err := k8sClient.Delete(ctx, rgs)
-
if k8serrors.IsNotFound(err) {
return nil
}
@@ -90,7 +89,6 @@ func DeleteReplicationGroupDestination(
}

err := k8sClient.Delete(ctx, rgd)
-
if k8serrors.IsNotFound(err) {
return nil
}
3 changes: 1 addition & 2 deletions internal/controller/volsync/vshandler.go
@@ -259,10 +259,9 @@ func (v *VSHandler) createOrUpdateRD(
func (v *VSHandler) isPVCInUseByNonRDPod(pvcNamespacedName types.NamespacedName) (bool, error) {
rd := &volsyncv1alpha1.ReplicationDestination{}

-err := v.client.Get(v.ctx, pvcNamespacedName, rd)
-
// IF RD is Found, then no more checks are needed. We'll assume that the RD
// was created when the PVC was Not in use.
+err := v.client.Get(v.ctx, pvcNamespacedName, rd)
if err == nil {
return false, nil
} else if !errors.IsNotFound(err) {
2 changes: 1 addition & 1 deletion internal/controller/volumereplicationgroup_controller.go
@@ -330,10 +330,10 @@ func filterPVC(reader client.Reader, pvc *corev1.PersistentVolumeClaim, log logr
continue
}

-selector, err := metav1.LabelSelectorAsSelector(&pvcSelector.LabelSelector)
// continue if we fail to get the labels for this object hoping
// that pvc might actually belong to some other vrg instead of
// this. If not found, then reconcile request would not be sent
+selector, err := metav1.LabelSelectorAsSelector(&pvcSelector.LabelSelector)
if err != nil {
log1.Error(err, "Failed to get the label selector from VolumeReplicationGroup")

2 changes: 0 additions & 2 deletions internal/controller/vrg_kubeobjects.go
@@ -261,7 +261,6 @@ func (v *VRGInstance) kubeObjectsCaptureStartOrResume(
func (v *VRGInstance) executeHook(hook kubeobjects.HookSpec, log1 logr.Logger) error {
if hook.Type == "check" {
hookResult, err := util.EvaluateCheckHook(v.reconciler.APIReader, &hook, log1)
-
if err != nil {
log1.Error(err, "error occurred during check hook ")
} else {
@@ -324,7 +323,6 @@ func (v *VRGInstance) kubeObjectsGroupCapture(
log1.Info("Kube objects group capture request submitted")
} else {
err := request.Status(v.log)
-
if err == nil {
log1.Info("Kube objects group captured", "start", request.StartTime(), "end", request.EndTime())

1 change: 0 additions & 1 deletion internal/controller/vrg_volrep.go
@@ -2024,7 +2024,6 @@ func (v *VRGInstance) restorePVsAndPVCsFromS3(result *ctrl.Result) (int, error)
// PVC may cause a new PV to be created.
// Ignoring PVC restore errors helps with the upgrade from ODF-4.12.x to 4.13
pvcCount, err = v.restorePVCsFromObjectStore(objectStore, s3ProfileName)
-
if err != nil || pvCount != pvcCount {
v.log.Info(fmt.Sprintf("Warning: Mismatch in PV/PVC count %d/%d (%v)",
pvCount, pvcCount, err))
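Assuming golangci-lint is installed locally and picks up the updated .golangci.yaml, the configuration change and the cuddling fixes above can be re-verified from the repository root with, for example:

golangci-lint run ./...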
