From 0a0cffcc6e49e4f642f88b72cdeaefd6ae467989 Mon Sep 17 00:00:00 2001 From: Ritesh H Shukla Date: Thu, 19 Dec 2024 22:00:03 -0800 Subject: [PATCH 001/168] HDDS-11964. Allow ozone sh bucket create to take obs and fso as options. (#7599) --- .../dist/src/main/smoketest/createmrenv.robot | 2 +- .../smoketest/debug/ozone-debug-ldb.robot | 2 +- .../shell/bucket/CreateBucketHandler.java | 33 +++++++++++++++---- 3 files changed, 28 insertions(+), 9 deletions(-) diff --git a/hadoop-ozone/dist/src/main/smoketest/createmrenv.robot b/hadoop-ozone/dist/src/main/smoketest/createmrenv.robot index 7fcf3619eff..2d68be00e65 100644 --- a/hadoop-ozone/dist/src/main/smoketest/createmrenv.robot +++ b/hadoop-ozone/dist/src/main/smoketest/createmrenv.robot @@ -32,7 +32,7 @@ Create volume ${result} = Execute ozone sh volume create /${volume} --user hadoop --space-quota 100TB --namespace-quota 100 Should not contain ${result} Failed Create bucket - Execute ozone sh bucket create /${volume}/${bucket} --space-quota 1TB --layout FILE_SYSTEM_OPTIMIZED + Execute ozone sh bucket create /${volume}/${bucket} --space-quota 1TB --layout fso *** Test Cases *** Create test volume, bucket and key diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot index 0fa43dee6c0..4380a3cf94d 100644 --- a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot +++ b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot @@ -31,7 +31,7 @@ ${TESTFILE} testfile Write keys Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab Execute ozone sh volume create ${VOLUME} - Execute ozone sh bucket create ${VOLUME}/${BUCKET} -l OBJECT_STORE + Execute ozone sh bucket create ${VOLUME}/${BUCKET} -l obs Execute dd if=/dev/urandom of=${TEMP_DIR}/${TESTFILE}1 bs=100 count=10 Execute ozone sh key put ${VOLUME}/${BUCKET}/${TESTFILE}1 ${TEMP_DIR}/${TESTFILE}1 Execute dd if=/dev/urandom of=${TEMP_DIR}/${TESTFILE}2 bs=100 count=15 diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java index 277c5afff21..02f5e80f6eb 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java @@ -59,11 +59,32 @@ public class CreateBucketHandler extends BucketHandler { " user if not specified") private String ownerName; - enum AllowedBucketLayouts { FILE_SYSTEM_OPTIMIZED, OBJECT_STORE, LEGACY } + private static class LayoutConverter implements CommandLine.ITypeConverter { + @Override + public BucketLayout convert(String value) { + if (value == null) { + return null; + } + switch (value) { + case "fso": + return BucketLayout.FILE_SYSTEM_OPTIMIZED; + case "obs": + return BucketLayout.OBJECT_STORE; + default: + for (BucketLayout candidate : BucketLayout.values()) { + if (candidate.name().equalsIgnoreCase(value)) { + return candidate; + } + } + throw new IllegalArgumentException("Unknown bucket layout: " + value); + } + } + } - @Option(names = { "--layout", "-l" }, - description = "Allowed Bucket Layouts: ${COMPLETION-CANDIDATES}") - private AllowedBucketLayouts allowedBucketLayout; + @Option(names = { "--layout", "-l" }, converter = LayoutConverter.class, + description = "Allowed Bucket Layouts: fso (for file system optimized buckets 
FILE_SYSTEM_OPTIMIZED), " + + "obs (for object store optimized OBJECT_STORE) and legacy (LEGACY is Deprecated)") + private BucketLayout allowedBucketLayout; @CommandLine.Mixin private ShellReplicationOptions replication; @@ -86,9 +107,7 @@ public void execute(OzoneClient client, OzoneAddress address) new BucketArgs.Builder().setStorageType(StorageType.DEFAULT) .setVersioning(false).setOwner(ownerName); if (allowedBucketLayout != null) { - BucketLayout bucketLayout = - BucketLayout.fromString(allowedBucketLayout.toString()); - bb.setBucketLayout(bucketLayout); + bb.setBucketLayout(allowedBucketLayout); } // TODO: New Client talking to old server, will it create a LEGACY bucket? From 0306d974000085eba243439d3a2e95201aba7459 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 20 Dec 2024 14:46:02 +0100 Subject: [PATCH 002/168] HDDS-11961. Improve existing repair tests (#7595) --- .../apache/ozone/test/GenericTestUtils.java | 114 ++++---- .../ozone/repair/om/TestFSORepairTool.java | 272 +++++++++--------- .../ozone/shell/TestOzoneRepairShell.java | 86 +++--- .../repair/ldb/TestTransactionInfoRepair.java | 124 ++++---- 4 files changed, 292 insertions(+), 304 deletions(-) diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java index 8a770424766..e42f80b329b 100644 --- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java +++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java @@ -45,6 +45,8 @@ import java.lang.reflect.Modifier; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.BooleanSupplier; +import java.util.function.Consumer; +import java.util.function.Supplier; import java.util.stream.Collectors; import static java.nio.charset.StandardCharsets.UTF_8; @@ -353,86 +355,88 @@ private static long monotonicNow() { return System.nanoTime() / NANOSECONDS_PER_MILLISECOND; } - /** - * Capture output printed to {@link System#err}. - *
-   * Usage:
-   * <pre>
-   *   try (SystemErrCapturer capture = new SystemErrCapturer()) {
-   *     ...
-   *     // Call capture.getOutput() to get the output string
-   *   }
-   * </pre>
-   * <p>
- * TODO: Add lambda support once Java 8 is common. - * {@code - * SystemErrCapturer.withCapture(capture -> { - * ... - * }) - * } - */ - public static class SystemErrCapturer implements AutoCloseable { + public static PrintStreamCapturer captureOut() { + return new SystemOutCapturer(); + } + + public static PrintStreamCapturer captureErr() { + return new SystemErrCapturer(); + } + + /** Capture contents of a {@code PrintStream}, until {@code close()}d. */ + public abstract static class PrintStreamCapturer implements AutoCloseable, Supplier { private final ByteArrayOutputStream bytes; private final PrintStream bytesPrintStream; - private final PrintStream oldErr; + private final PrintStream old; + private final Consumer restore; - public SystemErrCapturer() throws UnsupportedEncodingException { + protected PrintStreamCapturer(PrintStream out, Consumer install) { + old = out; bytes = new ByteArrayOutputStream(); - bytesPrintStream = new PrintStream(bytes, false, UTF_8.name()); - oldErr = System.err; - System.setErr(new TeePrintStream(oldErr, bytesPrintStream)); + try { + bytesPrintStream = new PrintStream(bytes, false, UTF_8.name()); + install.accept(new TeePrintStream(out, bytesPrintStream)); + restore = install; + } catch (UnsupportedEncodingException e) { + throw new IllegalStateException(e); + } + } + + @Override + public String get() { + return getOutput(); + } + + public String getOutput() { + try { + return bytes.toString(UTF_8.name()); + } catch (UnsupportedEncodingException e) { + throw new IllegalStateException(e); + } } - public String getOutput() throws UnsupportedEncodingException { - return bytes.toString(UTF_8.name()); + public void reset() { + bytes.reset(); } @Override public void close() throws Exception { IOUtils.closeQuietly(bytesPrintStream); - System.setErr(oldErr); + restore.accept(old); } } /** - * Capture output printed to {@link System#out}. + * Capture output printed to {@link System#err}. *
* Usage: *
-   *   try (SystemOutCapturer capture = new SystemOutCapturer()) {
+   *   try (PrintStreamCapturer capture = captureErr()) {
    *     ...
    *     // Call capture.getOutput() to get the output string
    *   }
    * 
- *

- * TODO: Add lambda support once Java 8 is common. - * {@code - * SystemOutCapturer.withCapture(capture -> { - * ... - * }) - * } */ - public static class SystemOutCapturer implements AutoCloseable { - private final ByteArrayOutputStream bytes; - private final PrintStream bytesPrintStream; - private final PrintStream oldOut; - - public SystemOutCapturer() throws - UnsupportedEncodingException { - bytes = new ByteArrayOutputStream(); - bytesPrintStream = new PrintStream(bytes, false, UTF_8.name()); - oldOut = System.out; - System.setOut(new TeePrintStream(oldOut, bytesPrintStream)); - } - - public String getOutput() throws UnsupportedEncodingException { - return bytes.toString(UTF_8.name()); + public static class SystemErrCapturer extends PrintStreamCapturer { + public SystemErrCapturer() { + super(System.err, System::setErr); } + } - @Override - public void close() throws Exception { - IOUtils.closeQuietly(bytesPrintStream); - System.setOut(oldOut); + /** + * Capture output printed to {@link System#out}. + *
+ * Usage: + *
+   *   try (PrintStreamCapturer capture = captureOut()) {
+   *     ...
+   *     // Call capture.getOutput() to get the output string
+   *   }
+   * 
+ */ + public static class SystemOutCapturer extends PrintStreamCapturer { + public SystemOutCapturer() { + super(System.out, System::setOut); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java index 6c40e69432f..d37f8ce57fb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java @@ -17,12 +17,12 @@ */ package org.apache.hadoop.ozone.repair.om; -import org.apache.commons.io.IOUtils; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -35,9 +35,9 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.repair.OzoneRepair; +import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.MethodOrderer; @@ -45,33 +45,40 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestMethodOrder; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine; -import java.io.ByteArrayOutputStream; import java.io.File; import java.io.IOException; -import java.io.PrintStream; import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; -import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * FSORepairTool test cases. 
*/ @TestMethodOrder(MethodOrderer.OrderAnnotation.class) public class TestFSORepairTool { + public static final Logger LOG = LoggerFactory.getLogger(TestFSORepairTool.class); - private static final ByteArrayOutputStream OUT = new ByteArrayOutputStream(); - private static final ByteArrayOutputStream ERR = new ByteArrayOutputStream(); - private static final PrintStream OLD_OUT = System.out; - private static final PrintStream OLD_ERR = System.err; - private static final String DEFAULT_ENCODING = UTF_8.name(); + private static final int ORDER_DRY_RUN = 1; + //private static final int ORDER_REPAIR_SOME = 2; // TODO add test case + private static final int ORDER_REPAIR_ALL = 3; + private static final int ORDER_REPAIR_ALL_AGAIN = 4; + private static final int ORDER_RESTART_OM = 5; + private static MiniOzoneCluster cluster; private static FileSystem fs; private static OzoneClient client; @@ -82,6 +89,9 @@ public class TestFSORepairTool { private static FSORepairTool.Report fullReport; private static FSORepairTool.Report emptyReport; + private GenericTestUtils.PrintStreamCapturer out; + private GenericTestUtils.PrintStreamCapturer err; + @BeforeAll public static void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); @@ -137,81 +147,54 @@ public static void setup() throws Exception { @BeforeEach public void init() throws Exception { - System.setOut(new PrintStream(OUT, false, DEFAULT_ENCODING)); - System.setErr(new PrintStream(ERR, false, DEFAULT_ENCODING)); + out = GenericTestUtils.captureOut(); + err = GenericTestUtils.captureErr(); } @AfterEach public void clean() throws Exception { // reset stream after each unit test - OUT.reset(); - ERR.reset(); - - // restore system streams - System.setOut(OLD_OUT); - System.setErr(OLD_ERR); + IOUtils.closeQuietly(out, err); } @AfterAll public static void reset() throws IOException { - if (cluster != null) { - cluster.shutdown(); - } - if (client != null) { - client.close(); - } - IOUtils.closeQuietly(fs); + IOUtils.closeQuietly(fs, client, cluster); } /** * Test to check a connected tree with one bucket. * The output remains the same in debug and repair mode as the tree is connected. - * @throws Exception */ - @Order(1) - @Test - public void testConnectedTreeOneBucket() throws Exception { + @Order(ORDER_DRY_RUN) + @ParameterizedTest + @ValueSource(booleans = {true, false}) + void testConnectedTreeOneBucket(boolean dryRun) { String expectedOutput = serializeReport(vol1Report); - // Test the connected tree in debug mode. - String[] args = new String[] {"om", "fso-tree", "--db", dbPath, "-v", "/vol1", "-b", "bucket1"}; - int exitCode = cmd.execute(args); - assertEquals(0, exitCode); + int exitCode = execute(dryRun, "-v", "/vol1", "-b", "bucket1"); + assertEquals(0, exitCode, err.getOutput()); - String cliOutput = OUT.toString(DEFAULT_ENCODING); + String cliOutput = out.getOutput(); String reportOutput = extractRelevantSection(cliOutput); - Assertions.assertEquals(expectedOutput, reportOutput); - - OUT.reset(); - ERR.reset(); - - // Running again in repair mode should give same results since the tree is connected. 
- String[] args1 = new String[] {"om", "fso-tree", "--db", dbPath, "--repair", "-v", "/vol1", "-b", "bucket1"}; - int exitCode1 = cmd.execute(args1); - assertEquals(0, exitCode1); - - String cliOutput1 = OUT.toString(DEFAULT_ENCODING); - String reportOutput1 = extractRelevantSection(cliOutput1); - Assertions.assertEquals(expectedOutput, reportOutput1); + assertEquals(expectedOutput, reportOutput); } /** * Test to verify the file size of the tree. - * @throws Exception */ - @Order(2) + @Order(ORDER_DRY_RUN) @Test - public void testReportedDataSize() throws Exception { + public void testReportedDataSize() { String expectedOutput = serializeReport(vol2Report); - String[] args = new String[] {"om", "fso-tree", "--db", dbPath, "-v", "/vol2"}; - int exitCode = cmd.execute(args); + int exitCode = dryRun("-v", "/vol2"); assertEquals(0, exitCode); - String cliOutput = OUT.toString(DEFAULT_ENCODING); + String cliOutput = out.getOutput(); String reportOutput = extractRelevantSection(cliOutput); - Assertions.assertEquals(expectedOutput, reportOutput); + assertEquals(expectedOutput, reportOutput); } /** @@ -222,132 +205,127 @@ public void testReportedDataSize() throws Exception { * - Non-existent volume. * - Using a bucket filter without specifying a volume. */ - @Order(3) + @Order(ORDER_DRY_RUN) @Test - public void testVolumeAndBucketFilter() throws Exception { + public void testVolumeFilter() { + String expectedOutput1 = serializeReport(vol1Report); + // When volume filter is passed - String[] args1 = new String[]{"om", "fso-tree", "--db", dbPath, "--volume", "/vol1"}; - int exitCode1 = cmd.execute(args1); + int exitCode1 = dryRun("--volume", "/vol1"); assertEquals(0, exitCode1); - String cliOutput1 = OUT.toString(DEFAULT_ENCODING); + String cliOutput1 = out.getOutput(); String reportOutput1 = extractRelevantSection(cliOutput1); - String expectedOutput1 = serializeReport(vol1Report); - Assertions.assertEquals(expectedOutput1, reportOutput1); + assertEquals(expectedOutput1, reportOutput1); + } - OUT.reset(); - ERR.reset(); + @Order(ORDER_DRY_RUN) + @Test + public void testVolumeAndBucketFilter() { + String expectedOutput2 = serializeReport(vol1Report); // When both volume and bucket filters are passed - String[] args2 = new String[]{"om", "fso-tree", "--db", dbPath, "--volume", "/vol1", - "--bucket", "bucket1"}; - int exitCode2 = cmd.execute(args2); + int exitCode2 = dryRun("--volume", "/vol1", "--bucket", "bucket1"); assertEquals(0, exitCode2); - String cliOutput2 = OUT.toString(DEFAULT_ENCODING); + String cliOutput2 = out.getOutput(); String reportOutput2 = extractRelevantSection(cliOutput2); - String expectedOutput2 = serializeReport(vol1Report); - Assertions.assertEquals(expectedOutput2, reportOutput2); - - OUT.reset(); - ERR.reset(); + assertEquals(expectedOutput2, reportOutput2); + } + @Order(ORDER_DRY_RUN) + @Test + public void testNonExistentBucket() { // When a non-existent bucket filter is passed - String[] args3 = new String[]{"om", "fso-tree", "--db", dbPath, "--volume", "/vol1", - "--bucket", "bucket3"}; - int exitCode3 = cmd.execute(args3); - assertEquals(0, exitCode3); - String cliOutput3 = OUT.toString(DEFAULT_ENCODING); - Assertions.assertTrue(cliOutput3.contains("Bucket 'bucket3' does not exist in volume '/vol1'.")); - - OUT.reset(); - ERR.reset(); + int exitCode = dryRun("--volume", "/vol1", "--bucket", "bucket3"); + assertEquals(0, exitCode); + String cliOutput = out.getOutput(); + assertThat(cliOutput).contains("Bucket 'bucket3' does not exist in volume '/vol1'."); + } + 
@Order(ORDER_DRY_RUN) + @Test + public void testNonExistentVolume() { // When a non-existent volume filter is passed - String[] args4 = new String[]{"om", "fso-tree", "--db", dbPath, "--volume", "/vol5"}; - int exitCode4 = cmd.execute(args4); - assertEquals(0, exitCode4); - String cliOutput4 = OUT.toString(DEFAULT_ENCODING); - Assertions.assertTrue(cliOutput4.contains("Volume '/vol5' does not exist.")); - - OUT.reset(); - ERR.reset(); + int exitCode = dryRun("--volume", "/vol5"); + assertEquals(0, exitCode); + String cliOutput = out.getOutput(); + assertThat(cliOutput).contains("Volume '/vol5' does not exist."); + } + @Order(ORDER_DRY_RUN) + @Test + public void testBucketFilterWithoutVolume() { // When bucket filter is passed without the volume filter. - String[] args5 = new String[]{"om", "fso-tree", "--db", dbPath, "--bucket", "bucket1"}; - int exitCode5 = cmd.execute(args5); - assertEquals(0, exitCode5); - String cliOutput5 = OUT.toString(DEFAULT_ENCODING); - Assertions.assertTrue(cliOutput5.contains("--bucket flag cannot be used without specifying --volume.")); + int exitCode = dryRun("--bucket", "bucket1"); + assertEquals(0, exitCode); + String cliOutput = out.getOutput(); + assertThat(cliOutput).contains("--bucket flag cannot be used without specifying --volume."); } /** * Test to verify that non-fso buckets, such as legacy and obs, are skipped during the process. - * @throws Exception */ - @Order(4) + @Order(ORDER_DRY_RUN) @Test - public void testNonFSOBucketsSkipped() throws Exception { - String[] args = new String[] {"om", "fso-tree", "--db", dbPath}; - int exitCode = cmd.execute(args); + public void testNonFSOBucketsSkipped() { + int exitCode = dryRun(); assertEquals(0, exitCode); - String cliOutput = OUT.toString(DEFAULT_ENCODING); - Assertions.assertTrue(cliOutput.contains("Skipping non-FSO bucket /vol1/obs-bucket")); - Assertions.assertTrue(cliOutput.contains("Skipping non-FSO bucket /vol1/legacy-bucket")); + String cliOutput = out.getOutput(); + assertThat(cliOutput).contains("Skipping non-FSO bucket /vol1/obs-bucket"); + assertThat(cliOutput).contains("Skipping non-FSO bucket /vol1/legacy-bucket"); } /** * If no file is present inside a vol/bucket, the report statistics should be zero. - * @throws Exception */ - @Order(5) + @Order(ORDER_DRY_RUN) @Test - public void testEmptyFileTrees() throws Exception { + public void testEmptyFileTrees() { String expectedOutput = serializeReport(emptyReport); // Run on an empty volume and bucket - String[] args = new String[] {"om", "fso-tree", "--db", dbPath, "-v", "/vol-empty", "-b", "bucket-empty"}; - int exitCode = cmd.execute(args); + int exitCode = dryRun("-v", "/vol-empty", "-b", "bucket-empty"); assertEquals(0, exitCode); - String cliOutput = OUT.toString(DEFAULT_ENCODING); + String cliOutput = out.getOutput(); String reportOutput = extractRelevantSection(cliOutput); - Assertions.assertEquals(expectedOutput, reportOutput); + assertEquals(expectedOutput, reportOutput); } /** * Test in repair mode. This test ensures that: * - The initial repair correctly resolves unreferenced objects. * - Subsequent repair runs do not find any unreferenced objects to process. 
- * @throws Exception */ - @Order(6) + @Order(ORDER_REPAIR_ALL) @Test - public void testMultipleBucketsAndVolumes() throws Exception { + public void testMultipleBucketsAndVolumes() { String expectedOutput = serializeReport(fullReport); - String[] args = new String[] {"om", "fso-tree", "--db", dbPath, "--repair"}; - int exitCode = cmd.execute(args); + int exitCode = repair(); assertEquals(0, exitCode); - String cliOutput = OUT.toString(DEFAULT_ENCODING); + String cliOutput = out.getOutput(); String reportOutput = extractRelevantSection(cliOutput); - Assertions.assertEquals(expectedOutput, reportOutput); - Assertions.assertTrue(cliOutput.contains("Unreferenced:\n\tDirectories: 1\n\tFiles: 3\n\tBytes: 30")); + assertEquals(expectedOutput, reportOutput); + assertThat(cliOutput).contains("Unreferenced:\n\tDirectories: 1\n\tFiles: 3\n\tBytes: 30"); + } - String[] args1 = new String[] {"om", "fso-tree", "--db", dbPath, "--repair"}; - int exitCode1 = cmd.execute(args1); - assertEquals(0, exitCode1); - String cliOutput1 = OUT.toString(DEFAULT_ENCODING); - Assertions.assertTrue(cliOutput1.contains("Unreferenced:\n\tDirectories: 0\n\tFiles: 0\n\tBytes: 0")); + @Order(ORDER_REPAIR_ALL_AGAIN) + @Test + public void repairAllAgain() { + int exitCode = repair(); + assertEquals(0, exitCode); + String cliOutput = out.getOutput(); + assertThat(cliOutput).contains("Unreferenced:\n\tDirectories: 0\n\tFiles: 0\n\tBytes: 0"); } /** * Validate cluster state after OM restart by checking the tables. - * @throws Exception */ - @Order(7) + @Order(ORDER_RESTART_OM) @Test public void validateClusterAfterRestart() throws Exception { cluster.getOzoneManager().restart(); @@ -362,6 +340,24 @@ public void validateClusterAfterRestart() throws Exception { assertEquals(3, countTableEntries(cluster.getOzoneManager().getMetadataManager().getDeletedTable())); } + private int repair(String... args) { + return execute(false, args); + } + + private int dryRun(String... args) { + return execute(true, args); + } + + private int execute(boolean dryRun, String... 
args) { + List argList = new ArrayList<>(Arrays.asList("om", "fso-tree", "--db", dbPath)); + if (!dryRun) { + argList.add("--repair"); + } + argList.addAll(Arrays.asList(args)); + + return cmd.execute(argList.toArray(new String[0])); + } + private int countTableEntries(Table table) throws Exception { int count = 0; try (TableIterator> iterator = table.iterator()) { @@ -469,13 +465,13 @@ private static void assertConnectedTreeReadable(String volume, String bucket) th Path dir3 = new Path(bucketPath, "dir3"); Path file4 = new Path(bucketPath, "file4"); - Assertions.assertTrue(fs.exists(dir1)); - Assertions.assertTrue(fs.exists(dir2)); - Assertions.assertTrue(fs.exists(dir3)); - Assertions.assertTrue(fs.exists(file1)); - Assertions.assertTrue(fs.exists(file2)); - Assertions.assertTrue(fs.exists(file3)); - Assertions.assertTrue(fs.exists(file4)); + assertTrue(fs.exists(dir1)); + assertTrue(fs.exists(dir2)); + assertTrue(fs.exists(dir3)); + assertTrue(fs.exists(file1)); + assertTrue(fs.exists(file2)); + assertTrue(fs.exists(file3)); + assertTrue(fs.exists(file4)); } /** @@ -530,12 +526,12 @@ private static void assertDisconnectedTreePartiallyReadable(String volume, Strin Path dir3 = new Path(bucketPath, "dir3"); Path file4 = new Path(bucketPath, "file4"); - Assertions.assertFalse(fs.exists(dir1)); - Assertions.assertFalse(fs.exists(dir2)); - Assertions.assertTrue(fs.exists(dir3)); - Assertions.assertFalse(fs.exists(file1)); - Assertions.assertFalse(fs.exists(file2)); - Assertions.assertFalse(fs.exists(file3)); - Assertions.assertTrue(fs.exists(file4)); + assertFalse(fs.exists(dir1)); + assertFalse(fs.exists(dir2)); + assertTrue(fs.exists(dir3)); + assertFalse(fs.exists(file1)); + assertFalse(fs.exists(file2)); + assertFalse(fs.exists(file3)); + assertTrue(fs.exists(file4)); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java index e770a36c737..426412ec490 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -18,22 +18,21 @@ package org.apache.hadoop.ozone.shell; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.debug.ldb.RDBParser; +import org.apache.hadoop.ozone.client.OzoneClient; +import org.apache.hadoop.ozone.debug.OzoneDebug; import org.apache.hadoop.ozone.om.OMStorage; import org.apache.hadoop.ozone.repair.OzoneRepair; -import org.apache.hadoop.ozone.repair.ldb.RDBRepair; import org.apache.ozone.test.GenericTestUtils; +import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import picocli.CommandLine; -import java.io.ByteArrayOutputStream; import java.io.File; -import java.io.IOException; -import java.io.PrintStream; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -41,18 +40,14 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; -import static java.nio.charset.StandardCharsets.UTF_8; /** * Test Ozone Repair shell. */ public class TestOzoneRepairShell { - private final ByteArrayOutputStream out = new ByteArrayOutputStream(); - private final ByteArrayOutputStream err = new ByteArrayOutputStream(); - private static final PrintStream OLD_OUT = System.out; - private static final PrintStream OLD_ERR = System.err; - private static final String DEFAULT_ENCODING = UTF_8.name(); + private GenericTestUtils.PrintStreamCapturer out; + private GenericTestUtils.PrintStreamCapturer err; private static MiniOzoneCluster cluster = null; private static OzoneConfiguration conf = null; @@ -67,24 +62,24 @@ public static void init() throws Exception { @BeforeEach public void setup() throws Exception { - System.setOut(new PrintStream(out, false, DEFAULT_ENCODING)); - System.setErr(new PrintStream(err, false, DEFAULT_ENCODING)); + out = GenericTestUtils.captureOut(); + err = GenericTestUtils.captureErr(); } @AfterEach public void reset() { // reset stream after each unit test - out.reset(); - err.reset(); + IOUtils.closeQuietly(out, err); + } - // restore system streams - System.setOut(OLD_OUT); - System.setErr(OLD_ERR); + @AfterAll + static void cleanup() { + IOUtils.closeQuietly(cluster); } @Test public void testUpdateTransactionInfoTable() throws Exception { - CommandLine cmd = new CommandLine(new RDBRepair()); + CommandLine cmd = new OzoneRepair().getCmd(); String dbPath = new File(OMStorage.getOmDbDir(conf) + "/" + OM_DB_NAME).getPath(); cluster.getOzoneManager().stop(); @@ -94,35 +89,33 @@ public void testUpdateTransactionInfoTable() throws Exception { String testTerm = "1111"; String testIndex = "1111"; - String[] args = - new String[] {"--db=" + dbPath, "update-transaction", "--term", testTerm, "--index", testIndex}; - int exitCode = cmd.execute(args); - assertEquals(0, exitCode); - assertThat(out.toString(DEFAULT_ENCODING)).contains( - "The original highest transaction Info was " + - String.format("(t:%s, i:%s)", originalHighestTermIndex[0], originalHighestTermIndex[1])); - assertThat(out.toString(DEFAULT_ENCODING)).contains( - String.format("The highest transaction info has been updated to: (t:%s, i:%s)", - testTerm, testIndex)); + int exitCode = cmd.execute("ldb", "--db", dbPath, 
"update-transaction", "--term", testTerm, "--index", testIndex); + assertEquals(0, exitCode, err); + assertThat(out.get()) + .contains( + "The original highest transaction Info was " + + String.format("(t:%s, i:%s)", originalHighestTermIndex[0], originalHighestTermIndex[1]), + String.format("The highest transaction info has been updated to: (t:%s, i:%s)", testTerm, testIndex) + ); String cmdOut2 = scanTransactionInfoTable(dbPath); assertThat(cmdOut2).contains(testTerm + "#" + testIndex); - cmd.execute("--db=" + dbPath, "update-transaction", "--term", + cmd.execute("ldb", "--db", dbPath, "update-transaction", "--term", originalHighestTermIndex[0], "--index", originalHighestTermIndex[1]); cluster.getOzoneManager().restart(); - cluster.newClient().getObjectStore().createVolume("vol1"); + try (OzoneClient ozoneClient = cluster.newClient()) { + ozoneClient.getObjectStore().createVolume("vol1"); + } } - private String scanTransactionInfoTable(String dbPath) throws Exception { - CommandLine cmdDBScanner = new CommandLine(new RDBParser()); - String[] argsDBScanner = - new String[] {"--db=" + dbPath, "scan", "--column_family", "transactionInfoTable"}; - cmdDBScanner.execute(argsDBScanner); - return out.toString(DEFAULT_ENCODING); + private String scanTransactionInfoTable(String dbPath) { + CommandLine debugCmd = new OzoneDebug().getCmd(); + debugCmd.execute("ldb", "--db", dbPath, "scan", "--column_family", "transactionInfoTable"); + return out.get(); } - private String[] parseScanOutput(String output) throws IOException { + private String[] parseScanOutput(String output) { Pattern pattern = Pattern.compile(TRANSACTION_INFO_TABLE_TERM_INDEX_PATTERN); Matcher matcher = pattern.matcher(output); if (matcher.find()) { @@ -135,19 +128,16 @@ private String[] parseScanOutput(String output) throws IOException { public void testQuotaRepair() throws Exception { CommandLine cmd = new OzoneRepair().getCmd(); - String[] args = new String[] {"quota", "status", "--service-host", conf.get(OZONE_OM_ADDRESS_KEY)}; - int exitCode = cmd.execute(args); - assertEquals(0, exitCode, err::toString); - args = new String[] {"quota", "start", "--service-host", conf.get(OZONE_OM_ADDRESS_KEY)}; - exitCode = cmd.execute(args); - assertEquals(0, exitCode); + int exitCode = cmd.execute("quota", "status", "--service-host", conf.get(OZONE_OM_ADDRESS_KEY)); + assertEquals(0, exitCode, err); + exitCode = cmd.execute("quota", "start", "--service-host", conf.get(OZONE_OM_ADDRESS_KEY)); + assertEquals(0, exitCode, err); GenericTestUtils.waitFor(() -> { out.reset(); // verify quota trigger is completed having non-zero lastRunFinishedTime - String[] targs = new String[]{"quota", "status", "--service-host", conf.get(OZONE_OM_ADDRESS_KEY)}; - cmd.execute(targs); + cmd.execute("quota", "status", "--service-host", conf.get(OZONE_OM_ADDRESS_KEY)); try { - return !out.toString(DEFAULT_ENCODING).contains("\"lastRunFinishedTime\":\"\""); + return out.get().contains("\"lastRunFinishedTime\":\"\""); } catch (Exception ex) { // do nothing } diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/ldb/TestTransactionInfoRepair.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/ldb/TestTransactionInfoRepair.java index 8a768d0f696..c0685002fd4 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/ldb/TestTransactionInfoRepair.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/ldb/TestTransactionInfoRepair.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software 
Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -17,25 +17,26 @@ */ package org.apache.hadoop.ozone.repair.ldb; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.TransactionInfo; -import org.apache.hadoop.hdds.utils.db.Codec; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.ozone.debug.RocksDBUtils; +import org.apache.hadoop.ozone.repair.OzoneRepair; import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.server.protocol.TermIndex; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.mockito.MockedStatic; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.RocksDB; import org.rocksdb.RocksDBException; +import picocli.CommandLine; -import java.io.IOException; -import java.io.UnsupportedEncodingException; -import java.util.function.Supplier; - +import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.TRANSACTION_INFO_TABLE; import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyList; import static org.mockito.Mockito.anyString; @@ -43,8 +44,6 @@ import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mockStatic; -import static org.mockito.Mockito.spy; -import static org.mockito.Mockito.when; /** * Tests TransactionInfoRepair. @@ -55,31 +54,38 @@ public class TestTransactionInfoRepair { private static final String DB_PATH = "testDBPath"; private static final long TEST_TERM = 1; private static final long TEST_INDEX = 1; + private GenericTestUtils.PrintStreamCapturer out; + private GenericTestUtils.PrintStreamCapturer err; + + @BeforeEach + void setup() { + out = GenericTestUtils.captureOut(); + err = GenericTestUtils.captureErr(); + } + + @AfterEach + void cleanup() { + IOUtils.closeQuietly(out, err); + } @Test - public void testUpdateTransactionInfoTableSuccessful() throws Exception { + public void testUpdateTransactionInfoTableSuccessful() { ManagedRocksDB mdb = mockRockDB(); - try (GenericTestUtils.SystemOutCapturer outCapturer = new GenericTestUtils.SystemOutCapturer()) { - testCommand(mdb, mock(ColumnFamilyHandle.class), () -> { - try { - return outCapturer.getOutput(); - } catch (UnsupportedEncodingException e) { - throw new RuntimeException(e); - } - }, new String[]{String.format("The original highest transaction Info was (t:%s, i:%s)", - TEST_TERM, TEST_INDEX), - String.format("The highest transaction info has been updated to: (t:%s, i:%s)", - TEST_TERM, TEST_INDEX)}); - } + testCommand(mdb, mock(ColumnFamilyHandle.class)); + + assertThat(out.getOutput()) + .contains( + String.format("The original highest transaction Info was (t:%s, i:%s)", TEST_TERM, TEST_INDEX), + String.format("The highest transaction info has been updated to: (t:%s, i:%s)", TEST_TERM, TEST_INDEX) + ); } @Test - public void testCommandWhenTableNotInDBForGivenPath() throws Exception { + public void testCommandWhenTableNotInDBForGivenPath() { ManagedRocksDB mdb = mockRockDB(); - IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, - () -> testCommand(mdb, null, null, new String[]{""})); - 
assertThat(exception.getMessage()).contains(TRANSACTION_INFO_TABLE + - " is not in a column family in DB for the given path"); + testCommand(mdb, null); + assertThat(err.getOutput()) + .contains(TRANSACTION_INFO_TABLE + " is not in a column family in DB for the given path"); } @Test @@ -87,53 +93,45 @@ public void testCommandWhenFailToUpdateRocksDBForGivenPath() throws Exception { ManagedRocksDB mdb = mockRockDB(); RocksDB rdb = mdb.get(); + ColumnFamilyHandle mock = mock(ColumnFamilyHandle.class); doThrow(RocksDBException.class).when(rdb) - .put(any(ColumnFamilyHandle.class), any(byte[].class), any(byte[].class)); + .put(eq(mock), any(byte[].class), any(byte[].class)); + + testCommand(mdb, mock); - IOException exception = assertThrows(IOException.class, - () -> testCommand(mdb, mock(ColumnFamilyHandle.class), null, new String[]{""})); - assertThat(exception.getMessage()).contains("Failed to update RocksDB."); - assertThat(exception.getCause()).isInstanceOf(RocksDBException.class); + assertThat(err.getOutput()) + .contains("Failed to update RocksDB."); } - private void testCommand(ManagedRocksDB mdb, ColumnFamilyHandle columnFamilyHandle, Supplier capturer, - String[] messages) throws Exception { + private void testCommand(ManagedRocksDB mdb, ColumnFamilyHandle columnFamilyHandle) { try (MockedStatic mocked = mockStatic(ManagedRocksDB.class); MockedStatic mockUtil = mockStatic(RocksDBUtils.class)) { mocked.when(() -> ManagedRocksDB.open(anyString(), anyList(), anyList())).thenReturn(mdb); mockUtil.when(() -> RocksDBUtils.getColumnFamilyHandle(anyString(), anyList())) .thenReturn(columnFamilyHandle); - mockUtil.when(() -> - RocksDBUtils.getValue(any(ManagedRocksDB.class), any(ColumnFamilyHandle.class), anyString(), - any(Codec.class))).thenReturn(mock(TransactionInfo.class)); - - mockTransactionInfo(mockUtil); - - TransactionInfoRepair cmd = spy(TransactionInfoRepair.class); - RDBRepair rdbRepair = mock(RDBRepair.class); - when(rdbRepair.getDbPath()).thenReturn(DB_PATH); - when(cmd.getParent()).thenReturn(rdbRepair); - cmd.setHighestTransactionTerm(TEST_TERM); - cmd.setHighestTransactionIndex(TEST_INDEX); - - cmd.call(); - for (String message : messages) { - assertThat(capturer.get()).contains(message); - } - } - } - - private void mockTransactionInfo(MockedStatic mockUtil) { - mockUtil.when(() -> - RocksDBUtils.getValue(any(ManagedRocksDB.class), any(ColumnFamilyHandle.class), anyString(), - any(Codec.class))).thenReturn(mock(TransactionInfo.class)); - TransactionInfo transactionInfo2 = mock(TransactionInfo.class); - doReturn(TermIndex.valueOf(TEST_TERM, TEST_INDEX)).when(transactionInfo2).getTermIndex(); - mockUtil.when(() -> - RocksDBUtils.getValue(any(ManagedRocksDB.class), any(ColumnFamilyHandle.class), anyString(), - any(Codec.class))).thenReturn(transactionInfo2); + mockUtil.when(() -> RocksDBUtils.getValue(eq(mdb), eq(columnFamilyHandle), eq(TRANSACTION_INFO_KEY), + eq(TransactionInfo.getCodec()))) + .thenReturn(mock(TransactionInfo.class)); + + mockUtil.when(() -> RocksDBUtils.getValue(eq(mdb), eq(columnFamilyHandle), eq(TRANSACTION_INFO_KEY), + eq(TransactionInfo.getCodec()))) + .thenReturn(mock(TransactionInfo.class)); + + TransactionInfo transactionInfo2 = TransactionInfo.valueOf(TermIndex.valueOf(TEST_TERM, TEST_INDEX)); + mockUtil.when(() -> RocksDBUtils.getValue(eq(mdb), eq(columnFamilyHandle), eq(TRANSACTION_INFO_KEY), + eq(TransactionInfo.getCodec()))) + .thenReturn(transactionInfo2); + + CommandLine cli = new OzoneRepair().getCmd(); + cli.execute( + "ldb", + "--db", 
DB_PATH, + "update-transaction", + "--term", String.valueOf(TEST_TERM), + "--index", String.valueOf(TEST_INDEX)); + } } private ManagedRocksDB mockRockDB() { From 7af38a986470dbf54cc8e4f47525359996679744 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran <47532440+swamirishi@users.noreply.github.com> Date: Fri, 20 Dec 2024 08:51:12 -0800 Subject: [PATCH 003/168] HDDS-11963. Add parent interface of component and layout versions for use in request validator (#7598) --- .../apache/hadoop/hdds/ComponentVersion.java | 9 ++- .../apache/hadoop/ozone/ClientVersion.java | 5 +- .../org/apache/hadoop/ozone/Versioned.java | 26 ++++++++ .../hadoop/ozone/upgrade/LayoutFeature.java | 9 ++- .../request/validation/VersionExtractor.java | 63 ++++++++++++++++++ .../validation/TestVersionExtractor.java | 66 +++++++++++++++++++ 6 files changed, 174 insertions(+), 4 deletions(-) create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/Versioned.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/VersionExtractor.java create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/TestVersionExtractor.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ComponentVersion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ComponentVersion.java index 9545869e163..7f65010e2c0 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ComponentVersion.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ComponentVersion.java @@ -17,10 +17,12 @@ */ package org.apache.hadoop.hdds; +import org.apache.hadoop.ozone.Versioned; + /** * Base type for component version enums. */ -public interface ComponentVersion { +public interface ComponentVersion extends Versioned { /** * Returns the description of the version enum value. @@ -34,4 +36,9 @@ public interface ComponentVersion { * @return the version associated with the enum value. */ int toProtoValue(); + + @Override + default int version() { + return toProtoValue(); + } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ClientVersion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ClientVersion.java index cc6695dc7d6..08e29356343 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ClientVersion.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/ClientVersion.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hdds.ComponentVersion; import java.util.Arrays; +import java.util.Comparator; import java.util.Map; import static java.util.function.Function.identity; @@ -75,8 +76,8 @@ public static ClientVersion fromProtoValue(int value) { } private static ClientVersion latest() { - ClientVersion[] versions = ClientVersion.values(); - return versions[versions.length - 2]; + return Arrays.stream(ClientVersion.values()) + .max(Comparator.comparingInt(ComponentVersion::toProtoValue)).orElse(null); } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/Versioned.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/Versioned.java new file mode 100644 index 00000000000..7f89b403b34 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/Versioned.java @@ -0,0 +1,26 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone; + + +/** + * Base class defining the version in the entire system. + */ +public interface Versioned { + int version(); +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutFeature.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutFeature.java index 92dd706f4bb..9ec9b4cb589 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutFeature.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/upgrade/LayoutFeature.java @@ -18,12 +18,14 @@ package org.apache.hadoop.ozone.upgrade; +import org.apache.hadoop.ozone.Versioned; + import java.util.Optional; /** * Generic Layout feature interface for Ozone. */ -public interface LayoutFeature { +public interface LayoutFeature extends Versioned { String name(); int layoutVersion(); @@ -48,6 +50,11 @@ default String name() { void execute(T arg) throws Exception; } + @Override + default int version() { + return this.layoutVersion(); + } + /** * Phase of execution for this action. */ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/VersionExtractor.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/VersionExtractor.java new file mode 100644 index 00000000000..e7acef45bfe --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/validation/VersionExtractor.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.om.request.validation; + +import org.apache.hadoop.ozone.ClientVersion; +import org.apache.hadoop.ozone.Versioned; +import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.upgrade.LayoutVersionManager; + +/** + * Class to extract version out of OM request. + */ +public enum VersionExtractor { + /** + * Extracts current metadata layout version. + */ + LAYOUT_VERSION_EXTRACTOR { + @Override + public Versioned extractVersion(OMRequest req, ValidationContext ctx) { + LayoutVersionManager layoutVersionManager = ctx.versionManager(); + return ctx.versionManager().getFeature(layoutVersionManager.getMetadataLayoutVersion()); + } + + @Override + public Class getVersionClass() { + return OMLayoutFeature.class; + } + }, + + /** + * Extracts client version from the OMRequests. + */ + CLIENT_VERSION_EXTRACTOR { + @Override + public Versioned extractVersion(OMRequest req, ValidationContext ctx) { + return req.getVersion() > ClientVersion.CURRENT_VERSION ? + ClientVersion.FUTURE_VERSION : ClientVersion.fromProtoValue(req.getVersion()); + } + + @Override + public Class getVersionClass() { + return ClientVersion.class; + } + }; + + public abstract Versioned extractVersion(OMRequest req, ValidationContext ctx); + public abstract Class getVersionClass(); +} diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/TestVersionExtractor.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/TestVersionExtractor.java new file mode 100644 index 00000000000..a3c9c76e70f --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/TestVersionExtractor.java @@ -0,0 +1,66 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. + */ +package org.apache.hadoop.ozone.om.request.validation; + +import org.apache.hadoop.ozone.ClientVersion; +import org.apache.hadoop.ozone.Versioned; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.upgrade.OMLayoutFeature; +import org.apache.hadoop.ozone.om.upgrade.OMLayoutVersionManager; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.upgrade.LayoutVersionManager; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; +import org.junit.jupiter.params.provider.ValueSource; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +class TestVersionExtractor { + + @ParameterizedTest + @EnumSource(OMLayoutFeature.class) + void testLayoutVersionExtractor(OMLayoutFeature layoutVersionValue) throws OMException { + ValidationContext context = mock(ValidationContext.class); + LayoutVersionManager layoutVersionManager = new OMLayoutVersionManager(layoutVersionValue.version()); + when(context.versionManager()).thenReturn(layoutVersionManager); + Versioned version = VersionExtractor.LAYOUT_VERSION_EXTRACTOR.extractVersion(null, context); + assertEquals(layoutVersionValue, version); + assertEquals(OMLayoutFeature.class, VersionExtractor.LAYOUT_VERSION_EXTRACTOR.getVersionClass()); + } + + @ParameterizedTest + @EnumSource(ClientVersion.class) + void testClientVersionExtractor(ClientVersion expectedClientVersion) { + OMRequest request = mock(OMRequest.class); + when(request.getVersion()).thenReturn(expectedClientVersion.version()); + Versioned version = VersionExtractor.CLIENT_VERSION_EXTRACTOR.extractVersion(request, null); + assertEquals(expectedClientVersion, version); + assertEquals(ClientVersion.class, VersionExtractor.CLIENT_VERSION_EXTRACTOR.getVersionClass()); + } + + @ParameterizedTest + @ValueSource(ints = {1, 2, 5, 10, 1000, 10000}) + void testClientVersionExtractorForFutureValues(int futureVersion) { + OMRequest request = mock(OMRequest.class); + when(request.getVersion()).thenReturn(ClientVersion.CURRENT_VERSION + futureVersion); + Versioned version = VersionExtractor.CLIENT_VERSION_EXTRACTOR.extractVersion(request, null); + assertEquals(ClientVersion.FUTURE_VERSION, version); + assertEquals(ClientVersion.class, VersionExtractor.CLIENT_VERSION_EXTRACTOR.getVersionClass()); + } +} From e41ba9b81b5bfed0b1f3e34181a23e3b9068d44d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 21 Dec 2024 15:01:15 +0100 Subject: [PATCH 004/168] HDDS-11982. Bump jersey2 to 2.46 (#7604) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 8cf055522b2..fa67067c612 100644 --- a/pom.xml +++ b/pom.xml @@ -125,7 +125,7 @@ 2.3.9 1.0-1 1.19.4 - 2.45 + 2.46 9.4.56.v20240826 1.4.0 3.9.12 From e25e6bf12a429b3d13545297c8dcba48d1727f80 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 21 Dec 2024 17:32:59 +0100 Subject: [PATCH 005/168] HDDS-11983. 
Bump junit to 5.11.4 (#7605) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index fa67067c612..043097cda8d 100644 --- a/pom.xml +++ b/pom.xml @@ -137,7 +137,7 @@ 0.1.55 2.1 1.1.1 - 5.11.3 + 5.11.4 1.0.1 1.9.25 2.5.0 From ed9ac7add5231b3b44c80736e5231e709a678a35 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 21 Dec 2024 18:45:23 +0100 Subject: [PATCH 006/168] HDDS-11984. Bump reload4j to 1.2.26 (#7608) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 043097cda8d..3af12ff2e60 100644 --- a/pom.xml +++ b/pom.xml @@ -203,7 +203,7 @@ 3.1.2 1.7 0.10.2 - 1.2.25 + 1.2.26 2.6.0 7.7.3 3.1.0 From be18a496abe359e2b513ebd6ed091fd55942b455 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 21 Dec 2024 21:38:50 +0100 Subject: [PATCH 007/168] HDDS-11985. Bump assertj-core to 3.27.0 (#7607) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 3af12ff2e60..6353f081789 100644 --- a/pom.xml +++ b/pom.xml @@ -34,7 +34,7 @@ 0.16.1 1.14 1.9.7 - 3.26.3 + 3.27.0 1.12.661 0.8.0.RELEASE 1.79 From 16322fdfa5c92794b565e83300c30adda3c9f042 Mon Sep 17 00:00:00 2001 From: Chia-Chuan Yu Date: Sun, 22 Dec 2024 04:41:23 +0800 Subject: [PATCH 008/168] Add flush to buffer (#7609) --- .../test/java/org/apache/hadoop/fs/ozone/TestHSync.java | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java index f185addf6b8..d00b86d937a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java @@ -181,7 +181,7 @@ public class TestHSync { public static void init() throws Exception { final BucketLayout layout = BUCKET_LAYOUT; - CONF.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, false); + CONF.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, true); CONF.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name()); CONF.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); CONF.setBoolean("ozone.client.hbase.enhancements.allowed", true); @@ -1426,9 +1426,12 @@ public void testHSyncKeyOverwriteNormalKey() throws Exception { outputStream2.hsync(); outputStream2.close(); assertEquals(data1.length() + data2.length(), metrics.getDataCommittedBytes()); + // wait until double buffer flush + cluster.getOzoneManager().awaitDoubleBufferFlush(); Map openKeys = getAllOpenKeys(openKeyTable); Map deletedKeys = getAllDeletedKeys(deletedTable); + // There should be no key in openKeyTable assertEquals(0, openKeys.size()); // There should be one key in delete table @@ -1503,6 +1506,8 @@ public void testHSyncKeyOverwriteHSyncKey() throws Exception { // hsync/close second hsync key should success outputStream2.hsync(); outputStream2.close(); + // wait until double buffer flush + cluster.getOzoneManager().awaitDoubleBufferFlush(); Map openKeys = getAllOpenKeys(openKeyTable); Map deletedKeys = getAllDeletedKeys(deletedTable); From 5b27f6d938c67028f105767f104f65efb9f5637d Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Mon, 23 Dec 2024 09:34:45 +0100 Subject: [PATCH 009/168] HDDS-11941. 
Include fork timeout in integration check summary (#7582) --- hadoop-ozone/dev-support/checks/_mvn_unit_report.sh | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh index 0249c7a498d..8b7ed939b27 100755 --- a/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh +++ b/hadoop-ozone/dev-support/checks/_mvn_unit_report.sh @@ -63,6 +63,7 @@ grep -A1 'Crashed tests' "${REPORT_DIR}/output.log" \ cat "${crashes}" >> "${tempfile}" # Check for tests that started but were not finished +timeouts=${REPORT_DIR}/timeouts.txt if grep -q 'There was a timeout.*in the fork' "${REPORT_DIR}/output.log"; then diff -uw \ <(grep -e 'Running org' "${REPORT_DIR}/output.log" \ @@ -75,7 +76,8 @@ if grep -q 'There was a timeout.*in the fork' "${REPORT_DIR}/output.log"; then | sort -u -k2) \ | grep '^- ' \ | awk '{ print $3 }' \ - >> "${tempfile}" + > "${timeouts}" + cat "${timeouts}" >> "${tempfile}" fi sort -u "${tempfile}" | tee "${REPORT_DIR}/summary.txt" @@ -118,5 +120,11 @@ if [[ -s "${crashes}" ]]; then fi rm -f "${crashes}" +if [[ -s "${timeouts}" ]]; then + printf "# Fork Timeout\n\n" >> "$SUMMARY_FILE" + cat "${timeouts}" | sed 's/^/ * /' >> "$SUMMARY_FILE" +fi +rm -f "${timeouts}" + ## generate counter wc -l "$REPORT_DIR/summary.txt" | awk '{print $1}'> "$REPORT_DIR/failures" From f34cf34d85ec15b64cc22acdc675173d87c6ec56 Mon Sep 17 00:00:00 2001 From: Chia-Chuan Yu Date: Wed, 25 Dec 2024 02:18:50 +0800 Subject: [PATCH 010/168] HDDS-11857. Freon log flooded by HSync message (#7613) --- .../main/java/org/apache/hadoop/ozone/common/ChecksumCache.java | 2 +- .../java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumCache.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumCache.java index 0f6482919a3..fffcf9c09e2 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumCache.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChecksumCache.java @@ -46,7 +46,7 @@ public class ChecksumCache { private static final int BLOCK_CHUNK_SIZE = 4 * 1024 * 1024; // 4 MB public ChecksumCache(int bytesPerChecksum) { - LOG.info("Initializing ChecksumCache with bytesPerChecksum = {}", bytesPerChecksum); + LOG.debug("Initializing ChecksumCache with bytesPerChecksum = {}", bytesPerChecksum); this.prevChunkLength = 0; this.bytesPerChecksum = bytesPerChecksum; // Set initialCapacity to avoid costly resizes diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java index bf4ffa9d8de..5c258ab670d 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneFSUtils.java @@ -318,7 +318,7 @@ public static boolean canEnableHsync(ConfigurationSource conf, boolean isClient) return confHsyncEnabled; } else { if (confHsyncEnabled) { - LOG.warn("Ignoring {} = {} because HBase enhancements are disallowed. To enable it, set {} = true as well.", + LOG.debug("Ignoring {} = {} because HBase enhancements are disallowed. 
To enable it, set {} = true as well.", OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true, confKey); } From f125363700ce8270f0642db9a38f7c3b15f2833f Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Thu, 26 Dec 2024 09:29:41 +0100 Subject: [PATCH 011/168] HDDS-11878. Use CommandSpec to find top-level command. (#7575) --- .../apache/hadoop/hdds/cli/GenericCli.java | 2 +- .../hadoop/hdds/cli/GenericParentCommand.java | 3 +- .../apache/hadoop/hdds/cli/AbstractMixin.java | 44 ++++++++++ .../hadoop/hdds/cli/AbstractSubcommand.java | 80 +++++++++++++++++++ .../apache/hadoop/hdds/scm/cli/ScmOption.java | 17 +--- .../hadoop/hdds/scm/cli/ScmSubcommand.java | 3 +- .../scm/cli/container/ContainerCommands.java | 9 --- .../scm/cli/container/InfoSubcommand.java | 11 +-- .../scm/cli/container/ListSubcommand.java | 6 +- .../ozone/admin/nssummary/NSSummaryAdmin.java | 4 - .../debug/container/InspectSubcommand.java | 5 +- .../ozone/repair/quota/QuotaRepair.java | 4 - .../ozone/repair/quota/QuotaStatus.java | 4 - .../ozone/repair/quota/QuotaTrigger.java | 5 -- .../apache/hadoop/ozone/shell/Handler.java | 28 +------ .../ozone/shell/bucket/BucketCommands.java | 14 +--- .../hadoop/ozone/shell/keys/KeyCommands.java | 15 +--- .../ozone/shell/prefix/PrefixCommands.java | 14 +--- .../shell/snapshot/SnapshotCommands.java | 14 +--- .../shell/tenant/TenantUserCommands.java | 15 +--- .../ozone/shell/token/TokenCommands.java | 15 +--- .../ozone/shell/volume/VolumeCommands.java | 14 +--- 22 files changed, 149 insertions(+), 177 deletions(-) create mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AbstractMixin.java create mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AbstractSubcommand.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java index 14d454431f9..158bd9270b7 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java @@ -105,7 +105,6 @@ public Void call() throws Exception { throw new MissingSubcommandException(cmd); } - @Override public OzoneConfiguration createOzoneConfiguration() { OzoneConfiguration ozoneConf = new OzoneConfiguration(); if (configurationPath != null) { @@ -119,6 +118,7 @@ public OzoneConfiguration createOzoneConfiguration() { return ozoneConf; } + @Override public OzoneConfiguration getOzoneConf() { if (conf == null) { conf = createOzoneConfiguration(); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java index 6abad3e32b8..e4dcd8d4ab5 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericParentCommand.java @@ -25,5 +25,6 @@ public interface GenericParentCommand { boolean isVerbose(); - OzoneConfiguration createOzoneConfiguration(); + /** Returns a cached configuration, i.e. it is created only once, subsequent calls return the same instance. 
*/ + OzoneConfiguration getOzoneConf(); } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AbstractMixin.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AbstractMixin.java new file mode 100644 index 00000000000..1201f2058c6 --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AbstractMixin.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.cli; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import picocli.CommandLine; + +import static picocli.CommandLine.Spec.Target.MIXEE; + +/** Base functionality for all Ozone CLI mixins. */ +@CommandLine.Command +public abstract class AbstractMixin { + + @CommandLine.Spec(MIXEE) + private CommandLine.Model.CommandSpec spec; + + protected CommandLine.Model.CommandSpec spec() { + return spec; + } + + protected GenericParentCommand rootCommand() { + return AbstractSubcommand.findRootCommand(spec); + } + + protected OzoneConfiguration getOzoneConf() { + return rootCommand().getOzoneConf(); + } + +} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AbstractSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AbstractSubcommand.java new file mode 100644 index 00000000000..550a68ae07e --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AbstractSubcommand.java @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.cli; + +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.ratis.util.MemoizedSupplier; +import picocli.CommandLine; + +import java.util.function.Supplier; + +/** Base functionality for all Ozone subcommands. 
*/ +@CommandLine.Command( + mixinStandardHelpOptions = true, + versionProvider = HddsVersionProvider.class +) +public abstract class AbstractSubcommand { + + @CommandLine.Spec + private CommandLine.Model.CommandSpec spec; + + private final Supplier rootSupplier = + MemoizedSupplier.valueOf(() -> findRootCommand(spec)); + + protected CommandLine.Model.CommandSpec spec() { + return spec; + } + + /** Get the Ozone object annotated with {@link CommandLine.Command}) that was used to run this command. + * Usually this is some subclass of {@link GenericCli}, but in unit tests it could be any subcommand. */ + protected GenericParentCommand rootCommand() { + return rootSupplier.get(); + } + + protected boolean isVerbose() { + return rootCommand().isVerbose(); + } + + /** @see GenericParentCommand#getOzoneConf() */ + protected OzoneConfiguration getOzoneConf() { + return rootCommand().getOzoneConf(); + } + + static GenericParentCommand findRootCommand(CommandLine.Model.CommandSpec spec) { + Object root = spec.root().userObject(); + return root instanceof GenericParentCommand + ? (GenericParentCommand) root + : new NoParentCommand(); + } + + /** No-op implementation for unit tests, which may bypass creation of GenericCli object. */ + private static class NoParentCommand implements GenericParentCommand { + + private final OzoneConfiguration conf = new OzoneConfiguration(); + + @Override + public boolean isVerbose() { + return false; + } + + @Override + public OzoneConfiguration getOzoneConf() { + return conf; + } + } +} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmOption.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmOption.java index dea8ac0ec87..faff193fa93 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmOption.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmOption.java @@ -19,7 +19,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.cli.GenericParentCommand; +import org.apache.hadoop.hdds.cli.AbstractMixin; import org.apache.hadoop.hdds.conf.ConfigurationException; import org.apache.hadoop.hdds.conf.MutableConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -33,15 +33,11 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY; import static org.apache.hadoop.hdds.utils.HddsServerUtil.getScmSecurityClient; -import static picocli.CommandLine.Spec.Target.MIXEE; /** * Defines command-line option for SCM address. 
*/ -public class ScmOption { - - @CommandLine.Spec(MIXEE) - private CommandLine.Model.CommandSpec spec; +public class ScmOption extends AbstractMixin { @CommandLine.Option(names = {"--scm"}, description = "The destination scm (host:port)") @@ -53,9 +49,7 @@ public class ScmOption { private String scmServiceId; public ScmClient createScmClient() throws IOException { - GenericParentCommand parent = (GenericParentCommand) - spec.root().userObject(); - OzoneConfiguration conf = parent.createOzoneConfiguration(); + OzoneConfiguration conf = getOzoneConf(); checkAndSetSCMAddressArg(conf); return new ContainerOperationClient(conf); @@ -91,13 +85,10 @@ private void checkAndSetSCMAddressArg(MutableConfigurationSource conf) { public SCMSecurityProtocol createScmSecurityClient() { try { - GenericParentCommand parent = (GenericParentCommand) - spec.root().userObject(); - return getScmSecurityClient(parent.createOzoneConfiguration()); + return getScmSecurityClient(getOzoneConf()); } catch (IOException ex) { throw new IllegalArgumentException( "Can't create SCM Security client", ex); } } - } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmSubcommand.java index 6dc09c2cbec..a0afddd9a40 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmSubcommand.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdds.scm.cli; +import org.apache.hadoop.hdds.cli.AbstractSubcommand; import org.apache.hadoop.hdds.scm.client.ScmClient; import picocli.CommandLine; @@ -26,7 +27,7 @@ /** * Base class for admin commands that connect via SCM client. */ -public abstract class ScmSubcommand implements Callable { +public abstract class ScmSubcommand extends AbstractSubcommand implements Callable { @CommandLine.Mixin private ScmOption scmOption; diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java index a38b98c53a9..cf0c63adca3 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java @@ -22,11 +22,9 @@ import org.apache.hadoop.hdds.cli.AdminSubcommand; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.OzoneAdmin; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; import picocli.CommandLine.Model.CommandSpec; import picocli.CommandLine.Spec; @@ -52,16 +50,9 @@ public class ContainerCommands implements Callable, AdminSubcommand { @Spec private CommandSpec spec; - @ParentCommand - private OzoneAdmin parent; - @Override public Void call() throws Exception { GenericCli.missingSubcommand(spec); return null; } - - public OzoneAdmin getParent() { - return parent; - } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java index 0e67661bba1..3665a7d3fa7 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java +++ 
b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoSubcommand.java @@ -27,7 +27,6 @@ import java.util.Scanner; import java.util.stream.Collectors; -import org.apache.hadoop.hdds.cli.GenericParentCommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -47,9 +46,7 @@ import org.apache.hadoop.hdds.server.JsonUtils; import picocli.CommandLine; import picocli.CommandLine.Command; -import picocli.CommandLine.Model.CommandSpec; import picocli.CommandLine.Parameters; -import picocli.CommandLine.Spec; /** * This is the handler that process container info command. @@ -61,9 +58,6 @@ versionProvider = HddsVersionProvider.class) public class InfoSubcommand extends ScmSubcommand { - @Spec - private CommandSpec spec; - @CommandLine.Option(names = { "--json" }, defaultValue = "false", description = "Format output as JSON") @@ -181,10 +175,7 @@ private void printDetails(ScmClient scmClient, long containerID, } else { // Print container report info. System.out.printf("Container id: %s%n", containerID); - boolean verbose = spec != null - && spec.root().userObject() instanceof GenericParentCommand - && ((GenericParentCommand) spec.root().userObject()).isVerbose(); - if (verbose) { + if (isVerbose()) { System.out.printf("Pipeline Info: %s%n", container.getPipeline()); } else { System.out.printf("Pipeline id: %s%n", container.getPipeline().getId().getId()); diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java index 88ccef702b3..cf338c7d774 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java @@ -38,7 +38,6 @@ import com.fasterxml.jackson.databind.SerializationFeature; import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; import picocli.CommandLine.Help.Visibility; import picocli.CommandLine.Option; @@ -82,9 +81,6 @@ public class ListSubcommand extends ScmSubcommand { private static final ObjectWriter WRITER; - @ParentCommand - private ContainerCommands parent; - static { ObjectMapper mapper = new ObjectMapper() .registerModule(new JavaTimeModule()) @@ -116,7 +112,7 @@ public void execute(ScmClient scmClient) throws IOException { replication, new OzoneConfiguration()); } - int maxCountAllowed = parent.getParent().getOzoneConf() + int maxCountAllowed = getOzoneConf() .getInt(ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT, ScmConfigKeys.OZONE_SCM_CONTAINER_LIST_MAX_COUNT_DEFAULT); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java index ef9be49abfb..20a145b56fb 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java @@ -66,10 +66,6 @@ public class NSSummaryAdmin implements AdminSubcommand { @CommandLine.ParentCommand private OzoneAdmin parent; - public OzoneAdmin getParent() { - return parent; - } - private boolean isObjectStoreBucket(OzoneBucket bucket, ObjectStore objectStore) { boolean 
enableFileSystemPaths = getOzoneConfig() .getBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/InspectSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/InspectSubcommand.java index 79a0a84c649..f924277d27f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/InspectSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/InspectSubcommand.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.debug.container; +import org.apache.hadoop.hdds.cli.AbstractSubcommand; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.container.common.impl.ContainerData; import org.apache.hadoop.ozone.container.common.interfaces.Container; @@ -39,14 +40,14 @@ name = "inspect", description = "Check the metadata of all container replicas on this datanode.") -public class InspectSubcommand implements Callable { +public class InspectSubcommand extends AbstractSubcommand implements Callable { @CommandLine.ParentCommand private ContainerCommands parent; @Override public Void call() throws IOException { - final OzoneConfiguration conf = parent.getOzoneConf(); + final OzoneConfiguration conf = getOzoneConf(); parent.loadContainersFromVolumes(); final KeyValueContainerMetadataInspector inspector diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaRepair.java index 6ead713e148..5c7b6b2fc4b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaRepair.java @@ -113,8 +113,4 @@ private Collection getConfiguredServiceIds() { public UserGroupInformation getUser() throws IOException { return UserGroupInformation.getCurrentUser(); } - - protected OzoneRepair getParent() { - return parent; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaStatus.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaStatus.java index 820ac6f8eaf..cd9ef42da8e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaStatus.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaStatus.java @@ -64,8 +64,4 @@ public Void call() throws Exception { System.out.println(ozoneManagerClient.getQuotaRepairStatus()); return null; } - - protected QuotaRepair getParent() { - return parent; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaTrigger.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaTrigger.java index 04d78f05dc6..daa1f332e3f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaTrigger.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaTrigger.java @@ -84,9 +84,4 @@ public Void call() throws Exception { } return null; } - - protected QuotaRepair getParent() { - return parent; - } - } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Handler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Handler.java index d1755a68806..db7294e2795 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Handler.java +++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Handler.java @@ -24,8 +24,7 @@ import java.util.concurrent.Callable; import com.fasterxml.jackson.databind.node.ArrayNode; -import org.apache.hadoop.hdds.cli.GenericParentCommand; -import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.cli.AbstractSubcommand; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.server.JsonUtils; @@ -34,36 +33,17 @@ import org.apache.hadoop.ozone.client.OzoneClientException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import picocli.CommandLine; -import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; /** * Base class for shell commands that connect via Ozone client. */ -@Command(mixinStandardHelpOptions = true, - versionProvider = HddsVersionProvider.class) @SuppressWarnings("squid:S106") // CLI -public abstract class Handler implements Callable { +public abstract class Handler extends AbstractSubcommand implements Callable { protected static final Logger LOG = LoggerFactory.getLogger(Handler.class); private OzoneConfiguration conf; - @ParentCommand - private GenericParentCommand parent; - - @CommandLine.Spec - private CommandLine.Model.CommandSpec spec; - - public boolean isVerbose() { - return parent.isVerbose(); - } - - public OzoneConfiguration createOzoneConfiguration() { - return parent.createOzoneConfiguration(); - } - protected OzoneAddress getAddress() throws OzoneClientException { return new OzoneAddress(); } @@ -84,7 +64,7 @@ protected boolean isApplicable() { @Override public Void call() throws Exception { - conf = createOzoneConfiguration(); + conf = getOzoneConf(); if (!isApplicable()) { return null; @@ -111,7 +91,7 @@ protected boolean securityEnabled() { if (!enabled) { err().printf("Error: '%s' operation works only when security is " + "enabled. 
To enable security set ozone.security.enabled to " + - "true.%n", spec.qualifiedName().trim()); + "true.%n", spec().qualifiedName().trim()); } return enabled; } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java index 8a92de696a7..f4be05aab7d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java @@ -20,10 +20,8 @@ import java.util.concurrent.Callable; -import org.apache.hadoop.hdds.cli.GenericParentCommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.shell.Shell; import picocli.CommandLine.Command; @@ -52,7 +50,7 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class BucketCommands implements GenericParentCommand, Callable { +public class BucketCommands implements Callable { @ParentCommand private Shell shell; @@ -62,14 +60,4 @@ public Void call() throws Exception { throw new MissingSubcommandException( this.shell.getCmd().getSubcommands().get("bucket")); } - - @Override - public boolean isVerbose() { - return shell.isVerbose(); - } - - @Override - public OzoneConfiguration createOzoneConfiguration() { - return shell.createOzoneConfiguration(); - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java index f4ac9e1fe8f..390db103899 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java @@ -20,10 +20,8 @@ import java.util.concurrent.Callable; -import org.apache.hadoop.hdds.cli.GenericParentCommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.shell.Shell; import picocli.CommandLine.Command; @@ -52,8 +50,7 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class KeyCommands - implements GenericParentCommand, Callable { +public class KeyCommands implements Callable { @ParentCommand private Shell shell; @@ -63,14 +60,4 @@ public Void call() throws Exception { throw new MissingSubcommandException( this.shell.getCmd().getSubcommands().get("key")); } - - @Override - public boolean isVerbose() { - return shell.isVerbose(); - } - - @Override - public OzoneConfiguration createOzoneConfiguration() { - return shell.createOzoneConfiguration(); - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/prefix/PrefixCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/prefix/PrefixCommands.java index f058c4214d2..6216fce08d7 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/prefix/PrefixCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/prefix/PrefixCommands.java @@ -20,10 +20,8 @@ import java.util.concurrent.Callable; -import org.apache.hadoop.hdds.cli.GenericParentCommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import 
org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.shell.Shell; import picocli.CommandLine.Command; @@ -42,7 +40,7 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class PrefixCommands implements GenericParentCommand, Callable { +public class PrefixCommands implements Callable { @ParentCommand private Shell shell; @@ -52,14 +50,4 @@ public Void call() throws Exception { throw new MissingSubcommandException( this.shell.getCmd().getSubcommands().get("prefix")); } - - @Override - public boolean isVerbose() { - return shell.isVerbose(); - } - - @Override - public OzoneConfiguration createOzoneConfiguration() { - return shell.createOzoneConfiguration(); - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java index e4ae7f5ad7a..dbeb6cda0a4 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java @@ -20,10 +20,8 @@ import java.util.concurrent.Callable; -import org.apache.hadoop.hdds.cli.GenericParentCommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.shell.Shell; import picocli.CommandLine.Command; @@ -45,7 +43,7 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class SnapshotCommands implements GenericParentCommand, Callable { +public class SnapshotCommands implements Callable { @ParentCommand private Shell shell; @@ -55,14 +53,4 @@ public Void call() throws Exception { throw new MissingSubcommandException( this.shell.getCmd().getSubcommands().get("snapshot")); } - - @Override - public boolean isVerbose() { - return shell.isVerbose(); - } - - @Override - public OzoneConfiguration createOzoneConfiguration() { - return shell.createOzoneConfiguration(); - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantUserCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantUserCommands.java index baff85d0bf2..8caeb232a9e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantUserCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantUserCommands.java @@ -17,10 +17,8 @@ */ package org.apache.hadoop.ozone.shell.tenant; -import org.apache.hadoop.hdds.cli.GenericParentCommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.shell.Shell; import picocli.CommandLine; @@ -43,8 +41,7 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class TenantUserCommands implements - GenericParentCommand, Callable { +public class TenantUserCommands implements Callable { @CommandLine.ParentCommand private Shell shell; @@ -54,14 +51,4 @@ public Void call() throws Exception { throw new MissingSubcommandException( this.shell.getCmd().getSubcommands().get("user")); } - - @Override - public boolean isVerbose() { - return shell.isVerbose(); - } - - @Override - 
public OzoneConfiguration createOzoneConfiguration() { - return shell.createOzoneConfiguration(); - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/TokenCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/TokenCommands.java index 3223b5b49ed..df504313840 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/TokenCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/TokenCommands.java @@ -20,10 +20,8 @@ import java.util.concurrent.Callable; -import org.apache.hadoop.hdds.cli.GenericParentCommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.shell.Shell; import picocli.CommandLine.Command; @@ -42,8 +40,7 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class TokenCommands - implements GenericParentCommand, Callable { +public class TokenCommands implements Callable { @ParentCommand private Shell shell; @@ -53,14 +50,4 @@ public Void call() throws Exception { throw new MissingSubcommandException( this.shell.getCmd().getSubcommands().get("token")); } - - @Override - public boolean isVerbose() { - return shell.isVerbose(); - } - - @Override - public OzoneConfiguration createOzoneConfiguration() { - return shell.createOzoneConfiguration(); - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/VolumeCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/VolumeCommands.java index 1cf88552030..0a87e7a4065 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/VolumeCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/VolumeCommands.java @@ -20,10 +20,8 @@ import java.util.concurrent.Callable; -import org.apache.hadoop.hdds.cli.GenericParentCommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.shell.Shell; import picocli.CommandLine.Command; @@ -50,7 +48,7 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class VolumeCommands implements GenericParentCommand, Callable { +public class VolumeCommands implements Callable { @ParentCommand private Shell shell; @@ -60,14 +58,4 @@ public Void call() throws Exception { throw new MissingSubcommandException( this.shell.getCmd().getSubcommands().get("volume")); } - - @Override - public boolean isVerbose() { - return shell.isVerbose(); - } - - @Override - public OzoneConfiguration createOzoneConfiguration() { - return shell.createOzoneConfiguration(); - } } From ddd5433374b29c99fb65df3e7ac44ff2193835bc Mon Sep 17 00:00:00 2001 From: Chia-Chuan Yu Date: Sun, 29 Dec 2024 01:50:24 +0800 Subject: [PATCH 012/168] HDDS-11986. 
Enable SCM Ratis in TestSCMNodeManager (#7624) --- .../org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java index 6d11cb5fe58..568c11c541c 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java @@ -184,7 +184,7 @@ OzoneConfiguration getConf() { TimeUnit.MILLISECONDS); conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false); conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); return conf; } @@ -283,7 +283,7 @@ public void testScmLayoutOnHeartbeat() throws Exception { 1, TimeUnit.DAYS); try (SCMNodeManager nodeManager = createNodeManager(conf)) { - assertTrue(scm.checkLeader()); + assertTrue(scm.getScmContext().isLeader()); // Register 2 nodes correctly. // These will be used with a faulty node to test pipeline creation. DatanodeDetails goodNode1 = registerWithCapacity(nodeManager); @@ -402,7 +402,7 @@ public void testScmLayoutOnRegister() 1, TimeUnit.DAYS); try (SCMNodeManager nodeManager = createNodeManager(conf)) { - assertTrue(scm.checkLeader()); + assertTrue(scm.getScmContext().isLeader()); // Nodes with mismatched SLV cannot join the cluster. registerWithCapacity(nodeManager, LARGER_SLV_LAYOUT_PROTO, errorNodeNotPermitted); From 345c46850f2f145d4f735214d32390e422544894 Mon Sep 17 00:00:00 2001 From: Nandakumar Vadivelu Date: Sun, 29 Dec 2024 14:19:31 +0530 Subject: [PATCH 013/168] HDDS-11995. Acceptance Test test-all script fails to delete old result directories. (#7622) --- hadoop-ozone/dist/src/main/compose/test-all.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/dist/src/main/compose/test-all.sh b/hadoop-ozone/dist/src/main/compose/test-all.sh index 863e1d0b75a..adc6853d43c 100755 --- a/hadoop-ozone/dist/src/main/compose/test-all.sh +++ b/hadoop-ozone/dist/src/main/compose/test-all.sh @@ -22,8 +22,8 @@ SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd ) ALL_RESULT_DIR="$SCRIPT_DIR/result" PROJECT_DIR="$SCRIPT_DIR/.." -mkdir -p "$ALL_RESULT_DIR" -rm "$ALL_RESULT_DIR"/* || true +rm -rf "${ALL_RESULT_DIR}" +mkdir -p "${ALL_RESULT_DIR}" source "$SCRIPT_DIR"/testlib.sh From c3003fd8707c487c2909c0d68c47bbb8c964c32a Mon Sep 17 00:00:00 2001 From: Nandakumar Vadivelu Date: Sun, 29 Dec 2024 16:24:26 +0530 Subject: [PATCH 014/168] HDDS-11992. Replace GenericCli#createOzoneConfiguration calls with getOzoneConf. 
(#7623) --- .../apache/hadoop/hdds/cli/GenericCli.java | 41 +++++++------------ .../hadoop/ozone/HddsDatanodeService.java | 2 +- .../StorageContainerManagerStarter.java | 2 +- .../apache/hadoop/hdds/cli/OzoneAdmin.java | 2 +- .../apache/hadoop/ozone/csi/CsiServer.java | 2 +- .../insight/ConfigurationSubCommand.java | 2 +- .../hadoop/ozone/insight/LogSubcommand.java | 2 +- .../ozone/insight/MetricsSubCommand.java | 2 +- .../hadoop/ozone/TestBlockTokensCLI.java | 2 +- .../hadoop/ozone/om/OzoneManagerStarter.java | 2 +- .../hadoop/ozone/recon/ReconServer.java | 2 +- .../org/apache/hadoop/ozone/s3/Gateway.java | 2 +- .../ozone/freon/BaseFreonGenerator.java | 2 +- .../ozone/freon/DNRPCLoadGenerator.java | 2 +- .../hadoop/ozone/freon/DatanodeSimulator.java | 2 +- .../org/apache/hadoop/ozone/freon/Freon.java | 2 +- .../hadoop/ozone/freon/HsyncGenerator.java | 2 +- .../ozone/freon/RandomKeyGenerator.java | 2 +- .../ozone/freon/SCMThroughputBenchmark.java | 2 +- .../apache/hadoop/ozone/shell/OzoneRatis.java | 2 +- .../apache/hadoop/ozone/shell/OzoneShell.java | 2 +- .../apache/hadoop/ozone/shell/s3/S3Shell.java | 2 +- .../ozone/shell/tenant/TenantShell.java | 2 +- 23 files changed, 36 insertions(+), 49 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java index 158bd9270b7..3afda85498b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java @@ -17,11 +17,10 @@ package org.apache.hadoop.hdds.cli; import java.io.IOException; -import java.util.HashMap; import java.util.Map; -import java.util.Map.Entry; import java.util.concurrent.Callable; +import com.google.common.base.Strings; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -39,19 +38,24 @@ public class GenericCli implements Callable, GenericParentCommand { public static final int EXECUTION_ERROR_EXIT_CODE = -1; + private final OzoneConfiguration config = new OzoneConfiguration(); + private final CommandLine cmd; + + private UserGroupInformation user; + @Option(names = {"--verbose"}, description = "More verbose output. Show the stack trace of the errors.") private boolean verbose; @Option(names = {"-D", "--set"}) - private Map configurationOverrides = new HashMap<>(); + public void setConfigurationOverrides(Map configOverrides) { + configOverrides.forEach(config::set); + } @Option(names = {"-conf"}) - private String configurationPath; - - private final CommandLine cmd; - private OzoneConfiguration conf; - private UserGroupInformation user; + public void setConfigurationPath(String configPath) { + config.addResource(new Path(configPath)); + } public GenericCli() { this(CommandLine.defaultFactory()); @@ -92,8 +96,7 @@ public int execute(String[] argv) { protected void printError(Throwable error) { //message could be null in case of NPE. This is unexpected so we can //print out the stack trace. 
- if (verbose || error.getMessage() == null - || error.getMessage().length() == 0) { + if (verbose || Strings.isNullOrEmpty(error.getMessage())) { error.printStackTrace(System.err); } else { System.err.println(error.getMessage().split("\n")[0]); @@ -105,25 +108,9 @@ public Void call() throws Exception { throw new MissingSubcommandException(cmd); } - public OzoneConfiguration createOzoneConfiguration() { - OzoneConfiguration ozoneConf = new OzoneConfiguration(); - if (configurationPath != null) { - ozoneConf.addResource(new Path(configurationPath)); - } - if (configurationOverrides != null) { - for (Entry entry : configurationOverrides.entrySet()) { - ozoneConf.set(entry.getKey(), entry.getValue()); - } - } - return ozoneConf; - } - @Override public OzoneConfiguration getOzoneConf() { - if (conf == null) { - conf = createOzoneConfiguration(); - } - return conf; + return config; } public UserGroupInformation getUser() throws IOException { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index de21e37503a..7dc8c591119 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -169,7 +169,7 @@ public static Logger getLogger() { @Override public Void call() throws Exception { - OzoneConfiguration configuration = createOzoneConfiguration(); + OzoneConfiguration configuration = getOzoneConf(); if (printBanner) { HddsServerUtil.startupShutdownMessage(HddsVersionInfo.HDDS_VERSION_INFO, HddsDatanodeService.class, args, LOG, configuration); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java index e258c8ee66e..8c0044f66a9 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java @@ -150,7 +150,7 @@ private void startScm() throws Exception { * is set and print the startup banner message. 
*/ private void commonInit() { - conf = createOzoneConfiguration(); + conf = getOzoneConf(); TracingUtil.initTracing("StorageContainerManager", conf); String[] originalArgs = getCmd().getParseResult().originalArgs() diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java index 0c182d75e83..78e5f8ffd72 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java @@ -37,7 +37,7 @@ public static void main(String[] argv) { @Override public int execute(String[] argv) { - TracingUtil.initTracing("shell", createOzoneConfiguration()); + TracingUtil.initTracing("shell", getOzoneConf()); String spanName = "ozone admin " + String.join(" ", argv); return TracingUtil.executeInNewSpan(spanName, () -> super.execute(argv)); diff --git a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java index dbafccf4fd2..13fedd061c6 100644 --- a/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java +++ b/hadoop-ozone/csi/src/main/java/org/apache/hadoop/ozone/csi/CsiServer.java @@ -54,7 +54,7 @@ public class CsiServer extends GenericCli implements Callable { public Void call() throws Exception { String[] originalArgs = getCmd().getParseResult().originalArgs() .toArray(new String[0]); - OzoneConfiguration ozoneConfiguration = createOzoneConfiguration(); + OzoneConfiguration ozoneConfiguration = getOzoneConf(); HddsServerUtil.startupShutdownMessage(OzoneVersionInfo.OZONE_VERSION_INFO, CsiServer.class, originalArgs, LOG, ozoneConfiguration); CsiConfig csiConfig = ozoneConfiguration.getObject(CsiConfig.class); diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/ConfigurationSubCommand.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/ConfigurationSubCommand.java index dd565e5b26f..91481a40212 100644 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/ConfigurationSubCommand.java +++ b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/ConfigurationSubCommand.java @@ -46,7 +46,7 @@ public class ConfigurationSubCommand extends BaseInsightSubCommand @Override public Void call() throws Exception { InsightPoint insight = - getInsight(getInsightCommand().createOzoneConfiguration(), insightName); + getInsight(getInsightCommand().getOzoneConf(), insightName); System.out.println( "Configuration for `" + insightName + "` (" + insight.getDescription() + ")"); diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/LogSubcommand.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/LogSubcommand.java index 1bae30befb3..cc557917cb8 100644 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/LogSubcommand.java +++ b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/LogSubcommand.java @@ -72,7 +72,7 @@ public class LogSubcommand extends BaseInsightSubCommand @Override public Void call() { OzoneConfiguration conf = - getInsightCommand().createOzoneConfiguration(); + getInsightCommand().getOzoneConf(); InsightPoint insight = getInsight(conf, insightName); diff --git a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/MetricsSubCommand.java b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/MetricsSubCommand.java 
index 44e0b7b7dc0..4aee7648dba 100644 --- a/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/MetricsSubCommand.java +++ b/hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/MetricsSubCommand.java @@ -62,7 +62,7 @@ public class MetricsSubCommand extends BaseInsightSubCommand @Override public Void call() throws Exception { OzoneConfiguration conf = - getInsightCommand().createOzoneConfiguration(); + getInsightCommand().getOzoneConf(); InsightPoint insight = getInsight(conf, insightName); Set sources = diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java index 038248945a4..632debd5d58 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestBlockTokensCLI.java @@ -304,7 +304,7 @@ public boolean shouldRotate(ManagedSecretKey currentKey) { * format. */ private String[] createArgsForCommand(String[] additionalArgs) { - OzoneConfiguration defaultConf = ozoneAdmin.createOzoneConfiguration(); + OzoneConfiguration defaultConf = ozoneAdmin.getOzoneConf(); Map diff = Maps.difference(defaultConf.getOzoneProperties(), conf.getOzoneProperties()).entriesOnlyOnRight(); String[] args = new String[diff.size() + additionalArgs.length]; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java index 63617ee3637..27cb8d8aa3c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java @@ -167,7 +167,7 @@ public void bootstrapOM(@CommandLine.Option(names = {"--force"}, * is set and print the startup banner message. 
*/ private void commonInit() { - conf = createOzoneConfiguration(); + conf = getOzoneConf(); TracingUtil.initTracing("OzoneManager", conf); String[] originalArgs = getCmd().getParseResult().originalArgs() diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java index 24b5c10952a..0970c2da687 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java @@ -101,7 +101,7 @@ public Void call() throws Exception { String[] originalArgs = getCmd().getParseResult().originalArgs() .toArray(new String[0]); - configuration = createOzoneConfiguration(); + configuration = getOzoneConf(); HddsServerUtil.startupShutdownMessage(OzoneVersionInfo.OZONE_VERSION_INFO, ReconServer.class, originalArgs, LOG, configuration); ConfigurationProvider.setConfiguration(configuration); diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java index 9816b023dc4..c20c9b496f0 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java @@ -72,7 +72,7 @@ public static void main(String[] args) throws Exception { @Override public Void call() throws Exception { - OzoneConfiguration ozoneConfiguration = createOzoneConfiguration(); + OzoneConfiguration ozoneConfiguration = getOzoneConf(); OzoneConfigurationHolder.setConfiguration(ozoneConfiguration); TracingUtil.initTracing("S3gateway", OzoneConfigurationHolder.configuration()); UserGroupInformation.setConfiguration(OzoneConfigurationHolder.configuration()); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java index 3627b917f00..651166740d8 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java @@ -572,7 +572,7 @@ public MetricRegistry getMetrics() { } public OzoneConfiguration createOzoneConfiguration() { - return freonCommand.createOzoneConfiguration(); + return freonCommand.getOzoneConf(); } /** diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DNRPCLoadGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DNRPCLoadGenerator.java index a7527952ca3..926f3f4630a 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DNRPCLoadGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DNRPCLoadGenerator.java @@ -127,7 +127,7 @@ public Void call() throws Exception { "OM echo response payload size should be positive value or zero."); if (configuration == null) { - configuration = freon.createOzoneConfiguration(); + configuration = freon.getOzoneConf(); } ContainerOperationClient scmClient = new ContainerOperationClient(configuration); ContainerInfo containerInfo = scmClient.getContainer(containerID); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java index a86b4789fef..353bc447939 100644 --- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java @@ -422,7 +422,7 @@ private void heartbeat(InetSocketAddress endpoint, } private void init() throws IOException { - conf = freonCommand.createOzoneConfiguration(); + conf = freonCommand.getOzoneConf(); Collection addresses = getSCMAddressForDatanodes(conf); scmClients = new HashMap<>(addresses.size()); for (InetSocketAddress address : addresses) { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java index ccae53f345b..435d54079fe 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java @@ -96,7 +96,7 @@ public class Freon extends GenericCli { @Override public int execute(String[] argv) { - conf = createOzoneConfiguration(); + conf = getOzoneConf(); HddsServerUtil.initializeMetrics(conf, "ozone-freon"); TracingUtil.initTracing("freon", conf); return super.execute(argv); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HsyncGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HsyncGenerator.java index 687030ab325..2cfcbc9be97 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HsyncGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HsyncGenerator.java @@ -100,7 +100,7 @@ public Void call() throws Exception { init(); if (configuration == null) { - configuration = freon.createOzoneConfiguration(); + configuration = freon.getOzoneConf(); } URI uri = URI.create(rootPath); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java index 58b62d22b98..f7b51fedced 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java @@ -285,7 +285,7 @@ public void init(OzoneConfiguration configuration) throws IOException { @Override public Void call() throws Exception { if (ozoneConfiguration == null) { - ozoneConfiguration = freon.createOzoneConfiguration(); + ozoneConfiguration = freon.getOzoneConf(); } if (!ozoneConfiguration.getBoolean( HddsConfigKeys.HDDS_CONTAINER_PERSISTDATA, diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java index 6dc0efae0d2..f6e08ee9e4f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java @@ -190,7 +190,7 @@ private SCMThroughputBenchmark() { @Override public Void call() throws Exception { - conf = freon.createOzoneConfiguration(); + conf = freon.getOzoneConf(); ThroughputBenchmark benchmark = createBenchmark(); initCluster(benchmark); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneRatis.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneRatis.java index 20f6f683cbf..95d4e2c2a36 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneRatis.java +++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneRatis.java @@ -39,7 +39,7 @@ public static void main(String[] argv) throws Exception { @Override public int execute(String[] argv) { - TracingUtil.initTracing("shell", createOzoneConfiguration()); + TracingUtil.initTracing("shell", getOzoneConf()); String spanName = "ozone ratis" + String.join(" ", argv); return TracingUtil.executeInNewSpan(spanName, () -> { // TODO: When Ozone has RATIS-2155, update this line to use the RatisShell.Builder diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneShell.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneShell.java index 925e3bc13ec..c324618bfe4 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneShell.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneShell.java @@ -58,7 +58,7 @@ public static void main(String[] argv) throws Exception { @Override public int execute(String[] argv) { - TracingUtil.initTracing("shell", createOzoneConfiguration()); + TracingUtil.initTracing("shell", getOzoneConf()); String spanName = "ozone sh " + String.join(" ", argv); return TracingUtil.executeInNewSpan(spanName, () -> super.execute(argv)); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/s3/S3Shell.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/s3/S3Shell.java index 60269ad5383..53324ba03d6 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/s3/S3Shell.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/s3/S3Shell.java @@ -36,7 +36,7 @@ public class S3Shell extends Shell { @Override public int execute(String[] argv) { - TracingUtil.initTracing("s3shell", createOzoneConfiguration()); + TracingUtil.initTracing("s3shell", getOzoneConf()); String spanName = "ozone s3 " + String.join(" ", argv); return TracingUtil.executeInNewSpan(spanName, () -> super.execute(argv)); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantShell.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantShell.java index cd5bb11af17..c7baf789f19 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantShell.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantShell.java @@ -37,7 +37,7 @@ public class TenantShell extends Shell { @Override public int execute(String[] argv) { - TracingUtil.initTracing("tenant-shell", createOzoneConfiguration()); + TracingUtil.initTracing("tenant-shell", getOzoneConf()); String spanName = "ozone tenant " + String.join(" ", argv); return TracingUtil.executeInNewSpan(spanName, () -> super.execute(argv)); From ad108c86b11606d49f8ea9b739e40340a2117fe2 Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran <47532440+swamirishi@users.noreply.github.com> Date: Mon, 30 Dec 2024 10:22:32 -0800 Subject: [PATCH 015/168] HDDS-11997. 
Duplicate snapshot purge request causes NPE (#7627) --- .../snapshot/OMSnapshotPurgeRequest.java | 4 +- .../snapshot/OMSnapshotPurgeResponse.java | 6 +++ ...TestOMSnapshotPurgeRequestAndResponse.java | 37 +++++++++++++++++++ 3 files changed, 46 insertions(+), 1 deletion(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java index 38c51d4de5c..62fbb39417b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java @@ -227,7 +227,9 @@ private SnapshotInfo getUpdatedSnapshotInfo(String snapshotTableKey, OMMetadataM if (snapshotInfo == null) { snapshotInfo = omMetadataManager.getSnapshotInfoTable().get(snapshotTableKey); - updatedSnapshotInfos.put(snapshotTableKey, snapshotInfo); + if (snapshotInfo != null) { + updatedSnapshotInfos.put(snapshotTableKey, snapshotInfo); + } } return snapshotInfo; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java index 81a020653f7..826293366b7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/snapshot/OMSnapshotPurgeResponse.java @@ -18,6 +18,7 @@ */ package org.apache.hadoop.ozone.om.response.snapshot; +import com.google.common.annotations.VisibleForTesting; import org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -138,4 +139,9 @@ private void deleteCheckpointDirectory(OMMetadataManager omMetadataManager, } } } + + @VisibleForTesting + public Map getUpdatedSnapInfos() { + return updatedSnapInfos; + } } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java index 1c44decdfda..8c1f38c01c6 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/snapshot/TestOMSnapshotPurgeRequestAndResponse.java @@ -56,6 +56,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.anyString; @@ -186,6 +187,42 @@ public void testValidateAndUpdateCache() throws Exception { assertEquals(initialSnapshotPurgeFailCount, getOmMetrics().getNumSnapshotPurgeFails()); } + @Test + public void testDuplicateSnapshotPurge() throws Exception { + List snapshotDbKeysToPurge = createSnapshots(1); + assertFalse(getOmMetadataManager().getSnapshotInfoTable().isEmpty()); + 
+    OMRequest snapshotPurgeRequest = createPurgeKeysRequest(
+        snapshotDbKeysToPurge);
+
+    OMSnapshotPurgeRequest omSnapshotPurgeRequest = preExecute(snapshotPurgeRequest);
+
+    OMSnapshotPurgeResponse omSnapshotPurgeResponse = (OMSnapshotPurgeResponse)
+        omSnapshotPurgeRequest.validateAndUpdateCache(getOzoneManager(), 200L);
+
+    try (BatchOperation batchOperation = getOmMetadataManager().getStore().initBatchOperation()) {
+      omSnapshotPurgeResponse.checkAndUpdateDB(getOmMetadataManager(), batchOperation);
+      getOmMetadataManager().getStore().commitBatchOperation(batchOperation);
+    }
+
+    // Check if the entries are deleted.
+    assertTrue(getOmMetadataManager().getSnapshotInfoTable().isEmpty());
+
+    OMSnapshotPurgeResponse omSnapshotPurgeResponse1 = (OMSnapshotPurgeResponse)
+        omSnapshotPurgeRequest.validateAndUpdateCache(getOzoneManager(), 201L);
+
+    for (Map.Entry purgedSnapshot : omSnapshotPurgeResponse1.getUpdatedSnapInfos().entrySet()) {
+      assertNotNull(purgedSnapshot.getValue());
+    }
+    for (String snapshotTableKey: snapshotDbKeysToPurge) {
+      assertNull(getOmMetadataManager().getSnapshotInfoTable().get(snapshotTableKey));
+    }
+
+    try (BatchOperation batchOperation = getOmMetadataManager().getStore().initBatchOperation()) {
+      omSnapshotPurgeResponse1.checkAndUpdateDB(getOmMetadataManager(), batchOperation);
+      getOmMetadataManager().getStore().commitBatchOperation(batchOperation);
+    }
+  }
+
   /**
    * This test is mainly to validate metrics and error code.
    */

From f57cf017c7dc47325fa035f15a050ee0f5e777f2 Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com>
Date: Wed, 1 Jan 2025 01:34:49 +0100
Subject: [PATCH 016/168] HDDS-11993. Move OzoneAdmin to ozone-tools (#7619)

---
 .../dev-support/intellij/runConfigurations/ScmRoles.xml | 2 +-
 hadoop-ozone/dist/src/shell/ozone/ozone | 2 +-
 .../java/org/apache/hadoop/ozone/TestBlockTokensCLI.java | 2 +-
 .../org/apache/hadoop/ozone/shell/TestNSSummaryAdmin.java | 2 +-
 .../org/apache/hadoop/ozone/shell/TestOzoneShellHA.java | 2 +-
 .../org/apache/hadoop/ozone/shell/TestReconfigShell.java | 2 +-
 .../java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java | 2 +-
 .../hadoop/ozone/shell/TestTransferLeadershipShell.java | 2 +-
 .../java/org/apache/hadoop/ozone/admin}/OzoneAdmin.java | 6 +++++-
 .../apache/hadoop/ozone/admin/nssummary/NSSummaryAdmin.java | 2 +-
 .../main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java | 2 +-
 .../hadoop/ozone/admin/reconfig/ReconfigureCommands.java | 2 +-
 .../java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java | 2 +-
 .../hadoop/ozone/scm/TestDecommissionScmSubcommand.java | 2 +-
 14 files changed, 18 insertions(+), 14 deletions(-)
 rename {hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli => hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin}/OzoneAdmin.java (87%)

diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/ScmRoles.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/ScmRoles.xml
index ea4a4da516e..f6d6278ac67 100644
--- a/hadoop-ozone/dev-support/intellij/runConfigurations/ScmRoles.xml
+++ b/hadoop-ozone/dev-support/intellij/runConfigurations/ScmRoles.xml
@@ -16,7 +16,7 @@
 -->
-

- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.ozone; - -import org.junit.jupiter.api.TestInstance; - -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -class TestO3FSWithFSOAndOMRatis extends AbstractOzoneFileSystemTestWithFSO { - TestO3FSWithFSOAndOMRatis() { - super(true); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSPaths.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSPaths.java index 5fffd9df7f4..a603d7a847d 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSPaths.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSPaths.java @@ -23,6 +23,6 @@ @TestInstance(TestInstance.Lifecycle.PER_CLASS) class TestO3FSWithFSPaths extends AbstractOzoneFileSystemTest { TestO3FSWithFSPaths() { - super(true, false, BucketLayout.LEGACY); + super(true, BucketLayout.LEGACY); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSPathsAndOMRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSPathsAndOMRatis.java deleted file mode 100644 index 461961c3e73..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithFSPathsAndOMRatis.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.ozone; - -import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.junit.jupiter.api.TestInstance; - -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -class TestO3FSWithFSPathsAndOMRatis extends AbstractOzoneFileSystemTest { - TestO3FSWithFSPathsAndOMRatis() { - super(true, true, BucketLayout.LEGACY); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithOMRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithOMRatis.java deleted file mode 100644 index a02f3812e04..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestO3FSWithOMRatis.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.ozone; - -import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.junit.jupiter.api.TestInstance; - -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -class TestO3FSWithOMRatis extends AbstractOzoneFileSystemTest { - TestO3FSWithOMRatis() { - super(false, true, BucketLayout.LEGACY); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFS.java index 295c182f7db..e9f734a426c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFS.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFS.java @@ -23,6 +23,6 @@ @TestInstance(TestInstance.Lifecycle.PER_CLASS) class TestOFS extends AbstractRootedOzoneFileSystemTest { TestOFS() { - super(BucketLayout.LEGACY, false, false, false, false); + super(BucketLayout.LEGACY, false, false, false); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithCacheOnly.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithCacheOnly.java index 0ce2a80e88d..58b1f97a8d8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithCacheOnly.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithCacheOnly.java @@ -23,6 +23,6 @@ @TestInstance(TestInstance.Lifecycle.PER_CLASS) class TestOFSWithCacheOnly extends AbstractRootedOzoneFileSystemTest { TestOFSWithCacheOnly() { - super(BucketLayout.LEGACY, false, false, false, true); + super(BucketLayout.LEGACY, false, false, true); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSOAndOMRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSOAndOMRatis.java deleted file mode 100644 index c6e837becd3..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSOAndOMRatis.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.ozone; - -import org.junit.jupiter.api.TestInstance; - -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -class TestOFSWithFSOAndOMRatis extends AbstractRootedOzoneFileSystemTestWithFSO { - TestOFSWithFSOAndOMRatis() { - super(true, false, false); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSOAndOMRatisAndCacheOnly.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSOAndOMRatisAndCacheOnly.java deleted file mode 100644 index 2bd14d9c360..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSOAndOMRatisAndCacheOnly.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.ozone; - -import org.junit.jupiter.api.TestInstance; - -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -class TestOFSWithFSOAndOMRatisAndCacheOnly extends AbstractRootedOzoneFileSystemTestWithFSO { - TestOFSWithFSOAndOMRatisAndCacheOnly() { - super(true, false, true); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSPaths.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSPaths.java index 9e06c2d5b58..75c09467237 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSPaths.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSPaths.java @@ -23,6 +23,6 @@ @TestInstance(TestInstance.Lifecycle.PER_CLASS) class TestOFSWithFSPaths extends AbstractRootedOzoneFileSystemTest { TestOFSWithFSPaths() { - super(BucketLayout.LEGACY, true, false, false, false); + super(BucketLayout.LEGACY, true, false, false); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSPathsAndOMRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSPathsAndOMRatis.java deleted file mode 100644 index 592d11df246..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSPathsAndOMRatis.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.ozone; - -import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.junit.jupiter.api.TestInstance; - -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -class TestOFSWithFSPathsAndOMRatis extends AbstractRootedOzoneFileSystemTest { - TestOFSWithFSPathsAndOMRatis() { - super(BucketLayout.LEGACY, true, true, false, false); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSPathsAndOMRatisAndACL.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSPathsAndOMRatisAndACL.java deleted file mode 100644 index 975bf474253..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSPathsAndOMRatisAndACL.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.ozone; - -import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.junit.jupiter.api.TestInstance; - -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -class TestOFSWithFSPathsAndOMRatisAndACL extends AbstractRootedOzoneFileSystemTest { - TestOFSWithFSPathsAndOMRatisAndACL() { - super(BucketLayout.LEGACY, true, true, true, false); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSPathsAndOMRatisAndCacheOnly.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSPathsAndOMRatisAndCacheOnly.java deleted file mode 100644 index 212586d9af3..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSPathsAndOMRatisAndCacheOnly.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.ozone; - -import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.junit.jupiter.api.TestInstance; - -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -class TestOFSWithFSPathsAndOMRatisAndCacheOnly extends AbstractRootedOzoneFileSystemTest { - TestOFSWithFSPathsAndOMRatisAndCacheOnly() { - super(BucketLayout.LEGACY, true, true, false, true); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithOMRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithOMRatis.java deleted file mode 100644 index 84a4ac5aaa7..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithOMRatis.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.ozone; - -import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.junit.jupiter.api.TestInstance; - -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -class TestOFSWithOMRatis extends AbstractRootedOzoneFileSystemTest { - TestOFSWithOMRatis() { - super(BucketLayout.LEGACY, false, true, false, false); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java index 059f7b3e03d..5f8bf162b4e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java @@ -54,7 +54,6 @@ import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; @@ -86,7 +85,6 @@ public static void init() throws Exception { CONF.setBoolean(HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); CONF.setBoolean(OZONE_FS_DATASTREAM_ENABLED, true); CONF.set(OZONE_FS_DATASTREAM_AUTO_THRESHOLD, AUTO_THRESHOLD + "B"); - CONF.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, true); CONF.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name()); CONF.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java index de3358685ec..4a3bd85aa31 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java @@ -88,7 +88,6 @@ public static void init() throws Exception { conf.setInt(OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK, 5); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); - conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); conf.setBoolean(OZONE_ACL_ENABLED, true); conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, BucketLayout.FILE_SYSTEM_OPTIMIZED.name()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java index 39c2250b73c..9df70f1b7c2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java @@ -446,8 +446,6 @@ protected int numberOfOzoneManagers() { } protected void initOMRatisConf() { - 
conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); - // If test change the following config values we will respect, // otherwise we will set lower timeout values. long defaultDuration = OMConfigKeys.OZONE_OM_RATIS_MINIMUM_TIMEOUT_DEFAULT diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java index 0c5db29fd5f..7a3e66a0add 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java @@ -23,14 +23,13 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.om.OMConfigKeys; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Timeout; /** - * Test Ozone Client with OM Ratis disabled. + * Test Ozone Client with OM Ratis enabled. */ @Timeout(300) class TestOzoneRpcClient extends OzoneRpcClientTests { @@ -38,7 +37,6 @@ class TestOzoneRpcClient extends OzoneRpcClientTests { @BeforeAll public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, false); conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1); conf.setBoolean(OzoneConfigKeys.OZONE_ACL_ENABLED, true); conf.set(OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java deleted file mode 100644 index 95d7ba6218d..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - -import java.io.IOException; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; - -/** - * Test Ozone Client with OM Ratis enabled. 
- */ -class TestOzoneRpcClientWithRatis extends OzoneRpcClientTests { - - @BeforeAll - public static void init() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE, - false); - conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); - conf.setBoolean(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, - true); - conf.setBoolean(OzoneConfigKeys.OZONE_ACL_ENABLED, true); - conf.set(OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS, - OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS_NATIVE); - startCluster(conf); - } - - @AfterAll - public static void shutdown() throws IOException { - shutdownCluster(); - } - -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java index e3bb5b5bccb..23d0cdd1b16 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java @@ -73,8 +73,6 @@ public void setup() throws Exception { TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS); - // TODO enable when RATIS-788 is fixed - conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, false); conf.setQuietMode(false); cluster = MiniOzoneCluster.newBuilder(conf) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java index f0f4744e8c9..a94f6ea017f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java @@ -96,7 +96,6 @@ import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY; import static org.apache.hadoop.ozone.om.OmSnapshotManager.OM_HARDLINK_FILE; import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.DATA_PREFIX; import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.DATA_SUFFIX; @@ -293,13 +292,6 @@ public void testEndpoint(String httpMethod) throws Exception { any(), any(), eq(toExcludeList), any(), any()); } - @ParameterizedTest - @MethodSource("getHttpMethods") - public void testEndpointNotRatis(String httpMethod) throws Exception { - conf.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, false); - testEndpoint(httpMethod); - } - @Test public void testDoPostWithInvalidContentType() throws Exception { conf.setBoolean(OZONE_ACL_ENABLED, false); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java deleted file mode 100644 index 01ba4db399f..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMEpochForNonRatis.java +++ /dev/null @@ -1,215 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *

- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.om; - -import java.util.HashMap; - -import org.apache.hadoop.hdds.utils.IOUtils; -import org.apache.commons.lang3.RandomStringUtils; -import org.apache.hadoop.hdds.client.ReplicationFactor; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.utils.TransactionInfo; -import org.apache.hadoop.hdds.utils.db.Table; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OmUtils; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneVolume; -import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; -import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; -import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; -import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; -import org.apache.hadoop.ozone.om.protocolPB.OmTransportFactory; -import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; -import org.apache.hadoop.security.UserGroupInformation; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.Timeout; - -import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.ozone.OmUtils.EPOCH_ID_SHIFT; -import static org.apache.hadoop.ozone.OmUtils.EPOCH_WHEN_RATIS_NOT_ENABLED; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY; -import static org.junit.jupiter.api.Assertions.assertTrue; - -/** - * Tests OM epoch generation for when Ratis is not enabled. - */ -@Timeout(240) -public class TestOMEpochForNonRatis { - private static MiniOzoneCluster cluster = null; - private static OzoneConfiguration conf; - private static OzoneClient client; - - @BeforeAll - public static void init() throws Exception { - conf = new OzoneConfiguration(); - conf.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, false); - cluster = MiniOzoneCluster.newBuilder(conf) - .build(); - cluster.waitForClusterToBeReady(); - client = cluster.newClient(); - } - - /** - * Shutdown MiniDFSCluster. - */ - @AfterAll - public static void shutdown() { - IOUtils.closeQuietly(client); - if (cluster != null) { - cluster.shutdown(); - } - } - - @Test - public void testUniqueTrxnIndexOnOMRestart() throws Exception { - // When OM is restarted, the transaction index for requests should not - // start from 0. It should incrementally increase from the last - // transaction index which was stored in DB before restart. 
- - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - String keyName = "key" + RandomStringUtils.randomNumeric(5); - - OzoneManager om = cluster.getOzoneManager(); - ObjectStore objectStore = client.getObjectStore(); - - UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - OzoneManagerProtocolClientSideTranslatorPB omClient = - new OzoneManagerProtocolClientSideTranslatorPB( - OmTransportFactory.create(conf, ugi, null), - RandomStringUtils.randomAscii(5)); - - objectStore.createVolume(volumeName); - - // Verify that the last transactionIndex stored in DB after volume - // creation equals the transaction index corresponding to volume's - // objectID. Also, the volume transaction index should be 1 as this is - // the first transaction in this cluster. - OmVolumeArgs volumeInfo = omClient.getVolumeInfo(volumeName); - long volumeTrxnIndex = OmUtils.getTxIdFromObjectId( - volumeInfo.getObjectID()); - assertEquals(1, volumeTrxnIndex); - assertEquals(volumeTrxnIndex, om.getLastTrxnIndexForNonRatis()); - - OzoneVolume ozoneVolume = objectStore.getVolume(volumeName); - ozoneVolume.createBucket(bucketName); - - // Verify last transactionIndex is updated after bucket creation - OmBucketInfo bucketInfo = omClient.getBucketInfo(volumeName, bucketName); - long bucketTrxnIndex = OmUtils.getTxIdFromObjectId( - bucketInfo.getObjectID()); - assertEquals(2, bucketTrxnIndex); - assertEquals(bucketTrxnIndex, om.getLastTrxnIndexForNonRatis()); - - // Restart the OM and create new object - cluster.restartOzoneManager(); - - String data = "random data"; - OzoneOutputStream ozoneOutputStream = ozoneVolume.getBucket(bucketName) - .createKey(keyName, data.length(), ReplicationType.RATIS, - ReplicationFactor.ONE, new HashMap<>()); - ozoneOutputStream.write(data.getBytes(UTF_8), 0, data.length()); - ozoneOutputStream.close(); - - // Verify last transactionIndex is updated after key creation and the - // transaction index after restart is incremented from the last - // transaction index before restart. - OmKeyInfo omKeyInfo = omClient.lookupKey(new OmKeyArgs.Builder() - .setVolumeName(volumeName) - .setBucketName(bucketName) - .setKeyName(keyName) - .build()); - long keyTrxnIndex = OmUtils.getTxIdFromObjectId( - omKeyInfo.getObjectID()); - assertEquals(3, keyTrxnIndex); - // Key commit is a separate transaction. Hence, the last trxn index in DB - // should be 1 more than KeyTrxnIndex - assertEquals(4, om.getLastTrxnIndexForNonRatis()); - } - - @Test - public void testIncreaseTrxnIndexBasedOnExistingDB() throws Exception { - // Set transactionInfo.getTerm() not -1 to mock the DB migrated from ratis cluster. - // When OM is first started from the existing ratis DB, the transaction index for - // requests should not start from 0. It should incrementally increase from the last - // transaction index which was stored in DB transactionInfoTable before started. 
- - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - String bucketName = "bucket" + RandomStringUtils.randomNumeric(5); - String keyName = "key" + RandomStringUtils.randomNumeric(5); - - OzoneManager om = cluster.getOzoneManager(); - ObjectStore objectStore = client.getObjectStore(); - - objectStore.createVolume(volumeName); - OzoneVolume ozoneVolume = objectStore.getVolume(volumeName); - ozoneVolume.createBucket(bucketName); - - Table transactionInfoTable = om.getMetadataManager().getTransactionInfoTable(); - long initIndex = transactionInfoTable.get(TRANSACTION_INFO_KEY).getTransactionIndex(); - // Set transactionInfo.getTerm() = 1 to mock the DB migrated from ratis cluster - transactionInfoTable.put(TRANSACTION_INFO_KEY, TransactionInfo.valueOf(1, initIndex)); - TransactionInfo transactionInfo = transactionInfoTable.get(TRANSACTION_INFO_KEY); - // Verify transaction term != -1 and index > 1 - assertEquals(1, transactionInfo.getTerm()); - assertTrue(initIndex > 1); - - // Restart the OM and create new object - cluster.restartOzoneManager(); - - String data = "random data"; - OzoneOutputStream ozoneOutputStream = ozoneVolume.getBucket(bucketName).createKey(keyName, data.length()); - ozoneOutputStream.write(data.getBytes(UTF_8), 0, data.length()); - ozoneOutputStream.close(); - - // Transaction index after OM restart is incremented by 2 (create and commit op) from the last - // transaction index before OM restart rather than from 0. - // So, the transactionIndex should be (initIndex + 2) rather than (0 + 2) - assertEquals(initIndex + 2, - om.getMetadataManager().getTransactionInfoTable().get(TRANSACTION_INFO_KEY).getTransactionIndex()); - } - - @Test - public void testEpochIntegrationInObjectID() throws Exception { - // Create a volume and check the objectID has the epoch as - // EPOCH_FOR_RATIS_NOT_ENABLED in the first 2 bits. 
- - ObjectStore objectStore = client.getObjectStore(); - - String volumeName = "volume" + RandomStringUtils.randomNumeric(5); - objectStore.createVolume(volumeName); - - UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - OzoneManagerProtocolClientSideTranslatorPB omClient = - new OzoneManagerProtocolClientSideTranslatorPB( - OmTransportFactory.create(conf, ugi, null), - RandomStringUtils.randomAscii(5)); - - long volObjId = omClient.getVolumeInfo(volumeName).getObjectID(); - long epochInVolObjId = volObjId >> EPOCH_ID_SHIFT; - - assertEquals(EPOCH_WHEN_RATIS_NOT_ENABLED, epochInVolObjId); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java index 81f87265b0a..ab67eb7ba37 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java @@ -67,7 +67,6 @@ public class TestOzoneManagerConfiguration { void init(@TempDir Path metaDirPath) throws IOException { conf = new OzoneConfiguration(); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString()); - conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); conf.setTimeDuration(OMConfigKeys.OZONE_OM_RATIS_MINIMUM_TIMEOUT_KEY, RATIS_RPC_TIMEOUT, TimeUnit.MILLISECONDS); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java index d28f25a28fa..29417a075cb 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOzoneManagerSnapshotProvider.java @@ -65,7 +65,6 @@ public void init() throws Exception { conf = new OzoneConfiguration(); omServiceId = "om-service-test1"; conf.setBoolean(OMConfigKeys.OZONE_OM_HTTP_ENABLED_KEY, true); - conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); cluster = MiniOzoneCluster.newHABuilder(conf) .setOMServiceId(omServiceId) .setNumOfOzoneManagers(numOfOMs) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java index c3a58a1a211..9bafe148aee 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java @@ -107,7 +107,6 @@ public class TestSnapshotDeletingServiceIntegrationTest { private static final Logger LOG = LoggerFactory.getLogger(TestSnapshotDeletingServiceIntegrationTest.class); - private static boolean omRatisEnabled = true; private static final ByteBuffer CONTENT = ByteBuffer.allocate(1024 * 1024 * 16); @@ -138,7 +137,6 @@ public void setup() throws Exception { conf.setInt(OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK, 5); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 500, TimeUnit.MILLISECONDS); - 
conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled); conf.setBoolean(OZONE_ACL_ENABLED, true); // Enable filesystem snapshot feature for the test regardless of the default conf.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java index 8656682cd15..80f06b2ef54 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java @@ -61,7 +61,6 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK; -import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -77,8 +76,6 @@ public class TestReconInsightsForDeletedDirectories { private static final Logger LOG = LoggerFactory.getLogger(TestReconInsightsForDeletedDirectories.class); - private static boolean omRatisEnabled = true; - private static MiniOzoneCluster cluster; private static FileSystem fs; private static String volumeName; @@ -92,7 +89,6 @@ public static void init() throws Exception { conf.setInt(OZONE_PATH_DELETING_LIMIT_PER_TASK, 0); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 10000000, TimeUnit.MILLISECONDS); - conf.setBoolean(OZONE_OM_RATIS_ENABLE_KEY, omRatisEnabled); conf.setBoolean(OZONE_ACL_ENABLED, true); cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(3) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java index 0d7cb5fbf07..955b5cd73e6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManagerHA.java @@ -31,7 +31,6 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; -import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix; @@ -66,7 +65,6 @@ public class TestReconWithOzoneManagerHA { @BeforeEach public void setup() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - conf.set(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, Boolean.TRUE.toString()); // Sync to disk enabled RocksDBConfiguration dbConf = conf.getObject(RocksDBConfiguration.class); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1.java index 5e9b3633be0..e04c4014413 100644 
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1.java @@ -19,7 +19,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ozone.om.OMConfigKeys; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Timeout; @@ -27,7 +26,7 @@ import java.io.IOException; /** - * Tests the AWS S3 SDK basic operations with OM Ratis disabled. + * Tests the AWS S3 SDK basic operations with OM Ratis enabled. */ @Timeout(300) public class TestS3SDKV1 extends AbstractS3SDKV1Tests { @@ -35,7 +34,6 @@ public class TestS3SDKV1 extends AbstractS3SDKV1Tests { @BeforeAll public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, false); conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1); startCluster(conf); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1WithRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1WithRatis.java deleted file mode 100644 index cb614453f69..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1WithRatis.java +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *

- * http://www.apache.org/licenses/LICENSE-2.0
- *

- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.s3.awssdk.v1; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.om.OMConfigKeys; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; - -import java.io.IOException; - -/** - * Tests the AWS S3 SDK basic operations with OM Ratis enabled. - */ -public class TestS3SDKV1WithRatis extends AbstractS3SDKV1Tests { - - @BeforeAll - public static void init() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE, - false); - conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); - conf.setBoolean(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, - true); - startCluster(conf); - } - - @AfterAll - public static void shutdown() throws IOException { - shutdownCluster(); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1WithRatisStreaming.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1WithRatisStreaming.java index 571d4c64908..e54cc42ef33 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1WithRatisStreaming.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/TestS3SDKV1WithRatisStreaming.java @@ -20,7 +20,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.om.OMConfigKeys; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Timeout; @@ -38,7 +37,6 @@ public static void init() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); conf.setBoolean(ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE, false); - conf.setBoolean(OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, true); conf.setBoolean(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true); conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, true); From f2e6d38ad7160095984bedd774c3d3c22d30e7ce Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Wed, 1 Jan 2025 11:17:07 +0100 Subject: [PATCH 018/168] HDDS-11994. 
Convert Freon to pluggable model (#7620) --- .../hdds/cli/ExtensibleParentCommand.java | 6 ++- .../ozone/freon/BaseFreonGenerator.java | 3 +- .../ozone/freon/ChunkManagerDiskWrite.java | 2 + .../freon/ClosedContainerReplicator.java | 2 + .../ozone/freon/DNRPCLoadGenerator.java | 4 +- .../ozone/freon/DatanodeBlockPutter.java | 2 + .../ozone/freon/DatanodeChunkGenerator.java | 2 + .../ozone/freon/DatanodeChunkValidator.java | 2 + .../hadoop/ozone/freon/DatanodeSimulator.java | 4 +- .../FollowerAppendLogEntryGenerator.java | 2 + .../org/apache/hadoop/ozone/freon/Freon.java | 49 +++---------------- .../hadoop/ozone/freon/FreonSubcommand.java | 23 +++++++++ .../ozone/freon/HadoopDirTreeGenerator.java | 2 + .../hadoop/ozone/freon/HadoopFsGenerator.java | 2 + .../hadoop/ozone/freon/HadoopFsValidator.java | 2 + .../ozone/freon/HadoopNestedDirGenerator.java | 2 + .../hadoop/ozone/freon/HsyncGenerator.java | 2 + .../freon/LeaderAppendLogEntryGenerator.java | 2 + .../hadoop/ozone/freon/OmBucketGenerator.java | 2 + .../ozone/freon/OmBucketReadWriteFileOps.java | 3 +- .../ozone/freon/OmBucketReadWriteKeyOps.java | 3 +- .../hadoop/ozone/freon/OmBucketRemover.java | 2 + .../hadoop/ozone/freon/OmKeyGenerator.java | 2 + .../ozone/freon/OmMetadataGenerator.java | 2 + .../ozone/freon/OmRPCLoadGenerator.java | 2 + .../ozone/freon/OzoneClientCreator.java | 2 + .../ozone/freon/OzoneClientKeyGenerator.java | 2 + .../freon/OzoneClientKeyReadWriteListOps.java | 2 + .../ozone/freon/OzoneClientKeyRemover.java | 2 + .../ozone/freon/OzoneClientKeyValidator.java | 2 + .../ozone/freon/RandomKeyGenerator.java | 6 ++- .../ozone/freon/RangeKeysGenerator.java | 2 + .../hadoop/ozone/freon/S3BucketGenerator.java | 2 + .../hadoop/ozone/freon/S3KeyGenerator.java | 2 + .../ozone/freon/SCMThroughputBenchmark.java | 7 ++- .../hadoop/ozone/freon/SameKeyReader.java | 2 + .../ozone/freon/StreamingGenerator.java | 2 + .../containergenerator/GeneratorDatanode.java | 3 ++ .../freon/containergenerator/GeneratorOm.java | 4 ++ .../containergenerator/GeneratorScm.java | 4 ++ 40 files changed, 119 insertions(+), 54 deletions(-) create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FreonSubcommand.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/ExtensibleParentCommand.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/ExtensibleParentCommand.java index d4fde1b75cb..8f73787e82a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/ExtensibleParentCommand.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/ExtensibleParentCommand.java @@ -20,6 +20,8 @@ import picocli.CommandLine; import java.util.ServiceLoader; +import java.util.SortedMap; +import java.util.TreeMap; /** * Interface for parent commands that accept subcommands to be dynamically registered. 
@@ -40,11 +42,13 @@ static void addSubcommands(CommandLine cli) { if (command instanceof ExtensibleParentCommand) { ExtensibleParentCommand parentCommand = (ExtensibleParentCommand) command; ServiceLoader subcommands = ServiceLoader.load(parentCommand.subcommandType()); + SortedMap sorted = new TreeMap<>(); for (Object subcommand : subcommands) { final CommandLine.Command commandAnnotation = subcommand.getClass().getAnnotation(CommandLine.Command.class); CommandLine subcommandCommandLine = new CommandLine(subcommand, cli.getFactory()); - cli.addSubcommand(commandAnnotation.name(), subcommandCommandLine); + sorted.put(commandAnnotation.name(), subcommandCommandLine); } + sorted.forEach(cli::addSubcommand); } // process subcommands recursively diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java index 651166740d8..1a931b85534 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java @@ -73,8 +73,9 @@ /** * Base class for simplified performance tests. */ +@CommandLine.Command @SuppressWarnings("java:S2245") // no need for secure random -public class BaseFreonGenerator { +public class BaseFreonGenerator implements FreonSubcommand { private static final Logger LOG = LoggerFactory.getLogger(BaseFreonGenerator.class); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ChunkManagerDiskWrite.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ChunkManagerDiskWrite.java index e42d660392b..e8b9fa4124d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ChunkManagerDiskWrite.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ChunkManagerDiskWrite.java @@ -43,6 +43,7 @@ import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; import com.codahale.metrics.Timer; +import org.kohsuke.MetaInfServices; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; @@ -60,6 +61,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) @SuppressWarnings("java:S2245") // no need for secure random public class ChunkManagerDiskWrite extends BaseFreonGenerator implements Callable { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java index 656251424b2..1c4f3601b3c 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java @@ -45,6 +45,7 @@ import org.apache.hadoop.ozone.container.replication.ReplicationTask; import org.apache.hadoop.ozone.container.replication.SimpleContainerDownloader; import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand; +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.Option; @@ -71,6 +72,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) public class ClosedContainerReplicator extends BaseFreonGenerator implements Callable { diff --git 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DNRPCLoadGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DNRPCLoadGenerator.java index 926f3f4630a..d1f894a335c 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DNRPCLoadGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DNRPCLoadGenerator.java @@ -37,6 +37,7 @@ import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; import org.apache.hadoop.ozone.util.PayloadUtils; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.kohsuke.MetaInfServices; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine; @@ -59,6 +60,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) public class DNRPCLoadGenerator extends BaseFreonGenerator implements Callable { private static final Logger LOG = @@ -111,7 +113,7 @@ public class DNRPCLoadGenerator extends BaseFreonGenerator private Freon freon; // empy constructor for picocli - DNRPCLoadGenerator() { + public DNRPCLoadGenerator() { } @VisibleForTesting diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeBlockPutter.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeBlockPutter.java index 3e613d2d2c5..0189224e627 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeBlockPutter.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeBlockPutter.java @@ -39,6 +39,7 @@ import com.codahale.metrics.Timer; import org.apache.commons.lang3.RandomStringUtils; +import org.kohsuke.MetaInfServices; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; @@ -55,6 +56,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) @SuppressWarnings("java:S2245") // no need for secure random public class DatanodeBlockPutter extends BaseFreonGenerator implements Callable { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkGenerator.java index 7f0f5bb9e57..b47d81aad46 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkGenerator.java @@ -48,6 +48,7 @@ import com.codahale.metrics.Timer; import org.apache.commons.lang3.RandomStringUtils; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.kohsuke.MetaInfServices; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; @@ -62,6 +63,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) @SuppressWarnings("java:S2245") // no need for secure random public class DatanodeChunkGenerator extends BaseFreonGenerator implements Callable { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkValidator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkValidator.java index 0b1e34efe78..d0ee83af04f 100644 --- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkValidator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkValidator.java @@ -35,6 +35,7 @@ import com.codahale.metrics.Timer; import org.apache.hadoop.ozone.common.OzoneChecksumException; +import org.kohsuke.MetaInfServices; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; @@ -49,6 +50,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) public class DatanodeChunkValidator extends BaseFreonGenerator implements Callable { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java index 353bc447939..03fd6301d52 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeSimulator.java @@ -57,6 +57,7 @@ import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; +import org.kohsuke.MetaInfServices; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine; @@ -119,7 +120,8 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) -public class DatanodeSimulator implements Callable { +@MetaInfServices(FreonSubcommand.class) +public class DatanodeSimulator implements Callable, FreonSubcommand { private static final Logger LOGGER = LoggerFactory.getLogger( DatanodeSimulator.class); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FollowerAppendLogEntryGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FollowerAppendLogEntryGenerator.java index 76ef7888d0a..706bd667b39 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FollowerAppendLogEntryGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FollowerAppendLogEntryGenerator.java @@ -68,6 +68,7 @@ import org.apache.ratis.thirdparty.io.grpc.netty.NettyChannelBuilder; import org.apache.ratis.thirdparty.io.grpc.stub.StreamObserver; import org.apache.ratis.util.Preconditions; +import org.kohsuke.MetaInfServices; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; @@ -88,6 +89,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) @SuppressWarnings("java:S2245") // no need for secure random public class FollowerAppendLogEntryGenerator extends BaseAppendLogGenerator implements Callable, StreamObserver { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java index 435d54079fe..b34f9704dd3 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java @@ -18,14 +18,12 @@ import java.io.IOException; +import org.apache.hadoop.hdds.cli.ExtensibleParentCommand; import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import 
org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.hdds.utils.HddsServerUtil; -import org.apache.hadoop.ozone.freon.containergenerator.GeneratorDatanode; -import org.apache.hadoop.ozone.freon.containergenerator.GeneratorOm; -import org.apache.hadoop.ozone.freon.containergenerator.GeneratorScm; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -40,47 +38,9 @@ @Command( name = "ozone freon", description = "Load generator and tester tool for ozone", - subcommands = { - RandomKeyGenerator.class, - OzoneClientKeyGenerator.class, - OzoneClientKeyValidator.class, - OzoneClientKeyRemover.class, - OmKeyGenerator.class, - OmBucketGenerator.class, - OmBucketRemover.class, - HadoopFsGenerator.class, - HadoopNestedDirGenerator.class, - HadoopDirTreeGenerator.class, - HadoopFsValidator.class, - SameKeyReader.class, - S3KeyGenerator.class, - S3BucketGenerator.class, - DatanodeChunkGenerator.class, - DatanodeChunkValidator.class, - DatanodeBlockPutter.class, - FollowerAppendLogEntryGenerator.class, - ChunkManagerDiskWrite.class, - LeaderAppendLogEntryGenerator.class, - GeneratorOm.class, - GeneratorScm.class, - GeneratorDatanode.class, - ClosedContainerReplicator.class, - StreamingGenerator.class, - SCMThroughputBenchmark.class, - OmBucketReadWriteFileOps.class, - OmBucketReadWriteKeyOps.class, - OmRPCLoadGenerator.class, - OzoneClientKeyReadWriteListOps.class, - RangeKeysGenerator.class, - DatanodeSimulator.class, - OmMetadataGenerator.class, - DNRPCLoadGenerator.class, - HsyncGenerator.class, - OzoneClientCreator.class, - }, versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) -public class Freon extends GenericCli { +public class Freon extends GenericCli implements ExtensibleParentCommand { public static final Logger LOG = LoggerFactory.getLogger(Freon.class); @@ -102,6 +62,11 @@ public int execute(String[] argv) { return super.execute(argv); } + @Override + public Class subcommandType() { + return FreonSubcommand.class; + } + public void stopHttpServer() { if (freonHttpServer != null) { try { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FreonSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FreonSubcommand.java new file mode 100644 index 00000000000..36cb5400ef6 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/FreonSubcommand.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.freon; + +/** Marker interface for subcommands to be registered for {@code ozone freon}. 
*/ +public interface FreonSubcommand { + // marker +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java index 5bc2c409318..b1480548802 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopDirTreeGenerator.java @@ -22,6 +22,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.conf.StorageSize; +import org.kohsuke.MetaInfServices; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; @@ -41,6 +42,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) @SuppressWarnings("java:S2245") // no need for secure random public class HadoopDirTreeGenerator extends HadoopBaseFreonGenerator implements Callable { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsGenerator.java index 1f910c9398a..9497a78b629 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsGenerator.java @@ -25,6 +25,7 @@ import com.codahale.metrics.Timer; import org.apache.hadoop.hdds.conf.StorageSize; +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.Option; @@ -37,6 +38,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) public class HadoopFsGenerator extends HadoopBaseFreonGenerator implements Callable { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsValidator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsValidator.java index 1566eaed8ae..81be83078dd 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsValidator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsValidator.java @@ -25,6 +25,7 @@ import com.codahale.metrics.Timer; import org.apache.commons.io.IOUtils; +import org.kohsuke.MetaInfServices; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; @@ -38,6 +39,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) public class HadoopFsValidator extends HadoopBaseFreonGenerator implements Callable { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopNestedDirGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopNestedDirGenerator.java index ff74a54fbc6..8b76426b69d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopNestedDirGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopNestedDirGenerator.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.commons.lang3.RandomStringUtils; +import org.kohsuke.MetaInfServices; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; @@ -37,6 +38,7 @@ versionProvider 
= HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) @SuppressWarnings("java:S2245") // no need for secure random public class HadoopNestedDirGenerator extends HadoopBaseFreonGenerator implements Callable { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HsyncGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HsyncGenerator.java index 2cfcbc9be97..7aa45c857ff 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HsyncGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HsyncGenerator.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.util.PayloadUtils; +import org.kohsuke.MetaInfServices; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine; @@ -57,6 +58,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) public class HsyncGenerator extends BaseFreonGenerator implements Callable { private static final Logger LOG = LoggerFactory.getLogger(HsyncGenerator.class); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/LeaderAppendLogEntryGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/LeaderAppendLogEntryGenerator.java index 9038f379186..523c2c6f0da 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/LeaderAppendLogEntryGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/LeaderAppendLogEntryGenerator.java @@ -63,6 +63,7 @@ import org.apache.ratis.thirdparty.io.grpc.ManagedChannel; import org.apache.ratis.thirdparty.io.grpc.netty.NegotiationType; import org.apache.ratis.thirdparty.io.grpc.netty.NettyChannelBuilder; +import org.kohsuke.MetaInfServices; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; @@ -81,6 +82,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) @SuppressWarnings("java:S2245") // no need for secure random public class LeaderAppendLogEntryGenerator extends BaseAppendLogGenerator implements diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketGenerator.java index 27ebc877633..6a95549b6e7 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketGenerator.java @@ -26,6 +26,7 @@ import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import com.codahale.metrics.Timer; +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.Option; @@ -38,6 +39,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) public class OmBucketGenerator extends BaseFreonGenerator implements Callable { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketReadWriteFileOps.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketReadWriteFileOps.java index 80207a8860d..eb732cea101 100644 --- 
a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketReadWriteFileOps.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketReadWriteFileOps.java @@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.OzoneConsts; +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.Option; @@ -39,7 +40,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) - +@MetaInfServices(FreonSubcommand.class) @SuppressWarnings("java:S2245") // no need for secure random public class OmBucketReadWriteFileOps extends AbstractOmBucketReadWriteOps { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketReadWriteKeyOps.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketReadWriteKeyOps.java index abc7ad5002f..14c9812d585 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketReadWriteKeyOps.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketReadWriteKeyOps.java @@ -23,6 +23,7 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; +import org.kohsuke.MetaInfServices; import picocli.CommandLine; import picocli.CommandLine.Command; import picocli.CommandLine.Option; @@ -43,7 +44,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) - +@MetaInfServices(FreonSubcommand.class) @SuppressWarnings("java:S2245") // no need for secure random public class OmBucketReadWriteKeyOps extends AbstractOmBucketReadWriteOps { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketRemover.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketRemover.java index f8ab5482d42..d696ab24df5 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketRemover.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketRemover.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.Option; @@ -34,6 +35,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) public class OmBucketRemover extends BaseFreonGenerator implements Callable { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java index d5fbdc75f19..2d6ac882c43 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java @@ -31,6 +31,7 @@ import com.codahale.metrics.Timer; import org.apache.hadoop.security.UserGroupInformation; +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.Mixin; import picocli.CommandLine.Option; @@ -46,6 +47,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) 
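// Illustration (not part of the patch): the hunks above and below repeat the same recipe for
// every Freon generator - annotate the class with @MetaInfServices(FreonSubcommand.class) so a
// provider entry is generated under META-INF/services, then let the ServiceLoader-based
// addSubcommands() shown earlier pick the classes up and register them sorted by name.
// This sketch uses made-up names (DemoTool, DemoSubcommand, DemoGenerator); it is a minimal,
// self-contained approximation of that wiring, not the actual Ozone classes.
import java.util.ServiceLoader;
import java.util.SortedMap;
import java.util.TreeMap;

import org.kohsuke.MetaInfServices;
import picocli.CommandLine;
import picocli.CommandLine.Command;

@Command(name = "demo-tool", mixinStandardHelpOptions = true)
public class DemoTool {

  /** Marker interface playing the role of FreonSubcommand. */
  public interface DemoSubcommand { }

  /** One pluggable subcommand; @MetaInfServices writes the provider file at compile time. */
  @Command(name = "demo-generator", description = "Example pluggable subcommand")
  @MetaInfServices(DemoSubcommand.class)
  public static class DemoGenerator implements DemoSubcommand, Runnable {
    @Override
    public void run() {
      System.out.println("running demo-generator");
    }
  }

  public static void main(String[] args) {
    CommandLine cli = new CommandLine(new DemoTool());
    // Discover implementations and register them sorted by command name, mirroring the
    // SortedMap-based registration added to addSubcommands() in this patch.
    SortedMap<String, CommandLine> sorted = new TreeMap<>();
    for (DemoSubcommand impl : ServiceLoader.load(DemoSubcommand.class)) {
      Command annotation = impl.getClass().getAnnotation(Command.class);
      sorted.put(annotation.name(), new CommandLine(impl));
    }
    sorted.forEach(cli::addSubcommand);
    System.exit(cli.execute(args));
  }
}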
+@MetaInfServices(FreonSubcommand.class) public class OmKeyGenerator extends BaseFreonGenerator implements Callable { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java index 4c277f07422..0c95a0c3686 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmMetadataGenerator.java @@ -53,6 +53,7 @@ import com.codahale.metrics.Timer; import org.apache.hadoop.security.UserGroupInformation; +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.Mixin; import picocli.CommandLine.Option; @@ -70,6 +71,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) public class OmMetadataGenerator extends BaseFreonGenerator implements Callable { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmRPCLoadGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmRPCLoadGenerator.java index 19121852970..78e3e9571ec 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmRPCLoadGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmRPCLoadGenerator.java @@ -25,6 +25,7 @@ import java.util.concurrent.Callable; import org.apache.hadoop.ozone.util.PayloadUtils; +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.Option; @@ -40,6 +41,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) public class OmRPCLoadGenerator extends BaseFreonGenerator implements Callable { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientCreator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientCreator.java index 2fc4cb48eac..907db9962fb 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientCreator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientCreator.java @@ -19,6 +19,7 @@ import com.codahale.metrics.Timer; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.kohsuke.MetaInfServices; import picocli.CommandLine; import java.util.concurrent.Callable; @@ -32,6 +33,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) public class OzoneClientCreator extends BaseFreonGenerator implements Callable { @CommandLine.Option(names = "--om-service-id", diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyGenerator.java index 4a1958247ad..cfc8ed85246 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyGenerator.java @@ -32,6 +32,7 @@ import com.codahale.metrics.Timer; import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.Mixin; import 
picocli.CommandLine.Option; @@ -45,6 +46,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) public class OzoneClientKeyGenerator extends BaseFreonGenerator implements Callable { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyReadWriteListOps.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyReadWriteListOps.java index ba7456ef64b..9160888e52a 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyReadWriteListOps.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyReadWriteListOps.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneKeyDetails; +import org.kohsuke.MetaInfServices; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine; @@ -49,6 +50,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) public class OzoneClientKeyReadWriteListOps extends BaseFreonGenerator implements Callable { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyRemover.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyRemover.java index d834a634d07..d5d76e4a46f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyRemover.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyRemover.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.Option; @@ -35,6 +36,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) public class OzoneClientKeyRemover extends BaseFreonGenerator implements Callable { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyValidator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyValidator.java index 220a8c7f1b7..8e9d256ee8e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyValidator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyValidator.java @@ -28,6 +28,7 @@ import com.codahale.metrics.Timer; import org.apache.commons.io.IOUtils; import org.apache.ratis.util.function.CheckedFunction; +import org.kohsuke.MetaInfServices; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; @@ -42,6 +43,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) public class OzoneClientKeyValidator extends BaseFreonGenerator implements Callable { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java index f7b51fedced..53359a03ebe 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java +++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RandomKeyGenerator.java @@ -68,6 +68,7 @@ import org.apache.commons.codec.digest.DigestUtils; import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.time.DurationFormatUtils; +import org.kohsuke.MetaInfServices; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; @@ -86,8 +87,9 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) @SuppressWarnings("java:S2245") // no need for secure random -public final class RandomKeyGenerator implements Callable { +public final class RandomKeyGenerator implements Callable, FreonSubcommand { @ParentCommand private Freon freon; @@ -240,7 +242,7 @@ enum FreonOps { private OzoneConfiguration ozoneConfiguration; private ProgressBar progressbar; - RandomKeyGenerator() { + public RandomKeyGenerator() { // for picocli } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RangeKeysGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RangeKeysGenerator.java index b826651a6fb..cf1fa29bb5c 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RangeKeysGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RangeKeysGenerator.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.conf.StorageSize; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.kohsuke.MetaInfServices; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine; @@ -45,6 +46,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) public class RangeKeysGenerator extends BaseFreonGenerator implements Callable { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3BucketGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3BucketGenerator.java index 0233c14470a..8d6c94a71de 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3BucketGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3BucketGenerator.java @@ -18,6 +18,7 @@ import com.codahale.metrics.Timer; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.kohsuke.MetaInfServices; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; @@ -44,6 +45,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) public class S3BucketGenerator extends S3EntityGenerator implements Callable { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3KeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3KeyGenerator.java index 3a3537d8e4b..77049a15a15 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3KeyGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3KeyGenerator.java @@ -36,6 +36,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_MULTIPART_MIN_SIZE; +import org.kohsuke.MetaInfServices; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; @@ -53,6 +54,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, 
showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) @SuppressWarnings("java:S2245") // no need for secure random public class S3KeyGenerator extends S3EntityGenerator implements Callable { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java index f6e08ee9e4f..623a74080ed 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SCMThroughputBenchmark.java @@ -59,6 +59,7 @@ import org.apache.http.client.HttpClient; import org.apache.http.client.methods.HttpGet; import org.apache.http.impl.client.HttpClientBuilder; +import org.kohsuke.MetaInfServices; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine; @@ -108,8 +109,9 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) @SuppressWarnings("java:S2245") // no need for secure random -public final class SCMThroughputBenchmark implements Callable { +public final class SCMThroughputBenchmark implements Callable, FreonSubcommand { public static final Logger LOG = LoggerFactory.getLogger(SCMThroughputBenchmark.class); @@ -185,9 +187,6 @@ public enum BenchmarkType { private ScmBlockLocationProtocol scmBlockClient; - private SCMThroughputBenchmark() { - } - @Override public Void call() throws Exception { conf = freon.getOzoneConf(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SameKeyReader.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SameKeyReader.java index bbd83d64dec..9f685237e61 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SameKeyReader.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SameKeyReader.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.Option; @@ -32,6 +33,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) public class SameKeyReader extends OzoneClientKeyValidator implements Callable { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/StreamingGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/StreamingGenerator.java index dd6e3e99c26..903eb6d6995 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/StreamingGenerator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/StreamingGenerator.java @@ -24,6 +24,7 @@ import org.apache.hadoop.ozone.container.stream.DirectoryServerSource; import org.apache.hadoop.ozone.container.stream.StreamingClient; import org.apache.hadoop.ozone.container.stream.StreamingServer; +import org.kohsuke.MetaInfServices; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine; @@ -44,6 +45,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) public class StreamingGenerator extends BaseFreonGenerator implements Callable { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorDatanode.java 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorDatanode.java index dbca12c8b26..0300b4832c3 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorDatanode.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorDatanode.java @@ -63,6 +63,8 @@ import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; import com.codahale.metrics.Timer; +import org.apache.hadoop.ozone.freon.FreonSubcommand; +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.Option; @@ -80,6 +82,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) @SuppressWarnings("java:S2245") // no need for secure random public class GeneratorDatanode extends BaseGenerator { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java index 7390488c815..921b0cd6875 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorOm.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hdds.utils.db.RocksDBConfiguration; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.freon.FreonSubcommand; import org.apache.hadoop.ozone.om.OMStorage; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -50,6 +51,8 @@ import com.codahale.metrics.Timer; import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS; import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; + +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.Option; @@ -61,6 +64,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) public class GeneratorOm extends BaseGenerator implements Callable { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorScm.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorScm.java index a15caab7d6b..6df740a1f27 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorScm.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/containergenerator/GeneratorScm.java @@ -33,6 +33,9 @@ import com.codahale.metrics.Timer; import static org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition.CONTAINERS; + +import org.apache.hadoop.ozone.freon.FreonSubcommand; +import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; @@ -45,6 +48,7 @@ versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true, showDefaultValues = true) +@MetaInfServices(FreonSubcommand.class) public class GeneratorScm extends BaseGenerator { private DBStore scmDb; From 0d1a0ce1f0a2b4d9e1b2c31cc2ef96e266583cf5 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Thu, 2 Jan 2025 11:13:51 +0100 Subject: [PATCH 019/168] HDDS-11880. 
Intermediate subcommands do not need to implement Callable (#7617) --- .../hdds/scm/cli/ContainerBalancerCommands.java | 15 +-------------- .../hdds/scm/cli/ReplicationManagerCommands.java | 15 +-------------- .../hadoop/hdds/scm/cli/SafeModeCommands.java | 15 +-------------- .../hadoop/hdds/scm/cli/cert/CertCommands.java | 16 +--------------- .../scm/cli/container/ContainerCommands.java | 15 +-------------- .../hdds/scm/cli/datanode/DatanodeCommands.java | 15 +-------------- .../hdds/scm/cli/datanode/StatusSubCommand.java | 13 +------------ .../hdds/scm/cli/pipeline/PipelineCommands.java | 15 +-------------- .../src/main/smoketest/admincli/container.robot | 2 +- .../src/main/smoketest/admincli/datanode.robot | 2 +- .../src/main/smoketest/admincli/pipeline.robot | 2 +- .../smoketest/admincli/replicationmanager.robot | 2 +- .../src/main/smoketest/admincli/safemode.robot | 2 +- .../admin/reconfig/ReconfigureCommands.java | 15 +-------------- .../admin/scm/DeletedBlocksTxnCommands.java | 13 +------------ .../ozone/debug/container/ContainerCommands.java | 15 +-------------- .../apache/hadoop/ozone/debug/ldb/RDBParser.java | 16 +--------------- .../hadoop/ozone/repair/ldb/RDBRepair.java | 14 +------------- .../apache/hadoop/ozone/repair/om/OMRepair.java | 13 +------------ .../hadoop/ozone/repair/quota/QuotaRepair.java | 13 +------------ .../ozone/shell/bucket/BucketCommands.java | 16 +--------------- .../hadoop/ozone/shell/keys/KeyCommands.java | 15 +-------------- .../ozone/shell/prefix/PrefixCommands.java | 15 +-------------- .../ozone/shell/snapshot/SnapshotCommands.java | 15 +-------------- .../ozone/shell/tenant/TenantUserCommands.java | 14 +------------- .../hadoop/ozone/shell/token/TokenCommands.java | 15 +-------------- .../ozone/shell/volume/VolumeCommands.java | 15 +-------------- 27 files changed, 27 insertions(+), 306 deletions(-) diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerCommands.java index 2264f096a28..408f5a53d64 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerBalancerCommands.java @@ -18,14 +18,9 @@ package org.apache.hadoop.hdds.scm.cli; import org.apache.hadoop.hdds.cli.AdminSubcommand; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; -import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.Spec; - -import java.util.concurrent.Callable; /** * Subcommand to group container balancer related operations. 
@@ -90,14 +85,6 @@ ContainerBalancerStatusSubcommand.class }) @MetaInfServices(AdminSubcommand.class) -public class ContainerBalancerCommands implements Callable, AdminSubcommand { - - @Spec - private CommandSpec spec; +public class ContainerBalancerCommands implements AdminSubcommand { - @Override - public Void call() throws Exception { - GenericCli.missingSubcommand(spec); - return null; - } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java index a16e5227514..b5c962d0090 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ReplicationManagerCommands.java @@ -17,16 +17,11 @@ */ package org.apache.hadoop.hdds.scm.cli; -import java.util.concurrent.Callable; - import org.apache.hadoop.hdds.cli.AdminSubcommand; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; -import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.Spec; /** * Subcommand to group replication manager related operations. @@ -42,14 +37,6 @@ ReplicationManagerStatusSubcommand.class }) @MetaInfServices(AdminSubcommand.class) -public class ReplicationManagerCommands implements Callable, AdminSubcommand { - - @Spec - private CommandSpec spec; +public class ReplicationManagerCommands implements AdminSubcommand { - @Override - public Void call() throws Exception { - GenericCli.missingSubcommand(spec); - return null; - } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java index 49f73e6faea..de0c1e64a70 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SafeModeCommands.java @@ -17,16 +17,11 @@ */ package org.apache.hadoop.hdds.scm.cli; -import java.util.concurrent.Callable; - import org.apache.hadoop.hdds.cli.AdminSubcommand; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; -import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.Spec; /** * Subcommand to group safe mode related operations. 
@@ -42,14 +37,6 @@ SafeModeWaitSubcommand.class }) @MetaInfServices(AdminSubcommand.class) -public class SafeModeCommands implements Callable, AdminSubcommand { - - @Spec - private CommandSpec spec; +public class SafeModeCommands implements AdminSubcommand { - @Override - public Void call() throws Exception { - GenericCli.missingSubcommand(spec); - return null; - } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CertCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CertCommands.java index 211e3bb0925..c78ec1ed020 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CertCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/cert/CertCommands.java @@ -17,16 +17,11 @@ */ package org.apache.hadoop.hdds.scm.cli.cert; -import java.util.concurrent.Callable; - import org.apache.hadoop.hdds.cli.AdminSubcommand; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; -import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.Spec; /** * Sub command for certificate related operations. @@ -43,14 +38,5 @@ }) @MetaInfServices(AdminSubcommand.class) -public class CertCommands implements Callable, AdminSubcommand { - - @Spec - private CommandSpec spec; - - @Override - public Void call() throws Exception { - GenericCli.missingSubcommand(spec); - return null; - } +public class CertCommands implements AdminSubcommand { } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java index cf0c63adca3..393d7e88f2d 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommands.java @@ -17,16 +17,11 @@ */ package org.apache.hadoop.hdds.scm.cli.container; -import java.util.concurrent.Callable; - import org.apache.hadoop.hdds.cli.AdminSubcommand; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; -import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.Spec; /** * Subcommand to group container related operations. 
@@ -45,14 +40,6 @@ UpgradeSubcommand.class }) @MetaInfServices(AdminSubcommand.class) -public class ContainerCommands implements Callable, AdminSubcommand { - - @Spec - private CommandSpec spec; +public class ContainerCommands implements AdminSubcommand { - @Override - public Void call() throws Exception { - GenericCli.missingSubcommand(spec); - return null; - } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java index 6c020e46f37..b01a8996b28 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DatanodeCommands.java @@ -18,14 +18,9 @@ package org.apache.hadoop.hdds.scm.cli.datanode; import org.apache.hadoop.hdds.cli.AdminSubcommand; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.kohsuke.MetaInfServices; import picocli.CommandLine; -import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.Spec; - -import java.util.concurrent.Callable; /** * Subcommand for datanode related operations. @@ -44,14 +39,6 @@ UsageInfoSubcommand.class }) @MetaInfServices(AdminSubcommand.class) -public class DatanodeCommands implements Callable, AdminSubcommand { - - @Spec - private CommandSpec spec; +public class DatanodeCommands implements AdminSubcommand { - @Override - public Void call() throws Exception { - GenericCli.missingSubcommand(spec); - return null; - } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java index b33a5d1ea96..ab4b98d6818 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/StatusSubCommand.java @@ -17,11 +17,8 @@ * limitations under the License. */ -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import picocli.CommandLine; import picocli.CommandLine.Command; -import java.util.concurrent.Callable; /** * View status of one or more datanodes. 
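// Illustration (not part of the patch): with the boilerplate call()/missingSubcommand() methods
// removed above, a grouping command relies on picocli itself to reject a bare invocation. The
// smoketest expectations below change from "Incomplete command" to picocli's own
// "Missing required subcommand" message for exactly that reason. Class and command names here
// (GroupingExample, DatanodeCommands, StatusCommand) are made up for this sketch.
import picocli.CommandLine;
import picocli.CommandLine.Command;

public class GroupingExample {

  @Command(name = "status", description = "Leaf command that does the actual work")
  static class StatusCommand implements Runnable {
    @Override
    public void run() {
      System.out.println("status: OK");
    }
  }

  // No Callable, no @Spec field, no manual missing-subcommand handling - just the annotation.
  @Command(name = "datanode", subcommands = StatusCommand.class,
      description = "Groups datanode related operations")
  static class DatanodeCommands { }

  public static void main(String[] args) {
    // With no arguments this exits non-zero after picocli prints the missing-subcommand error
    // and usage help; with the argument "status" it runs the leaf command.
    System.exit(new CommandLine(new DatanodeCommands()).execute(args));
  }
}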
@@ -35,14 +32,6 @@ DecommissionStatusSubCommand.class }) -public class StatusSubCommand implements Callable { +public class StatusSubCommand { - @CommandLine.Spec - private CommandLine.Model.CommandSpec spec; - - @Override - public Void call() throws Exception { - GenericCli.missingSubcommand(spec); - return null; - } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java index 9c391035560..531c8fd9e65 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/PipelineCommands.java @@ -17,16 +17,11 @@ */ package org.apache.hadoop.hdds.scm.cli.pipeline; -import java.util.concurrent.Callable; - import org.apache.hadoop.hdds.cli.AdminSubcommand; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; -import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.Spec; /** * Subcommand to group pipeline related operations. @@ -44,14 +39,6 @@ ClosePipelineSubcommand.class }) @MetaInfServices(AdminSubcommand.class) -public class PipelineCommands implements Callable, AdminSubcommand { - - @Spec - private CommandSpec spec; +public class PipelineCommands implements AdminSubcommand { - @Override - public Void call() throws Exception { - GenericCli.missingSubcommand(spec); - return null; - } } diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot index 83c0731ff76..564fd1f5d69 100644 --- a/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/container.robot @@ -105,7 +105,7 @@ Close container Incomplete command ${output} = Execute And Ignore Error ozone admin container - Should contain ${output} Incomplete command + Should contain ${output} Missing required subcommand Should contain ${output} list Should contain ${output} info Should contain ${output} create diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/datanode.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/datanode.robot index 03cb03979ec..50837672d92 100644 --- a/hadoop-ozone/dist/src/main/smoketest/admincli/datanode.robot +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/datanode.robot @@ -90,7 +90,7 @@ Get usage info with invalid address Incomplete command ${output} = Execute And Ignore Error ozone admin datanode - Should contain ${output} Incomplete command + Should contain ${output} Missing required subcommand Should contain ${output} list #List datanodes on unknown host diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/pipeline.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/pipeline.robot index d81d0ea1a66..9d18b26876d 100644 --- a/hadoop-ozone/dist/src/main/smoketest/admincli/pipeline.robot +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/pipeline.robot @@ -60,7 +60,7 @@ Close pipeline Incomplete command ${output} = Execute And Ignore Error ozone admin pipeline - Should contain ${output} Incomplete command + Should contain ${output} Missing required subcommand Should contain ${output} close Should contain ${output} create Should contain ${output} deactivate diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/replicationmanager.robot 
b/hadoop-ozone/dist/src/main/smoketest/admincli/replicationmanager.robot index 4cb07a291ee..9b39cecc9df 100644 --- a/hadoop-ozone/dist/src/main/smoketest/admincli/replicationmanager.robot +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/replicationmanager.robot @@ -46,7 +46,7 @@ Start replicationmanager Incomplete command ${output} = Execute And Ignore Error ozone admin replicationmanager - Should contain ${output} Incomplete command + Should contain ${output} Missing required subcommand Should contain ${output} start Should contain ${output} stop Should contain ${output} status diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/safemode.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/safemode.robot index 9ec5dfb95d4..7f0c93dcab6 100644 --- a/hadoop-ozone/dist/src/main/smoketest/admincli/safemode.robot +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/safemode.robot @@ -37,7 +37,7 @@ Wait for safemode exit Incomplete command ${output} = Execute And Ignore Error ozone admin safemode - Should contain ${output} Incomplete command + Should contain ${output} Missing required subcommand Should contain ${output} status Should contain ${output} exit Should contain ${output} wait diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureCommands.java index ba8b35d77ea..ed4becbddeb 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/reconfig/ReconfigureCommands.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.admin.reconfig; import org.apache.hadoop.hdds.cli.AdminSubcommand; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.ozone.admin.OzoneAdmin; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -27,12 +26,9 @@ import org.kohsuke.MetaInfServices; import picocli.CommandLine; import picocli.CommandLine.Command; -import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.Spec; import java.io.IOException; import java.util.List; -import java.util.concurrent.Callable; /** * Subcommand to group reconfigure OM related operations. 
@@ -48,14 +44,11 @@ ReconfigurePropertiesSubcommand.class }) @MetaInfServices(AdminSubcommand.class) -public class ReconfigureCommands implements Callable, AdminSubcommand { +public class ReconfigureCommands implements AdminSubcommand { @CommandLine.ParentCommand private OzoneAdmin parent; - @Spec - private CommandSpec spec; - @CommandLine.Option(names = {"--service"}, description = "service: OM, SCM, DATANODE.", required = true) @@ -72,12 +65,6 @@ public class ReconfigureCommands implements Callable, AdminSubcommand { required = false) private boolean batchReconfigDatanodes; - @Override - public Void call() throws Exception { - GenericCli.missingSubcommand(spec); - return null; - } - public String getAddress() { return address; } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/DeletedBlocksTxnCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/DeletedBlocksTxnCommands.java index 3473cd8d032..0af1a749e20 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/DeletedBlocksTxnCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/DeletedBlocksTxnCommands.java @@ -17,12 +17,9 @@ */ package org.apache.hadoop.ozone.admin.scm; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import picocli.CommandLine; -import java.util.concurrent.Callable; - /** * Subcommand to group container related operations. */ @@ -35,15 +32,7 @@ GetFailedDeletedBlocksTxnSubcommand.class, ResetDeletedBlockRetryCountSubcommand.class, }) -public class DeletedBlocksTxnCommands implements Callable { - - @CommandLine.Spec - private CommandLine.Model.CommandSpec spec; +public class DeletedBlocksTxnCommands { - @Override - public Void call() throws Exception { - GenericCli.missingSubcommand(spec); - return null; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java index 6e3792793db..3df8330136c 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.debug.container; import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.cli.DebugSubcommand; import org.apache.hadoop.hdds.conf.ConfigurationSource; @@ -48,9 +47,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; -import picocli.CommandLine.Model.CommandSpec; import picocli.CommandLine.ParentCommand; -import picocli.CommandLine.Spec; import java.io.File; import java.io.IOException; @@ -63,7 +60,6 @@ import java.util.List; import java.util.Map; import java.util.Properties; -import java.util.concurrent.Callable; import java.util.stream.Stream; /** @@ -82,7 +78,7 @@ InspectSubcommand.class }) @MetaInfServices(DebugSubcommand.class) -public class ContainerCommands implements Callable, DebugSubcommand { +public class ContainerCommands implements DebugSubcommand { private static final Logger LOG = LoggerFactory.getLogger(ContainerCommands.class); @@ -90,19 +86,10 @@ public class ContainerCommands implements Callable, DebugSubcommand { @ParentCommand private OzoneDebug parent; - @Spec - private 
CommandSpec spec; - private MutableVolumeSet volumeSet; private ContainerController controller; - @Override - public Void call() throws Exception { - GenericCli.missingSubcommand(spec); - return null; - } - OzoneConfiguration getOzoneConf() { return parent.getOzoneConf(); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/RDBParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/RDBParser.java index 4e945c7c418..f07e8f35fba 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/RDBParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/RDBParser.java @@ -18,15 +18,10 @@ package org.apache.hadoop.ozone.debug.ldb; -import java.util.concurrent.Callable; - -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.DebugSubcommand; import org.kohsuke.MetaInfServices; import picocli.CommandLine; -import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.Spec; /** * Tool that parses rocksdb file. @@ -41,10 +36,7 @@ }, description = "Parse rocksdb file content") @MetaInfServices(DebugSubcommand.class) -public class RDBParser implements Callable, DebugSubcommand { - - @Spec - private CommandSpec spec; +public class RDBParser implements DebugSubcommand { @CommandLine.Option(names = {"--db"}, required = true, @@ -58,10 +50,4 @@ public String getDbPath() { public void setDbPath(String dbPath) { this.dbPath = dbPath; } - - @Override - public Void call() throws Exception { - GenericCli.missingSubcommand(spec); - return null; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/RDBRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/RDBRepair.java index 01ad705b201..39b957435f5 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/RDBRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/RDBRepair.java @@ -18,13 +18,10 @@ package org.apache.hadoop.ozone.repair.ldb; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.RepairSubcommand; import org.kohsuke.MetaInfServices; import picocli.CommandLine; -import java.util.concurrent.Callable; - /** * Ozone Repair CLI for RocksDB. 
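// Illustration (not part of the patch): RDBParser above and RDBRepair below keep only their
// shared --db option once call() is gone; leaf subcommands obtain such shared options through
// picocli's @ParentCommand injection. The sketch uses invented names (DbToolExample, DbTool,
// ScanCommand) and is a simplified stand-in for the real RDBParser/RDBRepair wiring.
import java.util.concurrent.Callable;

import picocli.CommandLine;
import picocli.CommandLine.Command;
import picocli.CommandLine.Option;
import picocli.CommandLine.ParentCommand;

public class DbToolExample {

  @Command(name = "db-tool", subcommands = ScanCommand.class,
      description = "Grouping command that only carries the shared --db option")
  static class DbTool {
    @Option(names = "--db", required = true, description = "Database path")
    private String dbPath;

    String getDbPath() {
      return dbPath;
    }
  }

  @Command(name = "scan", description = "Leaf command that uses the shared option")
  static class ScanCommand implements Callable<Integer> {
    @ParentCommand
    private DbTool parent;  // injected by picocli before call() runs

    @Override
    public Integer call() {
      System.out.println("scanning " + parent.getDbPath());
      return 0;
    }
  }

  public static void main(String[] args) {
    // Example invocation: --db /tmp/om.db scan
    System.exit(new CommandLine(new DbTool()).execute(args));
  }
}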
*/ @@ -35,10 +32,7 @@ }, description = "Operational tool to repair RocksDB table.") @MetaInfServices(RepairSubcommand.class) -public class RDBRepair implements Callable, RepairSubcommand { - - @CommandLine.Spec - private CommandLine.Model.CommandSpec spec; +public class RDBRepair implements RepairSubcommand { @CommandLine.Option(names = {"--db"}, required = true, @@ -48,10 +42,4 @@ public class RDBRepair implements Callable, RepairSubcommand { public String getDbPath() { return dbPath; } - - @Override - public Void call() { - GenericCli.missingSubcommand(spec); - return null; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java index 56d42d23f49..d132fea3752 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java @@ -18,13 +18,10 @@ package org.apache.hadoop.ozone.repair.om; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.RepairSubcommand; import org.kohsuke.MetaInfServices; import picocli.CommandLine; -import java.util.concurrent.Callable; - /** * Ozone Repair CLI for OM. */ @@ -34,14 +31,6 @@ }, description = "Operational tool to repair OM.") @MetaInfServices(RepairSubcommand.class) -public class OMRepair implements Callable, RepairSubcommand { - - @CommandLine.Spec - private CommandLine.Model.CommandSpec spec; +public class OMRepair implements RepairSubcommand { - @Override - public Void call() { - GenericCli.missingSubcommand(spec); - return null; - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaRepair.java index 5c7b6b2fc4b..988b42ceb91 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaRepair.java @@ -20,8 +20,6 @@ import java.io.IOException; import java.util.Collection; -import java.util.concurrent.Callable; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.RepairSubcommand; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ipc.ProtobufRpcEngine; @@ -51,20 +49,11 @@ }, description = "Operational tool to repair quota in OM DB.") @MetaInfServices(RepairSubcommand.class) -public class QuotaRepair implements Callable, RepairSubcommand { - - @CommandLine.Spec - private CommandLine.Model.CommandSpec spec; +public class QuotaRepair implements RepairSubcommand { @CommandLine.ParentCommand private OzoneRepair parent; - @Override - public Void call() { - GenericCli.missingSubcommand(spec); - return null; - } - public OzoneManagerProtocolClientSideTranslatorPB createOmClient( String omServiceID, String omHost, diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java index f4be05aab7d..2550186253a 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java @@ -18,14 +18,9 @@ package org.apache.hadoop.ozone.shell.bucket; -import java.util.concurrent.Callable; - import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import 
org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.ozone.shell.Shell; import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; /** * Subcommands for the bucket related operations. @@ -50,14 +45,5 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class BucketCommands implements Callable { - - @ParentCommand - private Shell shell; - - @Override - public Void call() throws Exception { - throw new MissingSubcommandException( - this.shell.getCmd().getSubcommands().get("bucket")); - } +public class BucketCommands { } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java index 390db103899..68dee19c0e8 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java @@ -18,14 +18,9 @@ package org.apache.hadoop.ozone.shell.keys; -import java.util.concurrent.Callable; - import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.ozone.shell.Shell; import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; /** * Subcommand to group key related operations. @@ -50,14 +45,6 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class KeyCommands implements Callable { - - @ParentCommand - private Shell shell; +public class KeyCommands { - @Override - public Void call() throws Exception { - throw new MissingSubcommandException( - this.shell.getCmd().getSubcommands().get("key")); - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/prefix/PrefixCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/prefix/PrefixCommands.java index 6216fce08d7..b5b314c0d3f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/prefix/PrefixCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/prefix/PrefixCommands.java @@ -18,14 +18,9 @@ package org.apache.hadoop.ozone.shell.prefix; -import java.util.concurrent.Callable; - import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.ozone.shell.Shell; import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; /** * Subcommands for the prefix related operations. 
@@ -40,14 +35,6 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class PrefixCommands implements Callable { - - @ParentCommand - private Shell shell; +public class PrefixCommands { - @Override - public Void call() throws Exception { - throw new MissingSubcommandException( - this.shell.getCmd().getSubcommands().get("prefix")); - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java index dbeb6cda0a4..de534a435e9 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java @@ -18,14 +18,9 @@ package org.apache.hadoop.ozone.shell.snapshot; -import java.util.concurrent.Callable; - import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.ozone.shell.Shell; import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; /** * Subcommands for the snapshot related operations. @@ -43,14 +38,6 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class SnapshotCommands implements Callable { - - @ParentCommand - private Shell shell; +public class SnapshotCommands { - @Override - public Void call() throws Exception { - throw new MissingSubcommandException( - this.shell.getCmd().getSubcommands().get("snapshot")); - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantUserCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantUserCommands.java index 8caeb232a9e..de11753a0ba 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantUserCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantUserCommands.java @@ -18,12 +18,8 @@ package org.apache.hadoop.ozone.shell.tenant; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.ozone.shell.Shell; import picocli.CommandLine; -import java.util.concurrent.Callable; - /** * Subcommand to group tenant user related operations. 
*/ @@ -41,14 +37,6 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class TenantUserCommands implements Callable { - - @CommandLine.ParentCommand - private Shell shell; +public class TenantUserCommands { - @Override - public Void call() throws Exception { - throw new MissingSubcommandException( - this.shell.getCmd().getSubcommands().get("user")); - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/TokenCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/TokenCommands.java index df504313840..280068b72f7 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/TokenCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/token/TokenCommands.java @@ -18,14 +18,9 @@ package org.apache.hadoop.ozone.shell.token; -import java.util.concurrent.Callable; - import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.ozone.shell.Shell; import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; /** * Sub-command to group token related operations. @@ -40,14 +35,6 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class TokenCommands implements Callable { - - @ParentCommand - private Shell shell; +public class TokenCommands { - @Override - public Void call() throws Exception { - throw new MissingSubcommandException( - this.shell.getCmd().getSubcommands().get("token")); - } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/VolumeCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/VolumeCommands.java index 0a87e7a4065..263cabe0ec6 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/VolumeCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/VolumeCommands.java @@ -18,14 +18,9 @@ package org.apache.hadoop.ozone.shell.volume; -import java.util.concurrent.Callable; - import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.MissingSubcommandException; -import org.apache.hadoop.ozone.shell.Shell; import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; /** * Subcommand to group volume related operations. @@ -48,14 +43,6 @@ }, mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class VolumeCommands implements Callable { - - @ParentCommand - private Shell shell; +public class VolumeCommands { - @Override - public Void call() throws Exception { - throw new MissingSubcommandException( - this.shell.getCmd().getSubcommands().get("volume")); - } } From 9aae7a5c0ba19df1777c2bb4ef2329d5a5b79911 Mon Sep 17 00:00:00 2001 From: Abhishek Pal <43001336+devabhishekpal@users.noreply.github.com> Date: Thu, 2 Jan 2025 16:40:26 +0530 Subject: [PATCH 020/168] HDDS-9791. 
Add tests for Datanodes page (#7626) --- .../recon/ozone-recon-web/package.json | 1 + .../recon/ozone-recon-web/pnpm-lock.yaml | 12 + .../__tests__/datanodes/Datanodes.test.tsx | 191 ++++++++++++++++ .../datanodes/DatanodesTable.test.tsx | 151 +++++++++++++ .../src/__tests__/locators/locators.ts | 13 +- .../datanodeMocks/datanodeResponseMocks.ts | 212 ++++++++++++++++++ .../mocks/datanodeMocks/datanodeServer.ts | 72 ++++++ .../src/__tests__/utils/datanodes.utils.tsx | 23 ++ .../src/v2/components/search/search.tsx | 6 +- .../v2/components/tables/datanodesTable.tsx | 8 +- .../src/v2/pages/datanodes/datanodes.tsx | 13 +- 11 files changed, 691 insertions(+), 11 deletions(-) create mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/datanodes/Datanodes.test.tsx create mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/datanodes/DatanodesTable.test.tsx create mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/mocks/datanodeMocks/datanodeResponseMocks.ts create mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/mocks/datanodeMocks/datanodeServer.ts create mode 100644 hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/utils/datanodes.utils.tsx diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json index c2c046f1120..2b407eaebd9 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/package.json @@ -58,6 +58,7 @@ "devDependencies": { "@testing-library/jest-dom": "^6.4.8", "@testing-library/react": "^12.1.5", + "@testing-library/user-event": "^14.5.2", "@types/react": "16.8.15", "@types/react-dom": "16.8.4", "@types/react-router-dom": "^5.3.3", diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml index dfdbc7cedce..2b28bbf3d56 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml @@ -67,6 +67,9 @@ devDependencies: '@testing-library/react': specifier: ^12.1.5 version: 12.1.5(react-dom@16.14.0)(react@16.14.0) + '@testing-library/user-event': + specifier: ^14.5.2 + version: 14.5.2(@testing-library/dom@8.20.1) '@types/react': specifier: 16.8.15 version: 16.8.15 @@ -1222,6 +1225,15 @@ packages: react-dom: 16.14.0(react@16.14.0) dev: true + /@testing-library/user-event@14.5.2(@testing-library/dom@8.20.1): + resolution: {integrity: sha512-YAh82Wh4TIrxYLmfGcixwD18oIjyC1pFQC2Y01F2lzV2HTMiYrI0nze0FD0ocB//CKS/7jIUgae+adPqxK5yCQ==} + engines: {node: '>=12', npm: '>=6'} + peerDependencies: + '@testing-library/dom': '>=7.21.4' + dependencies: + '@testing-library/dom': 8.20.1 + dev: true + /@types/aria-query@5.0.4: resolution: {integrity: sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==} dev: true diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/datanodes/Datanodes.test.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/datanodes/Datanodes.test.tsx new file mode 100644 index 00000000000..a169e1ce34d --- 
/dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/datanodes/Datanodes.test.tsx @@ -0,0 +1,191 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +import React from 'react'; +import { + fireEvent, + render, + screen, + waitFor +} from '@testing-library/react'; +import userEvent from '@testing-library/user-event'; +import { rest } from "msw"; +import { vi } from 'vitest'; + +import Datanodes from '@/v2/pages/datanodes/datanodes'; +import * as commonUtils from '@/utils/common'; +import { datanodeServer } from '@/__tests__/mocks/datanodeMocks/datanodeServer'; +import { datanodeLocators, searchInputLocator } from '@/__tests__/locators/locators'; +import { waitForDNTable } from '@/__tests__/utils/datanodes.utils'; + +// Mock utility functions +vi.spyOn(commonUtils, 'showDataFetchError'); + +vi.mock('@/components/autoReloadPanel/autoReloadPanel', () => ({ + default: () =>
<div data-testid='auto-reload-panel' />
, +})); +vi.mock('@/v2/components/select/multiSelect.tsx', () => ({ + default: ({ onChange }: { onChange: Function }) => ( + + ), +})); + +describe('Datanodes Component', () => { + // Start and stop MSW server before and after all tests + beforeAll(() => datanodeServer.listen()); + afterEach(() => datanodeServer.resetHandlers()); + afterAll(() => datanodeServer.close()); + + test('renders component correctly', () => { + render(); + + expect(screen.getByText(/Datanodes/)).toBeInTheDocument(); + expect(screen.getByTestId('auto-reload-panel')).toBeInTheDocument(); + expect(screen.getByTestId('multi-select')).toBeInTheDocument(); + expect(screen.getByTestId(searchInputLocator)).toBeInTheDocument(); + }); + + test('Renders table with correct number of rows', async () => { + render(); + + // Wait for the data to load + const rows = await waitFor(() => screen.getAllByTestId(datanodeLocators.datanodeRowRegex)); + expect(rows).toHaveLength(5); // Based on the mocked DatanodeResponse + }); + + test('Loads data on mount', async () => { + render(); + // Wait for the data to be loaded into the table + const dnTable = await waitForDNTable(); + + // Ensure the correct data is displayed in the table + expect(dnTable).toHaveTextContent('ozone-datanode-1.ozone_default'); + expect(dnTable).toHaveTextContent('HEALTHY'); + }); + + test('Displays no data message if the datanodes API returns an empty array', async () => { + datanodeServer.use( + rest.get('api/v1/datanodes', (req, res, ctx) => { + return res(ctx.status(200), ctx.json({ totalCount: 0, datanodes: [] })); + }) + ); + + render(); + + // Wait for the no data message + await waitFor(() => expect(screen.getByText('No Data')).toBeInTheDocument()); + }); + + test('Handles search input change', async () => { + render(); + await waitForDNTable(); + + const searchInput = screen.getByTestId(searchInputLocator); + fireEvent.change(searchInput, { + target: { value: 'ozone-datanode-1' } + }); + // Sleep for 310ms to allow debounced search to take effect + await new Promise((r) => { setTimeout(r, 310) }); + const rows = await waitFor(() => screen.getAllByTestId(datanodeLocators.datanodeRowRegex)); + await waitFor(() => expect(rows).toHaveLength(1)); + }); + + test('Handles case-sensitive search', async () => { + render(); + await waitForDNTable(); + + const searchInput = screen.getByTestId(searchInputLocator); + fireEvent.change(searchInput, { + target: { value: 'DataNode' } + }); + await waitFor(() => expect(searchInput).toHaveValue('DataNode')); + // Sleep for 310ms to allow debounced search to take effect + await new Promise((r) => { setTimeout(r, 310) }) + + const rows = await waitFor(() => screen.getAllByTestId(datanodeLocators.datanodeRowRegex)); + expect(rows).toHaveLength(1); + }) + + test('Displays a message when no results match the search term', async () => { + render(); + const searchInput = screen.getByTestId(searchInputLocator); + + // Type a term that doesn't match any datanode + fireEvent.change(searchInput, { + target: { value: 'nonexistent-datanode' } + }); + + // Verify that no results message is displayed + await waitFor(() => expect(screen.getByText('No Data')).toBeInTheDocument()); + }); + + // Since this is a static response, even if we remove we will not get the truncated response from backend + // i.e response with the removed DN. 
So the table will always have the value even if we remove it + // causing this test to fail + test.skip('Shows modal on row selection and confirms removal', async () => { + render(); + + // Wait for the data to be loaded into the table + await waitForDNTable(); + + // Simulate selecting a row + // The first checkbox is for the table header "Select All" checkbox -> idx 0 + // Second checkbox is for the healthy DN -> idx 1 + // Third checkbox is the active one for Dead DN -> idx 2 + const checkbox = document.querySelectorAll('input.ant-checkbox-input'); + userEvent.click(checkbox[0]); + // Click the "Remove" button to open the modal + await waitFor(() => { + // Wait for the button to appear in screen + screen.getByTestId(datanodeLocators.datanodeRemoveButton); + }).then(() => { + userEvent.click(screen.getByText(/Remove/)); + }) + + // Confirm removal in the modal + await waitFor(() => { + // Wait for the button to appear in screen + screen.getByTestId(datanodeLocators.datanodeRemoveModal); + }).then(() => { + userEvent.click(screen.getByText(/OK/)); + }) + + // Wait for the removal operation to complete + await waitFor(() => + expect(screen.queryByText('ozone-datanode-3.ozone_default')).not.toBeInTheDocument() + ); + }); + + test('Handles API errors gracefully by showing error message', async () => { + // Set up MSW to return an error for the datanode API + datanodeServer.use( + rest.get('api/v1/datanodes', (req, res, ctx) => { + return res(ctx.status(500), ctx.json({ error: 'Internal Server Error' })); + }) + ); + + render(); + + // Wait for the error to be handled + await waitFor(() => + expect(commonUtils.showDataFetchError).toHaveBeenCalledWith('AxiosError: Request failed with status code 500') + ); + }); +}); \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/datanodes/DatanodesTable.test.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/datanodes/DatanodesTable.test.tsx new file mode 100644 index 00000000000..f1be5362ec1 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/datanodes/DatanodesTable.test.tsx @@ -0,0 +1,151 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import React from 'react'; +import { vi } from 'vitest'; +import { + fireEvent, + render, + screen, + waitFor +} from '@testing-library/react'; + +import { DatanodeTableProps } from '@/v2/types/datanode.types'; +import DatanodesTable from '@/v2/components/tables/datanodesTable'; +import { datanodeServer } from '@/__tests__/mocks/datanodeMocks/datanodeServer'; +import { waitForDNTable } from '@/__tests__/utils/datanodes.utils'; + +const defaultProps: DatanodeTableProps = { + loading: false, + selectedRows: [], + data: [], + decommissionUuids: [], + searchColumn: 'hostname', + searchTerm: '', + selectedColumns: [ + { label: 'Hostname', value: 'hostname' }, + { label: 'State', value: 'state' }, + ], + handleSelectionChange: vi.fn(), +}; + +function getDataWith(name: string, state: "HEALTHY" | "STALE" | "DEAD", uuid: number) { + return { + hostname: name, + uuid: uuid, + state: state, + opState: 'IN_SERVICE', + lastHeartbeat: 1728280581608, + storageUsed: 4096, + storageTotal: 125645656770, + storageCommitted: 0, + storageRemaining: 114225606656, + pipelines: [ + { + "pipelineID": "0f9f7bc0-505e-4428-b148-dd7eac2e8ac2", + "replicationType": "RATIS", + "replicationFactor": "THREE", + "leaderNode": "ozone-datanode-3.ozone_default" + }, + { + "pipelineID": "2c23e76e-3f18-4b86-9541-e48bdc152fda", + "replicationType": "RATIS", + "replicationFactor": "ONE", + "leaderNode": "ozone-datanode-1.ozone_default" + } + ], + containers: 8192, + openContainers: 8182, + leaderCount: 2, + version: '0.6.0-SNAPSHOT', + setupTime: 1728280539733, + revision: '3f9953c0fbbd2175ee83e8f0b4927e45e9c10ac1', + buildDate: '2024-10-06T16:41Z', + networkLocation: '/default-rack' + } +} + +describe('DatanodesTable Component', () => { + // Start and stop MSW server before and after all tests + beforeAll(() => datanodeServer.listen()); + afterEach(() => datanodeServer.resetHandlers()); + afterAll(() => datanodeServer.close()); + + test('renders table with data', async () => { + render(); + + // Wait for the table to render + waitForDNTable(); + + expect(screen.getByTestId('dn-table')).toBeInTheDocument(); + }); + + test('filters data based on search term', async () => { + render( + + ); + + // Only the matching datanode should be visible + expect(screen.getByText('ozone-datanode-1')).toBeInTheDocument(); + expect(screen.queryByText('ozone-datanode-2')).not.toBeInTheDocument(); + }); + + test('handles row selection', async () => { + render( + + ); + + // The first checkbox is for the table header "Select All" checkbox -> idx 0 + // Second checkbox is for the healthy DN -> idx 1 + // Third checkbox is the active one for Dead DN -> idx 2 + const checkbox = document.querySelectorAll('input.ant-checkbox-input')[2]; + fireEvent.click(checkbox); + + expect(defaultProps.handleSelectionChange).toHaveBeenCalledWith([2]); + }); + + test('disables selection for non-DEAD nodes', async () => { + render( + + ); + + // Check disabled and enabled rows + const checkboxes = document.querySelectorAll('input.ant-checkbox-input'); + expect(checkboxes[1]).toBeDisabled(); // HEALTHY node + expect(checkboxes[2]).not.toBeDisabled(); // DEAD node + }); +}); diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/locators/locators.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/locators/locators.ts index 23fbc768703..83b2bc50774 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/locators/locators.ts +++ 
b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/locators/locators.ts @@ -37,8 +37,15 @@ export const overviewLocators = { } export const datanodeLocators = { - 'datanodeContainer': 'datanodes-container', - 'datanodeMultiSelect': 'datanodes-multiselect' + 'datanodeMultiSelect': 'dn-multi-select', + 'datanodeSearchcDropdown': 'search-dropdown', + 'datanodeSearchInput': 'search-input', + 'datanodeRemoveButton': 'dn-remove-btn', + 'datanodeRemoveModal': 'dn-remove-modal', + 'datanodeTable': 'dn-table', + 'datanodeRowRegex': /dntable-/, + datanodeSearchOption: (label: string) => `search-opt-${label}`, + datanodeTableRow: (uuid: string) => `dntable-${uuid}` } export const autoReloadPanelLocators = { @@ -46,3 +53,5 @@ export const autoReloadPanelLocators = { 'refreshButton': 'autoreload-panel-refresh', 'toggleSwitch': 'autoreload-panel-switch' } + +export const searchInputLocator = 'search-input'; \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/mocks/datanodeMocks/datanodeResponseMocks.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/mocks/datanodeMocks/datanodeResponseMocks.ts new file mode 100644 index 00000000000..887d0b4a27a --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/mocks/datanodeMocks/datanodeResponseMocks.ts @@ -0,0 +1,212 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +export const DatanodeResponse = { + "totalCount": 5, + "datanodes": [ + { + "uuid": "1", + "hostname": "ozone-datanode-1.ozone_default", + "state": "HEALTHY", + "opState": "IN_SERVICE", + "lastHeartbeat": 1728280581608, + "storageReport": { + "capacity": 125645656770, + "used": 4096, + "remaining": 114225606656, + "committed": 0 + }, + "pipelines": [ + { + "pipelineID": "0f9f7bc0-505e-4428-b148-dd7eac2e8ac2", + "replicationType": "RATIS", + "replicationFactor": "THREE", + "leaderNode": "ozone-datanode-3.ozone_default" + }, + { + "pipelineID": "2c23e76e-3f18-4b86-9541-e48bdc152fda", + "replicationType": "RATIS", + "replicationFactor": "ONE", + "leaderNode": "ozone-datanode-1.ozone_default" + } + ], + "leaderCount": 1, + "version": "2.0.0-SNAPSHOT", + "setupTime": 1728280539733, + "revision": "3f9953c0fbbd2175ee83e8f0b4927e45e9c10ac1", + "buildDate": "2024-10-06T16:41Z", + "layoutVersion": 8, + "networkLocation": "/default-rack" + }, + { + "uuid": "3", + "hostname": "ozone-datanode-3.ozone_default", + "state": "DEAD", + "opState": "IN_SERVICE", + "lastHeartbeat": 1728280582060, + "storageReport": { + "capacity": 125645656770, + "used": 4096, + "remaining": 114225623040, + "committed": 0 + }, + "pipelines": [ + { + "pipelineID": "9c5bbf5e-62da-4d4a-a6ad-cb63d9f6aa6f", + "replicationType": "RATIS", + "replicationFactor": "ONE", + "leaderNode": "ozone-datanode-3.ozone_default" + }, + { + "pipelineID": "0f9f7bc0-505e-4428-b148-dd7eac2e8ac2", + "replicationType": "RATIS", + "replicationFactor": "THREE", + "leaderNode": "ozone-datanode-3.ozone_default" + } + ], + "leaderCount": 2, + "version": "1.5.0-SNAPSHOT", + "setupTime": 1728280539726, + "revision": "3f9953c0fbbd2175ee83e8f0b4927e45e9c10ac1", + "buildDate": "2024-10-06T16:41Z", + "layoutVersion": 8, + "networkLocation": "/default-rack" + }, + { + "uuid": "4", + "hostname": "ozone-datanode-4.ozone_default", + "state": "HEALTHY", + "opState": "DECOMMISSIONING", + "lastHeartbeat": 1728280581614, + "storageReport": { + "capacity": 125645656770, + "used": 4096, + "remaining": 114225541120, + "committed": 0 + }, + "pipelines": [ + { + "pipelineID": "4092a584-5c2f-40c6-98e5-ce9a9246e65d", + "replicationType": "RATIS", + "replicationFactor": "ONE", + "leaderNode": "ozone-datanode-4.ozone_default" + } + ], + "leaderCount": 1, + "version": "2.0.0-SNAPSHOT", + "setupTime": 1728280540325, + "revision": "3f9953c0fbbd2175ee83e8f0b4927e45e9c10ac1", + "buildDate": "2024-10-06T16:41Z", + "layoutVersion": 8, + "networkLocation": "/default-rack" + }, + { + "uuid": "2", + "hostname": "ozone-datanode-2.ozone_default", + "state": "STALE", + "opState": "IN_SERVICE", + "lastHeartbeat": 1728280581594, + "storageReport": { + "capacity": 125645656770, + "used": 4096, + "remaining": 114225573888, + "committed": 0 + }, + "pipelines": [ + { + "pipelineID": "20a874e4-790b-4312-8fc2-ca53846dba0f", + "replicationType": "RATIS", + "replicationFactor": "ONE", + "leaderNode": "ozone-datanode-2.ozone_default" + } + ], + "leaderCount": 1, + "version": "2.0.0-SNAPSHOT", + "setupTime": 1728280539745, + "revision": "3f9953c0fbbd2175ee83e8f0b4927e45e9c10ac1", + "buildDate": "2024-10-06T16:41Z", + "layoutVersion": 8, + "networkLocation": "/default-rack" + }, + { + "uuid": "5", + "hostname": "ozone-DataNode-5.ozone_default", + "state": "DEAD", + "opState": "DECOMMISSIONED", + "lastHeartbeat": 1728280582055, + "storageReport": { + "capacity": 125645656770, + "used": 4096, + "remaining": 114225614848, + "committed": 0 + }, + "pipelines": [ + { + "pipelineID": 
"0f9f7bc0-505e-4428-b148-dd7eac2e8ac2", + "replicationType": "RATIS", + "replicationFactor": "THREE", + "leaderNode": "ozone-datanode-3.ozone_default" + }, + { + "pipelineID": "67c973a0-722a-403a-8893-b8a5faaed7f9", + "replicationType": "RATIS", + "replicationFactor": "ONE", + "leaderNode": "ozone-datanode-5.ozone_default" + } + ], + "leaderCount": 1, + "version": "2.0.0-SNAPSHOT", + "setupTime": 1728280539866, + "revision": "3f9953c0fbbd2175ee83e8f0b4927e45e9c10ac1", + "buildDate": "2024-10-06T16:41Z", + "layoutVersion": 8, + "networkLocation": "/default-rack" + } + ] +} + +export const NullDatanodeResponse = { + "totalCount": null, + "datanodes": [ + { + "uuid": null, + "hostname": null, + "state": null, + "opState": null, + "lastHeartbeat": null, + "storageReport": null, + "pipelines": null, + "leaderCount": null, + "version": null, + "setupTime": null, + "revision": null, + "buildDate": null, + "layoutVersion": null, + "networkLocation": null + } + ] +} + +export const NullDatanodes = { + "totalCount": null, + "datanodes": null +} + +export const DecommissionInfo = { + "DatanodesDecommissionInfo": [] +} \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/mocks/datanodeMocks/datanodeServer.ts b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/mocks/datanodeMocks/datanodeServer.ts new file mode 100644 index 00000000000..a7b11f42974 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/mocks/datanodeMocks/datanodeServer.ts @@ -0,0 +1,72 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +import { setupServer } from "msw/node"; +import { rest } from "msw"; + +import * as mockResponses from "./datanodeResponseMocks"; + +const handlers = [ + rest.get("api/v1/datanodes", (req, res, ctx) => { + return res( + ctx.status(200), + ctx.json(mockResponses.DatanodeResponse) + ); + }), + rest.get("api/v1/datanodes/decommission/info", (req, res, ctx) => { + return res( + ctx.status(200), + ctx.json(mockResponses.DecommissionInfo) + ); + }) +]; + +const nullDatanodeResponseHandler = [ + rest.get("api/v1/datanodes", (req, res, ctx) => { + return res( + ctx.status(200), + ctx.json(mockResponses.NullDatanodeResponse) + ); + }), + rest.get("api/v1/datanodes/decommission/info", (req, res, ctx) => { + return res( + ctx.status(200), + ctx.json(mockResponses.DecommissionInfo) + ); + }) +] + +const nullDatanodeHandler = [ + rest.get("api/v1/datanodes", (req, res, ctx) => { + return res( + ctx.status(200), + ctx.json(mockResponses.NullDatanodes) + ); + }), + rest.get("api/v1/datanodes/decommission/info", (req, res, ctx) => { + return res( + ctx.status(200), + ctx.json(mockResponses.DecommissionInfo) + ); + }) +] + +//This will configure a request mocking server using MSW +export const datanodeServer = setupServer(...handlers); +export const nullDatanodeResponseServer = setupServer(...nullDatanodeResponseHandler); +export const nullDatanodeServer = setupServer(...nullDatanodeHandler); diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/utils/datanodes.utils.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/utils/datanodes.utils.tsx new file mode 100644 index 00000000000..721a09b8952 --- /dev/null +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/__tests__/utils/datanodes.utils.tsx @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import { waitFor, screen } from "@testing-library/react"; + +export const waitForDNTable = async () => { + return waitFor(() => screen.getByTestId('dn-table')); +} \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/search/search.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/search/search.tsx index d320fd659a6..ed09b218171 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/search/search.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/search/search.tsx @@ -55,7 +55,8 @@ const Search: React.FC = ({ suffixIcon={(searchOptions.length > 1) ? 
: null} defaultValue={searchColumn} options={searchOptions} - onChange={onChange} />) + onChange={onChange} + data-testid='search-dropdown'/>) : null return ( @@ -69,7 +70,8 @@ const Search: React.FC = ({ size='middle' style={{ maxWidth: 400 - }}/> + }} + data-testid='search-input'/> ) } diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/datanodesTable.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/datanodesTable.tsx index 494d898509b..17e6048f7e3 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/datanodesTable.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/tables/datanodesTable.tsx @@ -16,7 +16,7 @@ * limitations under the License. */ -import React from 'react'; +import React, { HTMLAttributes } from 'react'; import moment from 'moment'; import { Popover, Tooltip } from 'antd' import { @@ -306,7 +306,11 @@ const DatanodesTable: React.FC = ({ rowKey='uuid' pagination={paginationConfig} scroll={{ x: 'max-content', scrollToFirstRowOnChange: true }} - locale={{ filterTitle: '' }} /> + locale={{ filterTitle: '' }} + onRow={(record: Datanode) => ({ + 'data-testid': `dntable-${record.uuid}` + } as HTMLAttributes)} + data-testid='dn-table' />
); } diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/datanodes/datanodes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/datanodes/datanodes.tsx index fe22d08dafd..33dd661d97b 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/datanodes/datanodes.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/datanodes/datanodes.tsx @@ -246,7 +246,8 @@ const Datanodes: React.FC<{}> = () => { onChange={handleColumnChange} onTagClose={() => { }} fixedColumn='hostname' - columnLength={columnOptions.length} /> + columnLength={columnOptions.length} + data-testid='dn-multi-select' /> {selectedRows.length > 0 && } @@ -271,7 +273,7 @@ const Datanodes: React.FC<{}> = () => { onChange={(value) => { setSearchTerm(''); setSearchColumn(value as 'hostname' | 'uuid' | 'version' | 'revision') - }} /> + }}/> = () => { searchColumn={searchColumn} searchTerm={debouncedSearch} handleSelectionChange={handleSelectionChange} - decommissionUuids={decommissionUuids} /> + decommissionUuids={decommissionUuids}/> = () => { margin: '0px 0px 5px 0px', fontSize: '16px', fontWeight: 'bold' - }}> + }} + data-testid='dn-remove-modal'> Stop Tracking Datanode From 9b5cedd0f9141a653f95de9b9f381c819441ade5 Mon Sep 17 00:00:00 2001 From: Nandakumar Vadivelu Date: Thu, 2 Jan 2025 22:40:50 +0530 Subject: [PATCH 021/168] HDDS-11951. Enable sortpom in hadoop-hdds sub-modules : annotations, client, common & config. (#7631) --- hadoop-hdds/annotations/pom.xml | 15 +- hadoop-hdds/client/pom.xml | 63 +++--- hadoop-hdds/common/pom.xml | 214 +++++++++--------- hadoop-hdds/config/pom.xml | 12 +- hadoop-hdds/container-service/pom.xml | 1 + hadoop-hdds/crypto-api/pom.xml | 1 + hadoop-hdds/crypto-default/pom.xml | 1 + hadoop-hdds/docs/pom.xml | 1 + hadoop-hdds/erasurecode/pom.xml | 1 + hadoop-hdds/framework/pom.xml | 1 + hadoop-hdds/hadoop-dependency-client/pom.xml | 1 + hadoop-hdds/hadoop-dependency-server/pom.xml | 1 + hadoop-hdds/hadoop-dependency-test/pom.xml | 1 + hadoop-hdds/interface-admin/pom.xml | 1 + hadoop-hdds/interface-client/pom.xml | 1 + hadoop-hdds/interface-server/pom.xml | 1 + hadoop-hdds/managed-rocksdb/pom.xml | 1 + hadoop-hdds/pom.xml | 98 ++++---- hadoop-hdds/rocks-native/pom.xml | 4 + hadoop-hdds/rocksdb-checkpoint-differ/pom.xml | 1 + hadoop-hdds/server-scm/pom.xml | 1 + hadoop-hdds/test-utils/pom.xml | 2 +- hadoop-hdds/tools/pom.xml | 1 + 23 files changed, 208 insertions(+), 216 deletions(-) diff --git a/hadoop-hdds/annotations/pom.xml b/hadoop-hdds/annotations/pom.xml index 0a961087040..e9a628f9a7e 100644 --- a/hadoop-hdds/annotations/pom.xml +++ b/hadoop-hdds/annotations/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -25,14 +22,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-annotation-processing 2.0.0-SNAPSHOT - Apache Ozone annotation processing tools for validating custom - annotations at compile time. - - Apache Ozone Annotation Processing jar + Apache Ozone Annotation Processing + Apache Ozone annotation processing tools for validating custom + annotations at compile time. 
- true + true + diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml index 333b960fc24..e32457ec799 100644 --- a/hadoop-hdds/client/pom.xml +++ b/hadoop-hdds/client/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -25,14 +22,35 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-client 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Client Library - Apache Ozone HDDS Client jar - - - + Apache Ozone HDDS Client + Apache Ozone Distributed Data Store Client Library + + + com.google.guava + guava + + + + io.opentracing + opentracing-api + + + io.opentracing + opentracing-util + + + + jakarta.annotation + jakarta.annotation-api + + + + org.apache.commons + commons-lang3 + org.apache.ozone hdds-common @@ -49,11 +67,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds-interface-client - - - org.apache.commons - commons-lang3 - org.apache.ratis ratis-client @@ -80,25 +93,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> slf4j-api - - com.google.guava - guava - - - - io.opentracing - opentracing-api - - - io.opentracing - opentracing-util - - - - jakarta.annotation - jakarta.annotation-api - - org.apache.ozone @@ -148,7 +142,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> maven-enforcer-plugin - ban-annotations + ban-annotations + diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index f2576f7cf08..e1cf736f6d3 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -24,45 +21,45 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-common 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Common - Apache Ozone HDDS Common jar - - - + Apache Ozone HDDS Common + Apache Ozone Distributed Data Store Common - org.apache.ozone - hdds-hadoop-dependency-client + com.fasterxml.jackson.core + jackson-annotations - info.picocli - picocli + com.fasterxml.jackson.core + jackson-core + + + com.fasterxml.jackson.core + jackson-databind + + + com.fasterxml.jackson.datatype + jackson-datatype-jsr310 com.github.stephenc.jcip jcip-annotations - com.google.protobuf - protobuf-java - compile + com.google.errorprone + error_prone_annotations + true com.google.guava guava compile - - - org.glassfish.jaxb - jaxb-runtime - provided - - org.apache.commons - commons-lang3 + com.google.protobuf + protobuf-java + compile commons-collections @@ -73,33 +70,45 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> commons-io - com.fasterxml.jackson.core - jackson-annotations + commons-validator + commons-validator - com.fasterxml.jackson.core - jackson-core + info.picocli + picocli + - com.fasterxml.jackson.core - jackson-databind + io.dropwizard.metrics + metrics-core - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 + io.grpc + grpc-api + ${io.grpc.version} + compile + + + com.google.code.findbugs + jsr305 + + - org.apache.ozone - hdds-annotation-processing + io.jaegertracing + jaeger-client - org.apache.ozone - hdds-config + io.jaegertracing + jaeger-core - - javax.annotation - javax.annotation-api + io.opentracing + opentracing-api + + + io.opentracing + opentracing-util jakarta.annotation @@ -111,17 +120,49 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - io.dropwizard.metrics - metrics-core + javax.annotation 
+ javax.annotation-api + + + org.apache.commons + commons-lang3 + + + org.apache.ozone + hdds-annotation-processing + + + org.apache.ozone + hdds-config + + + org.apache.ozone + hdds-hadoop-dependency-client + + + org.apache.ozone + hdds-interface-admin + + + org.apache.ozone + hdds-interface-client + + + org.apache.ratis + ratis-client - ratis-server-api org.apache.ratis + ratis-common - ratis-metrics-dropwizard3 org.apache.ratis + ratis-grpc + + + org.apache.ratis + ratis-metrics-dropwizard3 io.dropwizard.metrics @@ -129,37 +170,23 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - org.apache.ratis - ratis-common - - ratis-netty - org.apache.ratis - - - ratis-grpc - org.apache.ratis org.apache.ratis ratis-proto + org.apache.ratis - ratis-client + ratis-server-api org.apache.ratis ratis-thirdparty-misc - - com.google.errorprone - error_prone_annotations - true - - org.bouncycastle @@ -174,34 +201,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.bouncycastle bcutil-jdk18on - - commons-validator - commons-validator - - - io.jaegertracing - jaeger-client - - - io.jaegertracing - jaeger-core - org.jetbrains.kotlin kotlin-stdlib - - io.opentracing - opentracing-api - - - io.opentracing - opentracing-util - - - org.yaml - snakeyaml - org.reflections reflections @@ -211,24 +214,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> slf4j-api - org.apache.ozone - hdds-interface-client - - - org.apache.ozone - hdds-interface-admin + org.yaml + snakeyaml + - io.grpc - grpc-api - ${io.grpc.version} - compile - - - com.google.code.findbugs - jsr305 - - + org.glassfish.jaxb + jaxb-runtime + provided @@ -258,27 +251,20 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + false ${basedir}/src/main/resources hdds-version-info.properties - false + true ${basedir}/src/main/resources hdds-version-info.properties - true - - - kr.motd.maven - os-maven-plugin - ${os-maven-plugin.version} - - org.apache.hadoop @@ -286,10 +272,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> version-info - generate-resources version-info + generate-resources ${basedir}/../ @@ -330,7 +316,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> maven-enforcer-plugin - ban-annotations + ban-annotations + @@ -347,5 +334,12 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + + kr.motd.maven + os-maven-plugin + ${os-maven-plugin.version} + + diff --git a/hadoop-hdds/config/pom.xml b/hadoop-hdds/config/pom.xml index 60c63475ae3..5809828eccf 100644 --- a/hadoop-hdds/config/pom.xml +++ b/hadoop-hdds/config/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -24,12 +21,9 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-config 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Config Tools - Apache Ozone HDDS Config jar - - - + Apache Ozone HDDS Config + Apache Ozone Distributed Data Store Config Tools diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml index c21ca8203b5..29a8f92fc83 100644 --- a/hadoop-hdds/container-service/pom.xml +++ b/hadoop-hdds/container-service/pom.xml @@ -28,6 +28,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> Apache Ozone HDDS Container Service jar + true diff --git a/hadoop-hdds/crypto-api/pom.xml b/hadoop-hdds/crypto-api/pom.xml index ca54b3de9f2..3a283842781 100644 --- a/hadoop-hdds/crypto-api/pom.xml +++ b/hadoop-hdds/crypto-api/pom.xml @@ -29,6 +29,7 @@ true + true diff --git a/hadoop-hdds/crypto-default/pom.xml b/hadoop-hdds/crypto-default/pom.xml index 6024c3e2ddf..f9653145e44 100644 --- a/hadoop-hdds/crypto-default/pom.xml +++ b/hadoop-hdds/crypto-default/pom.xml @@ -29,6 +29,7 @@ true + true diff --git a/hadoop-hdds/docs/pom.xml b/hadoop-hdds/docs/pom.xml index 7f4ffbb8a70..e8261c39cb1 100644 --- a/hadoop-hdds/docs/pom.xml +++ b/hadoop-hdds/docs/pom.xml @@ -31,6 +31,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> true false + true diff --git a/hadoop-hdds/erasurecode/pom.xml b/hadoop-hdds/erasurecode/pom.xml index b540d1c68ea..5f34b7b7292 100644 --- a/hadoop-hdds/erasurecode/pom.xml +++ b/hadoop-hdds/erasurecode/pom.xml @@ -30,6 +30,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar + true diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml index 37d41cde390..af233cbae9a 100644 --- a/hadoop-hdds/framework/pom.xml +++ b/hadoop-hdds/framework/pom.xml @@ -30,6 +30,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar + true diff --git a/hadoop-hdds/hadoop-dependency-client/pom.xml b/hadoop-hdds/hadoop-dependency-client/pom.xml index 7676f1f45f1..5bb9b138687 100644 --- a/hadoop-hdds/hadoop-dependency-client/pom.xml +++ b/hadoop-hdds/hadoop-dependency-client/pom.xml @@ -31,6 +31,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> true + true diff --git a/hadoop-hdds/hadoop-dependency-server/pom.xml b/hadoop-hdds/hadoop-dependency-server/pom.xml index 6be31002b09..05923dab2cd 100644 --- a/hadoop-hdds/hadoop-dependency-server/pom.xml +++ b/hadoop-hdds/hadoop-dependency-server/pom.xml @@ -31,6 +31,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> true + true diff --git a/hadoop-hdds/hadoop-dependency-test/pom.xml b/hadoop-hdds/hadoop-dependency-test/pom.xml index f04e45a0340..5df30c7dfdd 100644 --- a/hadoop-hdds/hadoop-dependency-test/pom.xml +++ b/hadoop-hdds/hadoop-dependency-test/pom.xml @@ -31,6 +31,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> true + true diff --git a/hadoop-hdds/interface-admin/pom.xml b/hadoop-hdds/interface-admin/pom.xml index f3197dc8965..94122423085 100644 --- a/hadoop-hdds/interface-admin/pom.xml +++ b/hadoop-hdds/interface-admin/pom.xml @@ -32,6 +32,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> true true + true diff --git a/hadoop-hdds/interface-client/pom.xml b/hadoop-hdds/interface-client/pom.xml index 1a61dfa930e..b373d11d507 100644 --- a/hadoop-hdds/interface-client/pom.xml +++ b/hadoop-hdds/interface-client/pom.xml @@ -32,6 +32,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> true true + true diff --git a/hadoop-hdds/interface-server/pom.xml b/hadoop-hdds/interface-server/pom.xml index 47bde5a0bc7..539a0a5430e 100644 --- 
a/hadoop-hdds/interface-server/pom.xml +++ b/hadoop-hdds/interface-server/pom.xml @@ -32,6 +32,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> true true + true diff --git a/hadoop-hdds/managed-rocksdb/pom.xml b/hadoop-hdds/managed-rocksdb/pom.xml index 40ad920647a..144d482be12 100644 --- a/hadoop-hdds/managed-rocksdb/pom.xml +++ b/hadoop-hdds/managed-rocksdb/pom.xml @@ -29,6 +29,7 @@ true + true diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml index b3aa6ff6952..7e4fbd32db3 100644 --- a/hadoop-hdds/pom.xml +++ b/hadoop-hdds/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -25,157 +22,144 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Project - Apache Ozone HDDS pom - - - true - + Apache Ozone HDDS + Apache Ozone Distributed Data Store Project annotations - hadoop-dependency-client - hadoop-dependency-test - hadoop-dependency-server - interface-client - interface-admin - interface-server client common + config + container-service crypto-api crypto-default + docs + erasurecode framework + hadoop-dependency-client + hadoop-dependency-server + hadoop-dependency-test + interface-admin + interface-client + interface-server managed-rocksdb + rocks-native rocksdb-checkpoint-differ - container-service server-scm - tools - docs - config test-utils - erasurecode - rocks-native + tools - org.apache.ozone - hdds-common + hdds-annotation-processing ${hdds.version} org.apache.ozone - hdds-managed-rocksdb + hdds-client ${hdds.version} org.apache.ozone - hdds-hadoop-dependency-client + hdds-common ${hdds.version} org.apache.ozone - hdds-hadoop-dependency-server + hdds-config ${hdds.version} org.apache.ozone - hdds-hadoop-dependency-test + hdds-container-service ${hdds.version} - test org.apache.ozone - hdds-interface-server + hdds-docs ${hdds.version} org.apache.ozone - hdds-interface-client + hdds-erasurecode ${hdds.version} org.apache.ozone - hdds-interface-admin + hdds-hadoop-dependency-client ${hdds.version} org.apache.ozone - hdds-erasurecode + hdds-hadoop-dependency-server ${hdds.version} org.apache.ozone - hdds-client + hdds-interface-admin ${hdds.version} org.apache.ozone - hdds-tools + hdds-interface-client ${hdds.version} org.apache.ozone - hdds-server-framework + hdds-interface-server ${hdds.version} - - org.apache.ozone - rocksdb-checkpoint-differ - ${hdds.version} - - org.apache.ozone - hdds-server-scm + hdds-managed-rocksdb ${hdds.version} org.apache.ozone - hdds-container-service - ${hdds.version} + hdds-rocks-native + ${hdds.rocks.native.version} org.apache.ozone - hdds-docs + hdds-server-framework ${hdds.version} org.apache.ozone - hdds-config + hdds-server-scm ${hdds.version} org.apache.ozone - hdds-annotation-processing + hdds-tools ${hdds.version} org.apache.ozone - hdds-test-utils + rocksdb-checkpoint-differ ${hdds.version} - test @@ -194,18 +178,26 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test + + org.apache.ozone + hdds-hadoop-dependency-test + ${hdds.version} + test + + org.apache.ozone hdds-server-scm - test-jar ${hdds.version} + test-jar test org.apache.ozone - hdds-rocks-native - ${hdds.rocks.native.version} + hdds-test-utils + ${hdds.version} + test diff --git a/hadoop-hdds/rocks-native/pom.xml b/hadoop-hdds/rocks-native/pom.xml index 4c751e0b10a..c01a4f16651 100644 --- a/hadoop-hdds/rocks-native/pom.xml +++ b/hadoop-hdds/rocks-native/pom.xml @@ -24,6 +24,10 @@ 
Apache Ozone HDDS RocksDB Tools hdds-rocks-native + + true + + org.apache.ozone diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml index c4284a4e85d..fc8cf910613 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml +++ b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml @@ -30,6 +30,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar + true diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index 4c2e40c3759..a1da1f4e68c 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -30,6 +30,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> false + true diff --git a/hadoop-hdds/test-utils/pom.xml b/hadoop-hdds/test-utils/pom.xml index 6ff87083c03..903f01c8269 100644 --- a/hadoop-hdds/test-utils/pom.xml +++ b/hadoop-hdds/test-utils/pom.xml @@ -29,7 +29,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar - + true diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml index 5b77f394c96..8af514f65b7 100644 --- a/hadoop-hdds/tools/pom.xml +++ b/hadoop-hdds/tools/pom.xml @@ -30,6 +30,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar + true From c282d91b781be3bb4162ccfefc9d2d515aa19b17 Mon Sep 17 00:00:00 2001 From: Chia-Chuan Yu Date: Fri, 3 Jan 2025 03:50:47 +0800 Subject: [PATCH 022/168] HDDS-8175. getFileChecksum() throws exception in debug mode. (#7611) --- .../checksum/BaseFileChecksumHelper.java | 35 +++++++++++++- .../client/checksum/ECFileChecksumHelper.java | 29 ------------ .../ReplicatedFileChecksumHelper.java | 46 ------------------- 3 files changed, 33 insertions(+), 77 deletions(-) diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java index 76baefd71dd..6181ac55fdc 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/BaseFileChecksumHelper.java @@ -156,8 +156,6 @@ protected void setChecksumType(ContainerProtos.ChecksumType type) { protected abstract AbstractBlockChecksumComputer getBlockChecksumComputer(List chunkInfos, long blockLength); - protected abstract String populateBlockChecksumBuf(ByteBuffer blockChecksumByteBuffer) throws IOException; - protected abstract List getChunkInfos( OmKeyLocationInfo keyLocationInfo) throws IOException; @@ -167,6 +165,39 @@ protected ByteBuffer getBlockChecksumFromChunkChecksums(AbstractBlockChecksumCom return blockChecksumComputer.getOutByteBuffer(); } + /** + * Parses out the raw blockChecksum bytes from {@code checksumData} byte + * buffer according to the blockChecksumType and populates the cumulative + * blockChecksumBuf with it. + * + * @return a debug-string representation of the parsed checksum if + * debug is enabled, otherwise null. 
+ */ + + protected String populateBlockChecksumBuf(ByteBuffer blockChecksumByteBuffer) throws IOException { + String blockChecksumForDebug = null; + switch (getCombineMode()) { + case MD5MD5CRC: + final MD5Hash md5 = new MD5Hash(blockChecksumByteBuffer.array()); + md5.write(getBlockChecksumBuf()); + if (LOG.isDebugEnabled()) { + blockChecksumForDebug = md5.toString(); + } + break; + case COMPOSITE_CRC: + byte[] crcBytes = blockChecksumByteBuffer.array(); + if (LOG.isDebugEnabled()) { + blockChecksumForDebug = CrcUtil.toMultiCrcString(crcBytes); + } + getBlockChecksumBuf().write(crcBytes); + break; + default: + throw new IOException( + "Unknown combine mode: " + getCombineMode()); + } + return blockChecksumForDebug; + }; + /** * Compute block checksums block by block and append the raw bytes of the * block checksums into getBlockChecksumBuf(). diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECFileChecksumHelper.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECFileChecksumHelper.java index db36b9837ad..8f9daed6c0e 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECFileChecksumHelper.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ECFileChecksumHelper.java @@ -28,7 +28,6 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; -import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; @@ -37,7 +36,6 @@ import org.apache.hadoop.security.token.Token; import java.io.IOException; -import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; @@ -60,33 +58,6 @@ protected AbstractBlockChecksumComputer getBlockChecksumComputer(List getChunkInfos(OmKeyLocationInfo keyLocationInfo) throws IOException { diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java index 9c2df0fdb47..27a08617618 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/checksum/ReplicatedFileChecksumHelper.java @@ -27,7 +27,6 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier; -import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; @@ -36,7 +35,6 @@ import org.apache.hadoop.security.token.Token; import java.io.IOException; -import java.nio.ByteBuffer; import java.util.List; /** @@ -107,48 +105,4 @@ protected List getChunkInfos( return chunks; } - - /** - * Parses out the raw blockChecksum bytes from {@code checksumData} byte - * buffer according to the blockChecksumType and populates the cumulative - * blockChecksumBuf with it. - * - * @return a debug-string representation of the parsed checksum if - * debug is enabled, otherwise null. 
- */ - @Override - protected String populateBlockChecksumBuf(ByteBuffer checksumData) - throws IOException { - String blockChecksumForDebug = null; - switch (getCombineMode()) { - case MD5MD5CRC: - //read md5 - final MD5Hash md5 = new MD5Hash(checksumData.array()); - md5.write(getBlockChecksumBuf()); - if (LOG.isDebugEnabled()) { - blockChecksumForDebug = md5.toString(); - } - break; - case COMPOSITE_CRC: - // TODO: abort if chunk checksum type is not CRC32/CRC32C - //BlockChecksumType returnedType = PBHelperClient.convert( - // checksumData.getBlockChecksumOptions().getBlockChecksumType()); - /*if (returnedType != BlockChecksumType.COMPOSITE_CRC) { - throw new IOException(String.format( - "Unexpected blockChecksumType '%s', expecting COMPOSITE_CRC", - returnedType)); - }*/ - byte[] crcBytes = checksumData.array(); - if (LOG.isDebugEnabled()) { - blockChecksumForDebug = CrcUtil.toSingleCrcString(crcBytes); - } - getBlockChecksumBuf().write(crcBytes); - break; - default: - throw new IOException( - "Unknown combine mode: " + getCombineMode()); - } - - return blockChecksumForDebug; - } } From 4121aa21f698b839ff5dde8f6018f7756ea736cd Mon Sep 17 00:00:00 2001 From: Chia-Chuan Yu Date: Fri, 3 Jan 2025 04:07:27 +0800 Subject: [PATCH 023/168] HDDS-11726. Add leader readiness state to OM UI (#7628) --- .../common/src/main/java/org/apache/hadoop/ozone/OmUtils.java | 3 ++- .../src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java | 3 ++- .../src/main/resources/webapps/ozoneManager/om-overview.html | 3 +++ 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java index 8d24f2de155..cd7c6ff6186 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/OmUtils.java @@ -923,7 +923,7 @@ public static boolean isBucketSnapshotIndicator(String key) { } public static List> format( - List nodes, int port, String leaderId) { + List nodes, int port, String leaderId, String leaderReadiness) { List> omInfoList = new ArrayList<>(); // Ensuring OM's are printed in correct order List omNodes = nodes.stream() @@ -940,6 +940,7 @@ public static List> format( omInfo.add(info.getOmRoleInfo().getNodeId()); omInfo.add(String.valueOf(port)); omInfo.add(role); + omInfo.add(leaderReadiness); omInfoList.add(omInfo); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 2ccc16cc285..6720f314748 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -3098,6 +3098,7 @@ public List> getRatisRoles() { if (null == omRatisServer) { return getRatisRolesException("Server is shutting down"); } + String leaderReadiness = omRatisServer.checkLeaderStatus().name(); final RaftPeerId leaderId = omRatisServer.getLeaderId(); if (leaderId == null) { LOG.error("No leader found"); @@ -3111,7 +3112,7 @@ public List> getRatisRoles() { LOG.error("Failed to getServiceList", e); return getRatisRolesException("IO-Exception Occurred, " + e.getMessage()); } - return OmUtils.format(serviceList, port, leaderId.toString()); + return OmUtils.format(serviceList, port, leaderId.toString(), leaderReadiness); } /** diff --git 
a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/om-overview.html b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/om-overview.html index 7a1aa67d822..56127481989 100644 --- a/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/om-overview.html +++ b/hadoop-ozone/ozone-manager/src/main/resources/webapps/ozoneManager/om-overview.html @@ -55,6 +55,7 @@
@@ -63,12 +64,14 @@
{{roles[0]}} {{roles[1]}} {{roles[2]}} {{roles[3]}} + {{roles[4]}} From 47c1b3584b6e24cbe5db50d73814c7ceccc0d513 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 3 Jan 2025 06:34:43 +0100 Subject: [PATCH 024/168] HDDS-12001. Create parent class for repair tools (#7633) --- .../ozone/repair/RecoverSCMCertificate.java | 36 ++++------- .../hadoop/ozone/repair/RepairTool.java | 62 +++++++++++++++++++ .../ozone/repair/ldb/SnapshotRepair.java | 52 +++++++--------- .../repair/ldb/TransactionInfoRepair.java | 31 +++------- .../hadoop/ozone/repair/om/FSORepairCLI.java | 15 ++--- .../ozone/repair/quota/QuotaTrigger.java | 25 ++++---- 6 files changed, 120 insertions(+), 101 deletions(-) create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RecoverSCMCertificate.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RecoverSCMCertificate.java index e6462aa3f85..b85ccfb1e8b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RecoverSCMCertificate.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RecoverSCMCertificate.java @@ -38,7 +38,6 @@ import picocli.CommandLine; import java.io.IOException; -import java.io.PrintWriter; import java.math.BigInteger; import java.net.InetAddress; import java.nio.charset.StandardCharsets; @@ -52,7 +51,6 @@ import java.util.ArrayList; import java.util.Optional; import java.util.Arrays; -import java.util.concurrent.Callable; import static org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition.VALID_SCM_CERTS; import static org.apache.hadoop.hdds.security.x509.certificate.client.DefaultCertificateClient.CERT_FILE_NAME_FORMAT; @@ -68,7 +66,7 @@ name = "cert-recover", description = "Recover Deleted SCM Certificate from RocksDB") @MetaInfServices(RepairSubcommand.class) -public class RecoverSCMCertificate implements Callable, RepairSubcommand { +public class RecoverSCMCertificate extends RepairTool implements RepairSubcommand { @CommandLine.Option(names = {"--db"}, required = true, @@ -78,19 +76,8 @@ public class RecoverSCMCertificate implements Callable, RepairSubcommand { @CommandLine.ParentCommand private OzoneRepair parent; - @CommandLine.Spec - private CommandLine.Model.CommandSpec spec; - - private PrintWriter err() { - return spec.commandLine().getErr(); - } - - private PrintWriter out() { - return spec.commandLine().getOut(); - } - @Override - public Void call() throws Exception { + public void execute() throws Exception { dbPath = removeTrailingSlashIfNeeded(dbPath); String tableName = VALID_SCM_CERTS.getName(); DBDefinition dbDefinition = @@ -112,15 +99,15 @@ public Void call() throws Exception { SecurityConfig securityConfig = new SecurityConfig(parent.getOzoneConf()); Map allCerts = getAllCerts(columnFamilyDefinition, cfHandle, db); - out().println("All Certs in DB : " + allCerts.keySet()); + info("All Certs in DB : %s", allCerts.keySet()); String hostName = InetAddress.getLocalHost().getHostName(); - out().println("Host: " + hostName); + info("Host: %s", hostName); X509Certificate subCertificate = getSubCertificate(allCerts, hostName); X509Certificate rootCertificate = getRootCertificate(allCerts); - out().println("Sub cert serialID for this host: " + subCertificate.getSerialNumber().toString()); - out().println("Root cert serialID: " + rootCertificate.getSerialNumber().toString()); + info("Sub cert serialID for this host: 
%s", subCertificate.getSerialNumber()); + info("Root cert serialID: %s", rootCertificate.getSerialNumber()); boolean isRootCA = false; @@ -131,9 +118,8 @@ public Void call() throws Exception { storeCerts(subCertificate, rootCertificate, isRootCA, securityConfig); } } catch (RocksDBException | CertificateException exception) { - err().print("Failed to recover scm cert"); + error("Failed to recover scm cert"); } - return null; } private static ColumnFamilyHandle getColumnFamilyHandle( @@ -210,17 +196,17 @@ private void storeCerts(X509Certificate scmCertificate, CertificateCodec certCodec = new CertificateCodec(securityConfig, SCMCertificateClient.COMPONENT_NAME); - out().println("Writing certs to path : " + certCodec.getLocation().toString()); + info("Writing certs to path : %s", certCodec.getLocation()); CertPath certPath = addRootCertInPath(scmCertificate, rootCertificate); CertPath rootCertPath = getRootCertPath(rootCertificate); String encodedCert = CertificateCodec.getPEMEncodedString(certPath); String certName = String.format(CERT_FILE_NAME_FORMAT, - CAType.NONE.getFileNamePrefix() + scmCertificate.getSerialNumber().toString()); + CAType.NONE.getFileNamePrefix() + scmCertificate.getSerialNumber()); certCodec.writeCertificate(certName, encodedCert); String rootCertName = String.format(CERT_FILE_NAME_FORMAT, - CAType.SUBORDINATE.getFileNamePrefix() + rootCertificate.getSerialNumber().toString()); + CAType.SUBORDINATE.getFileNamePrefix() + rootCertificate.getSerialNumber()); String encodedRootCert = CertificateCodec.getPEMEncodedString(rootCertPath); certCodec.writeCertificate(rootCertName, encodedRootCert); @@ -230,7 +216,7 @@ private void storeCerts(X509Certificate scmCertificate, if (isRootCA) { CertificateCodec rootCertCodec = new CertificateCodec(securityConfig, OzoneConsts.SCM_ROOT_CA_COMPONENT_NAME); - out().println("Writing root certs to path : " + rootCertCodec.getLocation().toString()); + info("Writing root certs to path : %s", rootCertCodec.getLocation()); rootCertCodec.writeCertificate(rootCertCodec.getLocation().toAbsolutePath(), securityConfig.getCertificateFileName(), encodedRootCert); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java new file mode 100644 index 00000000000..b94e1c52d82 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.repair; + +import org.apache.hadoop.hdds.cli.AbstractSubcommand; + +import java.io.PrintWriter; +import java.util.concurrent.Callable; + +/** Parent class for all actionable repair commands. 
*/ +public abstract class RepairTool extends AbstractSubcommand implements Callable { + + /** Hook method for subclasses for performing actual repair task. */ + protected abstract void execute() throws Exception; + + @Override + public final Void call() throws Exception { + execute(); + return null; + } + + protected void info(String msg, Object... args) { + out().println(formatMessage(msg, args)); + } + + protected void error(String msg, Object... args) { + err().println(formatMessage(msg, args)); + } + + private PrintWriter out() { + return spec().commandLine() + .getOut(); + } + + private PrintWriter err() { + return spec().commandLine() + .getErr(); + } + + private String formatMessage(String msg, Object[] args) { + if (args != null && args.length > 0) { + msg = String.format(msg, args); + } + return msg; + } + +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/SnapshotRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/SnapshotRepair.java index 45c10f5668b..9c24b0d131f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/SnapshotRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/SnapshotRepair.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksIterator; import org.apache.hadoop.ozone.debug.RocksDBUtils; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.repair.RepairTool; import org.apache.hadoop.ozone.shell.bucket.BucketUri; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; @@ -31,7 +32,6 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine; -import picocli.CommandLine.Model.CommandSpec; import java.io.IOException; import java.util.ArrayList; @@ -40,7 +40,6 @@ import java.util.Objects; import java.util.Set; import java.util.UUID; -import java.util.concurrent.Callable; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; import static org.apache.hadoop.ozone.OzoneConsts.SNAPSHOT_INFO_TABLE; @@ -52,13 +51,10 @@ name = "snapshot", description = "CLI to update global and path previous snapshot for a snapshot in case snapshot chain is corrupted." 
) -public class SnapshotRepair implements Callable { +public class SnapshotRepair extends RepairTool { protected static final Logger LOG = LoggerFactory.getLogger(SnapshotRepair.class); - @CommandLine.Spec - private static CommandSpec spec; - @CommandLine.ParentCommand private RDBRepair parent; @@ -84,15 +80,15 @@ public class SnapshotRepair implements Callable { private boolean dryRun; @Override - public Void call() throws Exception { + public void execute() throws Exception { List cfHandleList = new ArrayList<>(); List cfDescList = RocksDBUtils.getColumnFamilyDescriptors(parent.getDbPath()); try (ManagedRocksDB db = ManagedRocksDB.open(parent.getDbPath(), cfDescList, cfHandleList)) { ColumnFamilyHandle snapshotInfoCfh = RocksDBUtils.getColumnFamilyHandle(SNAPSHOT_INFO_TABLE, cfHandleList); if (snapshotInfoCfh == null) { - System.err.println(SNAPSHOT_INFO_TABLE + " is not in a column family in DB for the given path."); - return null; + error("%s is not in a column family in DB for the given path.", SNAPSHOT_INFO_TABLE); + return; } String snapshotInfoTableKey = SnapshotInfo.getTableKey(bucketUri.getValue().getVolumeName(), @@ -102,9 +98,9 @@ public Void call() throws Exception { SnapshotInfo.getCodec()); if (snapshotInfo == null) { - System.err.println(snapshotName + " does not exist for given bucketUri: " + OM_KEY_PREFIX + + error("%s does not exist for given bucketUri: %s", snapshotName, OM_KEY_PREFIX + bucketUri.getValue().getVolumeName() + OM_KEY_PREFIX + bucketUri.getValue().getBucketName()); - return null; + return; } // snapshotIdSet is the set of the all existed snapshots ID to make that the provided global previous and path @@ -112,52 +108,50 @@ public Void call() throws Exception { Set snapshotIdSet = getSnapshotIdSet(db, snapshotInfoCfh); if (Objects.equals(snapshotInfo.getSnapshotId(), globalPreviousSnapshotId)) { - System.err.println("globalPreviousSnapshotId: '" + globalPreviousSnapshotId + - "' is equal to given snapshot's ID: '" + snapshotInfo.getSnapshotId() + "'."); - return null; + error("globalPreviousSnapshotId: '%s' is equal to given snapshot's ID: '%s'.", + globalPreviousSnapshotId, snapshotInfo.getSnapshotId()); + return; } if (Objects.equals(snapshotInfo.getSnapshotId(), pathPreviousSnapshotId)) { - System.err.println("pathPreviousSnapshotId: '" + pathPreviousSnapshotId + - "' is equal to given snapshot's ID: '" + snapshotInfo.getSnapshotId() + "'."); - return null; + error("pathPreviousSnapshotId: '%s' is equal to given snapshot's ID: '%s'.", + pathPreviousSnapshotId, snapshotInfo.getSnapshotId()); + return; } if (!snapshotIdSet.contains(globalPreviousSnapshotId)) { - System.err.println("globalPreviousSnapshotId: '" + globalPreviousSnapshotId + - "' does not exist in snapshotInfoTable."); - return null; + error("globalPreviousSnapshotId: '%s' does not exist in snapshotInfoTable.", + globalPreviousSnapshotId); + return; } if (!snapshotIdSet.contains(pathPreviousSnapshotId)) { - System.err.println("pathPreviousSnapshotId: '" + pathPreviousSnapshotId + - "' does not exist in snapshotInfoTable."); - return null; + error("pathPreviousSnapshotId: '%s' does not exist in snapshotInfoTable.", + pathPreviousSnapshotId); + return; } snapshotInfo.setGlobalPreviousSnapshotId(globalPreviousSnapshotId); snapshotInfo.setPathPreviousSnapshotId(pathPreviousSnapshotId); if (dryRun) { - System.out.println("SnapshotInfo would be updated to : " + snapshotInfo); + info("SnapshotInfo would be updated to : %s", snapshotInfo); } else { byte[] snapshotInfoBytes = 
SnapshotInfo.getCodec().toPersistedFormat(snapshotInfo); db.get() .put(snapshotInfoCfh, StringCodec.get().toPersistedFormat(snapshotInfoTableKey), snapshotInfoBytes); - System.out.println("Snapshot Info is updated to : " + + info("Snapshot Info is updated to : %s", RocksDBUtils.getValue(db, snapshotInfoCfh, snapshotInfoTableKey, SnapshotInfo.getCodec())); } } catch (RocksDBException exception) { - System.err.println("Failed to update the RocksDB for the given path: " + parent.getDbPath()); - System.err.println( + error("Failed to update the RocksDB for the given path: %s", parent.getDbPath()); + error( "Make sure that Ozone entity (OM, SCM or DN) is not running for the give dbPath and current host."); LOG.error(exception.toString()); } finally { IOUtils.closeQuietly(cfHandleList); } - - return null; } private Set getSnapshotIdSet(ManagedRocksDB db, ColumnFamilyHandle snapshotInfoCfh) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/TransactionInfoRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/TransactionInfoRepair.java index 277a2788247..192ff0fb1d5 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/TransactionInfoRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/TransactionInfoRepair.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.utils.db.StringCodec; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.ozone.debug.RocksDBUtils; +import org.apache.hadoop.ozone.repair.RepairTool; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.RocksDBException; @@ -35,7 +36,6 @@ import java.io.IOException; import java.util.ArrayList; import java.util.List; -import java.util.concurrent.Callable; import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.TRANSACTION_INFO_TABLE; @@ -49,10 +49,7 @@ mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class ) -public class TransactionInfoRepair implements Callable { - - @CommandLine.Spec - private static CommandLine.Model.CommandSpec spec; +public class TransactionInfoRepair extends RepairTool { @CommandLine.ParentCommand private RDBRepair parent; @@ -67,20 +64,8 @@ public class TransactionInfoRepair implements Callable { description = "Highest index of transactionInfoTable. 
The input should be non-zero long integer.") private long highestTransactionIndex; - - protected void setHighestTransactionTerm( - long highestTransactionTerm) { - this.highestTransactionTerm = highestTransactionTerm; - } - - protected void setHighestTransactionIndex( - long highestTransactionIndex) { - this.highestTransactionIndex = highestTransactionIndex; - } - - @Override - public Void call() throws Exception { + public void execute() throws Exception { List cfHandleList = new ArrayList<>(); String dbPath = getParent().getDbPath(); List cfDescList = RocksDBUtils.getColumnFamilyDescriptors( @@ -95,7 +80,7 @@ public Void call() throws Exception { TransactionInfo originalTransactionInfo = RocksDBUtils.getValue(db, transactionInfoCfh, TRANSACTION_INFO_KEY, TransactionInfo.getCodec()); - System.out.println("The original highest transaction Info was " + originalTransactionInfo.getTermIndex()); + info("The original highest transaction Info was %s", originalTransactionInfo.getTermIndex()); TransactionInfo transactionInfo = TransactionInfo.valueOf(highestTransactionTerm, highestTransactionIndex); @@ -103,19 +88,17 @@ public Void call() throws Exception { db.get() .put(transactionInfoCfh, StringCodec.get().toPersistedFormat(TRANSACTION_INFO_KEY), transactionInfoBytes); - System.out.println("The highest transaction info has been updated to: " + + info("The highest transaction info has been updated to: %s", RocksDBUtils.getValue(db, transactionInfoCfh, TRANSACTION_INFO_KEY, TransactionInfo.getCodec()).getTermIndex()); } catch (RocksDBException exception) { - System.err.println("Failed to update the RocksDB for the given path: " + dbPath); - System.err.println( + error("Failed to update the RocksDB for the given path: %s", dbPath); + error( "Make sure that Ozone entity (OM) is not running for the give database path and current host."); throw new IOException("Failed to update RocksDB.", exception); } finally { IOUtils.closeQuietly(cfHandleList); } - - return null; } protected RDBRepair getParent() { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java index 5a217e9f2de..46af1e847be 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java @@ -18,10 +18,9 @@ package org.apache.hadoop.ozone.repair.om; +import org.apache.hadoop.ozone.repair.RepairTool; import picocli.CommandLine; -import java.util.concurrent.Callable; - /** * Parser for scm.db file. */ @@ -30,7 +29,7 @@ description = "Identify and repair a disconnected FSO tree by marking unreferenced entries for deletion. " + "OM should be stopped while this tool is run." 
) -public class FSORepairCLI implements Callable { +public class FSORepairCLI extends RepairTool { @CommandLine.Option(names = {"--db"}, required = true, @@ -55,11 +54,11 @@ public class FSORepairCLI implements Callable { private boolean verbose; @Override - public Void call() throws Exception { + public void execute() throws Exception { if (repair) { - System.out.println("FSO Repair Tool is running in repair mode"); + info("FSO Repair Tool is running in repair mode"); } else { - System.out.println("FSO Repair Tool is running in debug mode"); + info("FSO Repair Tool is running in debug mode"); } try { FSORepairTool @@ -70,9 +69,7 @@ public Void call() throws Exception { } if (verbose) { - System.out.println("FSO repair finished."); + info("FSO repair finished."); } - - return null; } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaTrigger.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaTrigger.java index daa1f332e3f..2930c873563 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaTrigger.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaTrigger.java @@ -24,10 +24,10 @@ import java.util.Arrays; import java.util.Collections; import java.util.List; -import java.util.concurrent.Callable; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; +import org.apache.hadoop.ozone.repair.RepairTool; import picocli.CommandLine; /** @@ -39,9 +39,7 @@ mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class ) -public class QuotaTrigger implements Callable { - @CommandLine.Spec - private static CommandLine.Model.CommandSpec spec; +public class QuotaTrigger extends RepairTool { @CommandLine.ParentCommand private QuotaRepair parent; @@ -68,20 +66,19 @@ public class QuotaTrigger implements Callable { private String buckets; @Override - public Void call() throws Exception { + public void execute() throws Exception { List bucketList = Collections.emptyList(); if (StringUtils.isNotEmpty(buckets)) { bucketList = Arrays.asList(buckets.split(",")); } - - OzoneManagerProtocol ozoneManagerClient = - parent.createOmClient(omServiceId, omHost, false); - try { - ozoneManagerClient.startQuotaRepair(bucketList); - System.out.println(ozoneManagerClient.getQuotaRepairStatus()); - } catch (Exception ex) { - System.out.println(ex.getMessage()); + + try (OzoneManagerProtocol omClient = parent.createOmClient(omServiceId, omHost, false)) { + info("Triggering quota repair for %s", + bucketList.isEmpty() + ? "all buckets" + : ("buckets " + buckets)); + omClient.startQuotaRepair(bucketList); + info(omClient.getQuotaRepairStatus()); } - return null; } } From bf112aa36b2a73fd5ef82e33fd7389afad207fa7 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 3 Jan 2025 08:57:51 +0100 Subject: [PATCH 025/168] HDDS-11990. 
Use arity in decommission subcommands (#7618) --- .../cli/datanode/DecommissionSubCommand.java | 54 ++++++------------- .../scm/cli/datanode/HostNameParameters.java | 53 ++++++++++++++++++ .../cli/datanode/MaintenanceSubCommand.java | 50 ++++------------- .../cli/datanode/RecommissionSubCommand.java | 48 ++++------------- 4 files changed, 89 insertions(+), 116 deletions(-) create mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/HostNameParameters.java diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java index 31123ae81b5..d9474d7355a 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DecommissionSubCommand.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdds.scm.cli.datanode; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; @@ -25,9 +24,7 @@ import picocli.CommandLine; import picocli.CommandLine.Command; import java.io.IOException; -import java.util.ArrayList; import java.util.List; -import java.util.Scanner; /** * Decommission one or more datanodes. @@ -39,14 +36,8 @@ versionProvider = HddsVersionProvider.class) public class DecommissionSubCommand extends ScmSubcommand { - @CommandLine.Spec - private CommandLine.Model.CommandSpec spec; - - @CommandLine.Parameters(description = "One or more host names separated by spaces. " + - "To read from stdin, specify '-' and supply the host names " + - "separated by newlines.", - paramLabel = "") - private List parameters = new ArrayList<>(); + @CommandLine.Mixin + private HostNameParameters hostNameParams; @CommandLine.Option(names = { "--force" }, defaultValue = "false", @@ -55,33 +46,22 @@ public class DecommissionSubCommand extends ScmSubcommand { @Override public void execute(ScmClient scmClient) throws IOException { - if (parameters.size() > 0) { - List hosts; - // Whether to read from stdin - if (parameters.get(0).equals("-")) { - hosts = new ArrayList<>(); - Scanner scanner = new Scanner(System.in, "UTF-8"); - while (scanner.hasNextLine()) { - hosts.add(scanner.nextLine().trim()); - } - } else { - hosts = parameters; - } - List errors = scmClient.decommissionNodes(hosts, force); - System.out.println("Started decommissioning datanode(s):\n" + - String.join("\n", hosts)); - if (errors.size() > 0) { - for (DatanodeAdminError error : errors) { - System.err.println("Error: " + error.getHostname() + ": " - + error.getError()); - } - // Throwing the exception will cause a non-zero exit status for the - // command. 
- throw new IOException( - "Some nodes could not enter the decommission workflow"); + List hosts = hostNameParams.getHostNames(); + List errors = scmClient.decommissionNodes(hosts, force); + System.out.println("Started decommissioning datanode(s):\n" + + String.join("\n", hosts)); + showErrors(errors, "Some nodes could not enter the decommission workflow"); + } + + static void showErrors(List errors, String message) throws IOException { + if (!errors.isEmpty()) { + for (DatanodeAdminError error : errors) { + System.err.println("Error: " + error.getHostname() + ": " + + error.getError()); } - } else { - GenericCli.missingSubcommand(spec); + // Throwing the exception will cause a non-zero exit status for the + // command. + throw new IOException(message); } } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/HostNameParameters.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/HostNameParameters.java new file mode 100644 index 00000000000..4020d256bc6 --- /dev/null +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/HostNameParameters.java @@ -0,0 +1,53 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdds.scm.cli.datanode; + +import picocli.CommandLine; + +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.List; +import java.util.Scanner; + +/** Parameter for specifying list of hostnames. */ +@CommandLine.Command +public class HostNameParameters { + + @CommandLine.Parameters(description = "One or more host names separated by spaces. 
" + + "To read from stdin, specify '-' and supply the host names " + + "separated by newlines.", + arity = "1..*", + paramLabel = "") + private List parameters = new ArrayList<>(); + + public List getHostNames() { + List hosts; + // Whether to read from stdin + if (parameters.get(0).equals("-")) { + hosts = new ArrayList<>(); + Scanner scanner = new Scanner(System.in, StandardCharsets.UTF_8.name()); + while (scanner.hasNextLine()) { + hosts.add(scanner.nextLine().trim()); + } + } else { + hosts = parameters; + } + return hosts; + } + +} diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java index b07af660a8f..23b91323edb 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/MaintenanceSubCommand.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdds.scm.cli.datanode; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; @@ -25,9 +24,9 @@ import picocli.CommandLine; import picocli.CommandLine.Command; import java.io.IOException; -import java.util.ArrayList; import java.util.List; -import java.util.Scanner; + +import static org.apache.hadoop.hdds.scm.cli.datanode.DecommissionSubCommand.showErrors; /** * Place one or more datanodes into Maintenance Mode. @@ -39,14 +38,8 @@ versionProvider = HddsVersionProvider.class) public class MaintenanceSubCommand extends ScmSubcommand { - @CommandLine.Spec - private CommandLine.Model.CommandSpec spec; - - @CommandLine.Parameters(description = "One or more host names separated by spaces. " + - "To read from stdin, specify '-' and supply the host names " + - "separated by newlines.", - paramLabel = "") - private List parameters = new ArrayList<>(); + @CommandLine.Mixin + private HostNameParameters hostNameParams; @CommandLine.Option(names = {"--end"}, description = "Automatically end maintenance after the given hours. " + @@ -61,34 +54,11 @@ public class MaintenanceSubCommand extends ScmSubcommand { @Override public void execute(ScmClient scmClient) throws IOException { - if (parameters.size() > 0) { - List hosts; - // Whether to read from stdin - if (parameters.get(0).equals("-")) { - hosts = new ArrayList<>(); - Scanner scanner = new Scanner(System.in, "UTF-8"); - while (scanner.hasNextLine()) { - hosts.add(scanner.nextLine().trim()); - } - } else { - hosts = parameters; - } - List errors = - scmClient.startMaintenanceNodes(hosts, endInHours, force); - System.out.println("Entering maintenance mode on datanode(s):\n" + - String.join("\n", hosts)); - if (errors.size() > 0) { - for (DatanodeAdminError error : errors) { - System.err.println("Error: " + error.getHostname() + ": " - + error.getError()); - } - // Throwing the exception will cause a non-zero exit status for the - // command. 
- throw new IOException( - "Some nodes could not start the maintenance workflow"); - } - } else { - GenericCli.missingSubcommand(spec); - } + List hosts = hostNameParams.getHostNames(); + List errors = + scmClient.startMaintenanceNodes(hosts, endInHours, force); + System.out.println("Entering maintenance mode on datanode(s):\n" + + String.join("\n", hosts)); + showErrors(errors, "Some nodes could not start the maintenance workflow"); } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java index e21d61ed3d7..37f902f6830 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/RecommissionSubCommand.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdds.scm.cli.datanode; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.DatanodeAdminError; import org.apache.hadoop.hdds.scm.cli.ScmSubcommand; @@ -25,9 +24,9 @@ import picocli.CommandLine; import picocli.CommandLine.Command; import java.io.IOException; -import java.util.ArrayList; import java.util.List; -import java.util.Scanner; + +import static org.apache.hadoop.hdds.scm.cli.datanode.DecommissionSubCommand.showErrors; /** * Recommission one or more datanodes. @@ -40,44 +39,15 @@ versionProvider = HddsVersionProvider.class) public class RecommissionSubCommand extends ScmSubcommand { - @CommandLine.Spec - private CommandLine.Model.CommandSpec spec; - - @CommandLine.Parameters(description = "One or more host names separated by spaces. " + - "To read from stdin, specify '-' and supply the host names " + - "separated by newlines.", - paramLabel = "") - private List parameters = new ArrayList<>(); + @CommandLine.Mixin + private HostNameParameters hostNameParams; @Override public void execute(ScmClient scmClient) throws IOException { - if (parameters.size() > 0) { - List hosts; - // Whether to read from stdin - if (parameters.get(0).equals("-")) { - hosts = new ArrayList<>(); - Scanner scanner = new Scanner(System.in, "UTF-8"); - while (scanner.hasNextLine()) { - hosts.add(scanner.nextLine().trim()); - } - } else { - hosts = parameters; - } - List errors = scmClient.recommissionNodes(hosts); - System.out.println("Started recommissioning datanode(s):\n" + - String.join("\n", hosts)); - if (errors.size() > 0) { - for (DatanodeAdminError error : errors) { - System.err.println("Error: " + error.getHostname() + ": " - + error.getError()); - } - // Throwing the exception will cause a non-zero exit status for the - // command. - throw new IOException( - "Some nodes could be recommissioned"); - } - } else { - GenericCli.missingSubcommand(spec); - } + List hosts = hostNameParams.getHostNames(); + List errors = scmClient.recommissionNodes(hosts); + System.out.println("Started recommissioning datanode(s):\n" + + String.join("\n", hosts)); + showErrors(errors, "Some nodes could be recommissioned"); } } From fbb792d46d1b084e14123a7033b50906f8937e13 Mon Sep 17 00:00:00 2001 From: Sarveksha Yeshavantha Raju <79865743+sarvekshayr@users.noreply.github.com> Date: Fri, 3 Jan 2025 14:57:48 +0530 Subject: [PATCH 026/168] HDDS-11945. 
Improve startup message for ozone repair commands (#7591) --- .../hadoop/ozone/repair/OzoneRepair.java | 7 +++++- .../hadoop/ozone/repair/TestOzoneRepair.java | 25 +++++++++++++++++-- 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/OzoneRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/OzoneRepair.java index b1ed206f975..864022da6f3 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/OzoneRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/OzoneRepair.java @@ -31,7 +31,8 @@ * Ozone Repair Command line tool. */ @CommandLine.Command(name = "ozone repair", - description = "Operational tool to repair Ozone", + description = "Advanced tool to repair Ozone. The nodes being repaired " + + "must be stopped before the tool is run.", versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) public class OzoneRepair extends GenericCli implements ExtensibleParentCommand { @@ -46,6 +47,10 @@ public static void main(String[] argv) { @Override public int execute(String[] argv) { + if (argv.length == 0 || argv[0].equals("--help") || argv[0].equals("-h")) { + return super.execute(argv); + } + String currentUser = getSystemUserName(); if (!("y".equalsIgnoreCase(getConsoleReadLineWithFormat(currentUser)))) { System.out.println("Aborting command."); diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestOzoneRepair.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestOzoneRepair.java index 272bf24c066..bf6a9ed00a4 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestOzoneRepair.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestOzoneRepair.java @@ -67,7 +67,7 @@ void testOzoneRepairWhenUserIsRemindedSystemUserAndDeclinesToProceed() throws Ex OzoneRepair ozoneRepair = new OzoneRepair(); System.setIn(new ByteArrayInputStream("N".getBytes(DEFAULT_ENCODING))); - int res = ozoneRepair.execute(new String[]{}); + int res = ozoneRepair.execute(new String[]{"om", "fso-tree"}); assertEquals(1, res); assertThat(out.toString(DEFAULT_ENCODING)).contains("Aborting command."); // prompt should contain the current user name as well @@ -79,10 +79,31 @@ void testOzoneRepairWhenUserIsRemindedSystemUserAndAgreesToProceed() throws Exce OzoneRepair ozoneRepair = new OzoneRepair(); System.setIn(new ByteArrayInputStream("y".getBytes(DEFAULT_ENCODING))); - ozoneRepair.execute(new String[]{}); + ozoneRepair.execute(new String[]{"om", "fso-tree"}); assertThat(out.toString(DEFAULT_ENCODING)).contains("Run as user: " + OZONE_USER); // prompt should contain the current user name as well assertThat(err.toString(DEFAULT_ENCODING)).contains("ATTENTION: Running as user " + OZONE_USER); } + @Test + void testOzoneRepairSkipsPromptWhenNoSubcommandProvided() throws Exception { + OzoneRepair ozoneRepair = new OzoneRepair(); + + // when no argument is passed, prompt should not be displayed + ozoneRepair.execute(new String[]{}); + assertThat(err.toString(DEFAULT_ENCODING)).doesNotContain("ATTENTION: Running as user " + OZONE_USER); + } + + @Test + void testOzoneRepairSkipsPromptWhenHelpFlagProvided() throws Exception { + OzoneRepair ozoneRepair = new OzoneRepair(); + + // when --help or -h flag is passed, prompt should not be displayed + ozoneRepair.execute(new String[]{"--help"}); + assertThat(err.toString(DEFAULT_ENCODING)).doesNotContain("ATTENTION: Running as user 
" + OZONE_USER); + + ozoneRepair.execute(new String[]{"-h"}); + assertThat(err.toString(DEFAULT_ENCODING)).doesNotContain("ATTENTION: Running as user " + OZONE_USER); + } + } From 69206e9681cb4df07f886395ce8ec1d5e5ba68bf Mon Sep 17 00:00:00 2001 From: Chia-Chuan Yu Date: Fri, 3 Jan 2025 18:24:14 +0800 Subject: [PATCH 027/168] HDDS-11902. Remove upgrade tests for non-ha and om-ha (#7610) --- .../dist/src/main/compose/upgrade/README.md | 18 +- .../main/compose/upgrade/compose/non-ha/.env | 22 --- .../compose/non-ha/docker-compose.yaml | 155 --------------- .../upgrade/compose/non-ha/docker-config | 49 ----- .../compose/upgrade/compose/non-ha/load.sh | 29 --- .../main/compose/upgrade/compose/om-ha/.env | 27 --- .../upgrade/compose/om-ha/docker-compose.yaml | 178 ------------------ .../upgrade/compose/om-ha/docker-config | 65 ------- .../compose/upgrade/compose/om-ha/load.sh | 30 --- .../upgrade/upgrades/manual-upgrade/README.md | 19 -- .../upgrade/upgrades/manual-upgrade/driver.sh | 54 ------ 11 files changed, 2 insertions(+), 644 deletions(-) delete mode 100644 hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/.env delete mode 100644 hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-compose.yaml delete mode 100644 hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config delete mode 100755 hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/load.sh delete mode 100644 hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/.env delete mode 100644 hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-compose.yaml delete mode 100644 hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-config delete mode 100755 hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/load.sh delete mode 100644 hadoop-ozone/dist/src/main/compose/upgrade/upgrades/manual-upgrade/README.md delete mode 100755 hadoop-ozone/dist/src/main/compose/upgrade/upgrades/manual-upgrade/driver.sh diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/README.md b/hadoop-ozone/dist/src/main/compose/upgrade/README.md index 49c453800e4..345b91d7bde 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/README.md +++ b/hadoop-ozone/dist/src/main/compose/upgrade/README.md @@ -43,7 +43,7 @@ an older release of Ozone and a later release (which may be the local build). ## Supported Versions -Non-rolling upgrades and downgrades are supported from 1.1.0 to any later version. Note that 1.1.0 did not have the non-rolling upgrade framework, so things like preparing the OMs for upgrade and checking finalization status are not present in that version. Manual upgrade is the only supported upgrade option from 1.0.0 to 1.1.0. +Non-rolling upgrades and downgrades are supported from 1.1.0 to any later version. Note that 1.1.0 did not have the non-rolling upgrade framework, so things like preparing the OMs for upgrade and checking finalization status are not present in that version. ## Directory Layout @@ -72,23 +72,9 @@ Each type of upgrade has a subdirectory under the *upgrades* directory. 4. `with_this_version_finalized`: Run after ozone is stopped in the old version after donwgrade, started again in the new version pre-finalized, and then finalized. - The upgrade is complete when this callback runs. -#### manual-upgrade - -- This is a legacy option that was used before the upgrade framework was introduced in 1.2.0. This option is left as an example in case it needs to be used for some reason in the future. 
- -- Any necessary conversion of on disk structures from the old version to the new version must be done explicitly. - -- This is primarily for testing upgrades from versions before the non-rolling upgrade framework was introduced. - -- Supported Callbacks: - 1. `setup_old_version`: Run before ozone is started in the old version. - 3. `with_old_version`: Run while ozone is running in the old version. - 3. `setup_this_version`: Run after ozone is stopped in the old version, but before it is restarted in the new version. - 4. `with_this_version`: Run while ozone is running in the new version. - ### compose -Docker compose cluster definitions to be used in upgrade testing are defined in the *compose* directory. A compose cluster can be selected by specifying the name of its subdirectory as the first argument to `run_test`. `run_test` will then source the `load.sh` script in the cluster's directory so it is used during the test. For manual testing, docker compose can be used normally from the compose cluster directory. Note that some clusters may not work with older versions. Ozone 1.1.0, for example, does not support SCM HA. +Docker compose cluster definitions to be used in upgrade testing are defined in the *compose* directory. A compose cluster can be selected by specifying the name of its subdirectory as the first argument to `run_test`. `run_test` will then source the `load.sh` script in the cluster's directory so it is used during the test. ## Persisting Data diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/.env b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/.env deleted file mode 100644 index babe87a492a..00000000000 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/.env +++ /dev/null @@ -1,22 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -HDDS_VERSION=${hdds.version} -OZONE_RUNNER_VERSION=${docker.ozone-runner.version} -OZONE_RUNNER_IMAGE=apache/ozone-runner -OZONE_TEST_IMAGE=apache/ozone-runner:${docker.ozone-runner.version} -OZONE_DIR=/opt/hadoop -OZONE_VOLUME=./data diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-compose.yaml deleted file mode 100644 index 7aea9af378e..00000000000 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-compose.yaml +++ /dev/null @@ -1,155 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# reusable fragments (see https://docs.docker.com/compose/compose-file/#extension-fields) -x-common-config: - &common-config - env_file: - - docker-config - image: ${OZONE_TEST_IMAGE} - -x-environment: - &environment - OZONE-SITE.XML_ozone.server.default.replication: ${OZONE_REPLICATION_FACTOR:-1} - OZONE_UPGRADE_TO: ${OZONE_UPGRADE_TO:-0} - OZONE_UPGRADE_FROM: ${OZONE_UPGRADE_FROM:-0} - -x-datanode: - &datanode - command: ["ozone","datanode"] - <<: *common-config - environment: - <<: *environment - ports: - - 19864 - - 9882 - -x-volumes: - - &ozone-dir ${TEST_DIR}/../..:${OZONE_DIR} - - &transformation ${TEST_DIR}/../../libexec/transformation.py:/opt/hadoop/libexec/transformation.py - -services: - om: - command: ["ozone","om"] - <<: *common-config - environment: - ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION - <<: *environment - ports: - - 9862 - - 9874 - networks: - net: - ipv4_address: 10.9.0.11 - volumes: - - ${OZONE_VOLUME}/om:/data - - *ozone-dir - - *transformation - scm: - command: ["ozone","scm"] - <<: *common-config - environment: - ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION - OZONE-SITE.XML_hdds.scm.safemode.min.datanode: ${OZONE_SAFEMODE_MIN_DATANODES:-1} - <<: *environment - networks: - net: - ipv4_address: 10.9.0.12 - ports: - - 9876:9876 - volumes: - - ${OZONE_VOLUME}/scm:/data - - *ozone-dir - - *transformation - dn1: - <<: *datanode - networks: - net: - ipv4_address: 10.9.0.13 - volumes: - - ${OZONE_VOLUME}/dn1:/data - - *ozone-dir - - *transformation - dn2: - <<: *datanode - networks: - net: - ipv4_address: 10.9.0.14 - volumes: - - ${OZONE_VOLUME}/dn2:/data - - *ozone-dir - - *transformation - dn3: - <<: *datanode - networks: - net: - ipv4_address: 10.9.0.15 - volumes: - - ${OZONE_VOLUME}/dn3:/data - - *ozone-dir - - *transformation - dn4: - <<: *datanode - networks: - net: - ipv4_address: 10.9.0.16 - volumes: - - ${OZONE_VOLUME}/dn4:/data - - *ozone-dir - - *transformation - dn5: - <<: *datanode - networks: - net: - ipv4_address: 10.9.0.17 - volumes: - - ${OZONE_VOLUME}/dn5:/data - - *ozone-dir - - *transformation - recon: - command: ["ozone","recon"] - <<: *common-config - environment: - <<: *environment - networks: - net: - ipv4_address: 10.9.0.18 - ports: - - 9888:9888 - volumes: - - ${OZONE_VOLUME}/recon:/data - - *ozone-dir - - *transformation - s3g: - command: ["ozone","s3g"] - <<: *common-config - environment: - <<: *environment - networks: - net: - ipv4_address: 10.9.0.19 - ports: - - 9878:9878 - volumes: - - ${OZONE_VOLUME}/s3g:/data - - *ozone-dir - - *transformation -networks: - net: - driver: bridge - ipam: - config: - - subnet: 10.9.0.0/16 diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config deleted file mode 100644 index ce4a8807e54..00000000000 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/docker-config +++ /dev/null @@ -1,49 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. 
See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata - -OZONE-SITE.XML_ozone.client.failover.max.attempts=6 - -OZONE-SITE.XML_ozone.om.address=om -OZONE-SITE.XML_ozone.om.http-address=om:9874 -OZONE-SITE.XML_ozone.scm.container.size=1GB -OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB -OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s -OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 -OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata -OZONE-SITE.XML_ozone.scm.block.client.address=scm -OZONE-SITE.XML_ozone.scm.container.size=1GB -OZONE-SITE.XML_ozone.scm.client.address=scm - -OZONE-SITE.XML_hdds.datanode.dir=/data/hdds -OZONE-SITE.XML_hdds.datanode.volume.min.free.space=100MB - -OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon -OZONE-SITE.XML_ozone.recon.om.snapshot.task.interval.delay=1m -OZONE-SITE.XML_hdds.scmclient.max.retry.timeout=30s -OZONE-SITE.XML_ozone.http.basedir=/tmp/ozone_http -OZONE-SITE.XML_ozone.fs.hsync.enabled=true - -OZONE_CONF_DIR=/etc/hadoop -OZONE_LOG_DIR=/var/log/hadoop - -no_proxy=om,scm,s3g,kdc,localhost,127.0.0.1 - -# Explicitly enable filesystem snapshot feature for this Docker compose cluster -# Does not take effect on Ozone versions < 1.4.0 -OZONE-SITE.XML_ozone.filesystem.snapshot.enabled=true diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/load.sh b/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/load.sh deleted file mode 100755 index c1bd0d3724a..00000000000 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/non-ha/load.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Fail if required variables are not set. 
-set -u -: "${OZONE_VOLUME}" -: "${TEST_DIR}" -set +u - -source "$TEST_DIR/testlib.sh" - -export COMPOSE_FILE="$TEST_DIR/compose/non-ha/docker-compose.yaml" -create_data_dirs "${OZONE_VOLUME}"/{om,dn1,dn2,dn3,dn4,dn5,recon,s3g,scm} - -echo "Using docker cluster defined in $COMPOSE_FILE" diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/.env b/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/.env deleted file mode 100644 index 85c422b5ad7..00000000000 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/.env +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -HDDS_VERSION=${hdds.version} -OZONE_RUNNER_VERSION=${docker.ozone-runner.version} -OZONE_RUNNER_IMAGE=apache/ozone-runner -OZONE_TEST_IMAGE=apache/ozone-runner:${docker.ozone-runner.version} -OZONE_DIR=/opt/hadoop -OZONE_VOLUME=./data -OM_SERVICE_ID=omservice -# Indicates no arguments to the OM. -# This variable must be set to some non-empty value, or docker compose will -# expand it to an empty string and pass that to the OM as an argument. -OM_HA_ARGS=-- diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-compose.yaml deleted file mode 100644 index 880b36ff2b3..00000000000 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-compose.yaml +++ /dev/null @@ -1,178 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -# reusable fragments (see https://docs.docker.com/compose/compose-file/#extension-fields) -x-common-config: - &common-config - env_file: - - docker-config - image: ${OZONE_TEST_IMAGE} - -x-environment: - &environment - OZONE-SITE.XML_ozone.server.default.replication: ${OZONE_REPLICATION_FACTOR:-3} - OZONE_UPGRADE_TO: ${OZONE_UPGRADE_TO:-0} - OZONE_UPGRADE_FROM: ${OZONE_UPGRADE_FROM:-0} - -x-datanode: - &datanode - command: ["ozone","datanode"] - <<: *common-config - environment: - <<: *environment - ports: - - 19864 - - 9882 - -x-om: - &om - command: ["ozone","om","${OM_HA_ARGS}"] - <<: *common-config - environment: - ENSURE_OM_INITIALIZED: /data/metadata/om/current/VERSION - <<: *environment - ports: - - 9862 - - 9872 - -x-volumes: - - &ozone-dir ../../../..:${OZONE_DIR} - - &transformation ../../../../libexec/transformation.py:/opt/hadoop/libexec/transformation.py - -services: - om1: - <<: *om - networks: - net: - ipv4_address: 10.9.0.11 - volumes: - - ${OZONE_VOLUME}/om1:/data - - *ozone-dir - - *transformation - om2: - <<: *om - networks: - net: - ipv4_address: 10.9.0.12 - volumes: - - ${OZONE_VOLUME}/om2:/data - - *ozone-dir - - *transformation - om3: - <<: *om - networks: - net: - ipv4_address: 10.9.0.13 - volumes: - - ${OZONE_VOLUME}/om3:/data - - *ozone-dir - - *transformation - - scm: - command: ["ozone","scm"] - <<: *common-config - environment: - ENSURE_SCM_INITIALIZED: /data/metadata/scm/current/VERSION - OZONE-SITE.XML_hdds.scm.safemode.min.datanode: ${OZONE_SAFEMODE_MIN_DATANODES:-1} - <<: *environment - networks: - net: - ipv4_address: 10.9.0.14 - ports: - - 9876:9876 - volumes: - - ${OZONE_VOLUME}/scm:/data - - *ozone-dir - - *transformation - dn1: - <<: *datanode - networks: - net: - ipv4_address: 10.9.0.15 - volumes: - - ${OZONE_VOLUME}/dn1:/data - - *ozone-dir - - *transformation - dn2: - <<: *datanode - networks: - net: - ipv4_address: 10.9.0.16 - volumes: - - ${OZONE_VOLUME}/dn2:/data - - *ozone-dir - - *transformation - dn3: - <<: *datanode - networks: - net: - ipv4_address: 10.9.0.17 - volumes: - - ${OZONE_VOLUME}/dn3:/data - - *ozone-dir - - *transformation - dn4: - <<: *datanode - networks: - net: - ipv4_address: 10.9.0.18 - volumes: - - ${OZONE_VOLUME}/dn4:/data - - *ozone-dir - - *transformation - dn5: - <<: *datanode - networks: - net: - ipv4_address: 10.9.0.19 - volumes: - - ${OZONE_VOLUME}/dn5:/data - - *ozone-dir - - *transformation - recon: - command: ["ozone","recon"] - <<: *common-config - environment: - <<: *environment - networks: - net: - ipv4_address: 10.9.0.20 - ports: - - 9888:9888 - volumes: - - ${OZONE_VOLUME}/recon:/data - - *ozone-dir - - *transformation - s3g: - command: ["ozone","s3g"] - <<: *common-config - environment: - <<: *environment - networks: - net: - ipv4_address: 10.9.0.21 - ports: - - 9878:9878 - volumes: - - ${OZONE_VOLUME}/s3g:/data - - *ozone-dir - - *transformation -networks: - net: - driver: bridge - ipam: - config: - - subnet: 10.9.0.0/16 diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-config b/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-config deleted file mode 100644 index a049ba5f012..00000000000 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/docker-config +++ /dev/null @@ -1,65 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. 
The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -OZONE-SITE.XML_ozone.metadata.dirs=/data/metadata - -OZONE-SITE.XML_ozone.client.failover.max.attempts=6 - -OZONE-SITE.XML_ozone.om.service.ids=omservice -OZONE-SITE.XML_ozone.om.nodes.omservice=om1,om2,om3 -OZONE-SITE.XML_ozone.om.address.omservice.om1=om1 -OZONE-SITE.XML_ozone.om.address.omservice.om2=om2 -OZONE-SITE.XML_ozone.om.address.omservice.om3=om3 -OZONE-SITE.XML_ozone.om.ratis.enable=true -OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s -OZONE-SITE.XML_ozone.scm.pipeline.owner.container.count=1 -OZONE-SITE.XML_ozone.scm.names=scm -OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata -OZONE-SITE.XML_ozone.scm.block.client.address=scm -OZONE-SITE.XML_ozone.scm.container.size=1GB -OZONE-SITE.XML_ozone.scm.client.address=scm -OZONE-SITE.XML_ozone.http.basedir=/tmp/ozone_http -OZONE-SITE.XML_ozone.fs.hsync.enabled=true - -OZONE-SITE.XML_hdds.datanode.dir=/data/hdds -OZONE-SITE.XML_hdds.datanode.volume.min.free.space=100MB - -# If SCM sends container close commands as part of upgrade finalization while -# datanodes are doing a leader election, all 3 replicas may end up in the -# CLOSING state. The replication manager must be running to later move them to -# a CLOSED state so the datanodes can progress with finalization. -# -# This config sets the amount of time SCM will wait after safemode exit to -# start the replication manager and pipeline scrubber. The default of 5 minutes -# is fine in real clusters to prevent unnecessary over-replication, -# but it is too long for this test. -OZONE-SITE.XML_hdds.scm.wait.time.after.safemode.exit=5s -# If datanodes take too long to close pipelines during finalization, let the -# scrubber force close them to move the test forward. -OZONE-SITE.XML_ozone.scm.pipeline.scrub.interval=1m -OZONE-SITE.XML_ozone.scm.pipeline.allocated.timeout=2m - -OZONE-SITE.XML_ozone.recon.db.dir=/data/metadata/recon -OZONE-SITE.XML_ozone.recon.om.snapshot.task.interval.delay=1m -OZONE-SITE.XML_ozone.recon.address=recon:9891 - -no_proxy=om1,om2,om3,scm,s3g,kdc,localhost,127.0.0.1 - -OM_SERVICE_ID=omservice - -# Explicitly enable filesystem snapshot feature for this Docker compose cluster -# Does not take effect on Ozone versions < 1.4.0 -OZONE-SITE.XML_ozone.filesystem.snapshot.enabled=true diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/load.sh b/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/load.sh deleted file mode 100755 index a2f2603414f..00000000000 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/om-ha/load.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. 
You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Fail if required variables are not set. -set -u -: "${OZONE_VOLUME}" -: "${TEST_DIR}" -set +u - -source "$TEST_DIR/testlib.sh" - -export COMPOSE_FILE="$TEST_DIR/compose/om-ha/docker-compose.yaml" -export OM_SERVICE_ID=omservice -create_data_dirs "${OZONE_VOLUME}"/{om1,om2,om3,dn1,dn2,dn3,dn4,dn5,recon,s3g,scm} - -echo "Using docker cluster defined in $COMPOSE_FILE" diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/upgrades/manual-upgrade/README.md b/hadoop-ozone/dist/src/main/compose/upgrade/upgrades/manual-upgrade/README.md deleted file mode 100644 index 20c1c3dfb23..00000000000 --- a/hadoop-ozone/dist/src/main/compose/upgrade/upgrades/manual-upgrade/README.md +++ /dev/null @@ -1,19 +0,0 @@ - - -# Manual Upgrade Testing - -- Since the release of Ozone's non-rolling upgrade framework in 1.2.0, no manual reformatting steps need to be run on nodes between upgrades. - -- The driver for testing this type of upgrade is left here as an example in case it needs to be used for some reason in the future. diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/upgrades/manual-upgrade/driver.sh b/hadoop-ozone/dist/src/main/compose/upgrade/upgrades/manual-upgrade/driver.sh deleted file mode 100755 index c77528684bc..00000000000 --- a/hadoop-ozone/dist/src/main/compose/upgrade/upgrades/manual-upgrade/driver.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/env bash -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# This script tests upgrade from one release to a later one. Docker -# image with Ozone binaries are required for both versions. - -set -e -o pipefail - -# Fail if required vars are not set. 
-set -u -: "${OZONE_UPGRADE_FROM}" -: "${OZONE_UPGRADE_TO}" -: "${TEST_DIR}" -: "${OZONE_UPGRADE_CALLBACK}" -set +u - -source "$TEST_DIR"/compose/non-ha/load.sh -source "$TEST_DIR"/testlib.sh -[[ -f "$OZONE_UPGRADE_CALLBACK" ]] && source "$OZONE_UPGRADE_CALLBACK" - -echo "--- RUNNING MANUAL UPGRADE TEST FROM $OZONE_UPGRADE_FROM TO $OZONE_UPGRADE_TO ---" - -echo "--- SETTING UP OLD VERSION $OZONE_UPGRADE_FROM ---" -OUTPUT_NAME="$OZONE_UPGRADE_FROM" -prepare_for_image "$OZONE_UPGRADE_FROM" -callback setup_old_version - -echo "--- RUNNING WITH OLD VERSION $OZONE_UPGRADE_FROM ---" -start_docker_env -callback with_old_version -stop_docker_env - -echo "--- SETTING UP NEW VERSION $OZONE_UPGRADE_TO ---" -OUTPUT_NAME="$OZONE_UPGRADE_TO" -prepare_for_image "$OZONE_UPGRADE_TO" -callback setup_this_version - -echo "--- RUNNING WITH NEW VERSION $OZONE_UPGRADE_TO ---" -OZONE_KEEP_RESULTS=true start_docker_env -callback with_this_version From d013188032cb3e235a72744bd6d2a31410e0cbf9 Mon Sep 17 00:00:00 2001 From: Sarveksha Yeshavantha Raju <79865743+sarvekshayr@users.noreply.github.com> Date: Fri, 3 Jan 2025 16:47:59 +0530 Subject: [PATCH 028/168] HDDS-11727. Block `ozone repair om fso-tree` if OM is running (#7589) --- hadoop-ozone/dist/src/shell/ozone/ozone | 20 +++++++++++++++++ .../hadoop/ozone/repair/RepairTool.java | 22 +++++++++++++++++++ .../hadoop/ozone/repair/om/FSORepairCLI.java | 3 +++ 3 files changed, 45 insertions(+) diff --git a/hadoop-ozone/dist/src/shell/ozone/ozone b/hadoop-ozone/dist/src/shell/ozone/ozone index 0973ef81095..5bb05fee724 100755 --- a/hadoop-ozone/dist/src/shell/ozone/ozone +++ b/hadoop-ozone/dist/src/shell/ozone/ozone @@ -224,6 +224,7 @@ function ozonecmd_case OZONE_RUN_ARTIFACT_NAME="ozone-tools" ;; repair) + check_running_ozone_services OZONE_CLASSNAME=org.apache.hadoop.ozone.repair.OzoneRepair OZONE_DEBUG_OPTS="${OZONE_DEBUG_OPTS} ${RATIS_OPTS} ${OZONE_MODULE_ACCESS_ARGS}" OZONE_RUN_ARTIFACT_NAME="ozone-tools" @@ -245,6 +246,25 @@ function ozonecmd_case esac } +## @description Check for running Ozone services using PID files. +## @audience public +function check_running_ozone_services +{ + OZONE_PID_DIR="/tmp" + + local services=("om" "scm" "datanode") + + for service in "${services[@]}"; do + for pid_file in ${OZONE_PID_DIR}/ozone-*-${service}.pid; do + if [[ -f "${pid_file}" ]]; then + if kill -0 "$(cat "${pid_file}")" 2>/dev/null; then + export "OZONE_${service^^}_RUNNING=true" + fi + fi + done + done +} + ## @description turn off logging for CLI by default ## @audience private function ozone_suppress_shell_log diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java index b94e1c52d82..d8a976b2fd8 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.repair; import org.apache.hadoop.hdds.cli.AbstractSubcommand; +import picocli.CommandLine; import java.io.PrintWriter; import java.util.concurrent.Callable; @@ -25,6 +26,10 @@ /** Parent class for all actionable repair commands. 
*/ public abstract class RepairTool extends AbstractSubcommand implements Callable { + @CommandLine.Option(names = {"--force"}, + description = "Use this flag if you want to bypass the check in false-positive cases.") + private boolean force; + /** Hook method for subclasses for performing actual repair task. */ protected abstract void execute() throws Exception; @@ -34,6 +39,23 @@ public final Void call() throws Exception { return null; } + protected boolean checkIfServiceIsRunning(String serviceName) { + String envVariable = String.format("OZONE_%s_RUNNING", serviceName); + String runningServices = System.getenv(envVariable); + if ("true".equals(runningServices)) { + if (!force) { + error("Error: %s is currently running on this host. " + + "Stop the service before running the repair tool.", serviceName); + return true; + } else { + info("Warning: --force flag used. Proceeding despite %s being detected as running.", serviceName); + } + } else { + info("No running %s service detected. Proceeding with repair.", serviceName); + } + return false; + } + protected void info(String msg, Object... args) { out().println(formatMessage(msg, args)); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java index 46af1e847be..fd6d75c7136 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java @@ -55,6 +55,9 @@ public class FSORepairCLI extends RepairTool { @Override public void execute() throws Exception { + if (checkIfServiceIsRunning("OM")) { + return; + } if (repair) { info("FSO Repair Tool is running in repair mode"); } else { From 99fd5fcbb55cfed6d5d6b90420f44fcf4068a28d Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 3 Jan 2025 13:51:22 +0100 Subject: [PATCH 029/168] HDDS-11801. Logs missing if kubernetes check fails before tests (#7496) --- .../dist/src/main/k8s/examples/test-all.sh | 21 ++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/hadoop-ozone/dist/src/main/k8s/examples/test-all.sh b/hadoop-ozone/dist/src/main/k8s/examples/test-all.sh index 5d866514be9..6f72fc7e6cc 100755 --- a/hadoop-ozone/dist/src/main/k8s/examples/test-all.sh +++ b/hadoop-ozone/dist/src/main/k8s/examples/test-all.sh @@ -15,14 +15,13 @@ # See the License for the specific language governing permissions and # limitations under the License. +set -u -o pipefail # -# Test executor to test all the compose/*/test.sh test scripts. +# Test executor to test all the k8s/examples/*/test.sh test scripts. # SCRIPT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null && pwd ) -set -ex - ALL_RESULT_DIR="$SCRIPT_DIR/result" rm "$ALL_RESULT_DIR"/* || true mkdir -p "$ALL_RESULT_DIR" @@ -35,15 +34,23 @@ for test in $(find "$SCRIPT_DIR" -name test.sh | grep "${OZONE_TEST_SELECTOR:-"" TEST_NAME="$(basename "$TEST_DIR")" echo "" - echo "#### Executing tests of ${TEST_DIR} #####" + echo "#### Executing tests of ${TEST_NAME} #####" echo "" cd "$TEST_DIR" || continue - ./test.sh + if ! ./test.sh; then + RESULT=1 + echo "ERROR: Test execution of ${TEST_NAME} is FAILED!!!!" 
+ fi - cp "$TEST_DIR"/result/output.xml "$ALL_RESULT_DIR"/"${TEST_NAME}".xml + cp "$TEST_DIR"/result/output.xml "$ALL_RESULT_DIR"/"${TEST_NAME}".xml || true mkdir -p "$ALL_RESULT_DIR"/"${TEST_NAME}" - mv "$TEST_DIR"/logs/*log "$ALL_RESULT_DIR"/"${TEST_NAME}"/ + mv "$TEST_DIR"/logs/*log "$ALL_RESULT_DIR"/"${TEST_NAME}"/ || true + + if [[ "${RESULT}" == "1" ]] && [[ "${FAIL_FAST:-}" == "true" ]]; then + break + fi done rebot -N "smoketests" -d "$ALL_RESULT_DIR/" "$ALL_RESULT_DIR/*.xml" +exit ${RESULT} From 60a7fdb628a3dba692ec68ad816c2ab69d687a47 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 3 Jan 2025 15:00:15 +0100 Subject: [PATCH 030/168] HDDS-11819. Improve mock datanode version handling in MiniOzoneCluster (#7632) --- .../hadoop/ozone/HddsDatanodeService.java | 20 +-- .../apache/hadoop/ozone/MiniOzoneCluster.java | 28 ---- .../hadoop/ozone/MiniOzoneClusterImpl.java | 17 --- .../hadoop/ozone/UniformDatanodesFactory.java | 26 ++++ .../client/rpc/TestBlockDataStreamOutput.java | 11 +- .../ozone/client/rpc/TestDatanodeVersion.java | 143 ------------------ 6 files changed, 43 insertions(+), 202 deletions(-) delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDatanodeVersion.java diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index 7dc8c591119..bf33b9780d2 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -102,6 +102,9 @@ public class HddsDatanodeService extends GenericCli implements ServicePlugin { private static final Logger LOG = LoggerFactory.getLogger( HddsDatanodeService.class); + public static final String TESTING_DATANODE_VERSION_INITIAL = "testing.hdds.datanode.version.initial"; + public static final String TESTING_DATANODE_VERSION_CURRENT = "testing.hdds.datanode.version.current"; + private OzoneConfiguration conf; private SecurityConfig secConf; private DatanodeDetails datanodeDetails; @@ -432,15 +435,14 @@ private DatanodeDetails initializeDatanodeDetails() DatanodeDetails details; if (idFile.exists()) { details = ContainerUtils.readDatanodeDetailsFrom(idFile); - // Current version is always overridden to the latest - details.setCurrentVersion(getDefaultCurrentVersion()); } else { // There is no datanode.id file, this might be the first time datanode // is started. details = DatanodeDetails.newBuilder().setUuid(UUID.randomUUID()).build(); - details.setInitialVersion(getDefaultInitialVersion()); - details.setCurrentVersion(getDefaultCurrentVersion()); + details.setInitialVersion(getInitialVersion()); } + // Current version is always overridden to the latest + details.setCurrentVersion(getCurrentVersion()); return details; } @@ -680,16 +682,14 @@ private String reconfigReplicationStreamsLimit(String value) { /** * Returns the initial version of the datanode. */ - @VisibleForTesting - public static int getDefaultInitialVersion() { - return DatanodeVersion.CURRENT_VERSION; + private int getInitialVersion() { + return conf.getInt(TESTING_DATANODE_VERSION_INITIAL, DatanodeVersion.CURRENT_VERSION); } /** * Returns the current version of the datanode. 
*/ - @VisibleForTesting - public static int getDefaultCurrentVersion() { - return DatanodeVersion.CURRENT_VERSION; + private int getCurrentVersion() { + return conf.getInt(TESTING_DATANODE_VERSION_CURRENT, DatanodeVersion.CURRENT_VERSION); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java index ff55ee83c17..1da5496fc94 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java @@ -22,7 +22,6 @@ import java.util.UUID; import java.util.concurrent.TimeoutException; -import org.apache.hadoop.hdds.DatanodeVersion; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -305,9 +304,6 @@ abstract class Builder { protected boolean includeRecon = false; protected boolean includeS3G = false; - protected int dnInitialVersion = DatanodeVersion.FUTURE_VERSION.toProtoValue(); - protected int dnCurrentVersion = DatanodeVersion.COMBINED_PUTBLOCK_WRITECHUNK_RPC.toProtoValue(); - protected int numOfDatanodes = 3; protected boolean startDataNodes = true; protected CertificateClient certClient; @@ -379,30 +375,6 @@ public Builder setNumDatanodes(int val) { return this; } - /** - * Set the initialVersion for all datanodes. - * - * @param val initialVersion value to be set for all datanodes. - * - * @return MiniOzoneCluster.Builder - */ - public Builder setDatanodeInitialVersion(int val) { - dnInitialVersion = val; - return this; - } - - /** - * Set the currentVersion for all datanodes. - * - * @param val currentVersion value to be set for all datanodes. 
- * - * @return MiniOzoneCluster.Builder - */ - public Builder setDatanodeCurrentVersion(int val) { - dnCurrentVersion = val; - return this; - } - public Builder setDatanodeFactory(DatanodeFactory factory) { this.dnFactory = factory; return this; diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index 3594996856a..30e41764d3f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -41,7 +41,6 @@ import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; import org.apache.commons.lang3.StringUtils; -import org.apache.hadoop.hdds.DatanodeVersion; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -110,8 +109,6 @@ import static org.apache.ozone.test.GenericTestUtils.PortAllocator.localhostWithFreePort; import org.hadoop.ozone.recon.codegen.ReconSqlDbConfig; -import org.mockito.MockedStatic; -import org.mockito.Mockito; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -145,7 +142,6 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster { private CertificateClient caClient; private final Set clients = ConcurrentHashMap.newKeySet(); private SecretKeyClient secretKeyClient; - private static MockedStatic mockDNStatic = Mockito.mockStatic(HddsDatanodeService.class); /** * Creates a new MiniOzoneCluster with Recon. @@ -427,16 +423,6 @@ private void waitForHddsDatanodeToStop(DatanodeDetails dn) }, 1000, waitForClusterToBeReadyTimeout); } - private static void overrideDatanodeVersions(int dnInitialVersion, int dnCurrentVersion) { - // FUTURE_VERSION (-1) is not a valid version for a datanode, using it as a marker when version is not overridden - if (dnInitialVersion != DatanodeVersion.FUTURE_VERSION.toProtoValue()) { - mockDNStatic.when(HddsDatanodeService::getDefaultInitialVersion).thenReturn(dnInitialVersion); - } - if (dnCurrentVersion != DatanodeVersion.FUTURE_VERSION.toProtoValue()) { - mockDNStatic.when(HddsDatanodeService::getDefaultCurrentVersion).thenReturn(dnCurrentVersion); - } - } - @Override public void restartHddsDatanode(int i, boolean waitForDatanode) throws InterruptedException, TimeoutException { @@ -869,9 +855,6 @@ protected List createHddsDatanodes() throws IOException { List hddsDatanodes = new ArrayList<>(); - // Override default datanode initial and current version if necessary - overrideDatanodeVersions(dnInitialVersion, dnCurrentVersion); - for (int i = 0; i < numOfDatanodes; i++) { OzoneConfiguration dnConf = dnFactory.apply(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java index 8f79605ab05..ecf281a1a86 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/UniformDatanodesFactory.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone; +import org.apache.hadoop.hdds.DatanodeVersion; import org.apache.hadoop.hdds.conf.ConfigurationTarget; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import 
org.apache.hadoop.ozone.container.common.DatanodeLayoutStorage; @@ -39,6 +40,8 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_DU_RESERVED; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_REST_HTTP_ADDRESS_KEY; +import static org.apache.hadoop.ozone.HddsDatanodeService.TESTING_DATANODE_VERSION_CURRENT; +import static org.apache.hadoop.ozone.HddsDatanodeService.TESTING_DATANODE_VERSION_INITIAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_IPC_PORT; import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_ADMIN_PORT; import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATANODE_STORAGE_DIR; @@ -58,11 +61,15 @@ public class UniformDatanodesFactory implements MiniOzoneCluster.DatanodeFactory private final int numDataVolumes; private final String reservedSpace; private final Integer layoutVersion; + private final DatanodeVersion initialVersion; + private final DatanodeVersion currentVersion; protected UniformDatanodesFactory(Builder builder) { numDataVolumes = builder.numDataVolumes; layoutVersion = builder.layoutVersion; reservedSpace = builder.reservedSpace; + currentVersion = builder.currentVersion; + initialVersion = builder.initialVersion != null ? builder.initialVersion : builder.currentVersion; } @Override @@ -104,6 +111,13 @@ public OzoneConfiguration apply(OzoneConfiguration conf) throws IOException { layoutStorage.initialize(); } + if (initialVersion != null) { + dnConf.setInt(TESTING_DATANODE_VERSION_INITIAL, initialVersion.toProtoValue()); + } + if (currentVersion != null) { + dnConf.setInt(TESTING_DATANODE_VERSION_CURRENT, currentVersion.toProtoValue()); + } + return dnConf; } @@ -131,6 +145,8 @@ public static class Builder { private int numDataVolumes = 1; private String reservedSpace; private Integer layoutVersion; + private DatanodeVersion initialVersion; + private DatanodeVersion currentVersion; /** * Sets the number of data volumes per datanode. 
@@ -158,6 +174,16 @@ public Builder setLayoutVersion(int layoutVersion) { return this; } + public Builder setInitialVersion(DatanodeVersion version) { + this.initialVersion = version; + return this; + } + + public Builder setCurrentVersion(DatanodeVersion version) { + this.currentVersion = version; + return this; + } + public UniformDatanodesFactory build() { return new UniformDatanodesFactory(this); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java index 90a3f1d6893..5bcf7084054 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java @@ -33,6 +33,7 @@ import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.UniformDatanodesFactory; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; @@ -74,7 +75,7 @@ public class TestBlockDataStreamOutput { private static String volumeName; private static String bucketName; private static String keyString; - private static final int DN_OLD_VERSION = DatanodeVersion.SEPARATE_RATIS_PORTS_AVAILABLE.toProtoValue(); + private static final DatanodeVersion DN_OLD_VERSION = DatanodeVersion.SEPARATE_RATIS_PORTS_AVAILABLE; /** * Create a MiniDFSCluster for testing. @@ -110,7 +111,9 @@ public static void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) - .setDatanodeCurrentVersion(DN_OLD_VERSION) + .setDatanodeFactory(UniformDatanodesFactory.newBuilder() + .setCurrentVersion(DN_OLD_VERSION) + .build()) .build(); cluster.waitForClusterToBeReady(); //the easiest way to create an open container is creating a key @@ -281,7 +284,7 @@ public void testDatanodeVersion() throws Exception { List dns = cluster.getHddsDatanodes(); for (HddsDatanodeService dn : dns) { DatanodeDetails details = dn.getDatanodeDetails(); - assertEquals(DN_OLD_VERSION, details.getCurrentVersion()); + assertEquals(DN_OLD_VERSION.toProtoValue(), details.getCurrentVersion()); } String keyName = getKeyName(); @@ -292,7 +295,7 @@ public void testDatanodeVersion() throws Exception { // Now check 3 DNs in a random pipeline returns the correct DN versions List streamDnDetails = stream.getPipeline().getNodes(); for (DatanodeDetails details : streamDnDetails) { - assertEquals(DN_OLD_VERSION, details.getCurrentVersion()); + assertEquals(DN_OLD_VERSION.toProtoValue(), details.getCurrentVersion()); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDatanodeVersion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDatanodeVersion.java deleted file mode 100644 index 5e7d8a4b052..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDatanodeVersion.java +++ /dev/null @@ -1,143 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with this - * work for additional information regarding copyright ownership. 
The ASF - * licenses this file to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - *
<p>
- * http://www.apache.org/licenses/LICENSE-2.0 - *
<p>
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations under - * the License. - */ - -package org.apache.hadoop.ozone.client.rpc; - -import org.apache.hadoop.hdds.DatanodeVersion; -import org.apache.hadoop.hdds.client.ReplicationType; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.conf.StorageUnit; -import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.scm.OzoneClientConfig; -import org.apache.hadoop.hdds.utils.IOUtils; -import org.apache.hadoop.ozone.ClientConfigForTesting; -import org.apache.hadoop.ozone.HddsDatanodeService; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.client.ObjectStore; -import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.client.OzoneClientFactory; -import org.apache.hadoop.ozone.client.io.BlockDataStreamOutputEntry; -import org.apache.hadoop.ozone.client.io.KeyDataStreamOutput; -import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; -import org.apache.hadoop.ozone.container.TestHelper; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.Timeout; - -import java.util.List; -import java.util.UUID; - -import static org.junit.jupiter.api.Assertions.assertEquals; - -/** - * Tests DatanodeVersion in client stream. - */ -@Timeout(120) -public class TestDatanodeVersion { - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf = new OzoneConfiguration(); - private static OzoneClient client; - private static ObjectStore objectStore; - private static int chunkSize; - private static int flushSize; - private static int maxFlushSize; - private static int blockSize; - private static String volumeName; - private static String bucketName; - private static final int DN_OLD_VERSION = DatanodeVersion.SEPARATE_RATIS_PORTS_AVAILABLE.toProtoValue(); - - /** - * Create a MiniDFSCluster for testing. - *
<p>
- * Ozone is made active by setting OZONE_ENABLED = true - */ - @BeforeAll - public static void init() throws Exception { - chunkSize = 100; - flushSize = 2 * chunkSize; - maxFlushSize = 2 * flushSize; - blockSize = 2 * maxFlushSize; - - OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); - conf.setFromObject(clientConfig); - - conf.setQuietMode(false); - conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4, StorageUnit.MB); - - ClientConfigForTesting.newBuilder(StorageUnit.BYTES) - .setBlockSize(blockSize) - .setChunkSize(chunkSize) - .setStreamBufferFlushSize(flushSize) - .setStreamBufferMaxSize(maxFlushSize) - .setDataStreamBufferFlushSize(maxFlushSize) - .setDataStreamMinPacketSize(chunkSize) - .setDataStreamWindowSize(5 * chunkSize) - .applyTo(conf); - - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(3) - .setDatanodeCurrentVersion(DN_OLD_VERSION) - .build(); - cluster.waitForClusterToBeReady(); - //the easiest way to create an open container is creating a key - client = OzoneClientFactory.getRpcClient(conf); - objectStore = client.getObjectStore(); - volumeName = "testblockoutputstream"; - bucketName = volumeName; - objectStore.createVolume(volumeName); - objectStore.getVolume(volumeName).createBucket(bucketName); - } - - /** - * Shutdown MiniDFSCluster. - */ - @AfterAll - public static void shutdown() { - IOUtils.closeQuietly(client); - if (cluster != null) { - cluster.shutdown(); - } - } - - static OzoneDataStreamOutput createKey(String keyName, ReplicationType type, long size) throws Exception { - return TestHelper.createStreamKey(keyName, type, size, objectStore, volumeName, bucketName); - } - - @Test - public void testStreamDatanodeVersion() throws Exception { - // Verify all DNs internally have versions set correctly - List dns = cluster.getHddsDatanodes(); - for (HddsDatanodeService dn : dns) { - DatanodeDetails details = dn.getDatanodeDetails(); - assertEquals(DN_OLD_VERSION, details.getCurrentVersion()); - } - - String keyName = UUID.randomUUID().toString(); - OzoneDataStreamOutput key = createKey(keyName, ReplicationType.RATIS, 0); - KeyDataStreamOutput keyDataStreamOutput = (KeyDataStreamOutput) key.getByteBufStreamOutput(); - BlockDataStreamOutputEntry stream = keyDataStreamOutput.getStreamEntries().get(0); - - // Now check 3 DNs in a random pipeline returns the correct DN versions - List streamDnDetails = stream.getPipeline().getNodes(); - for (DatanodeDetails details : streamDnDetails) { - assertEquals(DN_OLD_VERSION, details.getCurrentVersion()); - } - } - -} From ce6fb458e3e64b5876d3d94815ecf7369d1e5dbe Mon Sep 17 00:00:00 2001 From: Sarveksha Yeshavantha Raju <79865743+sarvekshayr@users.noreply.github.com> Date: Sat, 4 Jan 2025 01:01:55 +0530 Subject: [PATCH 031/168] HDDS-11999. 
Remove `ozone repair ldb command` and move its subcommands to `ozone repair om` (#7634) --- .../ozone/shell/TestOzoneRepairShell.java | 4 +- .../hadoop/ozone/repair/ldb/RDBRepair.java | 45 ------------------- .../hadoop/ozone/repair/om/OMRepair.java | 2 + .../SnapshotChainRepair.java} | 22 ++++----- .../SnapshotRepair.java} | 16 ++++++- .../{ldb => om}/TransactionInfoRepair.java | 14 +++--- .../TestTransactionInfoRepair.java | 6 +-- 7 files changed, 38 insertions(+), 71 deletions(-) delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/RDBRepair.java rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/{ldb/SnapshotRepair.java => om/SnapshotChainRepair.java} (94%) rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/{ldb/package-info.java => om/SnapshotRepair.java} (72%) rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/{ldb => om}/TransactionInfoRepair.java (95%) rename hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/{ldb => om}/TestTransactionInfoRepair.java (98%) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java index 426412ec490..6ddde5ffe82 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java @@ -89,7 +89,7 @@ public void testUpdateTransactionInfoTable() throws Exception { String testTerm = "1111"; String testIndex = "1111"; - int exitCode = cmd.execute("ldb", "--db", dbPath, "update-transaction", "--term", testTerm, "--index", testIndex); + int exitCode = cmd.execute("om", "update-transaction", "--db", dbPath, "--term", testTerm, "--index", testIndex); assertEquals(0, exitCode, err); assertThat(out.get()) .contains( @@ -101,7 +101,7 @@ public void testUpdateTransactionInfoTable() throws Exception { String cmdOut2 = scanTransactionInfoTable(dbPath); assertThat(cmdOut2).contains(testTerm + "#" + testIndex); - cmd.execute("ldb", "--db", dbPath, "update-transaction", "--term", + cmd.execute("om", "update-transaction", "--db", dbPath, "--term", originalHighestTermIndex[0], "--index", originalHighestTermIndex[1]); cluster.getOzoneManager().restart(); try (OzoneClient ozoneClient = cluster.newClient()) { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/RDBRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/RDBRepair.java deleted file mode 100644 index 39b957435f5..00000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/RDBRepair.java +++ /dev/null @@ -1,45 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.repair.ldb; - -import org.apache.hadoop.hdds.cli.RepairSubcommand; -import org.kohsuke.MetaInfServices; -import picocli.CommandLine; - -/** - * Ozone Repair CLI for RocksDB. - */ -@CommandLine.Command(name = "ldb", - subcommands = { - SnapshotRepair.class, - TransactionInfoRepair.class, - }, - description = "Operational tool to repair RocksDB table.") -@MetaInfServices(RepairSubcommand.class) -public class RDBRepair implements RepairSubcommand { - - @CommandLine.Option(names = {"--db"}, - required = true, - description = "Database File Path") - private String dbPath; - - public String getDbPath() { - return dbPath; - } -} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java index d132fea3752..3b880f87543 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java @@ -28,6 +28,8 @@ @CommandLine.Command(name = "om", subcommands = { FSORepairCLI.class, + SnapshotRepair.class, + TransactionInfoRepair.class }, description = "Operational tool to repair OM.") @MetaInfServices(RepairSubcommand.class) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/SnapshotRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotChainRepair.java similarity index 94% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/SnapshotRepair.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotChainRepair.java index 9c24b0d131f..a5d1244f00e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/SnapshotRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotChainRepair.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.repair.ldb; +package org.apache.hadoop.ozone.repair.om; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.StringCodec; @@ -48,15 +48,12 @@ * Tool to repair snapshotInfoTable in case it has corrupted entries. */ @CommandLine.Command( - name = "snapshot", + name = "chain", description = "CLI to update global and path previous snapshot for a snapshot in case snapshot chain is corrupted." 
) -public class SnapshotRepair extends RepairTool { +public class SnapshotChainRepair extends RepairTool { - protected static final Logger LOG = LoggerFactory.getLogger(SnapshotRepair.class); - - @CommandLine.ParentCommand - private RDBRepair parent; + protected static final Logger LOG = LoggerFactory.getLogger(SnapshotChainRepair.class); @CommandLine.Mixin private BucketUri bucketUri; @@ -64,6 +61,11 @@ public class SnapshotRepair extends RepairTool { @CommandLine.Parameters(description = "Snapshot name to update", index = "1") private String snapshotName; + @CommandLine.Option(names = {"--db"}, + required = true, + description = "Database File Path") + private String dbPath; + @CommandLine.Option(names = {"--global-previous", "--gp"}, required = true, description = "Global previous snapshotId to set for the given snapshot") @@ -82,9 +84,9 @@ public class SnapshotRepair extends RepairTool { @Override public void execute() throws Exception { List cfHandleList = new ArrayList<>(); - List cfDescList = RocksDBUtils.getColumnFamilyDescriptors(parent.getDbPath()); + List cfDescList = RocksDBUtils.getColumnFamilyDescriptors(dbPath); - try (ManagedRocksDB db = ManagedRocksDB.open(parent.getDbPath(), cfDescList, cfHandleList)) { + try (ManagedRocksDB db = ManagedRocksDB.open(dbPath, cfDescList, cfHandleList)) { ColumnFamilyHandle snapshotInfoCfh = RocksDBUtils.getColumnFamilyHandle(SNAPSHOT_INFO_TABLE, cfHandleList); if (snapshotInfoCfh == null) { error("%s is not in a column family in DB for the given path.", SNAPSHOT_INFO_TABLE); @@ -145,7 +147,7 @@ public void execute() throws Exception { RocksDBUtils.getValue(db, snapshotInfoCfh, snapshotInfoTableKey, SnapshotInfo.getCodec())); } } catch (RocksDBException exception) { - error("Failed to update the RocksDB for the given path: %s", parent.getDbPath()); + error("Failed to update the RocksDB for the given path: %s", dbPath); error( "Make sure that Ozone entity (OM, SCM or DN) is not running for the give dbPath and current host."); LOG.error(exception.toString()); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotRepair.java similarity index 72% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/package-info.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotRepair.java index 388d4b7dcea..c0f2550d0ba 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/package-info.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotRepair.java @@ -16,7 +16,19 @@ * limitations under the License. */ +package org.apache.hadoop.ozone.repair.om; +import picocli.CommandLine; + /** - * RDB related repair tools. + * Tool for snapshot related repairs. 
*/ -package org.apache.hadoop.ozone.repair.ldb; +@CommandLine.Command( + name = "snapshot", + description = "Subcommand for all snapshot related repairs.", + subcommands = { + SnapshotChainRepair.class + } +) +public class SnapshotRepair { + +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/TransactionInfoRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/TransactionInfoRepair.java similarity index 95% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/TransactionInfoRepair.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/TransactionInfoRepair.java index 192ff0fb1d5..e737f0a9138 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ldb/TransactionInfoRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/TransactionInfoRepair.java @@ -19,7 +19,7 @@ * permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.repair.ldb; +package org.apache.hadoop.ozone.repair.om; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.utils.IOUtils; @@ -51,8 +51,10 @@ ) public class TransactionInfoRepair extends RepairTool { - @CommandLine.ParentCommand - private RDBRepair parent; + @CommandLine.Option(names = {"--db"}, + required = true, + description = "Database File Path") + private String dbPath; @CommandLine.Option(names = {"--term"}, required = true, @@ -67,7 +69,6 @@ public class TransactionInfoRepair extends RepairTool { @Override public void execute() throws Exception { List cfHandleList = new ArrayList<>(); - String dbPath = getParent().getDbPath(); List cfDescList = RocksDBUtils.getColumnFamilyDescriptors( dbPath); @@ -100,9 +101,4 @@ public void execute() throws Exception { IOUtils.closeQuietly(cfHandleList); } } - - protected RDBRepair getParent() { - return parent; - } - } diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/ldb/TestTransactionInfoRepair.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/om/TestTransactionInfoRepair.java similarity index 98% rename from hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/ldb/TestTransactionInfoRepair.java rename to hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/om/TestTransactionInfoRepair.java index c0685002fd4..7b355cf0c91 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/ldb/TestTransactionInfoRepair.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/om/TestTransactionInfoRepair.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.repair.ldb; +package org.apache.hadoop.ozone.repair.om; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.TransactionInfo; @@ -126,9 +126,9 @@ private void testCommand(ManagedRocksDB mdb, ColumnFamilyHandle columnFamilyHand CommandLine cli = new OzoneRepair().getCmd(); cli.execute( - "ldb", - "--db", DB_PATH, + "om", "update-transaction", + "--db", DB_PATH, "--term", String.valueOf(TEST_TERM), "--index", String.valueOf(TEST_INDEX)); } From 2dd8a71cfa8b6c160e718835d2c056af4384797d Mon Sep 17 00:00:00 2001 From: Nandakumar Vadivelu Date: Sat, 4 Jan 2025 13:42:34 +0530 Subject: [PATCH 032/168] HDDS-11731. 
ContainerSafeModeRule Refactor (#7587) --- .../hdds/scm/container/ContainerManager.java | 3 + .../scm/container/ContainerManagerImpl.java | 7 + .../scm/container/ContainerStateManager.java | 7 + .../container/ContainerStateManagerImpl.java | 8 + .../container/states/ContainerStateMap.java | 2 +- .../scm/safemode/ContainerSafeModeRule.java | 169 +++++++++--------- .../hdds/scm/safemode/SCMSafeModeManager.java | 5 - .../hdds/scm/safemode/SafeModeExitRule.java | 12 ++ .../scm/server/StorageContainerManager.java | 4 +- .../hdds/scm/block/TestBlockManager.java | 3 +- .../scm/pipeline/TestPipelineManagerImpl.java | 4 +- .../TestHealthyPipelineSafeModeRule.java | 6 +- .../TestOneReplicaPipelineSafeModeRule.java | 2 +- .../scm/safemode/TestSCMSafeModeManager.java | 20 +-- 14 files changed, 143 insertions(+), 109 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java index 6b6a888f424..2ddcb223bf9 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManager.java @@ -24,6 +24,7 @@ import java.util.Set; import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; @@ -72,6 +73,8 @@ default List getContainers() { List getContainers(ContainerID startID, int count); + List getContainers(ReplicationType type); + /** * Returns all the containers which are in the specified state. 
* diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java index d61f9ee366b..113903e647b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; @@ -147,6 +148,12 @@ public ContainerInfo getContainer(final ContainerID id) id + " not found.")); } + + @Override + public List getContainers(ReplicationType type) { + return toContainers(containerStateManager.getContainerIDs(type)); + } + @Override public List getContainers(final ContainerID startID, final int count) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java index 4f478b201cd..263dc14469a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java @@ -23,6 +23,7 @@ import java.util.Set; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.scm.metadata.Replicate; @@ -114,6 +115,12 @@ public interface ContainerStateManager { */ Set getContainerIDs(LifeCycleState state); + + /** + * Returns the IDs of the Containers whose ReplicationType matches the given type. 
+ */ + Set getContainerIDs(ReplicationType type); + /** * */ diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java index 28a732795b1..f2cbe451ba7 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java @@ -34,6 +34,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; @@ -287,6 +288,13 @@ public Set getContainerIDs(final LifeCycleState state) { } } + @Override + public Set getContainerIDs(final ReplicationType type) { + try (AutoCloseableLock ignored = readLock()) { + return containers.getContainerIDsByType(type); + } + } + @Override public ContainerInfo getContainer(final ContainerID id) { try (AutoCloseableLock ignored = readLock(id)) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java index 438e9709bff..4e6f0ed67ce 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java @@ -316,7 +316,7 @@ NavigableSet getContainerIDsByOwner(final String ownerName) { * @param type - Replication type -- StandAlone, Ratis etc. * @return NavigableSet */ - NavigableSet getContainerIDsByType(final ReplicationType type) { + public NavigableSet getContainerIDsByType(final ReplicationType type) { Preconditions.checkNotNull(type); return typeMap.getCollection(type); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java index bdd7160de4c..b66b6e9f0f6 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. 
See the NOTICE file * distributed with this work for additional information @@ -27,11 +27,11 @@ import java.util.stream.Collectors; import com.google.common.collect.Sets; -import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos; import org.apache.hadoop.hdds.scm.container.ContainerID; @@ -48,55 +48,53 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT_DEFAULT; + /** * Class defining Safe mode exit criteria for Containers. */ public class ContainerSafeModeRule extends SafeModeExitRule { - public static final Logger LOG = - LoggerFactory.getLogger(ContainerSafeModeRule.class); + public static final Logger LOG = LoggerFactory.getLogger(ContainerSafeModeRule.class); + private final ContainerManager containerManager; // Required cutoff % for containers with at least 1 reported replica. - private double safeModeCutoff; + private final double safeModeCutoff; // Containers read from scm db (excluding containers in ALLOCATED state). - private Set ratisContainers; - private Set ecContainers; - private Map> ecContainerDNsMap; + private final Set ratisContainers; + private final Set ecContainers; + private final Map> ecContainerDNsMap; + private final AtomicLong ratisContainerWithMinReplicas = new AtomicLong(0); + private final AtomicLong ecContainerWithMinReplicas = new AtomicLong(0); + private double ratisMaxContainer; private double ecMaxContainer; - private AtomicLong ratisContainerWithMinReplicas = new AtomicLong(0); - private AtomicLong ecContainerWithMinReplicas = new AtomicLong(0); - private final ContainerManager containerManager; - - public ContainerSafeModeRule(String ruleName, EventQueue eventQueue, - ConfigurationSource conf, - ContainerManager containerManager, SCMSafeModeManager manager) { - this(ruleName, eventQueue, conf, containerManager.getContainers(), containerManager, manager); - } - public ContainerSafeModeRule(String ruleName, EventQueue eventQueue, - ConfigurationSource conf, - List containers, - ContainerManager containerManager, SCMSafeModeManager manager) { + public ContainerSafeModeRule(final String ruleName, + final EventQueue eventQueue, + final ConfigurationSource conf, + final ContainerManager containerManager, + final SCMSafeModeManager manager) { super(manager, ruleName, eventQueue); + this.safeModeCutoff = getSafeModeCutoff(conf); this.containerManager = containerManager; - safeModeCutoff = conf.getDouble( - HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT, - HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT_DEFAULT); - - Preconditions.checkArgument( - (safeModeCutoff >= 0.0 && safeModeCutoff <= 1.0), - HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT + - " value should be >= 0.0 and <= 1.0"); + this.ratisContainers = new HashSet<>(); + this.ecContainers = new HashSet<>(); + this.ecContainerDNsMap = new ConcurrentHashMap<>(); + initializeRule(); + } - ratisContainers = new HashSet<>(); - ecContainers = new HashSet<>(); - ecContainerDNsMap = new 
ConcurrentHashMap<>(); - initializeRule(containers); + private static double getSafeModeCutoff(ConfigurationSource conf) { + final double cutoff = conf.getDouble(HDDS_SCM_SAFEMODE_THRESHOLD_PCT, + HDDS_SCM_SAFEMODE_THRESHOLD_PCT_DEFAULT); + Preconditions.checkArgument((cutoff >= 0.0 && cutoff <= 1.0), + HDDS_SCM_SAFEMODE_THRESHOLD_PCT + + " value should be >= 0.0 and <= 1.0"); + return cutoff; } - @Override protected TypedEvent getEventType() { return SCMEvents.CONTAINER_REGISTRATION_REPORT; @@ -104,45 +102,59 @@ protected TypedEvent getEventType() { @Override protected synchronized boolean validate() { - return (getCurrentContainerThreshold() >= safeModeCutoff) && - (getCurrentECContainerThreshold() >= safeModeCutoff); + if (validateBasedOnReportProcessing()) { + return (getCurrentContainerThreshold() >= safeModeCutoff) && + (getCurrentECContainerThreshold() >= safeModeCutoff); + } + + // TODO: Split ContainerSafeModeRule into RatisContainerSafeModeRule and + // ECContainerSafeModeRule + final List containers = containerManager.getContainers( + ReplicationType.RATIS); + + return containers.stream() + .filter(this::isClosed) + .map(ContainerInfo::containerID) + .noneMatch(this::isMissing); } - @VisibleForTesting - public synchronized double getCurrentContainerThreshold() { - if (ratisMaxContainer == 0) { - return 1; + /** + * Checks if the container has any replica. + */ + private boolean isMissing(ContainerID id) { + try { + return containerManager.getContainerReplicas(id).isEmpty(); + } catch (ContainerNotFoundException ex) { + /* + * This should never happen, in case this happens the container + * somehow got removed from SCM. + * Safemode rule doesn't have to log/fix this. We will just exclude this + * from the rule validation. + */ + return false; + } - return (ratisContainerWithMinReplicas.doubleValue() / ratisMaxContainer); } @VisibleForTesting - public synchronized double getCurrentECContainerThreshold() { - if (ecMaxContainer == 0) { - return 1; - } - return (ecContainerWithMinReplicas.doubleValue() / ecMaxContainer); + public double getCurrentContainerThreshold() { + return ratisMaxContainer == 0 ? 1 : + (ratisContainerWithMinReplicas.doubleValue() / ratisMaxContainer); } - private synchronized double getEcMaxContainer() { - if (ecMaxContainer == 0) { - return 1; - } - return ecMaxContainer; + @VisibleForTesting + public double getCurrentECContainerThreshold() { + return ecMaxContainer == 0 ? 1 : + (ecContainerWithMinReplicas.doubleValue() / ecMaxContainer); } - private synchronized double getRatisMaxContainer() { - if (ratisMaxContainer == 0) { - return 1; - } - return ratisMaxContainer; - } + // TODO: Report processing logic will be removed in future. HDDS-11958. @Override protected synchronized void process( - NodeRegistrationContainerReport reportsProto) { - DatanodeDetails datanodeDetails = reportsProto.getDatanodeDetails(); - UUID datanodeUUID = datanodeDetails.getUuid(); + final NodeRegistrationContainerReport reportsProto) { + final DatanodeDetails datanodeDetails = reportsProto.getDatanodeDetails(); + final UUID datanodeUUID = datanodeDetails.getUuid(); StorageContainerDatanodeProtocolProtos.ContainerReportsProto report = reportsProto.getReport(); report.getReportsList().forEach(c -> { @@ -166,9 +178,7 @@ protected synchronized void process( SCMSafeModeManager.getLogger().info( "SCM in safe mode. 
{} % containers [Ratis] have at least one" + " reported replica, {} % containers [EC] have at N reported replica.", - ((ratisContainerWithMinReplicas.doubleValue() / getRatisMaxContainer()) * 100), - ((ecContainerWithMinReplicas.doubleValue() / getEcMaxContainer()) * 100) - ); + getCurrentContainerThreshold() * 100, getCurrentECContainerThreshold() * 100); } } @@ -246,8 +256,8 @@ public String getStatusText() { String status = String.format( "%1.2f%% of [Ratis] Containers(%s / %s) with at least one reported replica (=%1.2f) >= " + "safeModeCutoff (=%1.2f);", - (ratisContainerWithMinReplicas.doubleValue() / getRatisMaxContainer()) * 100, - ratisContainerWithMinReplicas, (long) getRatisMaxContainer(), + getCurrentContainerThreshold() * 100, + ratisContainerWithMinReplicas, (long) ratisMaxContainer, getCurrentContainerThreshold(), this.safeModeCutoff); Set sampleRatisContainers = ratisContainers.stream(). @@ -264,8 +274,8 @@ public String getStatusText() { String ecStatus = String.format( "%1.2f%% of [EC] Containers(%s / %s) with at least N reported replica (=%1.2f) >= " + "safeModeCutoff (=%1.2f);", - (ecContainerWithMinReplicas.doubleValue() / getEcMaxContainer()) * 100, - ecContainerWithMinReplicas, (long) getEcMaxContainer(), + getCurrentECContainerThreshold() * 100, + ecContainerWithMinReplicas, (long) ecMaxContainer, getCurrentECContainerThreshold(), this.safeModeCutoff); status = status.concat("\n").concat(ecStatus); @@ -295,25 +305,19 @@ public String getStatusText() { @Override public synchronized void refresh(boolean forceRefresh) { - List containers = containerManager.getContainers(); - if (forceRefresh) { - initializeRule(containers); - } else { - if (!validate()) { - initializeRule(containers); - } + if (forceRefresh || !validate()) { + initializeRule(); } } - private boolean checkContainerState(LifeCycleState state) { - if (state == LifeCycleState.QUASI_CLOSED || state == LifeCycleState.CLOSED) { - return true; - } - return false; + private boolean isClosed(ContainerInfo container) { + final LifeCycleState state = container.getState(); + return state == LifeCycleState.QUASI_CLOSED || + state == LifeCycleState.CLOSED; } - private void initializeRule(List containers) { - + private void initializeRule() { + final List containers = containerManager.getContainers(); // Clean up the related data in the map. ratisContainers.clear(); ecContainers.clear(); @@ -325,10 +329,9 @@ private void initializeRule(List containers) { // created by the client. We are not considering these containers for // now. These containers can be handled by tracking pipelines. 
- LifeCycleState containerState = container.getState(); HddsProtos.ReplicationType replicationType = container.getReplicationType(); - if (checkContainerState(containerState) && container.getNumberOfKeys() > 0) { + if (isClosed(container) && container.getNumberOfKeys() > 0) { // If it's of type Ratis if (replicationType.equals(HddsProtos.ReplicationType.RATIS)) { ratisContainers.add(container.getContainerID()); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java index 78ce994af73..f4e6f6ee2cf 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java @@ -19,7 +19,6 @@ import java.util.HashMap; import java.util.HashSet; -import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicBoolean; @@ -28,7 +27,6 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.hdds.scm.events.SCMEvents; import org.apache.hadoop.hdds.scm.ha.SCMContext; @@ -105,7 +103,6 @@ public class SCMSafeModeManager implements SafeModeManager { private Set validatedPreCheckRules = new HashSet<>(1); private final EventQueue eventPublisher; - private final PipelineManager pipelineManager; private final SCMServiceManager serviceManager; private final SCMContext scmContext; @@ -114,12 +111,10 @@ public class SCMSafeModeManager implements SafeModeManager { // TODO: Remove allContainers argument. (HDDS-11795) public SCMSafeModeManager(ConfigurationSource conf, - List allContainers, ContainerManager containerManager, PipelineManager pipelineManager, EventQueue eventQueue, SCMServiceManager serviceManager, SCMContext scmContext) { this.config = conf; - this.pipelineManager = pipelineManager; this.eventPublisher = eventQueue; this.serviceManager = serviceManager; this.scmContext = scmContext; diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeExitRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeExitRule.java index 69c1a86ac37..746e825f34b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeExitRule.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeExitRule.java @@ -41,6 +41,10 @@ public abstract class SafeModeExitRule implements EventHandler { protected static final int SAMPLE_CONTAINER_DISPLAY_LIMIT = 5; protected static final int SAMPLE_PIPELINE_DISPLAY_LIMIT = 5; + // TODO: Report processing logic will be removed in future. HDDS-11958. + // This flag is to add new code without breaking Safemode logic until we have HDDS-11958. 
+ private boolean validateBasedOnReportProcessing = true; + public SafeModeExitRule(SCMSafeModeManager safeModeManager, String ruleName, EventQueue eventQueue) { this.safeModeManager = safeModeManager; @@ -48,6 +52,14 @@ public SafeModeExitRule(SCMSafeModeManager safeModeManager, eventQueue.addHandler(getEventType(), this); } + public void setValidateBasedOnReportProcessing(boolean newValue) { + validateBasedOnReportProcessing = newValue; + } + + protected boolean validateBasedOnReportProcessing() { + return validateBasedOnReportProcessing; + } + /** * Return's the name of this SafeModeExit Rule. * @return ruleName diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index d117e891c4b..52148c3d683 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -841,8 +841,8 @@ private void initializeSystemManagers(OzoneConfiguration conf, scmSafeModeManager = configurator.getScmSafeModeManager(); } else { scmSafeModeManager = new SCMSafeModeManager(conf, - containerManager.getContainers(), containerManager, - pipelineManager, eventQueue, serviceManager, scmContext); + containerManager, pipelineManager, eventQueue, + serviceManager, scmContext); } scmDecommissionManager = new NodeDecommissionManager(conf, scmNodeManager, containerManager, diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java index 621c9297e7e..528891623df 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java @@ -162,8 +162,7 @@ void setUp(@TempDir File tempDir) throws Exception { new ContainerReplicaPendingOps( Clock.system(ZoneId.systemDefault()))); SCMSafeModeManager safeModeManager = new SCMSafeModeManager(conf, - containerManager.getContainers(), containerManager, - pipelineManager, eventQueue, serviceManager, scmContext) { + containerManager, pipelineManager, eventQueue, serviceManager, scmContext) { @Override public void emitSafeModeStatus() { // skip diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java index 1dfbfd32785..900b09c0146 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java @@ -358,7 +358,7 @@ public void testClosePipelineShouldFailOnFollower() throws Exception { public void testPipelineReport() throws Exception { try (PipelineManagerImpl pipelineManager = createPipelineManager(true)) { SCMSafeModeManager scmSafeModeManager = - new SCMSafeModeManager(conf, new ArrayList<>(), + new SCMSafeModeManager(conf, mock(ContainerManager.class), pipelineManager, new EventQueue(), serviceManager, scmContext); Pipeline pipeline = pipelineManager @@ -469,7 +469,7 @@ public void testPipelineOpenOnlyWhenLeaderReported() throws Exception { 
pipelineManager.getPipeline(pipeline.getId()).getPipelineState()); SCMSafeModeManager scmSafeModeManager = - new SCMSafeModeManager(new OzoneConfiguration(), new ArrayList<>(), + new SCMSafeModeManager(new OzoneConfiguration(), mock(ContainerManager.class), pipelineManager, new EventQueue(), serviceManager, scmContext); PipelineReportHandler pipelineReportHandler = diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java index 13eb4be724c..b2b3530c1e7 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestHealthyPipelineSafeModeRule.java @@ -99,7 +99,7 @@ public void testHealthyPipelineSafeModeRuleWithNoPipelines() pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS, mockRatisProvider); SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager( - config, containers, containerManager, pipelineManager, eventQueue, + config, containerManager, pipelineManager, eventQueue, serviceManager, scmContext); HealthyPipelineSafeModeRule healthyPipelineSafeModeRule = @@ -179,7 +179,7 @@ public void testHealthyPipelineSafeModeRuleWithPipelines() throws Exception { MockRatisPipelineProvider.markPipelineHealthy(pipeline3); SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager( - config, containers, containerManager, pipelineManager, eventQueue, + config, containerManager, pipelineManager, eventQueue, serviceManager, scmContext); HealthyPipelineSafeModeRule healthyPipelineSafeModeRule = @@ -275,7 +275,7 @@ public void testHealthyPipelineSafeModeRuleWithMixedPipelines() MockRatisPipelineProvider.markPipelineHealthy(pipeline3); SCMSafeModeManager scmSafeModeManager = new SCMSafeModeManager( - config, containers, containerManager, pipelineManager, eventQueue, + config, containerManager, pipelineManager, eventQueue, serviceManager, scmContext); HealthyPipelineSafeModeRule healthyPipelineSafeModeRule = diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java index 76bafa8b1fb..44594740210 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestOneReplicaPipelineSafeModeRule.java @@ -120,7 +120,7 @@ private void setup(int nodes, int pipelineFactorThreeCount, HddsProtos.ReplicationFactor.ONE); SCMSafeModeManager scmSafeModeManager = - new SCMSafeModeManager(ozoneConfiguration, containers, containerManager, + new SCMSafeModeManager(ozoneConfiguration, containerManager, pipelineManager, eventQueue, serviceManager, scmContext); rule = scmSafeModeManager.getOneReplicaPipelineSafeModeRule(); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java index fc8ec9c1912..1d9a41b683f 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java +++ 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java @@ -136,7 +136,7 @@ private void testSafeMode(int numContainers) throws Exception { ContainerManager containerManager = mock(ContainerManager.class); when(containerManager.getContainers()).thenReturn(containers); scmSafeModeManager = new SCMSafeModeManager( - config, containers, containerManager, null, queue, + config, containerManager, null, queue, serviceManager, scmContext); assertTrue(scmSafeModeManager.getInSafeMode()); @@ -175,7 +175,7 @@ public void testSafeModeExitRule() throws Exception { ContainerManager containerManager = mock(ContainerManager.class); when(containerManager.getContainers()).thenReturn(containers); scmSafeModeManager = new SCMSafeModeManager( - config, containers, containerManager, null, queue, + config, containerManager, null, queue, serviceManager, scmContext); long cutOff = (long) Math.ceil(numContainers * config.getDouble( @@ -244,7 +244,7 @@ public void testHealthyPipelinePercentWithIncorrectValue(double healthyPercent, ContainerManager containerManager = mock(ContainerManager.class); when(containerManager.getContainers()).thenReturn(containers); IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, - () -> new SCMSafeModeManager(conf, containers, containerManager, + () -> new SCMSafeModeManager(conf, containerManager, pipelineManager, queue, serviceManager, scmContext)); assertThat(exception).hasMessageEndingWith("value should be >= 0.0 and <= 1.0"); } @@ -311,7 +311,7 @@ public void testSafeModeExitRuleWithPipelineAvailabilityCheck( when(containerManager.getContainers()).thenReturn(containers); scmSafeModeManager = new SCMSafeModeManager( - conf, containers, containerManager, pipelineManager, queue, serviceManager, + conf, containerManager, pipelineManager, queue, serviceManager, scmContext); assertTrue(scmSafeModeManager.getInSafeMode()); @@ -447,7 +447,7 @@ public void testDisableSafeMode() { ContainerManager containerManager = mock(ContainerManager.class); when(containerManager.getContainers()).thenReturn(containers); scmSafeModeManager = new SCMSafeModeManager( - conf, containers, containerManager, pipelineManager, queue, serviceManager, + conf, containerManager, pipelineManager, queue, serviceManager, scmContext); assertFalse(scmSafeModeManager.getInSafeMode()); } @@ -489,7 +489,7 @@ public void testContainerSafeModeRule() throws Exception { when(containerManager.getContainers()).thenReturn(containers); scmSafeModeManager = new SCMSafeModeManager( - config, containers, containerManager, null, queue, serviceManager, scmContext); + config, containerManager, null, queue, serviceManager, scmContext); assertTrue(scmSafeModeManager.getInSafeMode()); @@ -565,7 +565,7 @@ public void testContainerSafeModeRuleEC(int data, int parity) throws Exception { new ContainerReplicaPendingOps(Clock.system(ZoneId.systemDefault()))); scmSafeModeManager = new SCMSafeModeManager( - config, containers, containerManager, pipelineManager, queue, serviceManager, scmContext); + config, containerManager, pipelineManager, queue, serviceManager, scmContext); assertTrue(scmSafeModeManager.getInSafeMode()); // Only 20 containers are involved in the calculation, @@ -588,7 +588,7 @@ private void testSafeModeDataNodes(int numOfDns) throws Exception { ContainerManager containerManager = mock(ContainerManager.class); when(containerManager.getContainers()).thenReturn(containers); scmSafeModeManager = new SCMSafeModeManager( - conf, containers, containerManager, 
null, queue, + conf, containerManager, null, queue, serviceManager, scmContext); // Assert SCM is in Safe mode. @@ -702,7 +702,7 @@ public void testSafeModePipelineExitRule() throws Exception { when(containerManager.getContainers()).thenReturn(containers); scmSafeModeManager = new SCMSafeModeManager( - config, containers, containerManager, pipelineManager, queue, serviceManager, + config, containerManager, pipelineManager, queue, serviceManager, scmContext); SCMDatanodeProtocolServer.NodeRegistrationContainerReport nodeRegistrationContainerReport = @@ -757,7 +757,7 @@ public void testPipelinesNotCreatedUntilPreCheckPasses() throws Exception { when(containerManager.getContainers()).thenReturn(containers); scmSafeModeManager = new SCMSafeModeManager( - config, containers, containerManager, pipelineManager, queue, serviceManager, + config, containerManager, pipelineManager, queue, serviceManager, scmContext); // Assert SCM is in Safe mode. From 91d95371dc4c032d731cb53e94a490d131941cb1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 4 Jan 2025 10:04:46 +0100 Subject: [PATCH 033/168] HDDS-12013. Bump sqlite-jdbc to 3.47.2.0 (#7641) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 6353f081789..ae5c5d71766 100644 --- a/pom.xml +++ b/pom.xml @@ -215,7 +215,7 @@ 3.0.1 3.1.12.2 5.3.39 - 3.47.1.0 + 3.47.2.0 4.2.2 false 1200 From df6e119f4c8b04ae3425ec291e7ddbbde360556b Mon Sep 17 00:00:00 2001 From: Nandakumar Vadivelu Date: Sat, 4 Jan 2025 15:28:57 +0530 Subject: [PATCH 034/168] HDDS-12006. Enable sortpom in hdds-container-service, hdds-crypto-api, hdds-crypto-default, hdds-docs (#7638) --- hadoop-hdds/annotations/pom.xml | 2 +- hadoop-hdds/container-service/pom.xml | 162 +++++++++++++------------- hadoop-hdds/crypto-api/pom.xml | 36 +++--- hadoop-hdds/crypto-default/pom.xml | 36 +++--- hadoop-hdds/docs/pom.xml | 13 +-- 5 files changed, 118 insertions(+), 131 deletions(-) diff --git a/hadoop-hdds/annotations/pom.xml b/hadoop-hdds/annotations/pom.xml index e9a628f9a7e..84696c60945 100644 --- a/hadoop-hdds/annotations/pom.xml +++ b/hadoop-hdds/annotations/pom.xml @@ -28,8 +28,8 @@ annotations at compile time. - true + true diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml index 29a8f92fc83..0c1f0bbf4a1 100644 --- a/hadoop-hdds/container-service/pom.xml +++ b/hadoop-hdds/container-service/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -24,85 +21,78 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-container-service 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Container Service - Apache Ozone HDDS Container Service jar - - true - + Apache Ozone HDDS Container Service + Apache Ozone Distributed Data Store Container Service + - org.apache.ozone - hdds-common + com.fasterxml.jackson.core + jackson-annotations - org.apache.ozone - hdds-config + com.fasterxml.jackson.core + jackson-databind - org.apache.ozone - hdds-interface-client + com.github.luben + zstd-jni - org.apache.ozone - hdds-interface-server + com.google.guava + guava - org.apache.ozone - hdds-managed-rocksdb + com.google.protobuf + protobuf-java - - org.apache.commons - commons-compress + commons-codec + commons-codec - org.apache.logging.log4j - log4j-api + commons-collections + commons-collections commons-io commons-io - com.github.luben - zstd-jni + info.picocli + picocli - org.apache.ozone - hdds-server-framework + io.dropwizard.metrics + metrics-core - org.apache.ozone - hdds-client + io.netty + netty-buffer - commons-codec - commons-codec + io.netty + netty-codec - commons-collections - commons-collections + io.netty + netty-common - io.dropwizard.metrics - metrics-core + io.netty + netty-handler - - org.yaml - snakeyaml + io.netty + netty-transport - - org.apache.ozone - hdds-docs - provided + io.opentracing + opentracing-api - - org.apache.ratis - ratis-server + io.opentracing + opentracing-util @@ -113,49 +103,50 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jakarta.xml.bind jakarta.xml.bind-api + - org.glassfish.jaxb - jaxb-runtime + org.apache.commons + commons-compress - info.picocli - picocli + org.apache.commons + commons-lang3 - io.netty - netty-transport + org.apache.commons + commons-text - io.netty - netty-buffer + org.apache.logging.log4j + log4j-api - io.netty - netty-common + org.apache.ozone + hdds-client - io.netty - netty-codec + org.apache.ozone + hdds-common - io.netty - netty-handler + org.apache.ozone + hdds-config - io.opentracing - opentracing-api + org.apache.ozone + hdds-interface-client - io.opentracing - opentracing-util + org.apache.ozone + hdds-interface-server - org.apache.commons - commons-lang3 + org.apache.ozone + hdds-managed-rocksdb - org.apache.commons - commons-text + org.apache.ozone + hdds-server-framework org.apache.ratis @@ -177,6 +168,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ratis ratis-proto + + + org.apache.ratis + ratis-server + org.apache.ratis ratis-server-api @@ -185,6 +181,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ratis ratis-thirdparty-misc + + org.glassfish.jaxb + jaxb-runtime + org.rocksdb @@ -196,20 +196,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - com.fasterxml.jackson.core - jackson-annotations - - - com.fasterxml.jackson.core - jackson-databind - - - com.google.guava - guava + org.yaml + snakeyaml + - com.google.protobuf - protobuf-java + org.apache.ozone + hdds-docs + provided @@ -261,7 +255,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> maven-enforcer-plugin - ban-annotations + ban-annotations + @@ -283,17 +278,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> copy-common-html - prepare-package unpack + prepare-package org.apache.ozone hdds-server-framework - ${project.build.outputDirectory} - + ${project.build.outputDirectory} webapps/static/**/*.* diff --git a/hadoop-hdds/crypto-api/pom.xml b/hadoop-hdds/crypto-api/pom.xml index 3a283842781..9524b2df2db 100644 --- 
a/hadoop-hdds/crypto-api/pom.xml +++ b/hadoop-hdds/crypto-api/pom.xml @@ -12,27 +12,25 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - - 4.0.0 - - org.apache.ozone - hdds - 2.0.0-SNAPSHOT - - - hdds-crypto-api + + 4.0.0 + + org.apache.ozone + hdds 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store cryptographic functions - Apache Ozone HDDS Crypto + + + hdds-crypto-api + 2.0.0-SNAPSHOT + Apache Ozone HDDS Crypto + Apache Ozone Distributed Data Store cryptographic functions - - true - true - + + + true + - + - + diff --git a/hadoop-hdds/crypto-default/pom.xml b/hadoop-hdds/crypto-default/pom.xml index f9653145e44..5200521d249 100644 --- a/hadoop-hdds/crypto-default/pom.xml +++ b/hadoop-hdds/crypto-default/pom.xml @@ -12,27 +12,25 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - - 4.0.0 - - org.apache.ozone - hdds - 2.0.0-SNAPSHOT - - - hdds-crypto-default + + 4.0.0 + + org.apache.ozone + hdds 2.0.0-SNAPSHOT - Default implementation of Apache Ozone Distributed Data Store's cryptographic functions - Apache Ozone HDDS Crypto - Default + + + hdds-crypto-default + 2.0.0-SNAPSHOT + Apache Ozone HDDS Crypto - Default + Default implementation of Apache Ozone Distributed Data Store's cryptographic functions - - true - true - + + + true + - + - + diff --git a/hadoop-hdds/docs/pom.xml b/hadoop-hdds/docs/pom.xml index e8261c39cb1..a67c80e9f37 100644 --- a/hadoop-hdds/docs/pom.xml +++ b/hadoop-hdds/docs/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -24,14 +21,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-docs 2.0.0-SNAPSHOT - Apache Ozone/HDDS Documentation - Apache Ozone/HDDS Documentation jar + Apache Ozone/HDDS Documentation + Apache Ozone/HDDS Documentation - true + + true false - true From de79c37c61de9e4c1e39f5e52632a33192b490c2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 4 Jan 2025 11:31:08 +0100 Subject: [PATCH 035/168] HDDS-12014. Bump assertj-core to 3.27.1 (#7642) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index ae5c5d71766..36a7e660651 100644 --- a/pom.xml +++ b/pom.xml @@ -34,7 +34,7 @@ 0.16.1 1.14 1.9.7 - 3.27.0 + 3.27.1 1.12.661 0.8.0.RELEASE 1.79 From 138cbe31570da5876a682b54a5322e99cb9e4dc0 Mon Sep 17 00:00:00 2001 From: Ivan Andika Date: Sat, 4 Jan 2025 19:28:20 +0800 Subject: [PATCH 036/168] HDDS-11998. 
BlockDataStreamOutput should decrPendingContainerOpsMetrics (#7636) --- .../scm/storage/BlockDataStreamOutput.java | 5 +- .../hadoop/hdds/scm/storage/StreamBuffer.java | 4 +- .../client/rpc/TestBlockDataStreamOutput.java | 77 ++++++++++++------- 3 files changed, 56 insertions(+), 30 deletions(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java index 48c77f2c863..8c2883a4374 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java @@ -253,7 +253,7 @@ public void write(ByteBuffer b, int off, int len) throws IOException { } while (len > 0) { allocateNewBufferIfNeeded(); - int writeLen = Math.min(len, currentBuffer.length()); + int writeLen = Math.min(len, currentBuffer.remaining()); final StreamBuffer buf = new StreamBuffer(b, off, writeLen); currentBuffer.put(buf); writeChunkIfNeeded(); @@ -265,7 +265,7 @@ public void write(ByteBuffer b, int off, int len) throws IOException { } private void writeChunkIfNeeded() throws IOException { - if (currentBuffer.length() == 0) { + if (currentBuffer.remaining() == 0) { writeChunk(currentBuffer); currentBuffer = null; } @@ -672,6 +672,7 @@ private void writeChunkToContainer(ByteBuffer buf) out.writeAsync(buf, StandardWriteOption.SYNC) : out.writeAsync(buf)) .whenCompleteAsync((r, e) -> { + metrics.decrPendingContainerOpsMetrics(ContainerProtos.Type.WriteChunk); if (e != null || !r.isSuccess()) { if (e == null) { e = new IOException("result is not success"); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java index b889aa35e26..5bf6dcee826 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java @@ -39,8 +39,8 @@ public ByteBuffer duplicate() { return buffer.duplicate(); } - public int length() { - return buffer.limit() - buffer.position(); + public int remaining() { + return buffer.remaining(); } public int position() { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java index 5bcf7084054..46c3c716271 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java @@ -22,7 +22,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.DatanodeDetails; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientMetrics; @@ -47,13 +46,14 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import java.io.IOException; import java.nio.ByteBuffer; import java.util.List; import java.util.UUID; import java.util.concurrent.TimeUnit; import static 
java.nio.charset.StandardCharsets.UTF_8; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type.PutBlock; +import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type.WriteChunk; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -77,13 +77,6 @@ public class TestBlockDataStreamOutput { private static String keyString; private static final DatanodeVersion DN_OLD_VERSION = DatanodeVersion.SEPARATE_RATIS_PORTS_AVAILABLE; - /** - * Create a MiniDFSCluster for testing. - *
- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ @BeforeAll public static void init() throws Exception { chunkSize = 100; @@ -120,7 +113,7 @@ public static void init() throws Exception { client = OzoneClientFactory.getRpcClient(conf); objectStore = client.getObjectStore(); keyString = UUID.randomUUID().toString(); - volumeName = "testblockoutputstream"; + volumeName = "testblockdatastreamoutput"; bucketName = volumeName; objectStore.createVolume(volumeName); objectStore.getVolume(volumeName).createBucket(bucketName); @@ -130,9 +123,6 @@ static String getKeyName() { return UUID.randomUUID().toString(); } - /** - * Shutdown MiniDFSCluster. - */ @AfterAll public static void shutdown() { IOUtils.closeQuietly(client); @@ -166,6 +156,11 @@ public void testMultiBlockWrite() throws Exception { } static void testWrite(int dataLength) throws Exception { + XceiverClientMetrics metrics = + XceiverClientManager.getXceiverClientMetrics(); + long pendingWriteChunkCount = metrics.getPendingContainerOpCountMetrics(WriteChunk); + long pendingPutBlockCount = metrics.getPendingContainerOpCountMetrics(PutBlock); + String keyName = getKeyName(); OzoneDataStreamOutput key = createKey( keyName, ReplicationType.RATIS, dataLength); @@ -174,9 +169,19 @@ static void testWrite(int dataLength) throws Exception { // now close the stream, It will update the key length. key.close(); validateData(keyName, data); + + assertEquals(pendingPutBlockCount, + metrics.getPendingContainerOpCountMetrics(PutBlock)); + assertEquals(pendingWriteChunkCount, + metrics.getPendingContainerOpCountMetrics(WriteChunk)); } private void testWriteWithFailure(int dataLength) throws Exception { + XceiverClientMetrics metrics = + XceiverClientManager.getXceiverClientMetrics(); + long pendingWriteChunkCount = metrics.getPendingContainerOpCountMetrics(WriteChunk); + long pendingPutBlockCount = metrics.getPendingContainerOpCountMetrics(PutBlock); + String keyName = getKeyName(); OzoneDataStreamOutput key = createKey( keyName, ReplicationType.RATIS, dataLength); @@ -195,17 +200,24 @@ private void testWriteWithFailure(int dataLength) throws Exception { key.close(); String dataString = new String(data, UTF_8); validateData(keyName, dataString.concat(dataString).getBytes(UTF_8)); + + assertEquals(pendingPutBlockCount, + metrics.getPendingContainerOpCountMetrics(PutBlock)); + assertEquals(pendingWriteChunkCount, + metrics.getPendingContainerOpCountMetrics(WriteChunk)); } @Test public void testPutBlockAtBoundary() throws Exception { - int dataLength = 500; + int dataLength = maxFlushSize + 100; XceiverClientMetrics metrics = XceiverClientManager.getXceiverClientMetrics(); - long putBlockCount = metrics.getContainerOpCountMetrics( - ContainerProtos.Type.PutBlock); - long pendingPutBlockCount = metrics.getPendingContainerOpCountMetrics( - ContainerProtos.Type.PutBlock); + long writeChunkCount = metrics.getContainerOpCountMetrics(WriteChunk); + long putBlockCount = metrics.getContainerOpCountMetrics(PutBlock); + long pendingWriteChunkCount = metrics.getPendingContainerOpCountMetrics(WriteChunk); + long pendingPutBlockCount = metrics.getPendingContainerOpCountMetrics(PutBlock); + long totalOpCount = metrics.getTotalOpCount(); + String keyName = getKeyName(); OzoneDataStreamOutput key = createKey( keyName, ReplicationType.RATIS, 0); @@ -213,14 +225,25 @@ public void testPutBlockAtBoundary() throws Exception { ContainerTestHelper.getFixedLengthString(keyString, dataLength) .getBytes(UTF_8); 
key.write(ByteBuffer.wrap(data)); - assertThat(metrics.getPendingContainerOpCountMetrics(ContainerProtos.Type.PutBlock)) + assertThat(metrics.getPendingContainerOpCountMetrics(PutBlock)) .isLessThanOrEqualTo(pendingPutBlockCount + 1); + assertThat(metrics.getPendingContainerOpCountMetrics(WriteChunk)) + .isLessThanOrEqualTo(pendingWriteChunkCount + 5); key.close(); // Since data length is 500 , first putBlock will be at 400(flush boundary) // and the other at 500 - assertEquals( - metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock), - putBlockCount + 2); + assertEquals(putBlockCount + 2, + metrics.getContainerOpCountMetrics(PutBlock)); + // Each chunk is 100 so there will be 500 / 100 = 5 chunks. + assertEquals(writeChunkCount + 5, + metrics.getContainerOpCountMetrics(WriteChunk)); + assertEquals(totalOpCount + 7, + metrics.getTotalOpCount()); + assertEquals(pendingPutBlockCount, + metrics.getPendingContainerOpCountMetrics(PutBlock)); + assertEquals(pendingWriteChunkCount, + metrics.getPendingContainerOpCountMetrics(WriteChunk)); + validateData(keyName, data); } @@ -242,20 +265,22 @@ public void testMinPacketSize() throws Exception { XceiverClientMetrics metrics = XceiverClientManager.getXceiverClientMetrics(); OzoneDataStreamOutput key = createKey(keyName, ReplicationType.RATIS, 0); - long writeChunkCount = - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk); + long writeChunkCount = metrics.getContainerOpCountMetrics(WriteChunk); + long pendingWriteChunkCount = metrics.getPendingContainerOpCountMetrics(WriteChunk); byte[] data = ContainerTestHelper.getFixedLengthString(keyString, chunkSize / 2) .getBytes(UTF_8); key.write(ByteBuffer.wrap(data)); // minPacketSize= 100, so first write of 50 wont trigger a writeChunk assertEquals(writeChunkCount, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); + metrics.getContainerOpCountMetrics(WriteChunk)); key.write(ByteBuffer.wrap(data)); assertEquals(writeChunkCount + 1, - metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); + metrics.getContainerOpCountMetrics(WriteChunk)); // now close the stream, It will update the key length. key.close(); + assertEquals(pendingWriteChunkCount, + metrics.getPendingContainerOpCountMetrics(WriteChunk)); String dataString = new String(data, UTF_8); validateData(keyName, dataString.concat(dataString).getBytes(UTF_8)); } From 53fc590878325ca78081d16ea2238641a884ae78 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 4 Jan 2025 15:25:05 +0100 Subject: [PATCH 037/168] HDDS-12019. Bump zstd-jni to 1.5.6-9 (#7643) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 36a7e660651..1f6a07382fd 100644 --- a/pom.xml +++ b/pom.xml @@ -230,7 +230,7 @@ 3.1.9.Final 5.4.0 3.8.4 - 1.5.6-8 + 1.5.6-9 From 24a721182eb5ec3a5f4d14cba70509228f61d963 Mon Sep 17 00:00:00 2001 From: Tsz-Wo Nicholas Sze Date: Sat, 4 Jan 2025 08:00:46 -0800 Subject: [PATCH 038/168] HDDS-7188. Read chunk files using netty ChunkedNioFile. 
(#7625) --- .../apache/hadoop/hdds/scm/ScmConfigKeys.java | 3 + .../ContainerCommandResponseBuilders.java | 4 +- .../hadoop/ozone/common/ChunkBuffer.java | 55 +--------- .../ozone/common/ChunkBufferToByteString.java | 93 ++++++++++++++++ .../ChunkBufferToByteStringByByteBufs.java | 101 ++++++++++++++++++ .../ozone/common/IncrementalChunkBuffer.java | 4 +- .../transport/server/GrpcXceiverService.java | 13 ++- .../server/ratis/DispatcherContext.java | 30 +++++- .../container/keyvalue/KeyValueHandler.java | 17 ++- .../keyvalue/helpers/ChunkUtils.java | 40 ++++++- .../keyvalue/impl/BlockManagerImpl.java | 12 ++- .../keyvalue/impl/ChunkManagerDispatcher.java | 7 +- .../keyvalue/impl/FilePerBlockStrategy.java | 11 +- .../keyvalue/impl/FilePerChunkStrategy.java | 10 +- .../keyvalue/interfaces/BlockManager.java | 3 + .../keyvalue/interfaces/ChunkManager.java | 3 +- .../common/impl/TestContainerPersistence.java | 6 +- .../impl/TestChunkManagerDummyImpl.java | 4 +- .../impl/TestFilePerBlockStrategy.java | 13 ++- .../ozone/TestOzoneConfigurationFields.java | 1 + .../client/rpc/TestBlockDataStreamOutput.java | 2 + 21 files changed, 345 insertions(+), 87 deletions(-) create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferToByteString.java create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferToByteStringByByteBufs.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index c4b42acec43..85c82af942f 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -153,6 +153,9 @@ public final class ScmConfigKeys { "ozone.chunk.read.mapped.buffer.max.count"; // this max_count could not be greater than Linux platform max_map_count which by default is 65530. 
public static final int OZONE_CHUNK_READ_MAPPED_BUFFER_MAX_COUNT_DEFAULT = 0; + public static final String OZONE_CHUNK_READ_NETTY_CHUNKED_NIO_FILE_KEY = + "ozone.chunk.read.netty.ChunkedNioFile"; + public static final boolean OZONE_CHUNK_READ_NETTY_CHUNKED_NIO_FILE_DEFAULT = false; public static final String OZONE_SCM_CONTAINER_LAYOUT_KEY = "ozone.scm.container.layout"; diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java index d3f39c023b7..31615cf509b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ContainerCommandResponseBuilders.java @@ -43,7 +43,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkResponseProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; -import org.apache.hadoop.ozone.common.ChunkBuffer; +import org.apache.hadoop.ozone.common.ChunkBufferToByteString; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations; @@ -304,7 +304,7 @@ public static ContainerCommandResponseProto getReadContainerResponse( } public static ContainerCommandResponseProto getReadChunkResponse( - ContainerCommandRequestProto request, ChunkBuffer data, + ContainerCommandRequestProto request, ChunkBufferToByteString data, Function byteBufferToByteString) { boolean isReadChunkV0 = getReadChunkVersion(request.getReadChunk()) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java index a24d39e5dac..56541c57eff 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBuffer.java @@ -22,17 +22,13 @@ import java.nio.channels.GatheringByteChannel; import java.util.List; import java.util.Objects; -import java.util.function.Function; -import java.util.function.Supplier; - -import org.apache.hadoop.hdds.scm.ByteStringConversion; import org.apache.hadoop.hdds.utils.db.CodecBuffer; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.ratis.util.UncheckedAutoCloseable; /** Buffer for a block chunk. */ -public interface ChunkBuffer extends UncheckedAutoCloseable { +public interface ChunkBuffer extends ChunkBufferToByteString, UncheckedAutoCloseable { /** Similar to {@link ByteBuffer#allocate(int)}. */ static ChunkBuffer allocate(int capacity) { @@ -142,53 +138,4 @@ default ChunkBuffer put(ByteString b) { * @return The number of bytes written, possibly zero */ long writeTo(GatheringByteChannel channel) throws IOException; - - /** - * Convert this buffer to a {@link ByteString}. - * The position and limit of this {@link ChunkBuffer} remains unchanged. - * The given function must preserve the position and limit - * of the input {@link ByteBuffer}. - */ - default ByteString toByteString(Function function) { - return toByteStringImpl(b -> applyAndAssertFunction(b, function, this)); - } - - /** - * Convert this buffer(s) to a list of {@link ByteString}. 
- * The position and limit of this {@link ChunkBuffer} remains unchanged. - * The given function must preserve the position and limit - * of the input {@link ByteBuffer}. - */ - default List toByteStringList( - Function function) { - return toByteStringListImpl(b -> applyAndAssertFunction(b, function, this)); - } - - // for testing - default ByteString toByteString() { - return toByteString(ByteStringConversion::safeWrap); - } - - ByteString toByteStringImpl(Function function); - - List toByteStringListImpl( - Function function); - - static void assertInt(int expected, int computed, Supplier prefix) { - if (expected != computed) { - throw new IllegalStateException(prefix.get() - + ": expected = " + expected + " but computed = " + computed); - } - } - - /** Apply the function and assert if it preserves position and limit. */ - static ByteString applyAndAssertFunction(ByteBuffer buffer, - Function function, Object name) { - final int pos = buffer.position(); - final int lim = buffer.limit(); - final ByteString bytes = function.apply(buffer); - assertInt(pos, buffer.position(), () -> name + ": Unexpected position"); - assertInt(lim, buffer.limit(), () -> name + ": Unexpected limit"); - return bytes; - } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferToByteString.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferToByteString.java new file mode 100644 index 00000000000..384b661f6e0 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferToByteString.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.common; + +import org.apache.hadoop.hdds.scm.ByteStringConversion; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.apache.ratis.thirdparty.io.netty.buffer.ByteBuf; + +import java.nio.ByteBuffer; +import java.util.List; +import java.util.function.Function; +import java.util.function.Supplier; + +/** For converting to {@link ByteString}s. */ +public interface ChunkBufferToByteString { + /** + * Wrap the given list of {@link ByteBuf}s + * as a {@link ChunkBufferToByteString}. + */ + static ChunkBufferToByteString wrap(List buffers) { + return new ChunkBufferToByteStringByByteBufs(buffers); + } + + /** Release the underlying resource. */ + default void release() { + } + + /** + * Convert this buffer to a {@link ByteString}. + * The position and limit of this {@link ChunkBufferToByteString} + * remains unchanged. + * The given function must preserve the position and limit + * of the input {@link ByteBuffer}. 
+ */ + default ByteString toByteString(Function function) { + return toByteStringImpl(b -> applyAndAssertFunction(b, function, this)); + } + + /** + * Convert this buffer(s) to a list of {@link ByteString}. + * The position and limit of this {@link ChunkBufferToByteString} + * remains unchanged. + * The given function must preserve the position and limit + * of the input {@link ByteBuffer}. + */ + default List toByteStringList( + Function function) { + return toByteStringListImpl(b -> applyAndAssertFunction(b, function, this)); + } + + // for testing + default ByteString toByteString() { + return toByteString(ByteStringConversion::safeWrap); + } + + ByteString toByteStringImpl(Function function); + + List toByteStringListImpl( + Function function); + + static void assertInt(int expected, int computed, Supplier prefix) { + if (expected != computed) { + throw new IllegalStateException(prefix.get() + + ": expected = " + expected + " but computed = " + computed); + } + } + + /** Apply the function and assert if it preserves position and limit. */ + static ByteString applyAndAssertFunction(ByteBuffer buffer, + Function function, Object name) { + final int pos = buffer.position(); + final int lim = buffer.limit(); + final ByteString bytes = function.apply(buffer); + assertInt(pos, buffer.position(), () -> name + ": Unexpected position"); + assertInt(lim, buffer.limit(), () -> name + ": Unexpected limit"); + return bytes; + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferToByteStringByByteBufs.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferToByteStringByByteBufs.java new file mode 100644 index 00000000000..167191f5a44 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/ChunkBufferToByteStringByByteBufs.java @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.common; + +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; +import org.apache.ratis.thirdparty.io.netty.buffer.ByteBuf; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.function.Function; + +/** + * A {@link ChunkBufferToByteString} implementation + * using a list of {@link ByteBuf}s. + */ +class ChunkBufferToByteStringByByteBufs implements ChunkBufferToByteString { + private final List buffers; + + private volatile List byteStrings; + private volatile ByteString concatenated; + + ChunkBufferToByteStringByByteBufs(List buffers) { + this.buffers = buffers == null || buffers.isEmpty() ? + Collections.emptyList() : Collections.unmodifiableList(buffers); + } + + @Override + public void release() { + for (ByteBuf buf : buffers) { + buf.release(); + } + } + + @Override + public ByteString toByteStringImpl(Function f) { + if (concatenated != null) { + return concatenated; + } + initByteStrings(f); + return Objects.requireNonNull(concatenated, "concatenated == null"); + } + + @Override + public List toByteStringListImpl(Function f) { + if (byteStrings != null) { + return byteStrings; + } + return initByteStrings(f); + } + + private synchronized List initByteStrings(Function f) { + if (byteStrings != null) { + return byteStrings; + } + if (buffers.isEmpty()) { + byteStrings = Collections.emptyList(); + concatenated = ByteString.EMPTY; + return byteStrings; + } + + final List array = new ArrayList<>(); + concatenated = convert(buffers, array, f); + byteStrings = Collections.unmodifiableList(array); + return byteStrings; + } + + static ByteString convert(List bufs, List byteStrings, Function f) { + ByteString concatenated = ByteString.EMPTY; + for (ByteBuf buf : bufs) { + for (ByteBuffer b : buf.nioBuffers()) { + final ByteString s = f.apply(b); + byteStrings.add(s); + concatenated = concatenated.concat(s); + } + } + return concatenated; + } + + @Override + public String toString() { + return getClass().getSimpleName() + ":n=" + buffers.size(); + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java index 732af4b6850..500acf74c98 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/IncrementalChunkBuffer.java @@ -82,7 +82,7 @@ private int getBufferCapacityAtIndex(int i) { } private void assertInt(int expected, int computed, String name, int i) { - ChunkBuffer.assertInt(expected, computed, + ChunkBufferToByteString.assertInt(expected, computed, () -> this + ": Unexpected " + name + " at index " + i); } @@ -182,7 +182,7 @@ private boolean assertRemainingList(ByteBuffer ith, int i) { } } final int j = i; - ChunkBuffer.assertInt(buffers.size(), i, + ChunkBufferToByteString.assertInt(buffers.size(), i, () -> "i = " + j + " != buffers.size() = " + buffers.size()); return true; } diff --git 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java index 5f1914402d0..20cbdf8f02f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/GrpcXceiverService.java @@ -19,8 +19,10 @@ package org.apache.hadoop.ozone.container.common.transport.server; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; import org.apache.hadoop.hdds.protocol.datanode.proto.XceiverClientProtocolServiceGrpc; import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; +import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; import org.apache.ratis.grpc.util.ZeroCopyMessageMarshaller; import org.apache.ratis.thirdparty.com.google.protobuf.MessageLite; import org.apache.ratis.thirdparty.io.grpc.MethodDescriptor; @@ -97,9 +99,13 @@ public StreamObserver send( @Override public void onNext(ContainerCommandRequestProto request) { + final DispatcherContext context = request.getCmdType() != Type.ReadChunk ? null + : DispatcherContext.newBuilder(DispatcherContext.Op.HANDLE_READ_CHUNK) + .setReleaseSupported(true) + .build(); + try { - ContainerCommandResponseProto resp = - dispatcher.dispatch(request, null); + final ContainerCommandResponseProto resp = dispatcher.dispatch(request, context); responseObserver.onNext(resp); } catch (Throwable e) { LOG.error("Got exception when processing" @@ -108,6 +114,9 @@ public void onNext(ContainerCommandRequestProto request) { responseObserver.onError(e); } finally { zeroCopyMessageMarshaller.release(request); + if (context != null) { + context.release(); + } } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/DispatcherContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/DispatcherContext.java index 15af2645352..45bfbbc12a1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/DispatcherContext.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/DispatcherContext.java @@ -21,6 +21,7 @@ import org.apache.hadoop.hdds.annotation.InterfaceStability; import org.apache.hadoop.util.Time; import org.apache.ratis.server.protocol.TermIndex; +import org.apache.ratis.util.Preconditions; import java.util.Map; import java.util.Objects; @@ -119,7 +120,10 @@ public static Op op(DispatcherContext context) { private final Map container2BCSIDMap; - private long startTime; + private final boolean releaseSupported; + private volatile Runnable releaseMethod; + + private final long startTime = Time.monotonicNowNanos(); private DispatcherContext(Builder b) { this.op = Objects.requireNonNull(b.op, "op == null"); @@ -127,7 +131,7 @@ private DispatcherContext(Builder b) { this.logIndex = b.logIndex; this.stage = b.stage; this.container2BCSIDMap = b.container2BCSIDMap; - 
this.startTime = Time.monotonicNowNanos(); + this.releaseSupported = b.releaseSupported; } /** Use {@link DispatcherContext#op(DispatcherContext)} for handling null. */ @@ -155,6 +159,21 @@ public long getStartTime() { return startTime; } + public boolean isReleaseSupported() { + return releaseSupported; + } + + public void setReleaseMethod(Runnable releaseMethod) { + Preconditions.assertTrue(releaseSupported, "Unsupported release method"); + this.releaseMethod = releaseMethod; + } + + public void release() { + if (releaseMethod != null) { + releaseMethod.run(); + } + } + @Override public String toString() { return op + "-" + stage + TermIndex.valueOf(term, logIndex); @@ -173,6 +192,7 @@ public static final class Builder { private long term; private long logIndex; private Map container2BCSIDMap; + private boolean releaseSupported; private Builder(Op op) { this.op = op; @@ -221,6 +241,12 @@ public Builder setContainer2BCSIDMap(Map map) { this.container2BCSIDMap = map; return this; } + + public Builder setReleaseSupported(boolean releaseSupported) { + this.releaseSupported = releaseSupported; + return this; + } + /** * Builds and returns DispatcherContext instance. * diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index 06a4543bd79..9cae71e9baf 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -59,6 +59,7 @@ import org.apache.hadoop.ozone.OzoneConfigKeys; import org.apache.hadoop.ozone.common.Checksum; import org.apache.hadoop.ozone.common.ChunkBuffer; +import org.apache.hadoop.ozone.common.ChunkBufferToByteString; import org.apache.hadoop.ozone.common.OzoneChecksumException; import org.apache.hadoop.ozone.common.utils.BufferUtils; import org.apache.hadoop.ozone.container.common.helpers.BlockData; @@ -799,7 +800,8 @@ ContainerCommandResponseProto handleReadChunk( } return malformedRequest(request); } - ChunkBuffer data; + + final ChunkBufferToByteString data; try { BlockID blockID = BlockID.getFromProtobuf( request.getReadChunk().getBlockID()); @@ -861,11 +863,16 @@ ContainerCommandResponseProto handleDeleteChunk( "using BlockDeletingService"); } - private void validateChunkChecksumData(ChunkBuffer data, ChunkInfo info) + private void validateChunkChecksumData(ChunkBufferToByteString data, ChunkInfo info) throws StorageContainerException { if (validateChunkChecksumData) { try { - Checksum.verifyChecksum(data.duplicate(data.position(), data.limit()), info.getChecksumData(), 0); + if (data instanceof ChunkBuffer) { + final ChunkBuffer b = (ChunkBuffer)data; + Checksum.verifyChecksum(b.duplicate(b.position(), b.limit()), info.getChecksumData(), 0); + } else { + Checksum.verifyChecksum(data.toByteString(byteBufferToByteString), info.getChecksumData(), 0); + } } catch (OzoneChecksumException ex) { throw ChunkUtils.wrapInStorageContainerException(ex); } @@ -1058,8 +1065,8 @@ ContainerCommandResponseProto handleGetSmallFile( // of ByteStrings. 
chunkInfo.setReadDataIntoSingleBuffer(true); } - ChunkBuffer data = chunkManager.readChunk(kvContainer, blockID, - chunkInfo, dispatcherContext); + final ChunkBufferToByteString data = chunkManager.readChunk( + kvContainer, blockID, chunkInfo, dispatcherContext); dataBuffers.addAll(data.toByteStringList(byteBufferToByteString)); chunkInfoProto = chunk; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java index dc048ac16aa..8ada6b10bcf 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java @@ -49,7 +49,9 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.ChunkBuffer; import org.apache.hadoop.ozone.common.utils.BufferUtils; +import org.apache.hadoop.ozone.common.ChunkBufferToByteString; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; +import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; import org.apache.hadoop.ozone.container.keyvalue.impl.MappedBufferManager; import org.apache.hadoop.util.Time; @@ -68,6 +70,9 @@ import static org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil.onFailure; import org.apache.ratis.util.AutoCloseableLock; +import org.apache.ratis.thirdparty.io.netty.buffer.ByteBuf; +import org.apache.ratis.thirdparty.io.netty.buffer.PooledByteBufAllocator; +import org.apache.ratis.thirdparty.io.netty.handler.stream.ChunkedNioFile; import org.apache.ratis.util.function.CheckedFunction; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -220,7 +225,7 @@ public static ChunkBuffer readData(long len, int bufferCapacity, } private static void readData(File file, long offset, long len, - CheckedFunction readMethod, + CheckedFunction readMethod, HddsVolume volume) throws StorageContainerException { final Path path = file.toPath(); @@ -230,7 +235,7 @@ private static void readData(File file, long offset, long len, try (AutoCloseableLock ignoredLock = getFileReadLock(path); FileChannel channel = open(path, READ_OPTIONS, NO_ATTRIBUTES)) { bytesRead = readMethod.apply(channel); - } catch (IOException e) { + } catch (Exception e) { onFailure(volume); throw wrapInStorageContainerException(e); } @@ -307,6 +312,37 @@ private static ChunkBuffer readData(File file, int chunkSize, } } + public static ChunkBufferToByteString readData(File file, long chunkSize, + long offset, long length, HddsVolume volume, DispatcherContext context) + throws StorageContainerException { + final List buffers = readDataNettyChunkedNioFile( + file, Math.toIntExact(chunkSize), offset, length, volume); + final ChunkBufferToByteString b = ChunkBufferToByteString.wrap(buffers); + context.setReleaseMethod(b::release); + return b; + } + + /** + * Read data from the given file using {@link ChunkedNioFile}. + * + * @return a list of {@link ByteBuf} containing the data. 
+ */ + private static List readDataNettyChunkedNioFile( + File file, int chunkSize, long offset, long length, HddsVolume volume) throws StorageContainerException { + final List buffers = new ArrayList<>(Math.toIntExact((length - 1) / chunkSize) + 1); + readData(file, offset, length, channel -> { + final ChunkedNioFile f = new ChunkedNioFile(channel, offset, length, chunkSize); + long readLen = 0; + while (readLen < length) { + final ByteBuf buf = f.readChunk(PooledByteBufAllocator.DEFAULT); + readLen += buf.readableBytes(); + buffers.add(buf); + } + return readLen; + }, volume); + return buffers; + } + /** * Validates chunk data and returns a file object to Chunk File that we are * expected to write data to. diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java index 6232b843567..a9dfcdc57a0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java @@ -65,6 +65,7 @@ public class BlockManagerImpl implements BlockManager { private final int defaultReadBufferCapacity; private final int readMappedBufferThreshold; private final int readMappedBufferMaxCount; + private final boolean readNettyChunkedNioFile; /** * Constructs a Block Manager. @@ -83,6 +84,9 @@ public BlockManagerImpl(ConfigurationSource conf) { this.readMappedBufferMaxCount = config.getInt( ScmConfigKeys.OZONE_CHUNK_READ_MAPPED_BUFFER_MAX_COUNT_KEY, ScmConfigKeys.OZONE_CHUNK_READ_MAPPED_BUFFER_MAX_COUNT_DEFAULT); + this.readNettyChunkedNioFile = config.getBoolean( + ScmConfigKeys.OZONE_CHUNK_READ_NETTY_CHUNKED_NIO_FILE_KEY, + ScmConfigKeys.OZONE_CHUNK_READ_NETTY_CHUNKED_NIO_FILE_DEFAULT); } @Override @@ -304,15 +308,21 @@ public int getDefaultReadBufferCapacity() { return defaultReadBufferCapacity; } + @Override public int getReadMappedBufferThreshold() { return readMappedBufferThreshold; } - /** @return the max count of memory mapped buffers for read. */ + @Override public int getReadMappedBufferMaxCount() { return readMappedBufferMaxCount; } + @Override + public boolean isReadNettyChunkedNioFile() { + return readNettyChunkedNioFile; + } + /** * Deletes an existing block. 
* As Deletion is handled by BlockDeletingService, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java index 6a1d5533cf2..89854169388 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.common.ChunkBuffer; +import org.apache.hadoop.ozone.common.ChunkBufferToByteString; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; @@ -104,15 +105,15 @@ public void finalizeWriteChunk(KeyValueContainer kvContainer, } @Override - public ChunkBuffer readChunk(Container container, BlockID blockID, + public ChunkBufferToByteString readChunk(Container container, BlockID blockID, ChunkInfo info, DispatcherContext dispatcherContext) throws StorageContainerException { - ChunkBuffer data = selectHandler(container) + final ChunkBufferToByteString data = selectHandler(container) .readChunk(container, blockID, info, dispatcherContext); Preconditions.checkState(data != null); - container.getContainerData().updateReadStats(data.remaining()); + container.getContainerData().updateReadStats(info.getLen()); return data; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java index 4ca578d7717..26ccc5379b6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java @@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.common.ChunkBuffer; +import org.apache.hadoop.ozone.common.ChunkBufferToByteString; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; @@ -77,6 +78,8 @@ public class FilePerBlockStrategy implements ChunkManager { private final int readMappedBufferThreshold; private final int readMappedBufferMaxCount; private final MappedBufferManager mappedBufferManager; + + private final boolean readNettyChunkedNioFile; private final VolumeSet volumeSet; public FilePerBlockStrategy(boolean sync, BlockManager manager, @@ -95,6 +98,8 @@ public FilePerBlockStrategy(boolean sync, BlockManager manager, } else { mappedBufferManager = null; } + + this.readNettyChunkedNioFile = manager != null && manager.isReadNettyChunkedNioFile(); } private static void checkLayoutVersion(Container container) { @@ -180,7 +185,7 @@ public void writeChunk(Container container, BlockID blockID, ChunkInfo info, } @Override - public 
ChunkBuffer readChunk(Container container, BlockID blockID, + public ChunkBufferToByteString readChunk(Container container, BlockID blockID, ChunkInfo info, DispatcherContext dispatcherContext) throws StorageContainerException { @@ -204,6 +209,10 @@ public ChunkBuffer readChunk(Container container, BlockID blockID, long offset = info.getOffset(); int bufferCapacity = ChunkManager.getBufferCapacityForChunkRead(info, defaultReadBufferCapacity); + + if (readNettyChunkedNioFile && dispatcherContext != null && dispatcherContext.isReleaseSupported()) { + return ChunkUtils.readData(chunkFile, bufferCapacity, offset, len, volume, dispatcherContext); + } return ChunkUtils.readData(len, bufferCapacity, chunkFile, offset, volume, readMappedBufferThreshold, readMappedBufferMaxCount > 0, mappedBufferManager); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java index 6ac88cad7f5..344cd0a9f0c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.ChunkBuffer; +import org.apache.hadoop.ozone.common.ChunkBufferToByteString; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext; @@ -69,6 +70,8 @@ public class FilePerChunkStrategy implements ChunkManager { private final int readMappedBufferThreshold; private final int readMappedBufferMaxCount; private final MappedBufferManager mappedBufferManager; + + private final boolean readNettyChunkedNioFile; private final VolumeSet volumeSet; public FilePerChunkStrategy(boolean sync, BlockManager manager, @@ -88,6 +91,8 @@ public FilePerChunkStrategy(boolean sync, BlockManager manager, } else { mappedBufferManager = null; } + + this.readNettyChunkedNioFile = manager != null && manager.isReadNettyChunkedNioFile(); } private static void checkLayoutVersion(Container container) { @@ -214,7 +219,7 @@ public void writeChunk(Container container, BlockID blockID, ChunkInfo info, * TODO: Explore if we need to do that for ozone. 
*/ @Override - public ChunkBuffer readChunk(Container container, BlockID blockID, + public ChunkBufferToByteString readChunk(Container container, BlockID blockID, ChunkInfo info, DispatcherContext dispatcherContext) throws StorageContainerException { @@ -274,6 +279,9 @@ public ChunkBuffer readChunk(Container container, BlockID blockID, if (file.exists()) { long offset = info.getOffset() - chunkFileOffset; Preconditions.checkState(offset >= 0); + if (readNettyChunkedNioFile && dispatcherContext != null && dispatcherContext.isReleaseSupported()) { + return ChunkUtils.readData(file, bufferCapacity, offset, len, volume, dispatcherContext); + } return ChunkUtils.readData(len, bufferCapacity, file, offset, volume, readMappedBufferThreshold, readMappedBufferMaxCount > 0, mappedBufferManager); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java index 256d357a31d..53f5f154cce 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/BlockManager.java @@ -102,6 +102,9 @@ void finalizeBlock(Container container, BlockID blockId) /** @return the max count of memory mapped buffers to read. */ int getReadMappedBufferMaxCount(); + /** @return true iff Netty ChunkedNioFile read is enabled. */ + boolean isReadNettyChunkedNioFile(); + /** * Shutdown ContainerManager. */ diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java index 7751dba429d..6e5d064b7da 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.common.ChecksumData; import org.apache.hadoop.ozone.common.ChunkBuffer; +import org.apache.hadoop.ozone.common.ChunkBufferToByteString; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; @@ -75,7 +76,7 @@ default void writeChunk(Container container, BlockID blockID, ChunkInfo info, * TODO: Right now we do not support partial reads and writes of chunks. * TODO: Explore if we need to do that for ozone. 
*/ - ChunkBuffer readChunk(Container container, BlockID blockID, ChunkInfo info, + ChunkBufferToByteString readChunk(Container container, BlockID blockID, ChunkInfo info, DispatcherContext dispatcherContext) throws StorageContainerException; /** diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java index 3ff8f9e625d..073cbfa6edd 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java @@ -25,6 +25,7 @@ import java.security.NoSuchAlgorithmException; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.LinkedList; import java.util.List; @@ -48,6 +49,7 @@ import org.apache.hadoop.ozone.common.Checksum; import org.apache.hadoop.ozone.common.ChecksumData; import org.apache.hadoop.ozone.common.ChunkBuffer; +import org.apache.hadoop.ozone.common.ChunkBufferToByteString; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.common.helpers.BlockData; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; @@ -668,9 +670,9 @@ public void testWritReadManyChunks(ContainerTestVersionInfo versionInfo) // Read chunk via ReadChunk call. for (int x = 0; x < chunkCount; x++) { ChunkInfo info = chunks.get(x); - final ChunkBuffer data = chunkManager.readChunk(container, blockID, info, + final ChunkBufferToByteString data = chunkManager.readChunk(container, blockID, info, DispatcherContext.getHandleReadChunk()); - ChecksumData checksumData = checksum.computeChecksum(data); + ChecksumData checksumData = checksum.computeChecksum(Collections.singletonList(data.toByteString())); assertEquals(info.getChecksumData(), checksumData); } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestChunkManagerDummyImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestChunkManagerDummyImpl.java index 714426108bc..b249aa4fcfb 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestChunkManagerDummyImpl.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestChunkManagerDummyImpl.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.container.keyvalue.impl; -import org.apache.hadoop.ozone.common.ChunkBuffer; +import org.apache.hadoop.ozone.common.ChunkBufferToByteString; import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager; import org.junit.jupiter.api.Test; @@ -50,7 +50,7 @@ public void dummyManagerDoesNotWriteToFile() throws Exception { public void dummyManagerReadsAnyChunk() throws Exception { ChunkManager dummy = createTestSubject(); - ChunkBuffer dataRead = dummy.readChunk(getKeyValueContainer(), + final ChunkBufferToByteString dataRead = dummy.readChunk(getKeyValueContainer(), getBlockID(), getChunkInfo(), null); assertNotNull(dataRead); diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java 
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java index 36d71655192..d2fd394271c 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestFilePerBlockStrategy.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.ChunkBuffer; +import org.apache.hadoop.ozone.common.ChunkBufferToByteString; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; @@ -92,7 +93,7 @@ public void testMultipleWriteSingleRead() throws Exception { // Request to read the whole data in a single go. ChunkInfo largeChunk = getChunk(blockID.getLocalID(), 0, 0, datalen * chunkCount); - ChunkBuffer chunk = + final ChunkBufferToByteString chunk = subject.readChunk(container, blockID, largeChunk, null); ByteBuffer newdata = chunk.toByteString().asReadOnlyByteBuffer(); @@ -119,18 +120,16 @@ public void testPartialRead() throws Exception { ChunkManager subject = createTestSubject(); subject.writeChunk(container, blockID, info, data, WRITE_STAGE); - ChunkBuffer readData = subject.readChunk(container, blockID, info, null); + final ChunkBufferToByteString readData = subject.readChunk(container, blockID, info, null); // data will be ChunkBufferImplWithByteBuffer and readData will return // ChunkBufferImplWithByteBufferList. Hence, convert both ByteStrings // before comparing. 
- assertEquals(data.rewind().toByteString(), - readData.rewind().toByteString()); + assertEquals(data.rewind().toByteString(), readData.toByteString()); ChunkInfo info2 = getChunk(blockID.getLocalID(), 0, start, length); - ChunkBuffer readData2 = subject.readChunk(container, blockID, info2, null); + final ChunkBufferToByteString readData2 = subject.readChunk(container, blockID, info2, null); assertEquals(length, info2.getLen()); - assertEquals(data.rewind().toByteString().substring(start, start + length), - readData2.rewind().toByteString()); + assertEquals(data.rewind().toByteString().substring(start, start + length), readData2.toByteString()); } @Override diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java index 1fbfc1f1f70..3b650f1bf51 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java @@ -90,6 +90,7 @@ private void addPropertiesNotInXml() { ScmConfigKeys.OZONE_SCM_NODES_KEY, ScmConfigKeys.OZONE_SCM_ADDRESS_KEY, ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, + ScmConfigKeys.OZONE_CHUNK_READ_NETTY_CHUNKED_NIO_FILE_KEY, OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, OMConfigKeys.OZONE_FS_TRASH_CHECKPOINT_INTERVAL_KEY, OMConfigKeys.OZONE_OM_S3_GPRC_SERVER_ENABLED, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java index 46c3c716271..ecc9e8fae46 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java @@ -54,6 +54,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type.PutBlock; import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type.WriteChunk; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_READ_NETTY_CHUNKED_NIO_FILE_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -87,6 +88,7 @@ public static void init() throws Exception { OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); conf.setFromObject(clientConfig); + conf.setBoolean(OZONE_CHUNK_READ_NETTY_CHUNKED_NIO_FILE_KEY, true); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); conf.setQuietMode(false); conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4, From 6280c6643dd610579b261f695f50cf511b7f97c3 Mon Sep 17 00:00:00 2001 From: Nandakumar Vadivelu Date: Sun, 5 Jan 2025 13:42:18 +0530 Subject: [PATCH 039/168] HDDS-11952. Enable sortpom in hadoop-ozone. 
(#7646) --- hadoop-ozone/client/pom.xml | 1 + hadoop-ozone/common/pom.xml | 1 + hadoop-ozone/csi/pom.xml | 1 + hadoop-ozone/datanode/pom.xml | 1 + hadoop-ozone/dist/pom.xml | 1 + hadoop-ozone/fault-injection-test/pom.xml | 4 + hadoop-ozone/httpfsgateway/pom.xml | 1 + hadoop-ozone/insight/pom.xml | 1 + hadoop-ozone/integration-test/pom.xml | 4 + hadoop-ozone/interface-client/pom.xml | 1 + hadoop-ozone/interface-storage/pom.xml | 1 + hadoop-ozone/ozone-manager/pom.xml | 1 + hadoop-ozone/ozonefs-common/pom.xml | 1 + hadoop-ozone/ozonefs-hadoop2/pom.xml | 1 + hadoop-ozone/ozonefs-hadoop3-client/pom.xml | 1 + hadoop-ozone/ozonefs-hadoop3/pom.xml | 1 + hadoop-ozone/ozonefs-shaded/pom.xml | 1 + hadoop-ozone/ozonefs/pom.xml | 1 + hadoop-ozone/pom.xml | 243 ++++++++++---------- hadoop-ozone/recon-codegen/pom.xml | 1 + hadoop-ozone/recon/pom.xml | 1 + hadoop-ozone/s3-secret-store/pom.xml | 1 + hadoop-ozone/s3gateway/pom.xml | 1 + hadoop-ozone/tools/pom.xml | 1 + 24 files changed, 150 insertions(+), 122 deletions(-) diff --git a/hadoop-ozone/client/pom.xml b/hadoop-ozone/client/pom.xml index 6b5a1ac0c8b..427237eeaed 100644 --- a/hadoop-ozone/client/pom.xml +++ b/hadoop-ozone/client/pom.xml @@ -28,6 +28,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> Apache Ozone Client jar + true diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml index f7f60dcd1d1..1084e418069 100644 --- a/hadoop-ozone/common/pom.xml +++ b/hadoop-ozone/common/pom.xml @@ -29,6 +29,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jar + true diff --git a/hadoop-ozone/csi/pom.xml b/hadoop-ozone/csi/pom.xml index ba66c5d5272..2c5bb5d7f96 100644 --- a/hadoop-ozone/csi/pom.xml +++ b/hadoop-ozone/csi/pom.xml @@ -31,6 +31,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> false true + true diff --git a/hadoop-ozone/datanode/pom.xml b/hadoop-ozone/datanode/pom.xml index 733f0837fda..2c98b3b8500 100644 --- a/hadoop-ozone/datanode/pom.xml +++ b/hadoop-ozone/datanode/pom.xml @@ -30,6 +30,7 @@ false true true + true diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml index 9540a5195b7..82126325f26 100644 --- a/hadoop-ozone/dist/pom.xml +++ b/hadoop-ozone/dist/pom.xml @@ -33,6 +33,7 @@ 20241212-1-jdk21 ghcr.io/apache/ozone-testkrb5:20241129-1 true + true diff --git a/hadoop-ozone/fault-injection-test/pom.xml b/hadoop-ozone/fault-injection-test/pom.xml index e62f7e47dc0..1306013726d 100644 --- a/hadoop-ozone/fault-injection-test/pom.xml +++ b/hadoop-ozone/fault-injection-test/pom.xml @@ -28,6 +28,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> Apache Ozone Fault Injection Tests pom + + true + + network-tests mini-chaos-tests diff --git a/hadoop-ozone/httpfsgateway/pom.xml b/hadoop-ozone/httpfsgateway/pom.xml index bacc730a00f..e21bc166817 100644 --- a/hadoop-ozone/httpfsgateway/pom.xml +++ b/hadoop-ozone/httpfsgateway/pom.xml @@ -38,6 +38,7 @@ yyyy-MM-dd'T'HH:mm:ssZ ${maven.build.timestamp} true + true diff --git a/hadoop-ozone/insight/pom.xml b/hadoop-ozone/insight/pom.xml index fa3862a7f71..97cdf786502 100644 --- a/hadoop-ozone/insight/pom.xml +++ b/hadoop-ozone/insight/pom.xml @@ -30,6 +30,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> false + true diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml index f4a2f713185..f4e6b73cc63 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -28,6 +28,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> Apache Ozone Integration Tests jar + + true + + diff --git 
a/hadoop-ozone/interface-client/pom.xml b/hadoop-ozone/interface-client/pom.xml index 2e68deeeb3b..f7f53bda6a6 100644 --- a/hadoop-ozone/interface-client/pom.xml +++ b/hadoop-ozone/interface-client/pom.xml @@ -31,6 +31,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> true true + true diff --git a/hadoop-ozone/interface-storage/pom.xml b/hadoop-ozone/interface-storage/pom.xml index cd2e1e34783..63ee02a0da1 100644 --- a/hadoop-ozone/interface-storage/pom.xml +++ b/hadoop-ozone/interface-storage/pom.xml @@ -28,6 +28,7 @@ Apache Ozone Storage Interface jar + true diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml index 8e78814eb6b..0547ec4c2cf 100644 --- a/hadoop-ozone/ozone-manager/pom.xml +++ b/hadoop-ozone/ozone-manager/pom.xml @@ -30,6 +30,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> false + true diff --git a/hadoop-ozone/ozonefs-common/pom.xml b/hadoop-ozone/ozonefs-common/pom.xml index 18839deaee5..faad52a8f07 100644 --- a/hadoop-ozone/ozonefs-common/pom.xml +++ b/hadoop-ozone/ozonefs-common/pom.xml @@ -28,6 +28,7 @@ UTF-8 true + true diff --git a/hadoop-ozone/ozonefs-hadoop2/pom.xml b/hadoop-ozone/ozonefs-hadoop2/pom.xml index 8585a9dd544..48a64745dae 100644 --- a/hadoop-ozone/ozonefs-hadoop2/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop2/pom.xml @@ -27,6 +27,7 @@ 2.0.0-SNAPSHOT org.apache.hadoop.ozone.shaded + true diff --git a/hadoop-ozone/ozonefs-hadoop3-client/pom.xml b/hadoop-ozone/ozonefs-hadoop3-client/pom.xml index 2f23a5d318e..445c4a3fe54 100644 --- a/hadoop-ozone/ozonefs-hadoop3-client/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop3-client/pom.xml @@ -40,6 +40,7 @@ true + true diff --git a/hadoop-ozone/ozonefs-hadoop3/pom.xml b/hadoop-ozone/ozonefs-hadoop3/pom.xml index 6ec67b522f3..baf68142c3b 100644 --- a/hadoop-ozone/ozonefs-hadoop3/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop3/pom.xml @@ -28,6 +28,7 @@ true org.apache.hadoop.ozone.shaded + true diff --git a/hadoop-ozone/ozonefs-shaded/pom.xml b/hadoop-ozone/ozonefs-shaded/pom.xml index 9e77ffd7c33..0aacb602cb4 100644 --- a/hadoop-ozone/ozonefs-shaded/pom.xml +++ b/hadoop-ozone/ozonefs-shaded/pom.xml @@ -29,6 +29,7 @@ true org.apache.hadoop.ozone.shaded + true diff --git a/hadoop-ozone/ozonefs/pom.xml b/hadoop-ozone/ozonefs/pom.xml index aa554c422e5..90514ae5b2e 100644 --- a/hadoop-ozone/ozonefs/pom.xml +++ b/hadoop-ozone/ozonefs/pom.xml @@ -28,6 +28,7 @@ UTF-8 true + true diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index fdd9b2734cd..3f73ca96460 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -11,7 +11,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
---> +--> + 4.0.0 org.apache.ozone @@ -20,284 +21,282 @@ ozone 2.0.0-SNAPSHOT - Apache Ozone Project - Apache Ozone pom - - - apache/ozone:${project.version} - true - + Apache Ozone + Apache Ozone Project + client + common + csi + datanode + dist + fault-injection-test + httpfsgateway + insight + integration-test interface-client interface-storage - common - client ozone-manager - tools - integration-test - ozonefs-common ozonefs - datanode + ozonefs-common recon recon-codegen - s3gateway - dist - csi - fault-injection-test - insight - httpfsgateway s3-secret-store + s3gateway + tools - + + apache/ozone:${project.version} + + org.apache.ozone - ozone-common - ${ozone.version} - - - org.apache.ozone - ozone-common - ${ozone.version} - test-jar + hdds-annotation-processing + ${hdds.version} org.apache.ozone - ozone-client - ${ozone.version} + hdds-client + ${hdds.version} org.apache.ozone - ozone-interface-client - ${ozone.version} + hdds-common + ${hdds.version} org.apache.ozone - ozone-interface-storage - ${ozone.version} + hdds-config + ${hdds.version} org.apache.ozone - ozone-manager - ${ozone.version} + hdds-container-service + ${hdds.version} org.apache.ozone - ozone-s3gateway - ${ozone.version} + hdds-container-service + ${hdds.version} + test-jar org.apache.ozone - ozone-csi - ${ozone.version} + hdds-docs + ${hdds.version} org.apache.ozone - ozone-datanode - ${ozone.version} + hdds-erasurecode + ${hdds.version} org.apache.ozone - ozone-tools - ${ozone.version} + hdds-hadoop-dependency-client + ${hdds.version} org.apache.ozone - ozone-filesystem - ${ozone.version} + hdds-hadoop-dependency-server + ${hdds.version} org.apache.ozone - ozone-filesystem-shaded - ${ozone.version} + hdds-interface-admin + ${hdds.version} org.apache.ozone - ozone-filesystem-common - ${ozone.version} + hdds-interface-client + ${hdds.version} org.apache.ozone - ozone-filesystem-hadoop3 - ${ozone.version} + hdds-interface-server + ${hdds.version} org.apache.ozone - ozone-filesystem-hadoop3-client - ${ozone.version} + hdds-managed-rocksdb + ${hdds.version} org.apache.ozone - ozone-filesystem-hadoop2 - ${ozone.version} + hdds-rocks-native + ${hdds.rocks.native.version} org.apache.ozone - hdds-annotation-processing + hdds-server-framework ${hdds.version} org.apache.ozone - hdds-config + hdds-server-scm ${hdds.version} org.apache.ozone - hdds-erasurecode + hdds-server-scm ${hdds.version} + test-jar org.apache.ozone - hdds-interface-admin + hdds-tools ${hdds.version} org.apache.ozone - hdds-interface-client - ${hdds.version} + ozone-client + ${ozone.version} org.apache.ozone - hdds-interface-server - ${hdds.version} + ozone-common + ${ozone.version} org.apache.ozone - hdds-managed-rocksdb - ${hdds.version} + ozone-common + ${ozone.version} + test-jar org.apache.ozone - ozone-s3-secret-store - ${hdds.version} + ozone-csi + ${ozone.version} org.apache.ozone - hdds-test-utils - ${hdds.version} - test + ozone-datanode + ${ozone.version} org.apache.ozone - hdds-hadoop-dependency-client - ${hdds.version} + ozone-filesystem + ${ozone.version} org.apache.ozone - hdds-hadoop-dependency-server - ${hdds.version} + ozone-filesystem-common + ${ozone.version} org.apache.ozone - rocksdb-checkpoint-differ - ${hdds.version} + ozone-filesystem-hadoop2 + ${ozone.version} org.apache.ozone - hdds-hadoop-dependency-test - ${hdds.version} - test + ozone-filesystem-hadoop3 + ${ozone.version} org.apache.ozone - hdds-common - ${hdds.version} - test-jar - test + ozone-filesystem-hadoop3-client + ${ozone.version} org.apache.ozone - 
ozone-integration-test + ozone-filesystem-shaded ${ozone.version} - test-jar org.apache.ozone - ozone-manager + ozone-httpfsgateway ${ozone.version} - test-jar org.apache.ozone - hdds-common + ozone-insight ${hdds.version} org.apache.ozone - hdds-server-framework - ${hdds.version} + ozone-integration-test + ${ozone.version} + test-jar org.apache.ozone - hdds-server-scm - ${hdds.version} + ozone-interface-client + ${ozone.version} org.apache.ozone - hdds-docs - ${hdds.version} + ozone-interface-storage + ${ozone.version} org.apache.ozone - hdds-container-service - ${hdds.version} + ozone-manager + ${ozone.version} org.apache.ozone - hdds-client - ${hdds.version} + ozone-manager + ${ozone.version} + test-jar org.apache.ozone - hdds-client - ${hdds.version} - test-jar - test + ozone-recon + ${ozone.version} org.apache.ozone - hdds-tools + ozone-s3-secret-store ${hdds.version} org.apache.ozone - ozone-insight - ${hdds.version} + ozone-s3gateway + ${ozone.version} org.apache.ozone - ozone-httpfsgateway + ozone-tools ${ozone.version} org.apache.ozone - ozone-recon - ${ozone.version} + rocksdb-checkpoint-differ + ${hdds.version} org.apache.ozone - hdds-container-service + hdds-client ${hdds.version} test-jar + test org.apache.ozone - hdds-server-scm + hdds-common + ${hdds.version} test-jar + test + + + org.apache.ozone + hdds-hadoop-dependency-test ${hdds.version} + test org.apache.ozone - hdds-rocks-native - ${hdds.rocks.native.version} + hdds-test-utils + ${hdds.version} + test - - + + @@ -307,6 +306,10 @@ depcheck + + enforce + + validate @@ -314,10 +317,6 @@ - - enforce - - validate @@ -379,17 +378,17 @@ - ozonefs-shaded ozonefs-hadoop2 ozonefs-hadoop3 ozonefs-hadoop3-client + ozonefs-shaded go-offline - ozonefs-shaded ozonefs-hadoop2 + ozonefs-shaded diff --git a/hadoop-ozone/recon-codegen/pom.xml b/hadoop-ozone/recon-codegen/pom.xml index b8345c7d343..0a279c40ab2 100644 --- a/hadoop-ozone/recon-codegen/pom.xml +++ b/hadoop-ozone/recon-codegen/pom.xml @@ -25,6 +25,7 @@ Apache Ozone Recon CodeGen true + true diff --git a/hadoop-ozone/recon/pom.xml b/hadoop-ozone/recon/pom.xml index f203689b669..85d2eac9d2b 100644 --- a/hadoop-ozone/recon/pom.xml +++ b/hadoop-ozone/recon/pom.xml @@ -26,6 +26,7 @@ false 8.15.7 + true diff --git a/hadoop-ozone/s3-secret-store/pom.xml b/hadoop-ozone/s3-secret-store/pom.xml index 210969e766a..1dcaa17d560 100644 --- a/hadoop-ozone/s3-secret-store/pom.xml +++ b/hadoop-ozone/s3-secret-store/pom.xml @@ -28,6 +28,7 @@ UTF-8 true + true diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml index f012d3f1aab..351db4b61fb 100644 --- a/hadoop-ozone/s3gateway/pom.xml +++ b/hadoop-ozone/s3gateway/pom.xml @@ -29,6 +29,7 @@ false UTF-8 true + true diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml index 8ea8ded01ce..f3605d358eb 100644 --- a/hadoop-ozone/tools/pom.xml +++ b/hadoop-ozone/tools/pom.xml @@ -30,6 +30,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> false + true From f9bd05566784b015f091541c5be0d3b259ff01ec Mon Sep 17 00:00:00 2001 From: Nandakumar Vadivelu Date: Sun, 5 Jan 2025 14:54:34 +0530 Subject: [PATCH 040/168] HDDS-12020. Enable sortpom in hdds-erasurecode, hdds-server-framework, hdds-hadoop-dependency-client. 
(#7644) --- hadoop-hdds/client/pom.xml | 5 - hadoop-hdds/common/pom.xml | 2 +- hadoop-hdds/container-service/pom.xml | 8 +- hadoop-hdds/erasurecode/pom.xml | 30 +- hadoop-hdds/framework/pom.xml | 280 +++++++++---------- hadoop-hdds/hadoop-dependency-client/pom.xml | 233 ++++++++------- 6 files changed, 259 insertions(+), 299 deletions(-) diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml index e32457ec799..d1ce5d53019 100644 --- a/hadoop-hdds/client/pom.xml +++ b/hadoop-hdds/client/pom.xml @@ -27,12 +27,10 @@ Apache Ozone Distributed Data Store Client Library - com.google.guava guava - io.opentracing opentracing-api @@ -41,12 +39,10 @@ io.opentracing opentracing-util - jakarta.annotation jakarta.annotation-api - org.apache.commons commons-lang3 @@ -87,7 +83,6 @@ org.apache.ratis ratis-thirdparty-misc - org.slf4j slf4j-api diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index e1cf736f6d3..c1a2749fde5 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -226,10 +226,10 @@ + com.codahale.metrics metrics-core test - org.apache.ozone diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml index 0c1f0bbf4a1..1ee4017fedf 100644 --- a/hadoop-hdds/container-service/pom.xml +++ b/hadoop-hdds/container-service/pom.xml @@ -24,8 +24,8 @@ jar Apache Ozone HDDS Container Service Apache Ozone Distributed Data Store Container Service - + com.fasterxml.jackson.core jackson-annotations @@ -94,7 +94,6 @@ io.opentracing opentracing-util - jakarta.annotation jakarta.annotation-api @@ -103,7 +102,6 @@ jakarta.xml.bind jakarta.xml.bind-api - org.apache.commons commons-compress @@ -168,7 +166,6 @@ org.apache.ratis ratis-proto - org.apache.ratis ratis-server @@ -185,7 +182,6 @@ org.glassfish.jaxb jaxb-runtime - org.rocksdb rocksdbjni @@ -194,12 +190,10 @@ org.slf4j slf4j-api - org.yaml snakeyaml - org.apache.ozone hdds-docs diff --git a/hadoop-hdds/erasurecode/pom.xml b/hadoop-hdds/erasurecode/pom.xml index 5f34b7b7292..bb98efe1894 100644 --- a/hadoop-hdds/erasurecode/pom.xml +++ b/hadoop-hdds/erasurecode/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -24,29 +21,27 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-erasurecode 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Earsurecode utils - - Apache Ozone HDDS Erasurecode jar - - - true - + Apache Ozone HDDS Erasurecode + Apache Ozone Distributed Data Store Earsurecode utils + + com.google.guava + guava + org.apache.ozone hdds-common - org.slf4j slf4j-api - - com.google.guava - guava + org.apache.ozone + hdds-config + test @@ -60,11 +55,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-test-utils test - - org.apache.ozone - hdds-config - test - diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml index af233cbae9a..5108af9172d 100644 --- a/hadoop-hdds/framework/pom.xml +++ b/hadoop-hdds/framework/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -24,67 +21,46 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-server-framework 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Server Framework - - Apache Ozone HDDS Server Framework jar - - - true - + Apache Ozone HDDS Server Framework + Apache Ozone Distributed Data Store Server Framework - org.apache.ozone - hdds-config - - - org.apache.ozone - hdds-interface-client - - - org.apache.ozone - hdds-interface-server - - - org.apache.ozone - hdds-interface-admin - - - org.apache.ozone - hdds-common + ch.qos.reload4j + reload4j - org.apache.ozone - hdds-managed-rocksdb + com.fasterxml.jackson.core + jackson-annotations - org.apache.ozone - hdds-hadoop-dependency-server + com.fasterxml.jackson.core + jackson-databind - ch.qos.reload4j - reload4j + com.fasterxml.jackson.datatype + jackson-datatype-jsr310 - org.slf4j - slf4j-api + com.github.jnr + jnr-constants - org.slf4j - slf4j-reload4j + com.github.jnr + jnr-posix - org.apache.commons - commons-compress + com.google.code.gson + gson - org.apache.commons - commons-configuration2 + com.google.guava + guava - org.apache.commons - commons-lang3 + com.google.protobuf + protobuf-java commons-codec @@ -107,76 +83,101 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> commons-validator commons-validator - - - org.apache.logging.log4j - log4j-api - - org.apache.logging.log4j - log4j-core + io.dropwizard.metrics + metrics-core - - com.lmax - disruptor - runtime + io.opentracing + opentracing-api - org.bouncycastle - bcpkix-jdk18on + io.prometheus + simpleclient - org.bouncycastle - bcprov-jdk18on + io.prometheus + simpleclient_common - org.eclipse.jetty - jetty-http + io.prometheus + simpleclient_dropwizard - org.eclipse.jetty - jetty-util + jakarta.annotation + jakarta.annotation-api - org.eclipse.jetty - jetty-server + jakarta.ws.rs + jakarta.ws.rs-api - org.eclipse.jetty - jetty-servlet + javax.servlet + javax.servlet-api - org.eclipse.jetty - jetty-webapp + org.apache.commons + commons-compress - org.glassfish.jersey.core - jersey-server + org.apache.commons + commons-configuration2 - org.glassfish.jersey.containers - jersey-container-servlet-core + org.apache.commons + commons-lang3 - org.rocksdb - rocksdbjni - - - - ratis-server - org.apache.ratis + org.apache.hadoop + hadoop-hdfs-client - org.slf4j - slf4j-reload4j - - - org.bouncycastle - bcprov-jdk18on + com.squareup.okhttp + okhttp + + org.apache.logging.log4j + log4j-api + + + org.apache.logging.log4j + log4j-core + + + org.apache.ozone + hdds-common + + + org.apache.ozone + hdds-config + + + org.apache.ozone + hdds-hadoop-dependency-server + + + org.apache.ozone + hdds-interface-admin + + + org.apache.ozone + hdds-interface-client + + + org.apache.ozone + hdds-interface-server + + + org.apache.ozone + hdds-managed-rocksdb + + + org.apache.ozone + rocksdb-checkpoint-differ + ${hdds.version} + org.apache.ratis ratis-common @@ -187,11 +188,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ratis - ratis-server-api - - ratis-metrics-dropwizard3 - org.apache.ratis io.dropwizard.metrics @@ -201,90 +198,79 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ratis - ratis-thirdparty-misc - - - - io.dropwizard.metrics - metrics-core - - - io.opentracing - opentracing-api - - - io.prometheus - simpleclient + ratis-server + + + org.bouncycastle + bcprov-jdk18on + + + org.slf4j + slf4j-reload4j + + - io.prometheus - simpleclient_dropwizard + org.apache.ratis + ratis-server-api - io.prometheus - simpleclient_common + org.apache.ratis + 
ratis-thirdparty-misc - com.fasterxml.jackson.core - jackson-annotations + org.bouncycastle + bcpkix-jdk18on - com.fasterxml.jackson.core - jackson-databind + org.bouncycastle + bcprov-jdk18on - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 + org.eclipse.jetty + jetty-http - com.github.jnr - jnr-constants + org.eclipse.jetty + jetty-server - com.github.jnr - jnr-posix + org.eclipse.jetty + jetty-servlet - com.google.code.gson - gson + org.eclipse.jetty + jetty-util - com.google.guava - guava + org.eclipse.jetty + jetty-webapp - com.google.protobuf - protobuf-java + org.glassfish.jersey.containers + jersey-container-servlet-core - - org.apache.hadoop - hadoop-hdfs-client - - - com.squareup.okhttp - okhttp - - + org.glassfish.jersey.core + jersey-server - - org.apache.ozone - rocksdb-checkpoint-differ - ${hdds.version} + org.rocksdb + rocksdbjni - - jakarta.annotation - jakarta.annotation-api + org.slf4j + slf4j-api - jakarta.ws.rs - jakarta.ws.rs-api + org.slf4j + slf4j-reload4j - javax.servlet - javax.servlet-api + + com.lmax + disruptor + runtime @@ -311,7 +297,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - @@ -348,7 +333,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> maven-enforcer-plugin - ban-annotations + ban-annotations + diff --git a/hadoop-hdds/hadoop-dependency-client/pom.xml b/hadoop-hdds/hadoop-dependency-client/pom.xml index 5bb9b138687..276f6935584 100644 --- a/hadoop-hdds/hadoop-dependency-client/pom.xml +++ b/hadoop-hdds/hadoop-dependency-client/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -24,17 +21,20 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-hadoop-dependency-client 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Hadoop client dependencies - - Apache Ozone HDDS Hadoop Client dependencies jar + Apache Ozone HDDS Hadoop Client dependencies + Apache Ozone Distributed Data Store Hadoop client dependencies - true - true + + true + + com.nimbusds + nimbus-jose-jwt + org.apache.hadoop hadoop-annotations @@ -45,108 +45,99 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ${hadoop.version} - com.nimbusds - nimbus-jose-jwt - - - org.xerial.snappy - snappy-java - - - org.apache.hadoop - hadoop-annotations - - - com.google.guava - guava - - - - org.apache.commons - commons-math3 + ch.qos.reload4j + reload4j - commons-codec - commons-codec + com.fasterxml.jackson.core + jackson-databind - commons-io - commons-io + com.github.pjfanning + jersey-json - commons-net - commons-net + com.google.code.findbugs + jsr305 - commons-collections - commons-collections + com.google.code.gson + gson - javax.servlet - javax.servlet-api + com.google.guava + guava - org.eclipse.jetty - jetty-server + com.jcraft + jsch - org.eclipse.jetty - jetty-util + com.nimbusds + * - org.eclipse.jetty - jetty-servlet + com.nimbusds + nimbus-jose-jwt - org.eclipse.jetty - jetty-webapp + com.sun.jersey + jersey-core - com.github.pjfanning + com.sun.jersey jersey-json - - com.google.code.findbugs - jsr305 - com.sun.jersey - jersey-core + jersey-server com.sun.jersey jersey-servlet - com.sun.jersey - jersey-json + commons-beanutils + commons-beanutils - com.sun.jersey - jersey-server + commons-codec + commons-codec + + + commons-collections + commons-collections + + + commons-io + commons-io commons-logging commons-logging - log4j - log4j + commons-net + commons-net - ch.qos.reload4j - reload4j + dnsjava + dnsjava - commons-beanutils - 
commons-beanutils + javax.servlet + javax.servlet-api - org.apache.commons - commons-lang3 + javax.servlet.jsp + * - org.slf4j + log4j + log4j + + + net.minidev * @@ -154,59 +145,63 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> avro - com.google.code.gson - gson + org.apache.commons + commons-compress - com.jcraft - jsch + org.apache.commons + commons-lang3 + + + + org.apache.commons + commons-math3 org.apache.curator * - org.apache.zookeeper - zookeeper + org.apache.curator + * - org.apache.commons - commons-compress + org.apache.hadoop + hadoop-annotations org.apache.kerby kerb-simplekdc - com.fasterxml.jackson.core - jackson-databind + org.apache.zookeeper + zookeeper - dnsjava - dnsjava + org.eclipse.jetty + jetty-server - com.nimbusds - * + org.eclipse.jetty + jetty-servlet - net.minidev - * + org.eclipse.jetty + jetty-util - org.apache.curator - * + org.eclipse.jetty + jetty-webapp - javax.servlet.jsp + org.slf4j * + + org.xerial.snappy + snappy-java + - - - - com.nimbusds - nimbus-jose-jwt org.apache.hadoop @@ -215,20 +210,20 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> compile - com.google.guava - guava + ch.qos.reload4j + reload4j - org.eclipse.jetty - jetty-server + com.fasterxml.jackson.core + jackson-databind - org.eclipse.jetty - jetty-util + com.google.guava + guava - org.eclipse.jetty - jetty-util-ajax + com.google.protobuf + protobuf-java com.sun.jersey @@ -246,6 +241,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> commons-codec commons-codec + + commons-daemon + commons-daemon + commons-io commons-io @@ -255,48 +254,44 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> commons-logging - commons-daemon - commons-daemon - - - log4j - log4j + io.netty + netty - ch.qos.reload4j - reload4j + io.netty + netty-all - org.slf4j - slf4j-reload4j + javax.servlet + javax.servlet-api - com.google.protobuf - protobuf-java + log4j + log4j - javax.servlet - javax.servlet-api + org.apache.htrace + htrace-core4 - io.netty - netty + org.eclipse.jetty + jetty-server - io.netty - netty-all + org.eclipse.jetty + jetty-util - org.apache.htrace - htrace-core4 + org.eclipse.jetty + jetty-util-ajax org.fusesource.leveldbjni leveldbjni-all - com.fasterxml.jackson.core - jackson-databind + org.slf4j + slf4j-reload4j From 71de2a200aaa1f2d8885745312e5bbf8503bfcb7 Mon Sep 17 00:00:00 2001 From: Cyrill Date: Sun, 5 Jan 2025 15:37:20 +0300 Subject: [PATCH 041/168] HDDS-10469. 
Ozone Manager should continue to work when S3 secret storage is unavailable (#6339) --- .../s3/security/S3GetSecretRequest.java | 8 ++++++ .../s3/security/S3GetSecretResponse.java | 9 ++---- .../s3/security/TestS3GetSecretRequest.java | 28 +++++++++++++++++++ .../s3/remote/vault/VaultS3SecretStore.java | 8 ++++-- 4 files changed, 45 insertions(+), 8 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java index bb57a58dd00..31df897513e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java @@ -188,6 +188,14 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn OMException.ResultCodes.ACCESS_ID_NOT_FOUND); } + if (assignS3SecretValue != null && !s3SecretManager.isBatchSupported()) { + // A storage that does not support batch writing is likely to be a + // third-party secret storage that might throw an exception on write. + // In the case of the exception the request will fail. + s3SecretManager.storeSecret(assignS3SecretValue.getKerberosID(), + assignS3SecretValue); + } + // Compose response final GetS3SecretResponse.Builder getS3SecretResponse = GetS3SecretResponse.newBuilder().setS3Secret( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/S3GetSecretResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/S3GetSecretResponse.java index df55af31fd6..6e30f755f68 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/S3GetSecretResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/security/S3GetSecretResponse.java @@ -59,12 +59,9 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, = getOMResponse().getStatus() == OzoneManagerProtocolProtos.Status.OK; if (s3SecretValue != null && isOk) { if (s3SecretManager.isBatchSupported()) { - s3SecretManager.batcher().addWithBatch(batchOperation, - s3SecretValue.getKerberosID(), s3SecretValue); - } else { - s3SecretManager.storeSecret(s3SecretValue.getKerberosID(), - s3SecretValue); - } + s3SecretManager.batcher() + .addWithBatch(batchOperation, s3SecretValue.getKerberosID(), s3SecretValue); + } // else - the secret has already been stored in S3GetSecretRequest. 
} } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/security/TestS3GetSecretRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/security/TestS3GetSecretRequest.java index 47cc293e280..9c80a05c127 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/security/TestS3GetSecretRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/security/TestS3GetSecretRequest.java @@ -27,9 +27,11 @@ import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.S3SecretFunction; import org.apache.hadoop.ozone.om.S3SecretLockedManager; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OMMultiTenantManager; +import org.apache.hadoop.ozone.om.S3SecretManager; import org.apache.hadoop.ozone.om.S3SecretManagerImpl; import org.apache.hadoop.ozone.om.TenantOp; import org.apache.hadoop.ozone.om.S3SecretCache; @@ -76,10 +78,12 @@ import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.any; import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.framework; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; @@ -319,6 +323,30 @@ public void testGetSecretOfAnotherUserAsAdmin() throws IOException { processSuccessSecretRequest(USER_CAROL, 1, true); } + @Test + public void testFailSecretManagerOnGetSecret() throws IOException { + + // This effectively makes alice an S3 admin. + when(ozoneManager.isS3Admin(ugiAlice)).thenReturn(true); + + S3SecretManager failingS3Secret = mock(S3SecretManager.class); + doThrow(new IOException("Test Exception: Failed to store secret")) + .when(failingS3Secret).storeSecret(any(), any()); + when(failingS3Secret.doUnderLock(any(), any())) + .thenAnswer(invocationOnMock -> { + S3SecretFunction action = + invocationOnMock.getArgument(1, S3SecretFunction.class); + + return action.accept(failingS3Secret); + }); + + when(ozoneManager.getS3SecretManager()).thenReturn(failingS3Secret); + + assertThrows(Exception.class, () -> + processSuccessSecretRequest(USER_ALICE, 1, true) + ); + } + @Test public void testGetOwnSecretAsNonAdmin() throws IOException { diff --git a/hadoop-ozone/s3-secret-store/src/main/java/org/apache/hadoop/ozone/s3/remote/vault/VaultS3SecretStore.java b/hadoop-ozone/s3-secret-store/src/main/java/org/apache/hadoop/ozone/s3/remote/vault/VaultS3SecretStore.java index c9bb4d6435e..a96b888994f 100644 --- a/hadoop-ozone/s3-secret-store/src/main/java/org/apache/hadoop/ozone/s3/remote/vault/VaultS3SecretStore.java +++ b/hadoop-ozone/s3-secret-store/src/main/java/org/apache/hadoop/ozone/s3/remote/vault/VaultS3SecretStore.java @@ -72,15 +72,19 @@ public VaultS3SecretStore(String vaultAddress, .nameSpace(nameSpace) .sslConfig(sslConfig) .build(); - this.auth = auth; - vault = auth.auth(config); this.secretPath = secretPath.endsWith("/") ? 
secretPath.substring(0, secretPath.length() - 1) : secretPath; } catch (VaultException e) { throw new IOException("Failed to initialize remote secret store", e); } + + try { + auth(); + } catch (VaultException e) { + LOG.error("Failed to authenticate with remote secret store", e); + } } private void auth() throws VaultException { From 8f2689dfe5f9b79e64f7d78acbaaf300c1ccbd1c Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Mon, 6 Jan 2025 08:40:12 +0100 Subject: [PATCH 042/168] HDDS-11864. Remove config from OM for disabling Ratis (#7640) --- .../src/main/resources/ozone-default.xml | 9 ------ hadoop-hdds/docs/content/feature/OM-HA.md | 8 ------ hadoop-hdds/docs/content/feature/OM-HA.zh.md | 9 ------ .../apache/hadoop/ozone/om/OMConfigKeys.java | 4 --- .../main/compose/ozone-balancer/docker-config | 1 - .../src/main/compose/ozone-ha/docker-config | 1 - .../main/compose/ozone-om-ha/docker-config | 1 - .../compose/ozone-om-prepare/docker-config | 1 - .../main/compose/ozonesecure-ha/docker-config | 1 - .../compose/upgrade/compose/ha/docker-config | 1 - .../AbstractRootedOzoneFileSystemTest.java | 15 +--------- ...tractRootedOzoneFileSystemTestWithFSO.java | 4 +-- .../org/apache/hadoop/fs/ozone/TestOFS.java | 2 +- .../hadoop/fs/ozone/TestOFSWithCacheOnly.java | 28 ------------------- .../hadoop/fs/ozone/TestOFSWithFSO.java | 2 +- .../fs/ozone/TestOFSWithFSOAndCacheOnly.java | 27 ------------------ .../hadoop/fs/ozone/TestOFSWithFSPaths.java | 2 +- .../ratis/TestOzoneManagerRatisRequest.java | 18 +++++------- .../ozone/om/OmMetadataManagerImpl.java | 7 +---- .../apache/hadoop/ozone/om/OzoneManager.java | 7 +---- ...ManagerProtocolServerSideTranslatorPB.java | 15 +--------- .../OzoneDelegationTokenSecretManager.java | 5 +--- .../impl/OzoneManagerServiceProviderImpl.java | 4 --- 23 files changed, 17 insertions(+), 155 deletions(-) delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithCacheOnly.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSOAndCacheOnly.java diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index fdeb5c1c043..a0fdcd4b683 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -2045,15 +2045,6 @@ - - ozone.om.ratis.enable - true - OZONE, OM, RATIS, MANAGEMENT - Property to enable or disable Ratis server on OM. - Please note - this is a temporary property to disable OM Ratis server. - - - ozone.om.ratis.port 9872 diff --git a/hadoop-hdds/docs/content/feature/OM-HA.md b/hadoop-hdds/docs/content/feature/OM-HA.md index 3872c387335..cf8ca4351f3 100644 --- a/hadoop-hdds/docs/content/feature/OM-HA.md +++ b/hadoop-hdds/docs/content/feature/OM-HA.md @@ -41,14 +41,6 @@ Client connects to the Leader Ozone Manager which process the request and schedu ## Configuration -HA mode of Ozone Manager can be enabled with the following settings in `ozone-site.xml`: - -```XML - - ozone.om.ratis.enable - true - -``` One Ozone configuration (`ozone-site.xml`) can support multiple Ozone HA cluster. To select between the available HA clusters a logical name is required for each of the clusters which can be resolved to the IP addresses (and domain names) of the Ozone Managers. 
This logical name is called `serviceId` and can be configured in the `ozone-site.xml` diff --git a/hadoop-hdds/docs/content/feature/OM-HA.zh.md b/hadoop-hdds/docs/content/feature/OM-HA.zh.md index 2ce92087a0c..fae76ef03b4 100644 --- a/hadoop-hdds/docs/content/feature/OM-HA.zh.md +++ b/hadoop-hdds/docs/content/feature/OM-HA.zh.md @@ -42,15 +42,6 @@ Ozone Manager 和 Storage Container Manager 都支持 HA。在这种模式下, ## 配置 -可以在 `ozone-site.xml` 中配置以下设置来启用 Ozone Manager 的高可用模式: - -```XML - - ozone.om.ratis.enable - true - -``` - 一个 Ozone 的配置(`ozone-site.xml`)支持多个 Ozone 高可用集群。为了支持在多个高可用集群之间进行选择,每个集群都需要一个逻辑名称,该逻辑名称可以解析为 Ozone Manager 的 IP 地址(和域名)。 该逻辑名称叫做 `serviceId`,可以在 `ozone-site.xml` 中进行配置: diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java index 880fe8614b2..e274d822b63 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java @@ -177,10 +177,6 @@ private OMConfigKeys() { /** * OM Ratis related configurations. */ - public static final String OZONE_OM_RATIS_ENABLE_KEY - = "ozone.om.ratis.enable"; - public static final boolean OZONE_OM_RATIS_ENABLE_DEFAULT - = true; public static final String OZONE_OM_RATIS_PORT_KEY = "ozone.om.ratis.port"; public static final int OZONE_OM_RATIS_PORT_DEFAULT diff --git a/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config index 6e0781a1d9e..3d0cfce1eaa 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config @@ -26,7 +26,6 @@ OZONE-SITE.XML_ozone.om.nodes.om=om1,om2,om3 OZONE-SITE.XML_ozone.om.address.om.om1=om1 OZONE-SITE.XML_ozone.om.address.om.om2=om2 OZONE-SITE.XML_ozone.om.address.om.om3=om3 -OZONE-SITE.XML_ozone.om.ratis.enable=true OZONE-SITE.XML_ozone.scm.service.ids=scmservice OZONE-SITE.XML_ozone.scm.nodes.scmservice=scm1,scm2,scm3 diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config index ebf2ce532bd..92a71eea3c1 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config @@ -26,7 +26,6 @@ OZONE-SITE.XML_ozone.om.nodes.omservice=om1,om2,om3 OZONE-SITE.XML_ozone.om.address.omservice.om1=om1 OZONE-SITE.XML_ozone.om.address.omservice.om2=om2 OZONE-SITE.XML_ozone.om.address.omservice.om3=om3 -OZONE-SITE.XML_ozone.om.ratis.enable=true OZONE-SITE.XML_ozone.scm.service.ids=scmservice OZONE-SITE.XML_ozone.scm.nodes.scmservice=scm1,scm2,scm3 diff --git a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config index ae2fb092be6..b0ebb395f9a 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-om-ha/docker-config @@ -21,7 +21,6 @@ OZONE-SITE.XML_ozone.om.nodes.omservice=om1,om2,om3 OZONE-SITE.XML_ozone.om.address.omservice.om1=om1 OZONE-SITE.XML_ozone.om.address.omservice.om2=om2 OZONE-SITE.XML_ozone.om.address.omservice.om3=om3 -OZONE-SITE.XML_ozone.om.ratis.enable=true OZONE-SITE.XML_ozone.scm.names=scm OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.block.client.address=scm diff --git 
a/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-config index f0ec8fcaa1a..8550e618501 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-om-prepare/docker-config @@ -21,7 +21,6 @@ OZONE-SITE.XML_ozone.om.nodes.omservice=om1,om2,om3 OZONE-SITE.XML_ozone.om.address.omservice.om1=om1 OZONE-SITE.XML_ozone.om.address.omservice.om2=om2 OZONE-SITE.XML_ozone.om.address.omservice.om3=om3 -OZONE-SITE.XML_ozone.om.ratis.enable=true OZONE-SITE.XML_ozone.scm.names=scm OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config index 1495e89813a..a4f030d45f5 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config @@ -30,7 +30,6 @@ OZONE-SITE.XML_ozone.om.address.omservice.om3=om3 OZONE-SITE.XML_ozone.om.http-address.omservice.om1=om1 OZONE-SITE.XML_ozone.om.http-address.omservice.om2=om2 OZONE-SITE.XML_ozone.om.http-address.omservice.om3=om3 -OZONE-SITE.XML_ozone.om.ratis.enable=true OZONE-SITE.XML_ozone.scm.service.ids=scmservice OZONE-SITE.XML_ozone.scm.primordial.node.id=scm1 diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config index d06d3279dc9..bb68e9bf60f 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config @@ -23,7 +23,6 @@ OZONE-SITE.XML_ozone.om.nodes.omservice=om1,om2,om3 OZONE-SITE.XML_ozone.om.address.omservice.om1=om1 OZONE-SITE.XML_ozone.om.address.omservice.om2=om2 OZONE-SITE.XML_ozone.om.address.omservice.om3=om3 -OZONE-SITE.XML_ozone.om.ratis.enable=true OZONE-SITE.XML_ozone.scm.service.ids=scmservice OZONE-SITE.XML_ozone.scm.nodes.scmservice=scm1,scm2,scm3 diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java index a3b59824548..c2697ef541a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTest.java @@ -157,12 +157,11 @@ abstract class AbstractRootedOzoneFileSystemTest { private OzoneClient client; AbstractRootedOzoneFileSystemTest(BucketLayout bucketLayout, boolean setDefaultFs, - boolean isAclEnabled, boolean noFlush) { + boolean isAclEnabled) { // Initialize the cluster before EACH set of parameters this.bucketLayout = bucketLayout; enabledFileSystemPaths = setDefaultFs; enableAcl = isAclEnabled; - useOnlyCache = noFlush; isBucketFSOptimized = bucketLayout.isFileSystemOptimized(); } @@ -204,8 +203,6 @@ public Path getBucketPath() { private final boolean isBucketFSOptimized; private final boolean enableAcl; - private final boolean useOnlyCache; - private OzoneConfiguration conf; private MiniOzoneCluster cluster; private FileSystem fs; @@ -279,10 +276,6 @@ void initClusterAndEnv() throws IOException, InterruptedException, TimeoutExcept userOfs = UGI_USER1.doAs( (PrivilegedExceptionAction)() -> (RootedOzoneFileSystem) FileSystem.get(conf)); - - if 
(useOnlyCache) { - cluster.getOzoneManager().getOmServerProtocol().setShouldFlushCache(true); - } } protected OMMetrics getOMMetrics() { @@ -2361,9 +2354,6 @@ private Path createAndGetBucketPath() @Test void testSnapshotRead() throws Exception { - if (useOnlyCache) { - return; - } // Init data OzoneBucket bucket1 = TestDataUtil.createVolumeAndBucket(client, bucketLayout); @@ -2410,9 +2400,6 @@ void testFileSystemDeclaresCapability() throws Throwable { @Test void testSnapshotDiff() throws Exception { - if (useOnlyCache) { - return; - } OzoneBucket bucket1 = TestDataUtil.createVolumeAndBucket(client, bucketLayout); Path volumePath1 = new Path(OZONE_URI_DELIMITER, bucket1.getVolumeName()); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTestWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTestWithFSO.java index 40ef0bff7ec..1698b814617 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTestWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractRootedOzoneFileSystemTestWithFSO.java @@ -47,8 +47,8 @@ abstract class AbstractRootedOzoneFileSystemTestWithFSO extends AbstractRootedOz private static final Logger LOG = LoggerFactory.getLogger(AbstractRootedOzoneFileSystemTestWithFSO.class); - AbstractRootedOzoneFileSystemTestWithFSO(boolean enableOMRatis, boolean isAclEnabled, boolean noFlush) { - super(BucketLayout.FILE_SYSTEM_OPTIMIZED, true, isAclEnabled, noFlush); + AbstractRootedOzoneFileSystemTestWithFSO(boolean isAclEnabled) { + super(BucketLayout.FILE_SYSTEM_OPTIMIZED, true, isAclEnabled); } /** diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFS.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFS.java index e9f734a426c..7c5460b29fd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFS.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFS.java @@ -23,6 +23,6 @@ @TestInstance(TestInstance.Lifecycle.PER_CLASS) class TestOFS extends AbstractRootedOzoneFileSystemTest { TestOFS() { - super(BucketLayout.LEGACY, false, false, false); + super(BucketLayout.LEGACY, false, false); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithCacheOnly.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithCacheOnly.java deleted file mode 100644 index 58b1f97a8d8..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithCacheOnly.java +++ /dev/null @@ -1,28 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.ozone; - -import org.apache.hadoop.ozone.om.helpers.BucketLayout; -import org.junit.jupiter.api.TestInstance; - -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -class TestOFSWithCacheOnly extends AbstractRootedOzoneFileSystemTest { - TestOFSWithCacheOnly() { - super(BucketLayout.LEGACY, false, false, true); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSO.java index de38b786f31..1c81c6e5bf9 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSO.java @@ -22,6 +22,6 @@ @TestInstance(TestInstance.Lifecycle.PER_CLASS) class TestOFSWithFSO extends AbstractRootedOzoneFileSystemTestWithFSO { TestOFSWithFSO() { - super(false, false, false); + super(false); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSOAndCacheOnly.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSOAndCacheOnly.java deleted file mode 100644 index 99e08f9eeff..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSOAndCacheOnly.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.fs.ozone; - -import org.junit.jupiter.api.TestInstance; - -@TestInstance(TestInstance.Lifecycle.PER_CLASS) -class TestOFSWithFSOAndCacheOnly extends AbstractRootedOzoneFileSystemTestWithFSO { - TestOFSWithFSOAndCacheOnly() { - super(false, false, true); - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSPaths.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSPaths.java index 75c09467237..37937bf5e63 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSPaths.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOFSWithFSPaths.java @@ -23,6 +23,6 @@ @TestInstance(TestInstance.Lifecycle.PER_CLASS) class TestOFSWithFSPaths extends AbstractRootedOzoneFileSystemTest { TestOFSWithFSPaths() { - super(BucketLayout.LEGACY, true, false, false); + super(BucketLayout.LEGACY, true, false); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java index 6d396cf3af0..3fc991ff580 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java @@ -114,7 +114,6 @@ public void testUnknownRequestHandling() OzoneManagerRatisServer ratisServer = mock(OzoneManagerRatisServer.class); ProtocolMessageMetrics protocolMessageMetrics = mock(ProtocolMessageMetrics.class); - long lastTransactionIndexForNonRatis = 100L; OzoneManagerProtocolProtos.OMResponse expectedResponse = OzoneManagerProtocolProtos.OMResponse.newBuilder() @@ -126,17 +125,14 @@ public void testUnknownRequestHandling() omRequest.getCmdType()) .build(); - boolean[] enableRatisValues = {true, false}; - for (boolean enableRatis : enableRatisValues) { - OzoneManagerProtocolServerSideTranslatorPB serverSideTranslatorPB = - new OzoneManagerProtocolServerSideTranslatorPB(ozoneManager, - ratisServer, protocolMessageMetrics, enableRatis, - lastTransactionIndexForNonRatis); + OzoneManagerProtocolServerSideTranslatorPB serverSideTranslatorPB = + new OzoneManagerProtocolServerSideTranslatorPB(ozoneManager, + ratisServer, protocolMessageMetrics, true, + 100L); - OzoneManagerProtocolProtos.OMResponse actualResponse = - serverSideTranslatorPB.processRequest(omRequest); + OzoneManagerProtocolProtos.OMResponse actualResponse = + serverSideTranslatorPB.processRequest(omRequest); - assertEquals(expectedResponse, actualResponse); - } + assertEquals(expectedResponse, actualResponse); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java index 6698ece4a8d..8f4c070b76c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java @@ -351,12 +351,7 @@ public OmMetadataManagerImpl(OzoneConfiguration conf, this.ozoneManager = ozoneManager; this.perfMetrics = perfMetrics; this.lock = new OzoneManagerLock(conf); - // TODO: This is a temporary check. Once fully implemented, all OM state - // change should go through Ratis - be it standalone (for non-HA) or - // replicated (for HA). - isRatisEnabled = conf.getBoolean( - OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, - OMConfigKeys.OZONE_OM_RATIS_ENABLE_DEFAULT); + isRatisEnabled = true; this.omEpoch = OmUtils.getOMEpoch(isRatisEnabled); // For test purpose only ignorePipelineinKey = conf.getBoolean( diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index 6720f314748..d26546e47ee 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -582,12 +582,7 @@ private OzoneManager(OzoneConfiguration conf, StartupOption startupOption) OZONE_OM_NAMESPACE_STRICT_S3, OZONE_OM_NAMESPACE_STRICT_S3_DEFAULT); - // TODO: This is a temporary check. Once fully implemented, all OM state - // change should go through Ratis - be it standalone (for non-HA) or - // replicated (for HA). - isRatisEnabled = configuration.getBoolean( - OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, - OMConfigKeys.OZONE_OM_RATIS_ENABLE_DEFAULT); + isRatisEnabled = true; // Ratis server comes with JvmPauseMonitor, no need to start another jvmPauseMonitor = !isRatisEnabled ? newJvmPauseMonitor(omId) : null; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java index 6b55b7384bd..654610f81dc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java @@ -85,9 +85,6 @@ public class OzoneManagerProtocolServerSideTranslatorPB implements OzoneManagerP private final RequestValidations requestValidations; private final OMPerformanceMetrics perfMetrics; - // always true, only used in tests - private boolean shouldFlushCache = true; - private OMRequest lastRequestToSubmit; @@ -313,9 +310,7 @@ private OMResponse submitRequestDirectlyToOM(OMRequest request) { return createErrorResponse(request, ex); } try { - if (shouldFlushCache) { - omClientResponse.getFlushFuture().get(); - } + omClientResponse.getFlushFuture().get(); if (LOG.isTraceEnabled()) { LOG.trace("Future for {} is completed", request); } @@ -365,12 +360,4 @@ public static Logger getLog() { public void awaitDoubleBufferFlush() throws InterruptedException { ozoneManagerDoubleBuffer.awaitFlush(); } - - @VisibleForTesting - public void setShouldFlushCache(boolean shouldFlushCache) { - if (ozoneManagerDoubleBuffer != null) { - ozoneManagerDoubleBuffer.stopDaemon(); - } - this.shouldFlushCache = shouldFlushCache; - } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java index 420cb6c6dcb..c496da41eb4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/security/OzoneDelegationTokenSecretManager.java @@ -41,7 +41,6 @@ import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.security.x509.exception.CertificateException; import org.apache.hadoop.io.Text; -import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.S3SecretManager; import org.apache.hadoop.ozone.om.exceptions.OMException; @@ -105,9 +104,7 @@ public OzoneDelegationTokenSecretManager(Builder b) throws IOException { this.ozoneManager = b.ozoneManager; this.store = new OzoneSecretStore(b.ozoneConf, this.ozoneManager.getMetadataManager()); - isRatisEnabled = b.ozoneConf.getBoolean( - OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, - OMConfigKeys.OZONE_OM_RATIS_ENABLE_DEFAULT); + isRatisEnabled = true; this.secretKeyClient = b.secretKeyClient; loadTokenSecretState(store.loadState()); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java index 491d631249c..6bff4344f45 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java @@ -352,10 +352,6 @@ public void stop() throws Exception { */ @VisibleForTesting public String getOzoneManagerSnapshotUrl() throws IOException { - if (!configuration.getBoolean( - OMConfigKeys.OZONE_OM_RATIS_ENABLE_KEY, false)) { - return omDBSnapshotUrl; - } String omLeaderUrl = omDBSnapshotUrl; List serviceList = ozoneManagerClient.getServiceList(); From ab161dd72afe9fcef060f11e0d2721b16b8fd34d Mon Sep 17 00:00:00 2001 From: Devesh Kumar Singh Date: Mon, 6 Jan 2025 13:33:28 +0530 Subject: [PATCH 043/168] HDDS-11949. Ozone Recon - Update Recon OM Sync default configs and docker configs. (#7600) --- hadoop-hdds/common/src/main/resources/ozone-default.xml | 6 +++--- .../apache/hadoop/ozone/recon/ReconServerConfigKeys.java | 6 +++--- .../recon/spi/impl/OzoneManagerServiceProviderImpl.java | 8 ++++---- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index a0fdcd4b683..1fcef139daf 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -3314,7 +3314,7 @@ ozone.recon.om.snapshot.task.interval.delay - 10m + 5s OZONE, RECON, OM Interval in MINUTES by Recon to request OM DB Snapshot. @@ -3330,7 +3330,7 @@ recon.om.delta.update.limit - 2000 + 50000 OZONE, RECON Recon each time get a limited delta updates from OM. 
@@ -3351,7 +3351,7 @@ recon.om.delta.update.loop.limit - 10 + 50 OZONE, RECON The sync between Recon and OM consists of several small diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java index 5c9e4039635..02060c03ef8 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java @@ -85,7 +85,7 @@ public final class ReconServerConfigKeys { public static final String OZONE_RECON_OM_SNAPSHOT_TASK_INTERVAL_DELAY = "ozone.recon.om.snapshot.task.interval.delay"; public static final String OZONE_RECON_OM_SNAPSHOT_TASK_INTERVAL_DEFAULT - = "10m"; + = "5s"; @Deprecated public static final String RECON_OM_SNAPSHOT_TASK_INTERVAL_DELAY = "recon.om.snapshot.task.interval.delay"; @@ -98,10 +98,10 @@ public final class ReconServerConfigKeys { public static final String RECON_OM_DELTA_UPDATE_LIMIT = "recon.om.delta.update.limit"; - public static final long RECON_OM_DELTA_UPDATE_LIMIT_DEFUALT = 2000; + public static final long RECON_OM_DELTA_UPDATE_LIMIT_DEFAULT = 50000; public static final String RECON_OM_DELTA_UPDATE_LOOP_LIMIT = "recon.om.delta.update.loop.limit"; - public static final int RECON_OM_DELTA_UPDATE_LOOP_LIMIT_DEFUALT = 10; + public static final int RECON_OM_DELTA_UPDATE_LOOP_LIMIT_DEFAULT = 50; public static final String OZONE_RECON_TASK_THREAD_COUNT_KEY = "ozone.recon.task.thread.count"; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java index 6bff4344f45..d5b7b1cfc91 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java @@ -91,9 +91,9 @@ import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_TASK_INTERVAL_DELAY; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_OM_SNAPSHOT_TASK_INTERVAL_DEFAULT; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LIMIT; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LIMIT_DEFUALT; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LIMIT_DEFAULT; import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LOOP_LIMIT; -import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LOOP_LIMIT_DEFUALT; +import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LOOP_LIMIT_DEFAULT; import static org.apache.hadoop.ozone.recon.ReconUtils.convertNumericToSymbolic; import static org.apache.ratis.proto.RaftProtos.RaftPeerRole.LEADER; @@ -178,10 +178,10 @@ public OzoneManagerServiceProviderImpl( .OZONE_OM_HTTPS_ADDRESS_KEY); long deltaUpdateLimits = configuration.getLong(RECON_OM_DELTA_UPDATE_LIMIT, - RECON_OM_DELTA_UPDATE_LIMIT_DEFUALT); + RECON_OM_DELTA_UPDATE_LIMIT_DEFAULT); int deltaUpdateLoopLimits = configuration.getInt( RECON_OM_DELTA_UPDATE_LOOP_LIMIT, - RECON_OM_DELTA_UPDATE_LOOP_LIMIT_DEFUALT); + RECON_OM_DELTA_UPDATE_LOOP_LIMIT_DEFAULT); omSnapshotDBParentDir = 
reconUtils.getReconDbDir(configuration, OZONE_RECON_OM_SNAPSHOT_DB_DIR); From 3d35b01e625d86864a7bec99e5ac490b9e51b573 Mon Sep 17 00:00:00 2001 From: Sarveksha Yeshavantha Raju <79865743+sarvekshayr@users.noreply.github.com> Date: Mon, 6 Jan 2025 17:46:01 +0530 Subject: [PATCH 044/168] HDDS-12011. Show PID of running service. (#7648) --- hadoop-ozone/dist/src/shell/ozone/ozone | 4 +++- .../apache/hadoop/ozone/repair/RepairTool.java | 15 +++++++++------ 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/hadoop-ozone/dist/src/shell/ozone/ozone b/hadoop-ozone/dist/src/shell/ozone/ozone index 5bb05fee724..d3d226a8b89 100755 --- a/hadoop-ozone/dist/src/shell/ozone/ozone +++ b/hadoop-ozone/dist/src/shell/ozone/ozone @@ -257,8 +257,10 @@ function check_running_ozone_services for service in "${services[@]}"; do for pid_file in ${OZONE_PID_DIR}/ozone-*-${service}.pid; do if [[ -f "${pid_file}" ]]; then - if kill -0 "$(cat "${pid_file}")" 2>/dev/null; then + pid=$(cat "${pid_file}") + if kill -0 "${pid}" 2>/dev/null; then export "OZONE_${service^^}_RUNNING=true" + export "OZONE_${service^^}_PID=${pid}" fi fi done diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java index d8a976b2fd8..20a30f0b187 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java @@ -40,15 +40,18 @@ public final Void call() throws Exception { } protected boolean checkIfServiceIsRunning(String serviceName) { - String envVariable = String.format("OZONE_%s_RUNNING", serviceName); - String runningServices = System.getenv(envVariable); - if ("true".equals(runningServices)) { + String runningEnvVar = String.format("OZONE_%s_RUNNING", serviceName); + String pidEnvVar = String.format("OZONE_%s_PID", serviceName); + String isServiceRunning = System.getenv(runningEnvVar); + String servicePid = System.getenv(pidEnvVar); + if ("true".equals(isServiceRunning)) { if (!force) { - error("Error: %s is currently running on this host. " + - "Stop the service before running the repair tool.", serviceName); + error("Error: %s is currently running on this host with PID %s. " + + "Stop the service before running the repair tool.", serviceName, servicePid); return true; } else { - info("Warning: --force flag used. Proceeding despite %s being detected as running.", serviceName); + info("Warning: --force flag used. Proceeding despite %s being detected as running with PID %s.", + serviceName, servicePid); } } else { info("No running %s service detected. Proceeding with repair.", serviceName); From 2ec05cb122f8ae8036d0a55e03f623fcbb8cf93b Mon Sep 17 00:00:00 2001 From: Abhishek Pal <43001336+devabhishekpal@users.noreply.github.com> Date: Tue, 7 Jan 2025 00:54:20 +0530 Subject: [PATCH 045/168] HDDS-11987. 
Remove duplicate Quota In Bytes field from DU metadata (#7649) --- .../src/v2/components/duMetadata/duMetadata.tsx | 5 ----- 1 file changed, 5 deletions(-) diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duMetadata/duMetadata.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duMetadata/duMetadata.tsx index f2c740f7dbc..e46282f1856 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duMetadata/duMetadata.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duMetadata/duMetadata.tsx @@ -179,11 +179,6 @@ const DUMetadata: React.FC = ({ values.push(moment(objectInfo.modificationTime).format('ll LTS')); } - if (objectInfo?.quotaInBytes !== undefined && objectInfo?.quotaInBytes !== -1) { - keys.push('Quota In Bytes'); - values.push(byteToSize(objectInfo.quotaInBytes, 3)); - } - if (objectInfo?.quotaInNamespace !== undefined && objectInfo?.quotaInNamespace !== -1) { keys.push('Quota In Namespace'); values.push(byteToSize(objectInfo.quotaInNamespace, 3)); From 5354cec5d46deab4fa54feb1f4b3fd7b100bce46 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" Date: Mon, 6 Jan 2025 22:13:56 +0100 Subject: [PATCH 046/168] HDDS-12027. Mark TestBlockDataStreamOutput#testMultiBlockWrite as flaky --- .../hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java index ecc9e8fae46..c1345207d99 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java @@ -41,6 +41,7 @@ import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.TestHelper; +import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -152,6 +153,7 @@ public void testMultiChunkWrite() throws Exception { } @Test + @Flaky("HDDS-12027") public void testMultiBlockWrite() throws Exception { testWrite(blockSize + 50); testWriteWithFailure(blockSize + 50); From ae9a56fc476df9d6dbad0109f1543bdbf1c43595 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Tue, 7 Jan 2025 07:16:07 +0100 Subject: [PATCH 047/168] HDDS-11991. 
Use picocli built-in for missing subcommand of GenericCli (#7635) --- .../apache/hadoop/hdds/cli/GenericCli.java | 21 ++----------- .../hdds/cli/MissingSubcommandException.java | 31 ------------------- .../hadoop/ozone/HddsDatanodeService.java | 3 +- .../StorageContainerManagerStarter.java | 3 +- .../src/main/smoketest/admincli/admin.robot | 2 +- .../hadoop/ozone/om/OzoneManagerStarter.java | 3 +- .../hadoop/ozone/recon/ReconServer.java | 3 +- .../org/apache/hadoop/ozone/s3/Gateway.java | 3 +- .../GenerateOzoneRequiredConfigurations.java | 4 ++- .../ozone/shell/checknative/CheckNative.java | 3 +- .../shell/tenant/GetUserInfoHandler.java | 10 ------ 11 files changed, 19 insertions(+), 67 deletions(-) delete mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java index 3afda85498b..c698a9f3d50 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java @@ -18,7 +18,6 @@ import java.io.IOException; import java.util.Map; -import java.util.concurrent.Callable; import com.google.common.base.Strings; import org.apache.hadoop.fs.Path; @@ -28,13 +27,13 @@ import org.apache.hadoop.security.UserGroupInformation; import picocli.CommandLine; import picocli.CommandLine.ExitCode; -import picocli.CommandLine.Model.CommandSpec; import picocli.CommandLine.Option; /** * This is a generic parent class for all the ozone related cli tools. */ -public class GenericCli implements Callable, GenericParentCommand { +@CommandLine.Command +public abstract class GenericCli implements GenericParentCommand { public static final int EXECUTION_ERROR_EXIT_CODE = -1; @@ -71,15 +70,6 @@ public GenericCli(CommandLine.IFactory factory) { ExtensibleParentCommand.addSubcommands(cmd); } - /** - * Handle the error when subcommand is required but not set. - */ - public static void missingSubcommand(CommandSpec spec) { - System.err.println("Incomplete command"); - spec.commandLine().usage(System.err); - System.exit(EXECUTION_ERROR_EXIT_CODE); - } - public void run(String[] argv) { int exitCode = execute(argv); @@ -103,11 +93,6 @@ protected void printError(Throwable error) { } } - @Override - public Void call() throws Exception { - throw new MissingSubcommandException(cmd); - } - @Override public OzoneConfiguration getOzoneConf() { return config; @@ -121,7 +106,7 @@ public UserGroupInformation getUser() throws IOException { } @VisibleForTesting - public picocli.CommandLine getCmd() { + public CommandLine getCmd() { return cmd; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java deleted file mode 100644 index 759476579e9..00000000000 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/MissingSubcommandException.java +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hdds.cli; - -import picocli.CommandLine; - -/** - * Exception to throw if subcommand is not selected but required. - */ -public class MissingSubcommandException extends CommandLine.ParameterException { - - public MissingSubcommandException(CommandLine cmd) { - super(cmd, "Incomplete command"); - } - -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java index bf33b9780d2..a6980e232b1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java @@ -26,6 +26,7 @@ import java.util.List; import java.util.Map; import java.util.UUID; +import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.atomic.AtomicBoolean; @@ -97,7 +98,7 @@ hidden = true, description = "Start the datanode for ozone", versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) -public class HddsDatanodeService extends GenericCli implements ServicePlugin { +public class HddsDatanodeService extends GenericCli implements Callable, ServicePlugin { private static final Logger LOG = LoggerFactory.getLogger( HddsDatanodeService.class); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java index 8c0044f66a9..1eef7bce14c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java @@ -37,6 +37,7 @@ import picocli.CommandLine.Command; import java.io.IOException; +import java.util.concurrent.Callable; import static org.apache.hadoop.ozone.conf.OzoneServiceConfig.DEFAULT_SHUTDOWN_HOOK_PRIORITY; @@ -49,7 +50,7 @@ hidden = true, description = "Start or initialize the scm server.", versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) -public class StorageContainerManagerStarter extends GenericCli { +public class StorageContainerManagerStarter extends GenericCli implements Callable { private OzoneConfiguration conf; private SCMStarterInterface receiver; diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/admin.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/admin.robot index a28888b23f4..2f1d0825b39 100644 --- a/hadoop-ozone/dist/src/main/smoketest/admincli/admin.robot +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/admin.robot @@ -22,7 +22,7 @@ Test Timeout 5 minutes *** Test Cases *** Incomplete command ${output} = Execute And Ignore Error ozone admin - Should contain ${output} Incomplete command + Should contain ${output} Missing required subcommand Should contain ${output} container Should contain ${output} datanode Should contain ${output} om diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java index 27cb8d8aa3c..a587a628533 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManagerStarter.java @@ -33,6 +33,7 @@ import picocli.CommandLine.Command; import java.io.IOException; +import java.util.concurrent.Callable; import static org.apache.hadoop.ozone.conf.OzoneServiceConfig.DEFAULT_SHUTDOWN_HOOK_PRIORITY; @@ -44,7 +45,7 @@ hidden = true, description = "Start or initialize the Ozone Manager.", versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) -public class OzoneManagerStarter extends GenericCli { +public class OzoneManagerStarter extends GenericCli implements Callable { private OzoneConfiguration conf; private OMStarterInterface receiver; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java index 0970c2da687..fc0dc18cce9 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServer.java @@ -56,6 +56,7 @@ import java.io.IOException; import java.net.InetSocketAddress; +import java.util.concurrent.Callable; import java.util.concurrent.atomic.AtomicBoolean; import static org.apache.hadoop.hdds.ratis.RatisHelper.newJvmPauseMonitor; @@ -70,7 +71,7 @@ /** * Recon server main class that stops and starts recon services. */ -public class ReconServer extends GenericCli { +public class ReconServer extends GenericCli implements Callable { private static final Logger LOG = LoggerFactory.getLogger(ReconServer.class); private Injector injector; diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java index c20c9b496f0..511592d3a04 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java @@ -19,6 +19,7 @@ import java.io.IOException; import java.net.InetSocketAddress; +import java.util.concurrent.Callable; import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.cli.GenericCli; @@ -53,7 +54,7 @@ hidden = true, description = "S3 compatible rest server.", versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) -public class Gateway extends GenericCli { +public class Gateway extends GenericCli implements Callable { private static final Logger LOG = LoggerFactory.getLogger(Gateway.class); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java index 927e9186ff5..c88b6b2d698 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java @@ -41,6 +41,8 @@ import java.nio.file.Paths; import java.util.ArrayList; import java.util.List; +import java.util.concurrent.Callable; + /** * GenerateOzoneRequiredConfigurations - A tool 
to generate ozone-site.xml
Merge FSORepairTool and FSORepairCLI (#7639) --- .../ozone/repair/om/TestFSORepairTool.java | 25 +- .../hadoop/ozone/repair/om/FSORepairCLI.java | 78 -- .../hadoop/ozone/repair/om/FSORepairTool.java | 752 +++++++++--------- .../hadoop/ozone/repair/om/OMRepair.java | 2 +- 4 files changed, 404 insertions(+), 453 deletions(-) delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java index d37f8ce57fb..fb6472d7bc7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java @@ -36,7 +36,6 @@ import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; import org.apache.hadoop.ozone.repair.OzoneRepair; import org.apache.ozone.test.GenericTestUtils; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.BeforeAll; @@ -89,8 +88,8 @@ public class TestFSORepairTool { private static FSORepairTool.Report fullReport; private static FSORepairTool.Report emptyReport; - private GenericTestUtils.PrintStreamCapturer out; - private GenericTestUtils.PrintStreamCapturer err; + private static GenericTestUtils.PrintStreamCapturer out; + private static GenericTestUtils.PrintStreamCapturer err; @BeforeAll public static void setup() throws Exception { @@ -103,6 +102,8 @@ public static void setup() throws Exception { conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); fs = FileSystem.get(conf); + out = GenericTestUtils.captureOut(); + err = GenericTestUtils.captureErr(); cmd = new OzoneRepair().getCmd(); dbPath = new File(OMStorage.getOmDbDir(conf) + "/" + OM_DB_NAME).getPath(); @@ -147,19 +148,13 @@ public static void setup() throws Exception { @BeforeEach public void init() throws Exception { - out = GenericTestUtils.captureOut(); - err = GenericTestUtils.captureErr(); - } - - @AfterEach - public void clean() throws Exception { - // reset stream after each unit test - IOUtils.closeQuietly(out, err); + out.reset(); + err.reset(); } @AfterAll public static void reset() throws IOException { - IOUtils.closeQuietly(fs, client, cluster); + IOUtils.closeQuietly(fs, client, cluster, out, err); } /** @@ -239,7 +234,7 @@ public void testNonExistentBucket() { // When a non-existent bucket filter is passed int exitCode = dryRun("--volume", "/vol1", "--bucket", "bucket3"); assertEquals(0, exitCode); - String cliOutput = out.getOutput(); + String cliOutput = err.getOutput(); assertThat(cliOutput).contains("Bucket 'bucket3' does not exist in volume '/vol1'."); } @@ -249,7 +244,7 @@ public void testNonExistentVolume() { // When a non-existent volume filter is passed int exitCode = dryRun("--volume", "/vol5"); assertEquals(0, exitCode); - String cliOutput = out.getOutput(); + String cliOutput = err.getOutput(); assertThat(cliOutput).contains("Volume '/vol5' does not exist."); } @@ -259,7 +254,7 @@ public void testBucketFilterWithoutVolume() { // When bucket filter is passed without the volume filter. 
int exitCode = dryRun("--bucket", "bucket1"); assertEquals(0, exitCode); - String cliOutput = out.getOutput(); + String cliOutput = err.getOutput(); assertThat(cliOutput).contains("--bucket flag cannot be used without specifying --volume."); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java deleted file mode 100644 index fd6d75c7136..00000000000 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairCLI.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.repair.om; - -import org.apache.hadoop.ozone.repair.RepairTool; -import picocli.CommandLine; - -/** - * Parser for scm.db file. - */ -@CommandLine.Command( - name = "fso-tree", - description = "Identify and repair a disconnected FSO tree by marking unreferenced entries for deletion. " + - "OM should be stopped while this tool is run." -) -public class FSORepairCLI extends RepairTool { - - @CommandLine.Option(names = {"--db"}, - required = true, - description = "Path to OM RocksDB") - private String dbPath; - - @CommandLine.Option(names = {"-r", "--repair"}, - defaultValue = "false", - description = "Run in repair mode to move unreferenced files and directories to deleted tables.") - private boolean repair; - - @CommandLine.Option(names = {"-v", "--volume"}, - description = "Filter by volume name. Add '/' before the volume name.") - private String volume; - - @CommandLine.Option(names = {"-b", "--bucket"}, - description = "Filter by bucket name") - private String bucket; - - @CommandLine.Option(names = {"--verbose"}, - description = "Verbose output. 
Show all intermediate steps and deleted keys info.") - private boolean verbose; - - @Override - public void execute() throws Exception { - if (checkIfServiceIsRunning("OM")) { - return; - } - if (repair) { - info("FSO Repair Tool is running in repair mode"); - } else { - info("FSO Repair Tool is running in debug mode"); - } - try { - FSORepairTool - repairTool = new FSORepairTool(dbPath, repair, volume, bucket, verbose); - repairTool.run(); - } catch (Exception ex) { - throw new IllegalArgumentException("FSO repair failed: " + ex.getMessage()); - } - - if (verbose) { - info("FSO repair finished."); - } - } -} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java index 7e0fb23f5aa..a4068415db6 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java @@ -36,9 +36,11 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.helpers.WithObjectID; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; +import org.apache.hadoop.ozone.repair.RepairTool; import org.apache.ratis.util.Preconditions; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import picocli.CommandLine; import java.io.File; import java.io.IOException; @@ -69,402 +71,471 @@ * The tool is idempotent. reachable.db will not be deleted automatically when the tool finishes, * in case users want to manually inspect it. It can be safely deleted once the tool finishes. */ -public class FSORepairTool { +@CommandLine.Command( + name = "fso-tree", + description = "Identify and repair a disconnected FSO tree by marking unreferenced entries for deletion. " + + "OM should be stopped while this tool is run." +) +public class FSORepairTool extends RepairTool { public static final Logger LOG = LoggerFactory.getLogger(FSORepairTool.class); - - private final String omDBPath; - private final DBStore store; - private final Table volumeTable; - private final Table bucketTable; - private final Table directoryTable; - private final Table fileTable; - private final Table deletedDirectoryTable; - private final Table deletedTable; - private final Table snapshotInfoTable; - private final String volumeFilter; - private final String bucketFilter; private static final String REACHABLE_TABLE = "reachable"; - private DBStore reachableDB; - private final ReportStatistics reachableStats; - private final ReportStatistics unreachableStats; - private final ReportStatistics unreferencedStats; - private final boolean repair; - private final boolean verbose; - - public FSORepairTool(String dbPath, boolean repair, String volume, String bucket, boolean verbose) - throws IOException { - this(getStoreFromPath(dbPath), dbPath, repair, volume, bucket, verbose); - } - /** - * Allows passing RocksDB instance from a MiniOzoneCluster directly to this class for testing. 
- */ - public FSORepairTool(DBStore dbStore, String dbPath, boolean repair, String volume, String bucket, boolean verbose) - throws IOException { - this.reachableStats = new ReportStatistics(0, 0, 0); - this.unreachableStats = new ReportStatistics(0, 0, 0); - this.unreferencedStats = new ReportStatistics(0, 0, 0); - - this.store = dbStore; - this.omDBPath = dbPath; - this.repair = repair; - this.volumeFilter = volume; - this.bucketFilter = bucket; - this.verbose = verbose; - volumeTable = store.getTable(OmMetadataManagerImpl.VOLUME_TABLE, - String.class, - OmVolumeArgs.class); - bucketTable = store.getTable(OmMetadataManagerImpl.BUCKET_TABLE, - String.class, - OmBucketInfo.class); - directoryTable = store.getTable(OmMetadataManagerImpl.DIRECTORY_TABLE, - String.class, - OmDirectoryInfo.class); - fileTable = store.getTable(OmMetadataManagerImpl.FILE_TABLE, - String.class, - OmKeyInfo.class); - deletedDirectoryTable = store.getTable(OmMetadataManagerImpl.DELETED_DIR_TABLE, - String.class, - OmKeyInfo.class); - deletedTable = store.getTable(OmMetadataManagerImpl.DELETED_TABLE, - String.class, - RepeatedOmKeyInfo.class); - snapshotInfoTable = store.getTable(OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE, - String.class, - SnapshotInfo.class); - } + @CommandLine.Option(names = {"--db"}, + required = true, + description = "Path to OM RocksDB") + private String omDBPath; - protected static DBStore getStoreFromPath(String dbPath) throws IOException { - File omDBFile = new File(dbPath); - if (!omDBFile.exists() || !omDBFile.isDirectory()) { - throw new IOException(String.format("Specified OM DB instance %s does " + - "not exist or is not a RocksDB directory.", dbPath)); - } - // Load RocksDB and tables needed. - return OmMetadataManagerImpl.loadDB(new OzoneConfiguration(), new File(dbPath).getParentFile(), -1); - } + @CommandLine.Option(names = {"-r", "--repair"}, + defaultValue = "false", + description = "Run in repair mode to move unreferenced files and directories to deleted tables.") + private boolean repair; + + @CommandLine.Option(names = {"-v", "--volume"}, + description = "Filter by volume name. Add '/' before the volume name.") + private String volumeFilter; - public FSORepairTool.Report run() throws Exception { + @CommandLine.Option(names = {"-b", "--bucket"}, + description = "Filter by bucket name") + private String bucketFilter; + + @CommandLine.Option(names = {"--verbose"}, + description = "Verbose output. 
Show all intermediate steps and deleted keys info.") + private boolean verbose; + + @Override + public void execute() throws Exception { + if (checkIfServiceIsRunning("OM")) { + return; + } + if (repair) { + info("FSO Repair Tool is running in repair mode"); + } else { + info("FSO Repair Tool is running in debug mode"); + } try { - if (bucketFilter != null && volumeFilter == null) { - System.out.println("--bucket flag cannot be used without specifying --volume."); - return null; - } + Impl repairTool = new Impl(); + repairTool.run(); + } catch (Exception ex) { + throw new IllegalArgumentException("FSO repair failed: " + ex.getMessage()); + } - if (volumeFilter != null) { - OmVolumeArgs volumeArgs = volumeTable.getIfExist(volumeFilter); - if (volumeArgs == null) { - System.out.println("Volume '" + volumeFilter + "' does not exist."); + if (verbose) { + info("FSO repair finished."); + } + } + + private class Impl { + + private final DBStore store; + private final Table volumeTable; + private final Table bucketTable; + private final Table directoryTable; + private final Table fileTable; + private final Table deletedDirectoryTable; + private final Table deletedTable; + private final Table snapshotInfoTable; + private DBStore reachableDB; + private final ReportStatistics reachableStats; + private final ReportStatistics unreachableStats; + private final ReportStatistics unreferencedStats; + + Impl() throws IOException { + this.reachableStats = new ReportStatistics(0, 0, 0); + this.unreachableStats = new ReportStatistics(0, 0, 0); + this.unreferencedStats = new ReportStatistics(0, 0, 0); + + this.store = getStoreFromPath(omDBPath); + volumeTable = store.getTable(OmMetadataManagerImpl.VOLUME_TABLE, + String.class, + OmVolumeArgs.class); + bucketTable = store.getTable(OmMetadataManagerImpl.BUCKET_TABLE, + String.class, + OmBucketInfo.class); + directoryTable = store.getTable(OmMetadataManagerImpl.DIRECTORY_TABLE, + String.class, + OmDirectoryInfo.class); + fileTable = store.getTable(OmMetadataManagerImpl.FILE_TABLE, + String.class, + OmKeyInfo.class); + deletedDirectoryTable = store.getTable(OmMetadataManagerImpl.DELETED_DIR_TABLE, + String.class, + OmKeyInfo.class); + deletedTable = store.getTable(OmMetadataManagerImpl.DELETED_TABLE, + String.class, + RepeatedOmKeyInfo.class); + snapshotInfoTable = store.getTable(OmMetadataManagerImpl.SNAPSHOT_INFO_TABLE, + String.class, + SnapshotInfo.class); + } + + public Report run() throws Exception { + try { + if (bucketFilter != null && volumeFilter == null) { + error("--bucket flag cannot be used without specifying --volume."); return null; } - } - // Iterate all volumes or a specific volume if specified - try (TableIterator> - volumeIterator = volumeTable.iterator()) { - try { - openReachableDB(); - } catch (IOException e) { - System.out.println("Failed to open reachable database: " + e.getMessage()); - throw e; + if (volumeFilter != null) { + OmVolumeArgs volumeArgs = volumeTable.getIfExist(volumeFilter); + if (volumeArgs == null) { + error("Volume '" + volumeFilter + "' does not exist."); + return null; + } } - while (volumeIterator.hasNext()) { - Table.KeyValue volumeEntry = volumeIterator.next(); - String volumeKey = volumeEntry.getKey(); - if (volumeFilter != null && !volumeFilter.equals(volumeKey)) { - continue; + // Iterate all volumes or a specific volume if specified + try (TableIterator> + volumeIterator = volumeTable.iterator()) { + try { + openReachableDB(); + } catch (IOException e) { + error("Failed to open reachable database: " + 
e.getMessage()); + throw e; } + while (volumeIterator.hasNext()) { + Table.KeyValue volumeEntry = volumeIterator.next(); + String volumeKey = volumeEntry.getKey(); - System.out.println("Processing volume: " + volumeKey); - - if (bucketFilter != null) { - OmBucketInfo bucketInfo = bucketTable.getIfExist(volumeKey + "/" + bucketFilter); - if (bucketInfo == null) { - //Bucket does not exist in the volume - System.out.println("Bucket '" + bucketFilter + "' does not exist in volume '" + volumeKey + "'."); - return null; - } - - if (bucketInfo.getBucketLayout() != BucketLayout.FILE_SYSTEM_OPTIMIZED) { - System.out.println("Skipping non-FSO bucket " + bucketFilter); + if (volumeFilter != null && !volumeFilter.equals(volumeKey)) { continue; } - processBucket(volumeEntry.getValue(), bucketInfo); - } else { + info("Processing volume: " + volumeKey); - // Iterate all buckets in the volume. - try (TableIterator> - bucketIterator = bucketTable.iterator()) { - bucketIterator.seek(volumeKey); - while (bucketIterator.hasNext()) { - Table.KeyValue bucketEntry = bucketIterator.next(); - String bucketKey = bucketEntry.getKey(); - OmBucketInfo bucketInfo = bucketEntry.getValue(); - - if (bucketInfo.getBucketLayout() != BucketLayout.FILE_SYSTEM_OPTIMIZED) { - System.out.println("Skipping non-FSO bucket " + bucketKey); - continue; - } + if (bucketFilter != null) { + OmBucketInfo bucketInfo = bucketTable.getIfExist(volumeKey + "/" + bucketFilter); + if (bucketInfo == null) { + //Bucket does not exist in the volume + error("Bucket '" + bucketFilter + "' does not exist in volume '" + volumeKey + "'."); + return null; + } - // Stop this loop once we have seen all buckets in the current - // volume. - if (!bucketKey.startsWith(volumeKey)) { - break; - } + if (bucketInfo.getBucketLayout() != BucketLayout.FILE_SYSTEM_OPTIMIZED) { + info("Skipping non-FSO bucket " + bucketFilter); + continue; + } - processBucket(volumeEntry.getValue(), bucketInfo); + processBucket(volumeEntry.getValue(), bucketInfo); + } else { + + // Iterate all buckets in the volume. + try (TableIterator> + bucketIterator = bucketTable.iterator()) { + bucketIterator.seek(volumeKey); + while (bucketIterator.hasNext()) { + Table.KeyValue bucketEntry = bucketIterator.next(); + String bucketKey = bucketEntry.getKey(); + OmBucketInfo bucketInfo = bucketEntry.getValue(); + + if (bucketInfo.getBucketLayout() != BucketLayout.FILE_SYSTEM_OPTIMIZED) { + info("Skipping non-FSO bucket " + bucketKey); + continue; + } + + // Stop this loop once we have seen all buckets in the current + // volume. 
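              // Bucket table keys have the form /volumeName/bucketName, so seeking the iterator to
              // the volume key and breaking on the first key that no longer carries that prefix
              // visits exactly this volume's buckets; for example, a hypothetical volume key /vol1
              // matches /vol1/bucket1 and /vol1/bucket2 but not /vol2/bucket1.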
+ if (!bucketKey.startsWith(volumeKey)) { + break; + } + + processBucket(volumeEntry.getValue(), bucketInfo); + } } } } } + } catch (IOException e) { + error("An error occurred while processing" + e.getMessage()); + throw e; + } finally { + closeReachableDB(); + store.close(); } - } catch (IOException e) { - System.out.println("An error occurred while processing" + e.getMessage()); - throw e; - } finally { - closeReachableDB(); - store.close(); + + return buildReportAndLog(); } - return buildReportAndLog(); - } + private boolean checkIfSnapshotExistsForBucket(String volumeName, String bucketName) throws IOException { + if (snapshotInfoTable == null) { + return false; + } - private boolean checkIfSnapshotExistsForBucket(String volumeName, String bucketName) throws IOException { - if (snapshotInfoTable == null) { + try (TableIterator> iterator = + snapshotInfoTable.iterator()) { + while (iterator.hasNext()) { + SnapshotInfo snapshotInfo = iterator.next().getValue(); + String snapshotPath = (volumeName + "/" + bucketName).replaceFirst("^/", ""); + if (snapshotInfo.getSnapshotPath().equals(snapshotPath)) { + return true; + } + } + } return false; } - try (TableIterator> iterator = - snapshotInfoTable.iterator()) { - while (iterator.hasNext()) { - SnapshotInfo snapshotInfo = iterator.next().getValue(); - String snapshotPath = (volumeName + "/" + bucketName).replaceFirst("^/", ""); - if (snapshotInfo.getSnapshotPath().equals(snapshotPath)) { - return true; + private void processBucket(OmVolumeArgs volume, OmBucketInfo bucketInfo) throws IOException { + info("Processing bucket: " + volume.getVolume() + "/" + bucketInfo.getBucketName()); + if (checkIfSnapshotExistsForBucket(volume.getVolume(), bucketInfo.getBucketName())) { + if (!repair) { + info( + "Snapshot detected in bucket '" + volume.getVolume() + "/" + bucketInfo.getBucketName() + "'. "); + } else { + info( + "Skipping repair for bucket '" + volume.getVolume() + "/" + bucketInfo.getBucketName() + "' " + + "due to snapshot presence."); + return; } } + markReachableObjectsInBucket(volume, bucketInfo); + handleUnreachableAndUnreferencedObjects(volume, bucketInfo); } - return false; - } - private void processBucket(OmVolumeArgs volume, OmBucketInfo bucketInfo) throws IOException { - System.out.println("Processing bucket: " + volume.getVolume() + "/" + bucketInfo.getBucketName()); - if (checkIfSnapshotExistsForBucket(volume.getVolume(), bucketInfo.getBucketName())) { - if (!repair) { - System.out.println( - "Snapshot detected in bucket '" + volume.getVolume() + "/" + bucketInfo.getBucketName() + "'. "); - } else { - System.out.println( - "Skipping repair for bucket '" + volume.getVolume() + "/" + bucketInfo.getBucketName() + "' " + - "due to snapshot presence."); - return; - } + private Report buildReportAndLog() { + Report report = new Report.Builder() + .setReachable(reachableStats) + .setUnreachable(unreachableStats) + .setUnreferenced(unreferencedStats) + .build(); + + info("\n" + report); + return report; } - markReachableObjectsInBucket(volume, bucketInfo); - handleUnreachableAndUnreferencedObjects(volume, bucketInfo); - } - private Report buildReportAndLog() { - Report report = new Report.Builder() - .setReachable(reachableStats) - .setUnreachable(unreachableStats) - .setUnreferenced(unreferencedStats) - .build(); + private void markReachableObjectsInBucket(OmVolumeArgs volume, OmBucketInfo bucket) throws IOException { + // Only put directories in the stack. + // Directory keys should have the form /volumeID/bucketID/parentID/name. 
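      // This method and handleUnreachableAndUnreferencedObjects() form the two phases of
      // processBucket(): an iterative depth-first walk first records every directory reachable
      // from the bucket in the reachable table, then anything whose parent is missing from that
      // table is treated as unreachable and, when it is also absent from the corresponding
      // deleted table, as unreferenced; only unreferenced entries are moved to the deleted tables
      // in repair mode.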
+ Stack dirKeyStack = new Stack<>(); - System.out.println("\n" + report); - return report; - } + // Since the tool uses parent directories to check for reachability, add + // a reachable entry for the bucket as well. + addReachableEntry(volume, bucket, bucket); + // Initialize the stack with all immediate child directories of the + // bucket, and mark them all as reachable. + Collection childDirs = getChildDirectoriesAndMarkAsReachable(volume, bucket, bucket); + dirKeyStack.addAll(childDirs); + + while (!dirKeyStack.isEmpty()) { + // Get one directory and process its immediate children. + String currentDirKey = dirKeyStack.pop(); + OmDirectoryInfo currentDir = directoryTable.get(currentDirKey); + if (currentDir == null) { + info("Directory key" + currentDirKey + "to be processed was not found in the directory table."); + continue; + } - private void markReachableObjectsInBucket(OmVolumeArgs volume, OmBucketInfo bucket) throws IOException { - // Only put directories in the stack. - // Directory keys should have the form /volumeID/bucketID/parentID/name. - Stack dirKeyStack = new Stack<>(); - - // Since the tool uses parent directories to check for reachability, add - // a reachable entry for the bucket as well. - addReachableEntry(volume, bucket, bucket); - // Initialize the stack with all immediate child directories of the - // bucket, and mark them all as reachable. - Collection childDirs = getChildDirectoriesAndMarkAsReachable(volume, bucket, bucket); - dirKeyStack.addAll(childDirs); - - while (!dirKeyStack.isEmpty()) { - // Get one directory and process its immediate children. - String currentDirKey = dirKeyStack.pop(); - OmDirectoryInfo currentDir = directoryTable.get(currentDirKey); - if (currentDir == null) { - System.out.println("Directory key" + currentDirKey + "to be processed was not found in the directory table."); - continue; + // TODO revisit this for a more memory efficient implementation, + // possibly making better use of RocksDB iterators. + childDirs = getChildDirectoriesAndMarkAsReachable(volume, bucket, currentDir); + dirKeyStack.addAll(childDirs); } + } - // TODO revisit this for a more memory efficient implementation, - // possibly making better use of RocksDB iterators. - childDirs = getChildDirectoriesAndMarkAsReachable(volume, bucket, currentDir); - dirKeyStack.addAll(childDirs); + private boolean isDirectoryInDeletedDirTable(String dirKey) throws IOException { + return deletedDirectoryTable.isExist(dirKey); } - } - private boolean isDirectoryInDeletedDirTable(String dirKey) throws IOException { - return deletedDirectoryTable.isExist(dirKey); - } + private boolean isFileKeyInDeletedTable(String fileKey) throws IOException { + return deletedTable.isExist(fileKey); + } - private boolean isFileKeyInDeletedTable(String fileKey) throws IOException { - return deletedTable.isExist(fileKey); - } + private void handleUnreachableAndUnreferencedObjects(OmVolumeArgs volume, OmBucketInfo bucket) throws IOException { + // Check for unreachable and unreferenced directories in the bucket. + String bucketPrefix = OM_KEY_PREFIX + + volume.getObjectID() + + OM_KEY_PREFIX + + bucket.getObjectID(); - private void handleUnreachableAndUnreferencedObjects(OmVolumeArgs volume, OmBucketInfo bucket) throws IOException { - // Check for unreachable and unreferenced directories in the bucket. 
- String bucketPrefix = OM_KEY_PREFIX + - volume.getObjectID() + - OM_KEY_PREFIX + - bucket.getObjectID(); - - try (TableIterator> dirIterator = - directoryTable.iterator()) { - dirIterator.seek(bucketPrefix); - while (dirIterator.hasNext()) { - Table.KeyValue dirEntry = dirIterator.next(); - String dirKey = dirEntry.getKey(); - - // Only search directories in this bucket. - if (!dirKey.startsWith(bucketPrefix)) { - break; - } + try (TableIterator> dirIterator = + directoryTable.iterator()) { + dirIterator.seek(bucketPrefix); + while (dirIterator.hasNext()) { + Table.KeyValue dirEntry = dirIterator.next(); + String dirKey = dirEntry.getKey(); + + // Only search directories in this bucket. + if (!dirKey.startsWith(bucketPrefix)) { + break; + } - if (!isReachable(dirKey)) { - if (!isDirectoryInDeletedDirTable(dirKey)) { - System.out.println("Found unreferenced directory: " + dirKey); - unreferencedStats.addDir(); + if (!isReachable(dirKey)) { + if (!isDirectoryInDeletedDirTable(dirKey)) { + info("Found unreferenced directory: " + dirKey); + unreferencedStats.addDir(); - if (!repair) { - if (verbose) { - System.out.println("Marking unreferenced directory " + dirKey + " for deletion."); + if (!repair) { + if (verbose) { + info("Marking unreferenced directory " + dirKey + " for deletion."); + } + } else { + info("Deleting unreferenced directory " + dirKey); + OmDirectoryInfo dirInfo = dirEntry.getValue(); + markDirectoryForDeletion(volume.getVolume(), bucket.getBucketName(), dirKey, dirInfo); } } else { - System.out.println("Deleting unreferenced directory " + dirKey); - OmDirectoryInfo dirInfo = dirEntry.getValue(); - markDirectoryForDeletion(volume.getVolume(), bucket.getBucketName(), dirKey, dirInfo); + unreachableStats.addDir(); } - } else { - unreachableStats.addDir(); } } } - } - // Check for unreachable and unreferenced files - try (TableIterator> - fileIterator = fileTable.iterator()) { - fileIterator.seek(bucketPrefix); - while (fileIterator.hasNext()) { - Table.KeyValue fileEntry = fileIterator.next(); - String fileKey = fileEntry.getKey(); - // Only search files in this bucket. - if (!fileKey.startsWith(bucketPrefix)) { - break; - } + // Check for unreachable and unreferenced files + try (TableIterator> + fileIterator = fileTable.iterator()) { + fileIterator.seek(bucketPrefix); + while (fileIterator.hasNext()) { + Table.KeyValue fileEntry = fileIterator.next(); + String fileKey = fileEntry.getKey(); + // Only search files in this bucket. + if (!fileKey.startsWith(bucketPrefix)) { + break; + } - OmKeyInfo fileInfo = fileEntry.getValue(); - if (!isReachable(fileKey)) { - if (!isFileKeyInDeletedTable(fileKey)) { - System.out.println("Found unreferenced file: " + fileKey); - unreferencedStats.addFile(fileInfo.getDataSize()); + OmKeyInfo fileInfo = fileEntry.getValue(); + if (!isReachable(fileKey)) { + if (!isFileKeyInDeletedTable(fileKey)) { + info("Found unreferenced file: " + fileKey); + unreferencedStats.addFile(fileInfo.getDataSize()); - if (!repair) { - if (verbose) { - System.out.println("Marking unreferenced file " + fileKey + " for deletion." + fileKey); + if (!repair) { + if (verbose) { + info("Marking unreferenced file " + fileKey + " for deletion." 
+ fileKey); + } + } else { + info("Deleting unreferenced file " + fileKey); + markFileForDeletion(fileKey, fileInfo); } } else { - System.out.println("Deleting unreferenced file " + fileKey); - markFileForDeletion(fileKey, fileInfo); + unreachableStats.addFile(fileInfo.getDataSize()); } } else { - unreachableStats.addFile(fileInfo.getDataSize()); + // NOTE: We are deserializing the proto of every reachable file + // just to log it's size. If we don't need this information we could + // save time by skipping this step. + reachableStats.addFile(fileInfo.getDataSize()); } - } else { - // NOTE: We are deserializing the proto of every reachable file - // just to log it's size. If we don't need this information we could - // save time by skipping this step. - reachableStats.addFile(fileInfo.getDataSize()); } } } - } - protected void markFileForDeletion(String fileKey, OmKeyInfo fileInfo) throws IOException { - try (BatchOperation batch = store.initBatchOperation()) { - fileTable.deleteWithBatch(batch, fileKey); - - RepeatedOmKeyInfo originalRepeatedKeyInfo = deletedTable.get(fileKey); - RepeatedOmKeyInfo updatedRepeatedOmKeyInfo = OmUtils.prepareKeyForDelete( - fileInfo, fileInfo.getUpdateID(), true); - // NOTE: The FSO code seems to write the open key entry with the whole - // path, using the object's names instead of their ID. This would only - // be possible when the file is deleted explicitly, and not part of a - // directory delete. It is also not possible here if the file's parent - // is gone. The name of the key does not matter so just use IDs. - deletedTable.putWithBatch(batch, fileKey, updatedRepeatedOmKeyInfo); - if (verbose) { - System.out.println("Added entry " + fileKey + " to open key table: " + updatedRepeatedOmKeyInfo); + protected void markFileForDeletion(String fileKey, OmKeyInfo fileInfo) throws IOException { + try (BatchOperation batch = store.initBatchOperation()) { + fileTable.deleteWithBatch(batch, fileKey); + + RepeatedOmKeyInfo originalRepeatedKeyInfo = deletedTable.get(fileKey); + RepeatedOmKeyInfo updatedRepeatedOmKeyInfo = OmUtils.prepareKeyForDelete( + fileInfo, fileInfo.getUpdateID(), true); + // NOTE: The FSO code seems to write the open key entry with the whole + // path, using the object's names instead of their ID. This would only + // be possible when the file is deleted explicitly, and not part of a + // directory delete. It is also not possible here if the file's parent + // is gone. The name of the key does not matter so just use IDs. + deletedTable.putWithBatch(batch, fileKey, updatedRepeatedOmKeyInfo); + if (verbose) { + info("Added entry " + fileKey + " to open key table: " + updatedRepeatedOmKeyInfo); + } + store.commitBatchOperation(batch); } - store.commitBatchOperation(batch); } - } - protected void markDirectoryForDeletion(String volumeName, String bucketName, - String dirKeyName, OmDirectoryInfo dirInfo) throws IOException { - try (BatchOperation batch = store.initBatchOperation()) { - directoryTable.deleteWithBatch(batch, dirKeyName); - // HDDS-7592: Make directory entries in deleted dir table unique. - String deleteDirKeyName = dirKeyName + OM_KEY_PREFIX + dirInfo.getObjectID(); + protected void markDirectoryForDeletion(String volumeName, String bucketName, + String dirKeyName, OmDirectoryInfo dirInfo) throws IOException { + try (BatchOperation batch = store.initBatchOperation()) { + directoryTable.deleteWithBatch(batch, dirKeyName); + // HDDS-7592: Make directory entries in deleted dir table unique. 
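        // Appending the directory's own object ID keeps entries for repeatedly created and deleted
        // paths distinct in the deleted directory table; for example, a hypothetical directory key
        // /volId/bucketId/parentId/dir1 becomes /volId/bucketId/parentId/dir1/dirObjectId.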
+ String deleteDirKeyName = dirKeyName + OM_KEY_PREFIX + dirInfo.getObjectID(); - // Convert the directory to OmKeyInfo for deletion. - OmKeyInfo dirAsKeyInfo = OMFileRequest.getOmKeyInfo(volumeName, bucketName, dirInfo, dirInfo.getName()); - deletedDirectoryTable.putWithBatch(batch, deleteDirKeyName, dirAsKeyInfo); + // Convert the directory to OmKeyInfo for deletion. + OmKeyInfo dirAsKeyInfo = OMFileRequest.getOmKeyInfo(volumeName, bucketName, dirInfo, dirInfo.getName()); + deletedDirectoryTable.putWithBatch(batch, deleteDirKeyName, dirAsKeyInfo); - store.commitBatchOperation(batch); + store.commitBatchOperation(batch); + } } - } - private Collection getChildDirectoriesAndMarkAsReachable(OmVolumeArgs volume, OmBucketInfo bucket, - WithObjectID currentDir) throws IOException { - - Collection childDirs = new ArrayList<>(); - - try (TableIterator> - dirIterator = directoryTable.iterator()) { - String dirPrefix = buildReachableKey(volume, bucket, currentDir); - // Start searching the directory table at the current directory's - // prefix to get its immediate children. - dirIterator.seek(dirPrefix); - while (dirIterator.hasNext()) { - Table.KeyValue childDirEntry = dirIterator.next(); - String childDirKey = childDirEntry.getKey(); - // Stop processing once we have seen all immediate children of this - // directory. - if (!childDirKey.startsWith(dirPrefix)) { - break; + private Collection getChildDirectoriesAndMarkAsReachable(OmVolumeArgs volume, OmBucketInfo bucket, + WithObjectID currentDir) throws IOException { + + Collection childDirs = new ArrayList<>(); + + try (TableIterator> + dirIterator = directoryTable.iterator()) { + String dirPrefix = buildReachableKey(volume, bucket, currentDir); + // Start searching the directory table at the current directory's + // prefix to get its immediate children. + dirIterator.seek(dirPrefix); + while (dirIterator.hasNext()) { + Table.KeyValue childDirEntry = dirIterator.next(); + String childDirKey = childDirEntry.getKey(); + // Stop processing once we have seen all immediate children of this + // directory. + if (!childDirKey.startsWith(dirPrefix)) { + break; + } + // This directory was reached by search. + addReachableEntry(volume, bucket, childDirEntry.getValue()); + childDirs.add(childDirKey); + reachableStats.addDir(); } - // This directory was reached by search. - addReachableEntry(volume, bucket, childDirEntry.getValue()); - childDirs.add(childDirKey); - reachableStats.addDir(); } + + return childDirs; + } + + /** + * Add the specified object to the reachable table, indicating it is part + * of the connected FSO tree. + */ + private void addReachableEntry(OmVolumeArgs volume, OmBucketInfo bucket, WithObjectID object) throws IOException { + String reachableKey = buildReachableKey(volume, bucket, object); + // No value is needed for this table. + reachableDB.getTable(REACHABLE_TABLE, String.class, byte[].class).put(reachableKey, new byte[]{}); + } + + /** + * @param fileOrDirKey The key of a file or directory in RocksDB. + * @return true if the entry's parent is in the reachable table. 
+ */ + protected boolean isReachable(String fileOrDirKey) throws IOException { + String reachableParentKey = buildReachableParentKey(fileOrDirKey); + + return reachableDB.getTable(REACHABLE_TABLE, String.class, byte[].class).get(reachableParentKey) != null; + } + + private void openReachableDB() throws IOException { + File reachableDBFile = new File(new File(omDBPath).getParentFile(), "reachable.db"); + info("Creating database of reachable directories at " + reachableDBFile); + // Delete the DB from the last run if it exists. + if (reachableDBFile.exists()) { + FileUtils.deleteDirectory(reachableDBFile); + } + + ConfigurationSource conf = new OzoneConfiguration(); + reachableDB = DBStoreBuilder.newBuilder(conf) + .setName("reachable.db") + .setPath(reachableDBFile.getParentFile().toPath()) + .addTable(REACHABLE_TABLE) + .build(); } - return childDirs; + private void closeReachableDB() throws IOException { + if (reachableDB != null) { + reachableDB.close(); + } + File reachableDBFile = new File(new File(omDBPath).getParentFile(), "reachable.db"); + if (reachableDBFile.exists()) { + FileUtils.deleteDirectory(reachableDBFile); + } + } } - /** - * Add the specified object to the reachable table, indicating it is part - * of the connected FSO tree. - */ - private void addReachableEntry(OmVolumeArgs volume, OmBucketInfo bucket, WithObjectID object) throws IOException { - String reachableKey = buildReachableKey(volume, bucket, object); - // No value is needed for this table. - reachableDB.getTable(REACHABLE_TABLE, String.class, byte[].class).put(reachableKey, new byte[]{}); + protected static DBStore getStoreFromPath(String dbPath) throws IOException { + File omDBFile = new File(dbPath); + if (!omDBFile.exists() || !omDBFile.isDirectory()) { + throw new IOException(String.format("Specified OM DB instance %s does " + + "not exist or is not a RocksDB directory.", dbPath)); + } + // Load RocksDB and tables needed. + return OmMetadataManagerImpl.loadDB(new OzoneConfiguration(), new File(dbPath).getParentFile(), -1); } /** @@ -480,17 +551,6 @@ private static String buildReachableKey(OmVolumeArgs volume, OmBucketInfo bucket object.getObjectID(); } - /** - * - * @param fileOrDirKey The key of a file or directory in RocksDB. - * @return true if the entry's parent is in the reachable table. - */ - protected boolean isReachable(String fileOrDirKey) throws IOException { - String reachableParentKey = buildReachableParentKey(fileOrDirKey); - - return reachableDB.getTable(REACHABLE_TABLE, String.class, byte[].class).get(reachableParentKey) != null; - } - /** * Build an entry in the reachable table for the current object's parent * object. The object could be a file or directory. @@ -512,32 +572,6 @@ private static String buildReachableParentKey(String fileOrDirKey) { parentID; } - private void openReachableDB() throws IOException { - File reachableDBFile = new File(new File(omDBPath).getParentFile(), "reachable.db"); - System.out.println("Creating database of reachable directories at " + reachableDBFile); - // Delete the DB from the last run if it exists. 
- if (reachableDBFile.exists()) { - FileUtils.deleteDirectory(reachableDBFile); - } - - ConfigurationSource conf = new OzoneConfiguration(); - reachableDB = DBStoreBuilder.newBuilder(conf) - .setName("reachable.db") - .setPath(reachableDBFile.getParentFile().toPath()) - .addTable(REACHABLE_TABLE) - .build(); - } - - private void closeReachableDB() throws IOException { - if (reachableDB != null) { - reachableDB.close(); - } - File reachableDBFile = new File(new File(omDBPath).getParentFile(), "reachable.db"); - if (reachableDBFile.exists()) { - FileUtils.deleteDirectory(reachableDBFile); - } - } - /** * Define a Report to be created. */ @@ -549,19 +583,19 @@ public static class Report { /** * Builds one report that is the aggregate of multiple others. */ - public Report(FSORepairTool.Report... reports) { + public Report(Report... reports) { reachable = new ReportStatistics(); unreachable = new ReportStatistics(); unreferenced = new ReportStatistics(); - for (FSORepairTool.Report report : reports) { + for (Report report : reports) { reachable.add(report.reachable); unreachable.add(report.unreachable); unreferenced.add(report.unreferenced); } } - private Report(FSORepairTool.Report.Builder builder) { + private Report(Report.Builder builder) { this.reachable = builder.reachable; this.unreachable = builder.unreachable; this.unreferenced = builder.unreferenced; @@ -591,7 +625,7 @@ public boolean equals(Object other) { if (other == null || getClass() != other.getClass()) { return false; } - FSORepairTool.Report report = (FSORepairTool.Report) other; + Report report = (Report) other; // Useful for testing. System.out.println("Comparing reports\nExpect:\n" + this + "\nActual:\n" + report); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java index 3b880f87543..9e20f6b9d1f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java @@ -27,7 +27,7 @@ */ @CommandLine.Command(name = "om", subcommands = { - FSORepairCLI.class, + FSORepairTool.class, SnapshotRepair.class, TransactionInfoRepair.class }, From 8a774a57df907c1e5c6c274054cfde21f914a33b Mon Sep 17 00:00:00 2001 From: Chung En Lee Date: Tue, 7 Jan 2025 15:15:26 +0800 Subject: [PATCH 049/168] HDDS-11989. Enable SCM Ratis in tests related to DeletedBlockLog (#7615) --- .../hdds/scm/TestStorageContainerManager.java | 12 +---- .../apache/hadoop/ozone/OzoneTestUtils.java | 33 +++++++++++++ .../rpc/TestDeleteWithInAdequateDN.java | 5 ++ .../commandhandler/TestBlockDeletion.java | 48 ++++++++++--------- .../TestDeleteContainerHandler.java | 8 ++++ 5 files changed, 72 insertions(+), 34 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java index 94c8f914294..47f6d3823d2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java @@ -321,17 +321,7 @@ public void testBlockDeletionTransactions() throws Exception { // after sometime, all the TX should be proceed and by then // the number of containerBlocks of all known containers will be // empty again. 
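    // The inline wait below is replaced by OzoneTestUtils.waitBlockDeleted(), which polls the SCM
    // deleted block log until no valid transactions remain; its companion,
    // OzoneTestUtils.flushAndWaitForDeletedBlockLog(), flushes the SCM HA transaction buffer and
    // waits until at least one deleted-block transaction becomes visible.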
- GenericTestUtils.waitFor(() -> { - try { - if (SCMHAUtils.isSCMHAEnabled(cluster.getConf())) { - cluster.getStorageContainerManager().getScmHAManager() - .asSCMHADBTransactionBuffer().flush(); - } - return delLog.getNumOfValidTransactions() == 0; - } catch (IOException e) { - return false; - } - }, 1000, 22000); + OzoneTestUtils.waitBlockDeleted(cluster.getStorageContainerManager()); assertTrue(verifyBlocksWithTxnTable(cluster, conf, containerBlocks)); // Continue the work, add some TXs that with known container names, // but unknown block IDs. diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java index 884e435d25e..0a5f7114c40 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java @@ -161,4 +161,37 @@ public static void closeContainer(StorageContainerManager scm, container.getState() == HddsProtos.LifeCycleState.CLOSED, 200, 30000); } + + /** + * Flush deleted block log & wait till something was flushed. + */ + public static void flushAndWaitForDeletedBlockLog(StorageContainerManager scm) + throws InterruptedException, TimeoutException { + GenericTestUtils.waitFor(() -> { + try { + scm.getScmHAManager().asSCMHADBTransactionBuffer().flush(); + if (scm.getScmBlockManager().getDeletedBlockLog().getNumOfValidTransactions() > 0) { + return true; + } + } catch (IOException e) { + } + return false; + }, 100, 3000); + } + + /** + * Wait till all blocks are removed. + */ + public static void waitBlockDeleted(StorageContainerManager scm) + throws InterruptedException, TimeoutException { + GenericTestUtils.waitFor(() -> { + try { + if (scm.getScmBlockManager().getDeletedBlockLog().getNumOfValidTransactions() == 0) { + return true; + } + } catch (IOException e) { + } + return false; + }, 1000, 60000); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java index bb42d8a0f57..2b199306b76 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java @@ -42,6 +42,7 @@ import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.OzoneTestUtils; import org.apache.hadoop.ozone.RatisTestHelper; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneClient; @@ -65,6 +66,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; @@ -73,6 +75,7 @@ import static 
org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; + import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Assumptions; import org.junit.jupiter.api.BeforeAll; @@ -103,6 +106,7 @@ public static void init() throws Exception { conf = new OzoneConfiguration(); + conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, true); conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 100, TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, @@ -281,6 +285,7 @@ void testDeleteKeyWithInAdequateDN() throws Exception { //cluster.getOzoneManager().deleteKey(keyArgs); client.getObjectStore().getVolume(volumeName).getBucket(bucketName). deleteKey("ratis"); + OzoneTestUtils.flushAndWaitForDeletedBlockLog(cluster.getStorageContainerManager()); // make sure the chunk was never deleted on the leader even though // deleteBlock handler is invoked diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index cf7d26847bb..e38312e02e6 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -47,7 +47,6 @@ import org.apache.hadoop.hdds.scm.block.ScmBlockDeletingServiceMetrics; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException; import org.apache.hadoop.hdds.scm.container.ContainerReplica; import org.apache.hadoop.hdds.scm.container.ContainerStateManager; import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager; @@ -95,6 +94,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EXPIRED_CONTAINER_REPLICA_OP_SCRUB_INTERVAL; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; @@ -133,6 +133,7 @@ public void init() throws Exception { GenericTestUtils.setLogLevel(SCMBlockDeletingService.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(ReplicationManager.LOG, Level.DEBUG); + conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, true); conf.set("ozone.replication.allowed-configs", "^(RATIS/THREE)|(EC/2-1-256k)$"); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, @@ -239,6 +240,7 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception { // verify key blocks were created in DN. 
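    // With SCM Ratis enabled, metadata updates sit in the SCM HA DB transaction buffer until it is
    // flushed, so the polling loops added in this test flush the buffer before re-checking the
    // expected state, roughly following this pattern:
    //
    //   GenericTestUtils.waitFor(() -> {
    //     try {
    //       scm.getScmHAManager().asSCMHADBTransactionBuffer().flush();
    //       return blocksAreInExpectedState();   // hypothetical check, e.g. verifyBlocksCreated(...)
    //     } catch (Throwable t) {
    //       return false;
    //     }
    //   }, 500, 20000);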
GenericTestUtils.waitFor(() -> { try { + scm.getScmHAManager().asSCMHADBTransactionBuffer().flush(); verifyBlocksCreated(omKeyLocationInfoGroupList); return true; } catch (Throwable t) { @@ -283,6 +285,7 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception { // The blocks should be deleted in the DN. GenericTestUtils.waitFor(() -> { try { + scm.getScmHAManager().asSCMHADBTransactionBuffer().flush(); verifyBlocksDeleted(omKeyLocationInfoGroupList); return true; } catch (Throwable t) { @@ -299,6 +302,7 @@ public void testBlockDeletion(ReplicationConfig repConfig) throws Exception { // Verify transactions committed GenericTestUtils.waitFor(() -> { try { + scm.getScmHAManager().asSCMHADBTransactionBuffer().flush(); verifyTransactionsCommitted(); return true; } catch (Throwable t) { @@ -380,10 +384,16 @@ public void testContainerStatisticsAfterDelete() throws Exception { writeClient.deleteKey(keyArgs); // Wait for blocks to be deleted and container reports to be processed - GenericTestUtils.waitFor(() -> - scm.getContainerManager().getContainers().stream() - .allMatch(c -> c.getUsedBytes() == 0 && - c.getNumberOfKeys() == 0), 500, 20000); + GenericTestUtils.waitFor(() -> { + try { + scm.getScmHAManager().asSCMHADBTransactionBuffer().flush(); + } catch (IOException e) { + throw new RuntimeException(e); + } + return scm.getContainerManager().getContainers().stream() + .allMatch(c -> c.getUsedBytes() == 0 && + c.getNumberOfKeys() == 0); + }, 500, 20000); Thread.sleep(5000); // Verify that pending block delete num are as expected with resent cmds cluster.getHddsDatanodes().forEach(dn -> { @@ -425,6 +435,7 @@ public void testContainerStatisticsAfterDelete() throws Exception { assertEquals(HddsProtos.LifeCycleState.DELETED, container.getState()); try { + scm.getScmHAManager().asSCMHADBTransactionBuffer().flush(); assertEquals(HddsProtos.LifeCycleState.DELETED, scm.getScmMetadataStore().getContainerTable() .get(container.containerID()).getState()); @@ -516,14 +527,14 @@ public void testContainerStateAfterDNRestart() throws Exception { GenericTestUtils.waitFor(() -> { try { + scm.getScmHAManager().asSCMHADBTransactionBuffer().flush(); return scm.getContainerManager().getContainerReplicas( containerId).stream(). 
allMatch(replica -> replica.isEmpty()); - } catch (ContainerNotFoundException e) { + } catch (IOException e) { throw new RuntimeException(e); } - }, - 100, 10 * 1000); + }, 100, 10 * 1000); // Container state should be empty now as key got deleted assertTrue(getContainerFromDN( @@ -546,6 +557,7 @@ public void testContainerStateAfterDNRestart() throws Exception { assertEquals(HddsProtos.LifeCycleState.DELETED, container.getState()); try { + scm.getScmHAManager().asSCMHADBTransactionBuffer().flush(); assertEquals(HddsProtos.LifeCycleState.DELETED, scm.getScmMetadataStore().getContainerTable() .get(container.containerID()).getState()); @@ -560,7 +572,6 @@ public void testContainerStateAfterDNRestart() throws Exception { } return true; }, 500, 30000); - LOG.info(metrics.toString()); } /** @@ -646,14 +657,14 @@ public void testContainerDeleteWithInvalidKeyCount() // Ensure isEmpty are true for all replica after delete key GenericTestUtils.waitFor(() -> { try { + scm.getScmHAManager().asSCMHADBTransactionBuffer().flush(); return scm.getContainerManager().getContainerReplicas( containerId).stream() .allMatch(replica -> replica.isEmpty()); - } catch (ContainerNotFoundException e) { + } catch (IOException e) { throw new RuntimeException(e); } - }, - 500, 5 * 2000); + }, 500, 5 * 2000); // Update container replica by making invalid keyCount in one replica ContainerReplica replicaOne = ContainerReplica.newBuilder() @@ -683,6 +694,7 @@ public void testContainerDeleteWithInvalidKeyCount() assertEquals(HddsProtos.LifeCycleState.DELETED, container.getState()); try { + scm.getScmHAManager().asSCMHADBTransactionBuffer().flush(); assertEquals(HddsProtos.LifeCycleState.DELETED, scm.getScmMetadataStore().getContainerTable() .get(container.containerID()).getState()); @@ -812,17 +824,7 @@ public void testBlockDeleteCommandParallelProcess() throws Exception { } // Wait for block delete command sent from OM - GenericTestUtils.waitFor(() -> { - try { - if (scm.getScmBlockManager().getDeletedBlockLog() - .getNumOfValidTransactions() > 0) { - return true; - } - } catch (IOException e) { - } - return false; - }, 100, 5000); - + OzoneTestUtils.flushAndWaitForDeletedBlockLog(scm); long start = System.currentTimeMillis(); // Wait for all blocks been deleted. 
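    // OzoneTestUtils.flushAndWaitForDeletedBlockLog(scm) above returns once at least one
    // transaction is visible in the SCM deleted block log, i.e. the delete commands have reached
    // SCM; the wait below then covers the datanodes actually processing those deletes.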
GenericTestUtils.waitFor(() -> { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java index 192c933f53c..705ef1e0d86 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java @@ -75,6 +75,7 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -97,6 +98,7 @@ public class TestDeleteContainerHandler { @BeforeAll public static void setup() throws Exception { conf = new OzoneConfiguration(); + conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, true); conf.set(OZONE_SCM_CONTAINER_SIZE, "1GB"); conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, 0, StorageUnit.MB); @@ -196,6 +198,8 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckTrue() // Delete key, which will make isEmpty flag to true in containerData objectStore.getVolume(volumeName) .getBucket(bucketName).deleteKey(keyName); + OzoneTestUtils.flushAndWaitForDeletedBlockLog(cluster.getStorageContainerManager()); + OzoneTestUtils.waitBlockDeleted(cluster.getStorageContainerManager()); // Ensure isEmpty flag is true when key is deleted and container is empty GenericTestUtils.waitFor(() -> getContainerfromDN( @@ -313,6 +317,8 @@ public void testDeleteNonEmptyContainerOnDirEmptyCheckFalse() // Delete key, which will make isEmpty flag to true in containerData objectStore.getVolume(volumeName) .getBucket(bucketName).deleteKey(keyName); + OzoneTestUtils.flushAndWaitForDeletedBlockLog(cluster.getStorageContainerManager()); + OzoneTestUtils.waitBlockDeleted(cluster.getStorageContainerManager()); // Ensure isEmpty flag is true when key is deleted and container is empty GenericTestUtils.waitFor(() -> getContainerfromDN( @@ -652,6 +658,8 @@ public void testDeleteContainerRequestHandlerOnClosedContainer() // Delete key, which will make isEmpty flag to true in containerData objectStore.getVolume(volumeName) .getBucket(bucketName).deleteKey(keyName); + OzoneTestUtils.flushAndWaitForDeletedBlockLog(cluster.getStorageContainerManager()); + OzoneTestUtils.waitBlockDeleted(cluster.getStorageContainerManager()); // Ensure isEmpty flag is true when key is deleted GenericTestUtils.waitFor(() -> getContainerfromDN( From 44ba9a3f5d689d003cc8770ad62815d04d2596a2 Mon Sep 17 00:00:00 2001 From: Chung En Lee Date: Tue, 7 Jan 2025 16:14:24 +0800 Subject: [PATCH 050/168] HDDS-12023. 
Enable SCM Ratis in TestContainerCommandsEC (#7650) --- .../hadoop/hdds/scm/storage/TestContainerCommandsEC.java | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java index 1b7eb837cf8..bf40a600e29 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java @@ -70,6 +70,7 @@ import org.apache.hadoop.ozone.common.utils.BufferUtils; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; +import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.ec.reconstruction.ECContainerOperationClient; import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionCoordinator; import org.apache.hadoop.ozone.container.ec.reconstruction.ECReconstructionMetrics; @@ -170,6 +171,7 @@ public class TestContainerCommandsEC { @BeforeAll public static void init() throws Exception { config = new OzoneConfiguration(); + config.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); config.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1); config.setTimeDuration(ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); config.setBoolean(OzoneConfigKeys.OZONE_ACL_ENABLED, true); @@ -320,8 +322,10 @@ public void testOrphanBlock() throws Exception { .setTxID(1L) .setCount(10) .build())); - dn2Service.getDatanodeStateMachine().getContext() - .addCommand(deleteBlocksCommand); + StateContext context = dn2Service.getDatanodeStateMachine().getContext(); + deleteBlocksCommand.setTerm(context.getTermOfLeaderSCM().isPresent() ? + context.getTermOfLeaderSCM().getAsLong() : 0); + context.addCommand(deleteBlocksCommand); try (XceiverClientGrpc client = new XceiverClientGrpc( createSingleNodePipeline(orphanPipeline, dn2, 1), cluster.getConf())) { From e8d96f422efe094b9191dc2d65459a29e8a8faac Mon Sep 17 00:00:00 2001 From: Chung En Lee Date: Tue, 7 Jan 2025 17:26:12 +0800 Subject: [PATCH 051/168] HDDS-12022. 
Enable SCM Ratis in TestStorageContainerManager (#7651) --- .../hdds/scm/TestStorageContainerManager.java | 71 ++++++------------- 1 file changed, 22 insertions(+), 49 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java index 47f6d3823d2..14df7670f67 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.ha.RatisUtil; import org.apache.hadoop.hdds.scm.ha.SCMContext; +import org.apache.hadoop.hdds.scm.ha.SCMHANodeDetails; import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; import org.apache.hadoop.hdds.scm.ha.SCMRatisServerImpl; import org.apache.hadoop.hdds.scm.node.DatanodeInfo; @@ -92,7 +93,6 @@ import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.Time; import org.apache.log4j.Level; @@ -142,15 +142,12 @@ import static org.apache.hadoop.hdds.scm.HddsTestUtils.mockRemoteUser; import static org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils.setInternalState; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; -import static org.apache.ozone.test.GenericTestUtils.PortAllocator.getFreePort; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.any; @@ -191,11 +188,13 @@ public void cleanupDefaults() { public void testRpcPermission() throws Exception { // Test with default configuration OzoneConfiguration defaultConf = new OzoneConfiguration(); + defaultConf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); testRpcPermissionWithConf(defaultConf, any -> false, "unknownUser"); // Test with ozone.administrators defined in configuration String admins = "adminUser1, adminUser2"; OzoneConfiguration ozoneConf = new OzoneConfiguration(); + ozoneConf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); ozoneConf.setStrings(OzoneConfigKeys.OZONE_ADMINISTRATORS, admins); // Non-admin user will get permission denied. // Admin user will pass the permission check. 
@@ -267,6 +266,7 @@ private void verifyPermissionDeniedException(Exception e, String userName) { public void testBlockDeletionTransactions() throws Exception { int numKeys = 5; OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); DatanodeConfiguration datanodeConfiguration = conf.getObject( @@ -358,6 +358,7 @@ public void testBlockDeletionTransactions() throws Exception { @Test public void testOldDNRegistersToReInitialisedSCM() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS); conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 3000, TimeUnit.MILLISECONDS); @@ -369,10 +370,13 @@ public void testOldDNRegistersToReInitialisedSCM() throws Exception { cluster.waitForClusterToBeReady(); HddsDatanodeService datanode = cluster.getHddsDatanodes().get(0); StorageContainerManager scm = cluster.getStorageContainerManager(); + File dbDir = scm.getScmMetadataStore().getStore().getDbLocation(); scm.stop(); // re-initialise SCM with new clusterID + GenericTestUtils.deleteDirectory(new File(SCMHAUtils.getRatisStorageDir(conf))); + GenericTestUtils.deleteDirectory(dbDir); GenericTestUtils.deleteDirectory( new File(scm.getScmStorageConfig().getStorageDir())); String newClusterId = UUID.randomUUID().toString(); @@ -413,7 +417,7 @@ public void testOldDNRegistersToReInitialisedSCM() throws Exception { datanode.getDatanodeDetails()); GenericTestUtils.waitFor( () -> scmDnHBDispatcherLog.getOutput().contains(expectedLog), 100, - 5000); + 30000); ExitUtil.disableSystemExit(); // As part of processing response for re-register, DN EndpointStateMachine // goes to GET-VERSION state which checks if there is already existing @@ -432,6 +436,7 @@ public void testOldDNRegistersToReInitialisedSCM() throws Exception { assertThat(versionEndPointTaskLog.getOutput()).contains( "org.apache.hadoop.ozone.common" + ".InconsistentStorageStateException: Mismatched ClusterIDs"); + scm.stop(); } } @@ -439,6 +444,7 @@ public void testOldDNRegistersToReInitialisedSCM() throws Exception { public void testBlockDeletingThrottling() throws Exception { int numKeys = 15; OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS); conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, @@ -549,6 +555,7 @@ private Map> createDeleteTXLog( @Test public void testSCMInitialization(@TempDir Path tempDir) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); Path scmPath = tempDir.resolve("scm-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); @@ -564,27 +571,13 @@ public void testSCMInitialization(@TempDir Path tempDir) throws Exception { assertEquals(NodeType.SCM, scmStore.getNodeType()); assertEquals(testClusterId, scmStore.getClusterID()); assertTrue(scmStore.isSCMHAEnabled()); - } - - @Test - public void testSCMInitializationWithHAEnabled(@TempDir Path tempDir) throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - 
conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, "10s"); - Path scmPath = tempDir.resolve("scm-meta"); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); - - final UUID clusterId = UUID.randomUUID(); - // This will initialize SCM - StorageContainerManager.scmInit(conf, clusterId.toString()); - SCMStorageConfig scmStore = new SCMStorageConfig(conf); - assertTrue(scmStore.isSCMHAEnabled()); validateRatisGroupExists(conf, clusterId.toString()); } @Test public void testSCMReinitialization(@TempDir Path tempDir) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); Path scmPath = tempDir.resolve("scm-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); //This will set the cluster id in the version file @@ -646,6 +639,7 @@ public static void validateRatisGroupExists(OzoneConfiguration conf, @Test void testSCMInitializationFailure(@TempDir Path tempDir) { OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); Path scmPath = tempDir.resolve("scm-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); @@ -658,15 +652,21 @@ public void testScmInfo(@TempDir Path tempDir) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); Path scmPath = tempDir.resolve("scm-meta"); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); SCMStorageConfig scmStore = new SCMStorageConfig(conf); String clusterId = UUID.randomUUID().toString(); String scmId = UUID.randomUUID().toString(); scmStore.setClusterId(clusterId); scmStore.setScmId(scmId); + scmStore.setSCMHAFlag(true); // writes the version file properties scmStore.initialize(); + SCMRatisServerImpl.initialize(clusterId, scmId, + SCMHANodeDetails.loadSCMHAConfig(conf, scmStore) + .getLocalNodeDetails(), conf); StorageContainerManager scm = HddsTestUtils.getScmSimple(conf); + scm.start(); //Reads the SCM Info from SCM instance ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo(); assertEquals(clusterId, scmInfo.getClusterId()); @@ -684,6 +684,7 @@ public void testScmInfo(@TempDir Path tempDir) throws Exception { public void testScmProcessDatanodeHeartbeat() throws Exception { String rackName = "/rack1"; OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, StaticMapping.class, DNSToSwitchMapping.class); StaticMapping.addNodeToRack(NetUtils.normalizeHostName(HddsUtils.getHostName(conf)), @@ -726,6 +727,7 @@ public void testScmProcessDatanodeHeartbeat() throws Exception { public void testCloseContainerCommandOnRestart() throws Exception { int numKeys = 15; OzoneConfiguration conf = new OzoneConfiguration(); + conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS); conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, @@ -943,35 +945,6 @@ public void testIncrementalContainerReportQueue() throws Exception { containerReportExecutors.close(); } - @Test - public void testNonRatisToRatis() - throws IOException, AuthenticationException, InterruptedException, - TimeoutException { - final OzoneConfiguration conf = new OzoneConfiguration(); - try (MiniOzoneCluster cluster = 
MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(3) - .build()) { - final StorageContainerManager nonRatisSCM = cluster - .getStorageContainerManager(); - assertNull(nonRatisSCM.getScmHAManager().getRatisServer()); - assertFalse(nonRatisSCM.getScmStorageConfig().isSCMHAEnabled()); - nonRatisSCM.stop(); - nonRatisSCM.join(); - - DefaultConfigManager.clearDefaultConfigs(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - StorageContainerManager.scmInit(conf, cluster.getClusterId()); - conf.setInt(ScmConfigKeys.OZONE_SCM_DATANODE_PORT_KEY, getFreePort()); - conf.unset(ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY); - cluster.restartStorageContainerManager(false); - - final StorageContainerManager ratisSCM = cluster - .getStorageContainerManager(); - assertNotNull(ratisSCM.getScmHAManager().getRatisServer()); - assertTrue(ratisSCM.getScmStorageConfig().isSCMHAEnabled()); - } - } - private void addTransactions(StorageContainerManager scm, DeletedBlockLog delLog, Map> containerBlocksMap) From 3dfd2410a04259e58afc99c08c89adb4abcea30b Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Tue, 7 Jan 2025 10:21:17 -0800 Subject: [PATCH 052/168] HDDS-11753. Deprecate file per chunk layout from datanode code. (#7654) --- .../container/common/impl/ContainerLayoutVersion.java | 3 ++- .../ozone/container/keyvalue/KeyValueHandler.java | 10 ++++++++++ .../ozone/container/keyvalue/TestKeyValueHandler.java | 8 ++++++++ 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java index 210c538f274..99f56baa799 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLayoutVersion.java @@ -34,6 +34,7 @@ */ public enum ContainerLayoutVersion { + @Deprecated /* Use FILE_PER_BLOCK instead */ FILE_PER_CHUNK(1, "One file per chunk") { @Override public File getChunkFile(File chunkDir, BlockID blockID, String chunkName) { @@ -47,7 +48,7 @@ public File getChunkFile(File chunkDir, BlockID blockID, String chunkName) { } }; - private static final ContainerLayoutVersion + public static final ContainerLayoutVersion DEFAULT_LAYOUT = ContainerLayoutVersion.FILE_PER_BLOCK; private static final List CONTAINER_LAYOUT_VERSIONS = diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index 9cae71e9baf..0ef8d5e68a0 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -40,6 +40,7 @@ import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; @@ -124,6 +125,7 @@ import static 
org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.unsupportedRequest; import static org.apache.hadoop.hdds.scm.utils.ClientCommandsUtils.getReadChunkVersion; import static org.apache.hadoop.ozone.OzoneConsts.INCREMENTAL_CHUNK_LIST; +import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.DEFAULT_LAYOUT; import static org.apache.hadoop.ozone.container.common.interfaces.Container.ScanResult; import org.apache.hadoop.util.Time; @@ -191,6 +193,14 @@ public KeyValueHandler(ConfigurationSource config, byteBufferToByteString = ByteStringConversion .createByteBufferConversion(isUnsafeByteBufferConversionEnabled); + + if (ContainerLayoutVersion.getConfiguredVersion(conf) == + ContainerLayoutVersion.FILE_PER_CHUNK) { + LOG.warn("FILE_PER_CHUNK layout is not supported. Falling back to default : {}.", + DEFAULT_LAYOUT.name()); + OzoneConfiguration.of(conf).set(ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY, + DEFAULT_LAYOUT.name()); + } } @VisibleForTesting diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java index 655ecbb48b4..d02910358de 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java @@ -57,6 +57,7 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_VOLUME_CHOOSING_POLICY; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -292,6 +293,13 @@ public void testVolumeSetInKeyValueHandler() throws Exception { keyValueHandler.getVolumeChoosingPolicyForTesting() .getClass().getName()); + // Ensures that KeyValueHandler falls back to FILE_PER_BLOCK. + conf.set(OZONE_SCM_CONTAINER_LAYOUT_KEY, "FILE_PER_CHUNK"); + new KeyValueHandler(conf, context.getParent().getDatanodeDetails().getUuidString(), cset, volumeSet, + metrics, c -> { }); + assertEquals(ContainerLayoutVersion.FILE_PER_BLOCK, + conf.getEnum(OZONE_SCM_CONTAINER_LAYOUT_KEY, ContainerLayoutVersion.FILE_PER_CHUNK)); + //Set a class which is not of sub class of VolumeChoosingPolicy conf.set(HDDS_DATANODE_VOLUME_CHOOSING_POLICY, "org.apache.hadoop.ozone.container.common.impl.HddsDispatcher"); From 984027cc250448418a253dc1f2ea3e8b596263de Mon Sep 17 00:00:00 2001 From: Nandakumar Vadivelu Date: Wed, 8 Jan 2025 02:05:10 +0530 Subject: [PATCH 053/168] HDDS-12035. 
Enable sortpom in hdds-hadoop-dependency-server and -test (#7659) --- hadoop-hdds/erasurecode/pom.xml | 4 +- hadoop-hdds/hadoop-dependency-client/pom.xml | 2 +- hadoop-hdds/hadoop-dependency-server/pom.xml | 124 +++++++++---------- hadoop-hdds/hadoop-dependency-test/pom.xml | 30 ++--- 4 files changed, 76 insertions(+), 84 deletions(-) diff --git a/hadoop-hdds/erasurecode/pom.xml b/hadoop-hdds/erasurecode/pom.xml index bb98efe1894..b84b6e087c3 100644 --- a/hadoop-hdds/erasurecode/pom.xml +++ b/hadoop-hdds/erasurecode/pom.xml @@ -38,13 +38,13 @@ org.slf4j slf4j-api + + org.apache.ozone hdds-config test - - org.apache.ozone hdds-hadoop-dependency-test diff --git a/hadoop-hdds/hadoop-dependency-client/pom.xml b/hadoop-hdds/hadoop-dependency-client/pom.xml index 276f6935584..c05614456e7 100644 --- a/hadoop-hdds/hadoop-dependency-client/pom.xml +++ b/hadoop-hdds/hadoop-dependency-client/pom.xml @@ -100,6 +100,7 @@ commons-beanutils commons-beanutils + commons-codec commons-codec @@ -152,7 +153,6 @@ org.apache.commons commons-lang3 - org.apache.commons commons-math3 diff --git a/hadoop-hdds/hadoop-dependency-server/pom.xml b/hadoop-hdds/hadoop-dependency-server/pom.xml index 05923dab2cd..324b21ef668 100644 --- a/hadoop-hdds/hadoop-dependency-server/pom.xml +++ b/hadoop-hdds/hadoop-dependency-server/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -24,65 +21,68 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-hadoop-dependency-server 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Hadoop server dependencies - - Apache Ozone HDDS Hadoop Server dependencies jar + Apache Ozone HDDS Hadoop Server dependencies + Apache Ozone Distributed Data Store Hadoop server dependencies - true - true + + true + + com.nimbusds + nimbus-jose-jwt + + + + commons-cli + commons-cli + org.apache.hadoop hadoop-annotations org.apache.hadoop - hadoop-common + hadoop-auth ${hadoop.version} - com.nimbusds - nimbus-jose-jwt + ch.qos.reload4j + reload4j - org.xerial.snappy - snappy-java + log4j + log4j org.apache.curator * - org.apache.avro - avro + org.apache.kerby + kerb-simplekdc org.apache.zookeeper zookeeper - org.apache.commons + org.slf4j * + + + + org.apache.hadoop + hadoop-common + ${hadoop.version} + - org.codehaus.jackson - jackson-mapper-asl - - - org.codehaus.jackson - jackson-core-asl - - - org.codehaus.jackson - jackson-jaxrs - - - org.codehaus.jackson - jackson-xc + ch.qos.reload4j + reload4j com.github.pjfanning @@ -93,32 +93,25 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jsr305 - com.sun.jersey - * + com.nimbusds + nimbus-jose-jwt - org.apache.kerby - kerb-simplekdc + com.sun.jersey + * log4j log4j - ch.qos.reload4j - reload4j + org.apache.avro + avro - org.slf4j + org.apache.commons * - - - - org.apache.hadoop - hadoop-auth - ${hadoop.version} - org.apache.curator * @@ -132,34 +125,41 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> zookeeper - log4j - log4j + org.codehaus.jackson + jackson-core-asl - ch.qos.reload4j - reload4j + org.codehaus.jackson + jackson-jaxrs + + + org.codehaus.jackson + jackson-mapper-asl + + + org.codehaus.jackson + jackson-xc org.slf4j * + + org.xerial.snappy + snappy-java + - - com.nimbusds - nimbus-jose-jwt - - - - commons-cli - commons-cli - org.apache.hadoop hadoop-hdfs ${hadoop.version} compile + + ch.qos.reload4j + reload4j + com.sun.jersey * @@ -168,17 +168,13 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> io.netty 
* - - org.fusesource.leveldbjni - leveldbjni-all - log4j log4j - ch.qos.reload4j - reload4j + org.fusesource.leveldbjni + leveldbjni-all org.slf4j diff --git a/hadoop-hdds/hadoop-dependency-test/pom.xml b/hadoop-hdds/hadoop-dependency-test/pom.xml index 5df30c7dfdd..48bdff714fb 100644 --- a/hadoop-hdds/hadoop-dependency-test/pom.xml +++ b/hadoop-hdds/hadoop-dependency-test/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -24,17 +21,24 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-hadoop-dependency-test 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Hadoop test dependencies - - Apache Ozone HDDS Hadoop Test dependencies jar + Apache Ozone HDDS Hadoop Test dependencies + Apache Ozone Distributed Data Store Hadoop test dependencies - true - true + + true + + commons-codec + commons-codec + + + org.apache.commons + commons-compress + org.apache.hadoop hadoop-common @@ -59,14 +63,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - commons-codec - commons-codec - - - org.apache.commons - commons-compress - org.assertj From 2fc9c6e5f16724ef4bdc4ba4f9e1988e959a25cf Mon Sep 17 00:00:00 2001 From: Nandakumar Vadivelu Date: Wed, 8 Jan 2025 10:03:33 +0530 Subject: [PATCH 054/168] HDDS-12034. Enable sortpom in hdds-interface-admin, -client and -server. (#7660) --- hadoop-hdds/interface-admin/pom.xml | 21 +++----- hadoop-hdds/interface-client/pom.xml | 78 +++++++++------------------- hadoop-hdds/interface-server/pom.xml | 58 +++++++-------------- 3 files changed, 53 insertions(+), 104 deletions(-) diff --git a/hadoop-hdds/interface-admin/pom.xml b/hadoop-hdds/interface-admin/pom.xml index 94122423085..047db244faa 100644 --- a/hadoop-hdds/interface-admin/pom.xml +++ b/hadoop-hdds/interface-admin/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -24,15 +21,15 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-interface-admin 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Admin interface - - Apache Ozone HDDS Admin Interface jar + Apache Ozone HDDS Admin Interface + Apache Ozone Distributed Data Store Admin interface - true - true - true + + true + + true @@ -72,9 +69,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test-compile - - com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier} - + com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier} ${basedir}/src/main/proto/ target/generated-sources/java false diff --git a/hadoop-hdds/interface-client/pom.xml b/hadoop-hdds/interface-client/pom.xml index b373d11d507..da6dec5cda4 100644 --- a/hadoop-hdds/interface-client/pom.xml +++ b/hadoop-hdds/interface-client/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -24,15 +21,15 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-interface-client 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Client interface - - Apache Ozone HDDS Client Interface jar + Apache Ozone HDDS Client Interface + Apache Ozone Distributed Data Store Client interface - true - true - true + + true + + true @@ -40,6 +37,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> com.google.protobuf protobuf-java + + javax.annotation + javax.annotation-api + org.apache.hadoop.thirdparty hadoop-shaded-protobuf_3_25 @@ -49,10 +50,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ratis-thirdparty-misc ${ratis.thirdparty.version} - - javax.annotation - javax.annotation-api - @@ -82,9 +79,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test-compile-custom - - com.google.protobuf:protoc:${grpc.protobuf-compile.version}:exe:${os.detected.classifier} - + com.google.protobuf:protoc:${grpc.protobuf-compile.version}:exe:${os.detected.classifier} ${basedir}/src/main/proto/ DatanodeClientProtocol.proto @@ -93,9 +88,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> target/generated-sources/java false grpc-java - - io.grpc:protoc-gen-grpc-java:${io.grpc.version}:exe:${os.detected.classifier} - + io.grpc:protoc-gen-grpc-java:${io.grpc.version}:exe:${os.detected.classifier} @@ -105,9 +98,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test-compile - - com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier} - + com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier} ${basedir}/src/main/proto/ hdds.proto @@ -124,9 +115,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test-compile - - com.google.protobuf:protoc:${proto3.hadooprpc.protobuf.version}:exe:${os.detected.classifier} - + com.google.protobuf:protoc:${proto3.hadooprpc.protobuf.version}:exe:${os.detected.classifier} ${basedir}/src/main/proto/ hdds.proto @@ -143,38 +132,21 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ${maven-antrun-plugin.version} + + run + generate-sources - - - - - - - - - - - - + + + + + + + - - run - diff --git a/hadoop-hdds/interface-server/pom.xml b/hadoop-hdds/interface-server/pom.xml index 539a0a5430e..83aa5f72e36 100644 --- a/hadoop-hdds/interface-server/pom.xml +++ b/hadoop-hdds/interface-server/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -24,22 +21,18 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-interface-server 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Server interface - - Apache Ozone HDDS Server Interface jar + Apache Ozone HDDS Server Interface + Apache Ozone Distributed Data Store Server interface - true - true - true + + true + + true - - org.apache.ratis - ratis-thirdparty-misc - com.google.protobuf protobuf-java @@ -50,6 +43,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds-interface-client + + org.apache.ratis + ratis-thirdparty-misc + @@ -79,9 +76,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test-compile-custom - - com.google.protobuf:protoc:${grpc.protobuf-compile.version}:exe:${os.detected.classifier} - + com.google.protobuf:protoc:${grpc.protobuf-compile.version}:exe:${os.detected.classifier} ${basedir}/src/main/proto/ InterSCMProtocol.proto @@ -90,9 +85,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> target/generated-sources/java false grpc-java - - io.grpc:protoc-gen-grpc-java:${io.grpc.version}:exe:${os.detected.classifier} - + io.grpc:protoc-gen-grpc-java:${io.grpc.version}:exe:${os.detected.classifier} @@ -102,9 +95,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test-compile - - com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier} - + com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier} ${basedir}/src/main/proto/ InterSCMProtocol.proto @@ -121,26 +112,17 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ${maven-antrun-plugin.version} + + run + generate-sources - - - - - - + + + - - run - From 36a430db458a0b9b913a51c28199694f9273c64f Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Wed, 8 Jan 2025 06:43:36 +0100 Subject: [PATCH 055/168] HDDS-7307. 
Move S3 Gateway web content to separate port (#6898) --- .../src/main/resources/ozone-default.xml | 42 ++++ .../hdds/server/http/BaseHttpServer.java | 22 ++- .../hadoop/hdds/server/http/HttpServer2.java | 47 +++-- .../main/compose/ozone/docker-compose.yaml | 1 + .../src/main/compose/ozone/prometheus.yml | 4 +- .../src/main/smoketest/s3/bucketcreate.robot | 12 +- .../src/main/smoketest/s3/commonawslib.robot | 14 ++ .../smoketest/s3/s3_compatbility_check.sh | 1 + .../main/smoketest/s3/secretgenerate.robot | 2 +- .../src/main/smoketest/s3/secretrevoke.robot | 3 +- .../dist/src/main/smoketest/s3/webui.robot | 13 +- .../dist/src/main/smoketest/spnego/web.robot | 2 +- .../hadoop/ozone/s3/AuthorizationFilter.java | 6 - .../org/apache/hadoop/ozone/s3/Gateway.java | 6 + .../hadoop/ozone/s3/GatewayApplication.java | 2 +- .../ozone/s3/RootPageDisplayFilter.java | 64 ------ .../hadoop/ozone/s3/S3GatewayConfigKeys.java | 14 ++ .../hadoop/ozone/s3/S3GatewayHttpServer.java | 80 +------- .../ozone/s3/S3GatewayWebAdminServer.java | 186 ++++++++++++++++++ .../ozone/s3/VirtualHostStyleFilter.java | 7 - .../hadoop/ozone/s3secret/Application.java} | 16 +- .../s3secret/S3SecretManagementEndpoint.java | 2 +- .../resources/webapps/s3g-web/WEB-INF/web.xml | 33 ++++ .../{static => s3g-web}/images/ozone.ico | Bin .../webapps/{static => s3g-web}/index.html | 24 +-- .../webapps/s3gateway/WEB-INF/web.xml | 11 +- .../ozone/s3/TestAuthorizationFilter.java | 22 --- .../ozone/s3/TestVirtualHostStyleFilter.java | 23 --- 28 files changed, 388 insertions(+), 271 deletions(-) delete mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/RootPageDisplayFilter.java create mode 100644 hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayWebAdminServer.java rename hadoop-ozone/s3gateway/src/main/{resources/webapps/static/s3g.js => java/org/apache/hadoop/ozone/s3secret/Application.java} (75%) create mode 100644 hadoop-ozone/s3gateway/src/main/resources/webapps/s3g-web/WEB-INF/web.xml rename hadoop-ozone/s3gateway/src/main/resources/webapps/{static => s3g-web}/images/ozone.ico (100%) rename hadoop-ozone/s3gateway/src/main/resources/webapps/{static => s3g-web}/index.html (74%) diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index 1fcef139daf..dfd058f5d70 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -1892,6 +1892,48 @@ interfaces by setting it to 0.0.0.0. + + ozone.s3g.webadmin.http.enabled + true + OZONE, S3GATEWAY + This option can be used to disable the web server which serves additional content in Ozone S3 Gateway. + + + + ozone.s3g.webadmin.https-address + + OZONE, S3GATEWAY + Ozone S3Gateway content server's HTTPS address and port. + + + + ozone.s3g.webadmin.https-bind-host + + OZONE, S3GATEWAY + The actual address the HTTPS server will bind to. If this optional address + is set, it overrides only the hostname portion of ozone.s3g.webadmin.https-address. + This is useful for making the Ozone S3Gateway HTTPS server listen on all + interfaces by setting it to 0.0.0.0. + + + + ozone.s3g.webadmin.http-address + 0.0.0.0:19878 + OZONE, S3GATEWAY + The address and port where Ozone S3Gateway serves + web content. + + + + ozone.s3g.webadmin.http-bind-host + 0.0.0.0 + OZONE, S3GATEWAY + The actual address the HTTP server will bind to. 
If this optional address + is set, it overrides only the hostname portion of ozone.s3g.webadmin.http-address. + This is useful for making the Ozone S3Gateway HTTP server listen on all + interfaces by setting it to 0.0.0.0. + + ozone.s3g.http.auth.kerberos.principal HTTP/_HOST@REALM diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java index 44c18231549..ffa91404688 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java @@ -139,14 +139,23 @@ public BaseHttpServer(MutableConfigurationSource conf, String name) builder.configureXFrame(xFrameEnabled).setXFrameOption(xFrameOptionValue); + boolean addDefaultApps = shouldAddDefaultApps(); + if (!addDefaultApps) { + builder.withoutDefaultApps(); + } + httpServer = builder.build(); - httpServer.addServlet("conf", "/conf", HddsConfServlet.class); - httpServer.addServlet("logstream", "/logstream", LogStreamServlet.class); - prometheusSupport = + // TODO move these to HttpServer2.addDefaultApps + if (addDefaultApps) { + httpServer.addServlet("conf", "/conf", HddsConfServlet.class); + httpServer.addServlet("logstream", "/logstream", LogStreamServlet.class); + } + + prometheusSupport = addDefaultApps && conf.getBoolean(HddsConfigKeys.HDDS_PROMETHEUS_ENABLED, true); - profilerSupport = + profilerSupport = addDefaultApps && conf.getBoolean(HddsConfigKeys.HDDS_PROFILER_ENABLED, false); if (prometheusSupport) { @@ -477,4 +486,9 @@ public boolean isSecurityEnabled() { protected abstract String getHttpAuthConfigPrefix(); + /** Override to disable the default servlets. 
*/ + protected boolean shouldAddDefaultApps() { + return true; + } + } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java index 9d037fed6bc..691f5374e6f 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java @@ -250,6 +250,7 @@ public static class Builder { private boolean xFrameEnabled; private XFrameOption xFrameOption = XFrameOption.SAMEORIGIN; + private boolean skipDefaultApps; public Builder setName(String serverName) { this.name = serverName; @@ -446,6 +447,11 @@ private void loadSSLConfiguration() throws IOException { excludeCiphers = sslConf.get(SSLFactory.SSL_SERVER_EXCLUDE_CIPHER_LIST); } + public Builder withoutDefaultApps() { + this.skipDefaultApps = true; + return this; + } + public HttpServer2 build() throws IOException { Preconditions.checkNotNull(name, "name is not set"); Preconditions.checkState(!endpoints.isEmpty(), "No endpoints specified"); @@ -592,18 +598,13 @@ private HttpServer2(final Builder b) throws IOException { this.findPort = b.findPort; this.portRanges = b.portRanges; - initializeWebServer(b.name, b.hostName, b.conf, b.pathSpecs, - b.authFilterConfigurationPrefix, b.securityEnabled); + initializeWebServer(b); } - private void initializeWebServer(String name, String hostName, - MutableConfigurationSource conf, String[] pathSpecs, - String authFilterConfigPrefix, - boolean securityEnabled) throws IOException { - + private void initializeWebServer(Builder builder) throws IOException { Preconditions.checkNotNull(webAppContext); - int maxThreads = conf.getInt(HTTP_MAX_THREADS_KEY, -1); + int maxThreads = builder.conf.getInt(HTTP_MAX_THREADS_KEY, -1); // If HTTP_MAX_THREADS is not configured, QueueThreadPool() will use the // default value (currently 250). 
@@ -613,13 +614,13 @@ private void initializeWebServer(String name, String hostName, threadPool.setMaxThreads(maxThreads); } - metrics = HttpServer2Metrics.create(threadPool, name); + metrics = HttpServer2Metrics.create(threadPool, builder.name); SessionHandler handler = webAppContext.getSessionHandler(); handler.setHttpOnly(true); handler.getSessionCookieConfig().setSecure(true); ContextHandlerCollection contexts = new ContextHandlerCollection(); - RequestLog requestLog = HttpRequestLog.getRequestLog(name); + RequestLog requestLog = HttpRequestLog.getRequestLog(builder.name); handlers.addHandler(contexts); if (requestLog != null) { @@ -628,20 +629,22 @@ private void initializeWebServer(String name, String hostName, handlers.addHandler(requestLogHandler); } handlers.addHandler(webAppContext); - final String appDir = getWebAppsPath(name); - addDefaultApps(contexts, appDir, conf); + final String appDir = getWebAppsPath(builder.name); + if (!builder.skipDefaultApps) { + addDefaultApps(contexts, appDir, builder.conf); + } webServer.setHandler(handlers); - Map config = generateFilterConfiguration(conf); + Map config = generateFilterConfiguration(builder.conf); addGlobalFilter("safety", QuotingInputFilter.class.getName(), config); - final FilterInitializer[] initializers = getFilterInitializers(conf); + final FilterInitializer[] initializers = getFilterInitializers(builder.conf); if (initializers != null) { - conf.set(BIND_ADDRESS, hostName); + builder.conf.set(BIND_ADDRESS, builder.hostName); org.apache.hadoop.conf.Configuration hadoopConf = - LegacyHadoopConfigurationSource.asHadoopConfiguration(conf); + LegacyHadoopConfigurationSource.asHadoopConfiguration(builder.conf); Map filterConfig = getFilterConfigMap(hadoopConf, - authFilterConfigPrefix); + builder.authFilterConfigurationPrefix); for (FilterInitializer c : initializers) { - if ((c instanceof AuthenticationFilterInitializer) && securityEnabled) { + if ((c instanceof AuthenticationFilterInitializer) && builder.securityEnabled) { addFilter("authentication", AuthenticationFilter.class.getName(), filterConfig); } else { @@ -650,10 +653,12 @@ private void initializeWebServer(String name, String hostName, } } - addDefaultServlets(); + if (!builder.skipDefaultApps) { + addDefaultServlets(); + } - if (pathSpecs != null) { - for (String path : pathSpecs) { + if (builder.pathSpecs != null) { + for (String path : builder.pathSpecs) { LOG.info("adding path spec: {}", path); addFilterPathMapping(path, webAppContext); } diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml index b5424cc7c88..3aa99da311e 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml @@ -73,6 +73,7 @@ services: <<: *replication ports: - 9878:9878 + - 19878:19878 command: ["ozone","s3g"] recon: <<: *common-config diff --git a/hadoop-ozone/dist/src/main/compose/ozone/prometheus.yml b/hadoop-ozone/dist/src/main/compose/ozone/prometheus.yml index a88c30d57f4..562bcb9e71b 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone/prometheus.yml +++ b/hadoop-ozone/dist/src/main/compose/ozone/prometheus.yml @@ -28,7 +28,7 @@ scrape_configs: labels: component: om - targets: - - "s3g:9878" + - "s3g:19878" labels: component: s3g - targets: # During compose bring up the number of datanodes can be specific, adding 10 nodes to account for that. 
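(A quick way to sanity-check the relocated servlets after this change is shown below; this is a sketch only, assuming the compose service name s3g and the default web-admin port 19878 introduced by this patch, plus the /conf servlet registered by BaseHttpServer earlier in the patch. The /prom path is an assumption based on Prometheus support being enabled; on secure clusters add --negotiate -u : as the smoketests do.
    curl http://s3g:19878/conf
    curl http://s3g:19878/prom
The S3 API itself stays on port 9878.)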
@@ -60,4 +60,4 @@ scrape_configs: - targets: - "recon:9888" labels: - component: recon \ No newline at end of file + component: recon diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot b/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot index 39ddbde41b0..b8c0820bfd5 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/bucketcreate.robot @@ -21,7 +21,7 @@ Resource ../commonlib.robot Resource commonawslib.robot Test Timeout 5 minutes Suite Setup Setup s3 tests -Default Tags no-bucket-type +Test Tags no-bucket-type *** Variables *** ${ENDPOINT_URL} http://s3g:9878 @@ -43,6 +43,7 @@ Create bucket with invalid bucket name Should contain ${result} InvalidBucketName Create new bucket and check default group ACL + [tags] aws-skip ${bucket} = Create bucket ${acl} = Execute ozone sh bucket getacl s3v/${bucket} ${group} = Get Regexp Matches ${acl} "GROUP" @@ -53,3 +54,12 @@ Create new bucket and check default group ACL Should contain ${json}[1][aclList] READ Should contain ${json}[1][aclList] LIST END + +Test buckets named like web endpoints + [tags] aws-skip + ${path} = Create Random File KB 64 + + FOR ${name} IN conf jmx logs logstream prof prom secret stacks static + Create bucket with name ${name} + Put object to bucket bucket=${name} key=testkey path=${path} + END diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot index ac64ee36537..7b5bee321bb 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/commonawslib.robot @@ -172,6 +172,20 @@ Generate random prefix ${random} = Generate Ozone String Set Global Variable ${PREFIX} ${random} +# Verify object put by listing and getting it +Put object to bucket + [arguments] ${bucket} ${key} ${path} + + Execute AWSS3ApiCli put-object --bucket ${bucket} --key ${key} --body ${path} + + ${result} = Execute AWSS3ApiCli list-objects --bucket ${bucket} + Should contain ${result} ${key} + + Execute AWSS3ApiCli get-object --bucket ${bucket} --key ${key} ${path}.verify + Compare files ${path} ${path}.verify + + [teardown] Remove File ${path}.verify + Revoke S3 secrets Execute and Ignore Error ozone s3 revokesecret -y Execute and Ignore Error ozone s3 revokesecret -y -u testuser diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/s3_compatbility_check.sh b/hadoop-ozone/dist/src/main/smoketest/s3/s3_compatbility_check.sh index ab2807167d0..f147de90852 100755 --- a/hadoop-ozone/dist/src/main/smoketest/s3/s3_compatbility_check.sh +++ b/hadoop-ozone/dist/src/main/smoketest/s3/s3_compatbility_check.sh @@ -60,6 +60,7 @@ run_robot_test() { TEST_NAME=$1 robot \ --nostatusrc \ + --exclude aws-skip \ -v ENDPOINT_URL:https://s3.$OZONE_TEST_S3_REGION.amazonaws.com \ -v BUCKET:$OZONE_TEST_S3_BUCKET1 \ -v DESTBUCKET:$OZONE_TEST_S3_BUCKET2 \ diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot b/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot index e0c2fc7f818..db561397e1c 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/secretgenerate.robot @@ -27,7 +27,7 @@ Test Setup Run Keywords Kinit test user testuser testuser.k Test Teardown Run Keyword Revoke S3 secrets *** Variables *** -${ENDPOINT_URL} http://s3g:9878 +${ENDPOINT_URL} http://s3g:19878 ${SECURITY_ENABLED} true *** Test Cases *** diff --git 
a/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot b/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot index ffb03a85a8a..ed66a27a578 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/secretrevoke.robot @@ -24,9 +24,10 @@ Test Timeout 5 minutes Default Tags no-bucket-type Test Setup Run Keywords Kinit test user testuser testuser.keytab ... AND Revoke S3 secrets +Suite Teardown Setup v4 headers *** Variables *** -${ENDPOINT_URL} http://s3g:9878 +${ENDPOINT_URL} http://s3g:19878 ${SECURITY_ENABLED} true *** Test Cases *** diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/webui.robot b/hadoop-ozone/dist/src/main/smoketest/s3/webui.robot index 43bd76659bd..896a86c7e4e 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/webui.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/webui.robot @@ -24,15 +24,12 @@ Suite Setup Setup s3 tests Default Tags no-bucket-type *** Variables *** -${ENDPOINT_URL} http://s3g:9878 -${BUCKET} generated + +${S3G_WEB_UI} http://s3g:19878 + *** Test Cases *** -S3 Gateway Web UI - Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit HTTP user - ${result} = Execute curl --negotiate -u : -v ${ENDPOINT_URL} - Should contain ${result} Location: ignore_case=True - Should contain ${result} /static/ - ${result} = Execute curl --negotiate -u : -v ${ENDPOINT_URL}/static/index.html +Check web UI + ${result} = Execute curl --negotiate -u : -v ${S3G_WEB_UI} Should contain ${result} Apache Ozone S3 diff --git a/hadoop-ozone/dist/src/main/smoketest/spnego/web.robot b/hadoop-ozone/dist/src/main/smoketest/spnego/web.robot index 654f8aef675..b18a99443ce 100644 --- a/hadoop-ozone/dist/src/main/smoketest/spnego/web.robot +++ b/hadoop-ozone/dist/src/main/smoketest/spnego/web.robot @@ -27,7 +27,7 @@ ${SCM} scm ${OM_URL} http://om:9874 ${RECON_URL} http://recon:9888 -${S3G_URL} http://s3g:9878 +${S3G_URL} http://s3g:19878 ${SCM_URL} http://${SCM}:9876 @{BASE_URLS} ${OM_URL} ${RECON_URL} ${S3G_URL} ${SCM_URL} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java index cc63663bf22..d71dc83a049 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/AuthorizationFilter.java @@ -65,12 +65,6 @@ public class AuthorizationFilter implements ContainerRequestFilter { @Override public void filter(ContainerRequestContext context) throws IOException { - // Skip authentication if the uri is hitting S3Secret generation or - // revocation endpoint. 
- if (context.getUriInfo().getRequestUri().getPath().startsWith("/secret")) { - return; - } - try { signatureInfo.initialize(signatureProcessor.parseSignature()); if (signatureInfo.getVersion() == Version.V4) { diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java index 511592d3a04..96effa757b5 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/Gateway.java @@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.server.http.BaseHttpServer; import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.hdds.utils.HddsServerUtil; import org.apache.hadoop.ozone.OzoneSecurityUtil; @@ -59,6 +60,8 @@ public class Gateway extends GenericCli implements Callable { private static final Logger LOG = LoggerFactory.getLogger(Gateway.class); private S3GatewayHttpServer httpServer; + /** Servlets and static content on separate port. */ + private BaseHttpServer contentServer; private S3GatewayMetrics metrics; private final JvmPauseMonitor jvmPauseMonitor = newJvmPauseMonitor("S3G"); @@ -80,6 +83,7 @@ public Void call() throws Exception { loginS3GUser(OzoneConfigurationHolder.configuration()); setHttpBaseDir(OzoneConfigurationHolder.configuration()); httpServer = new S3GatewayHttpServer(OzoneConfigurationHolder.configuration(), "s3gateway"); + contentServer = new S3GatewayWebAdminServer(OzoneConfigurationHolder.configuration(), "s3g-web"); metrics = S3GatewayMetrics.create(OzoneConfigurationHolder.configuration()); start(); @@ -103,11 +107,13 @@ public void start() throws IOException { HddsServerUtil.initializeMetrics(OzoneConfigurationHolder.configuration(), "S3Gateway"); jvmPauseMonitor.start(); httpServer.start(); + contentServer.start(); } public void stop() throws Exception { LOG.info("Stopping Ozone S3 gateway"); httpServer.stop(); + contentServer.stop(); jvmPauseMonitor.stop(); S3GatewayMetrics.unRegister(); } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/GatewayApplication.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/GatewayApplication.java index 778b375a66e..c5a291b4450 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/GatewayApplication.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/GatewayApplication.java @@ -24,6 +24,6 @@ */ public class GatewayApplication extends ResourceConfig { public GatewayApplication() { - packages("org.apache.hadoop.ozone.s3", "org.apache.hadoop.ozone.s3secret"); + packages("org.apache.hadoop.ozone.s3"); } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/RootPageDisplayFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/RootPageDisplayFilter.java deleted file mode 100644 index 5cd3bd85f00..00000000000 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/RootPageDisplayFilter.java +++ /dev/null @@ -1,64 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.s3; - -import javax.servlet.Filter; -import javax.servlet.FilterChain; -import javax.servlet.FilterConfig; -import javax.servlet.ServletException; -import javax.servlet.ServletRequest; -import javax.servlet.ServletResponse; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; -import java.io.IOException; - -/** - * This redirect helps to show and info page in case the endpoint is opened - * from the browser. - */ -public class RootPageDisplayFilter implements Filter { - - @Override - public void init(FilterConfig filterConfig) throws ServletException { - - } - - @Override - public void doFilter(ServletRequest servletRequest, - ServletResponse servletResponse, FilterChain filterChain) - throws IOException, ServletException { - HttpServletRequest httpRequest = (HttpServletRequest) servletRequest; - String httpMethod = httpRequest.getMethod(); - String uri = httpRequest.getRequestURI(); - String authorizationHeader = httpRequest.getHeader("Authorization"); - if (httpMethod.equalsIgnoreCase("GET") && !containsAWSAuth(authorizationHeader) && uri.equals("/")) { - ((HttpServletResponse) servletResponse).sendRedirect("/static/"); - } else { - filterChain.doFilter(httpRequest, servletResponse); - } - } - - private boolean containsAWSAuth(String authorizationHeader) { - return authorizationHeader != null && authorizationHeader.startsWith("AWS"); - } - - @Override - public void destroy() { - - } -} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java index 9160025a016..3b9155298c6 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayConfigKeys.java @@ -38,10 +38,24 @@ public final class S3GatewayConfigKeys { public static final String OZONE_S3G_HTTPS_ADDRESS_KEY = "ozone.s3g.https-address"; + public static final String OZONE_S3G_WEBADMIN_HTTP_ENABLED_KEY = + "ozone.s3g.webadmin.http.enabled"; + public static final String OZONE_S3G_WEBADMIN_HTTP_BIND_HOST_KEY = + "ozone.s3g.webadmin.http-bind-host"; + public static final String OZONE_S3G_WEBADMIN_HTTPS_BIND_HOST_KEY = + "ozone.s3g.webadmin.https-bind-host"; + public static final String OZONE_S3G_WEBADMIN_HTTP_ADDRESS_KEY = + "ozone.s3g.webadmin.http-address"; + public static final String OZONE_S3G_WEBADMIN_HTTPS_ADDRESS_KEY = + "ozone.s3g.webadmin.https-address"; + public static final String OZONE_S3G_HTTP_BIND_HOST_DEFAULT = "0.0.0.0"; public static final int OZONE_S3G_HTTP_BIND_PORT_DEFAULT = 9878; public static final int OZONE_S3G_HTTPS_BIND_PORT_DEFAULT = 9879; + public static final int OZONE_S3G_WEBADMIN_HTTP_BIND_PORT_DEFAULT = 19878; + public static final int OZONE_S3G_WEBADMIN_HTTPS_BIND_PORT_DEFAULT = 19879; + public static 
final String OZONE_S3G_DOMAIN_NAME = "ozone.s3g.domain.name"; public static final String OZONE_S3G_HTTP_AUTH_CONFIG_PREFIX = diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java index 8b6af74e072..ae7b428d363 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayHttpServer.java @@ -17,34 +17,12 @@ */ package org.apache.hadoop.ozone.s3; -import com.google.common.base.Strings; import java.io.IOException; -import java.util.HashMap; -import java.util.Map; -import javax.servlet.ServletException; -import javax.servlet.http.HttpServlet; -import javax.servlet.http.HttpServletRequest; -import javax.servlet.http.HttpServletResponse; import org.apache.hadoop.hdds.conf.MutableConfigurationSource; import org.apache.hadoop.hdds.server.http.BaseHttpServer; -import org.apache.hadoop.hdds.server.http.ServletElementsFactory; -import org.apache.hadoop.security.SecurityUtil; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.authentication.server.AuthenticationFilter; -import org.eclipse.jetty.servlet.FilterHolder; -import org.eclipse.jetty.servlet.FilterMapping; -import org.eclipse.jetty.servlet.ServletHandler; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_HTTP_BIND_HOST_KEY; -import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_KEYTAB_FILE; -import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL; -import static org.apache.hadoop.ozone.s3secret.S3SecretConfigKeys.OZONE_S3G_SECRET_HTTP_AUTH_TYPE_KEY; -import static org.apache.hadoop.ozone.s3secret.S3SecretConfigKeys.OZONE_S3G_SECRET_HTTP_AUTH_TYPE_DEFAULT; -import static org.apache.hadoop.ozone.s3secret.S3SecretConfigKeys.OZONE_S3G_SECRET_HTTP_ENABLED_KEY; -import static org.apache.hadoop.ozone.s3secret.S3SecretConfigKeys.OZONE_S3G_SECRET_HTTP_ENABLED_KEY_DEFAULT; - /** * Http server to provide S3-compatible API. 
*/ @@ -61,50 +39,11 @@ public class S3GatewayHttpServer extends BaseHttpServer { public S3GatewayHttpServer(MutableConfigurationSource conf, String name) throws IOException { super(conf, name); - addServlet("icon", "/favicon.ico", IconServlet.class); - addSecretAuthentication(conf); } - private void addSecretAuthentication(MutableConfigurationSource conf) - throws IOException { - - if (conf.getBoolean(OZONE_S3G_SECRET_HTTP_ENABLED_KEY, - OZONE_S3G_SECRET_HTTP_ENABLED_KEY_DEFAULT)) { - String authType = conf.get(OZONE_S3G_SECRET_HTTP_AUTH_TYPE_KEY, - OZONE_S3G_SECRET_HTTP_AUTH_TYPE_DEFAULT); - - if (UserGroupInformation.isSecurityEnabled() - && authType.equals("kerberos")) { - ServletHandler handler = getWebAppContext().getServletHandler(); - Map params = new HashMap<>(); - - String principalInConf = - conf.get(OZONE_S3G_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL); - if (!Strings.isNullOrEmpty(principalInConf)) { - params.put("kerberos.principal", SecurityUtil.getServerPrincipal( - principalInConf, conf.get(OZONE_S3G_HTTP_BIND_HOST_KEY))); - } - String httpKeytab = conf.get(OZONE_S3G_KEYTAB_FILE); - if (!Strings.isNullOrEmpty(httpKeytab)) { - params.put("kerberos.keytab", httpKeytab); - } - params.put(AuthenticationFilter.AUTH_TYPE, "kerberos"); - - FilterHolder holder = ServletElementsFactory.createFilterHolder( - "secretAuthentication", AuthenticationFilter.class.getName(), - params); - FilterMapping filterMapping = - ServletElementsFactory.createFilterMapping( - "secretAuthentication", - new String[]{"/secret/*"}); - - handler.addFilter(holder, filterMapping); - } else { - LOG.error("Secret Endpoint should be secured with Kerberos"); - throw new IllegalStateException("Secret Endpoint should be secured" - + " with Kerberos"); - } - } + @Override + protected boolean shouldAddDefaultApps() { + return false; } @Override @@ -167,17 +106,4 @@ protected String getHttpAuthConfigPrefix() { return S3GatewayConfigKeys.OZONE_S3G_HTTP_AUTH_CONFIG_PREFIX; } - /** - * Servlet for favicon.ico. - */ - public static class IconServlet extends HttpServlet { - private static final long serialVersionUID = -1L; - - @Override - public void doGet(HttpServletRequest request, HttpServletResponse response) - throws ServletException, IOException { - response.setContentType("image/png"); - response.sendRedirect("/static/images/ozone.ico"); - } - } } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayWebAdminServer.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayWebAdminServer.java new file mode 100644 index 00000000000..81c9a4f8ca3 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/S3GatewayWebAdminServer.java @@ -0,0 +1,186 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.ozone.s3; + +import com.google.common.base.Strings; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; +import javax.servlet.http.HttpServlet; +import javax.servlet.http.HttpServletRequest; +import javax.servlet.http.HttpServletResponse; +import org.apache.hadoop.hdds.conf.MutableConfigurationSource; +import org.apache.hadoop.hdds.server.http.BaseHttpServer; +import org.apache.hadoop.hdds.server.http.ServletElementsFactory; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.authentication.server.AuthenticationFilter; +import org.eclipse.jetty.servlet.FilterHolder; +import org.eclipse.jetty.servlet.FilterMapping; +import org.eclipse.jetty.servlet.ServletHandler; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_WEBADMIN_HTTPS_ADDRESS_KEY; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_WEBADMIN_HTTPS_BIND_HOST_KEY; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_WEBADMIN_HTTPS_BIND_PORT_DEFAULT; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_WEBADMIN_HTTP_ADDRESS_KEY; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_WEBADMIN_HTTP_BIND_HOST_KEY; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_WEBADMIN_HTTP_BIND_PORT_DEFAULT; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_WEBADMIN_HTTP_ENABLED_KEY; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_HTTP_AUTH_CONFIG_PREFIX; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_HTTP_AUTH_TYPE; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_HTTP_BIND_HOST_DEFAULT; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_KEYTAB_FILE; +import static org.apache.hadoop.ozone.s3.S3GatewayConfigKeys.OZONE_S3G_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL; +import static org.apache.hadoop.ozone.s3secret.S3SecretConfigKeys.OZONE_S3G_SECRET_HTTP_AUTH_TYPE_KEY; +import static org.apache.hadoop.ozone.s3secret.S3SecretConfigKeys.OZONE_S3G_SECRET_HTTP_AUTH_TYPE_DEFAULT; +import static org.apache.hadoop.ozone.s3secret.S3SecretConfigKeys.OZONE_S3G_SECRET_HTTP_ENABLED_KEY; +import static org.apache.hadoop.ozone.s3secret.S3SecretConfigKeys.OZONE_S3G_SECRET_HTTP_ENABLED_KEY_DEFAULT; +import static org.apache.hadoop.security.authentication.server.AuthenticationFilter.AUTH_TYPE; + +/** + * HTTP server for serving static content and Ozone-specific endpoints (/conf, etc.). 
+ */ +class S3GatewayWebAdminServer extends BaseHttpServer { + + private static final Logger LOG = + LoggerFactory.getLogger(S3GatewayWebAdminServer.class); + + S3GatewayWebAdminServer(MutableConfigurationSource conf, String name) throws IOException { + super(conf, name); + addServlet("icon", "/favicon.ico", IconServlet.class); + addSecretAuthentication(conf); + } + + private void addSecretAuthentication(MutableConfigurationSource conf) + throws IOException { + + if (conf.getBoolean(OZONE_S3G_SECRET_HTTP_ENABLED_KEY, + OZONE_S3G_SECRET_HTTP_ENABLED_KEY_DEFAULT)) { + String authType = conf.get(OZONE_S3G_SECRET_HTTP_AUTH_TYPE_KEY, + OZONE_S3G_SECRET_HTTP_AUTH_TYPE_DEFAULT); + + if (UserGroupInformation.isSecurityEnabled() + && authType.equals("kerberos")) { + ServletHandler handler = getWebAppContext().getServletHandler(); + Map params = new HashMap<>(); + + String principalInConf = + conf.get(OZONE_S3G_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL); + if (!Strings.isNullOrEmpty(principalInConf)) { + params.put("kerberos.principal", SecurityUtil.getServerPrincipal( + principalInConf, conf.get(OZONE_S3G_WEBADMIN_HTTP_BIND_HOST_KEY))); + } + String httpKeytab = conf.get(OZONE_S3G_KEYTAB_FILE); + if (!Strings.isNullOrEmpty(httpKeytab)) { + params.put("kerberos.keytab", httpKeytab); + } + params.put(AUTH_TYPE, "kerberos"); + + FilterHolder holder = ServletElementsFactory.createFilterHolder( + "secretAuthentication", AuthenticationFilter.class.getName(), + params); + FilterMapping filterMapping = + ServletElementsFactory.createFilterMapping( + "secretAuthentication", + new String[]{"/secret/*"}); + + handler.addFilter(holder, filterMapping); + } else { + LOG.error("Secret Endpoint should be secured with Kerberos"); + throw new IllegalStateException("Secret Endpoint should be secured" + + " with Kerberos"); + } + } + } + + @Override + protected String getHttpAddressKey() { + return OZONE_S3G_WEBADMIN_HTTP_ADDRESS_KEY; + } + + @Override + protected String getHttpBindHostKey() { + return OZONE_S3G_WEBADMIN_HTTP_BIND_HOST_KEY; + } + + @Override + protected String getHttpsAddressKey() { + return OZONE_S3G_WEBADMIN_HTTPS_ADDRESS_KEY; + } + + @Override + protected String getHttpsBindHostKey() { + return OZONE_S3G_WEBADMIN_HTTPS_BIND_HOST_KEY; + } + + @Override + protected String getBindHostDefault() { + return OZONE_S3G_HTTP_BIND_HOST_DEFAULT; + } + + @Override + protected int getHttpBindPortDefault() { + return OZONE_S3G_WEBADMIN_HTTP_BIND_PORT_DEFAULT; + } + + @Override + protected int getHttpsBindPortDefault() { + return OZONE_S3G_WEBADMIN_HTTPS_BIND_PORT_DEFAULT; + } + + @Override + protected String getKeytabFile() { + return OZONE_S3G_KEYTAB_FILE; + } + + @Override + protected String getSpnegoPrincipal() { + return OZONE_S3G_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL; + } + + @Override + protected String getEnabledKey() { + return OZONE_S3G_WEBADMIN_HTTP_ENABLED_KEY; + } + + @Override + protected String getHttpAuthType() { + return OZONE_S3G_HTTP_AUTH_TYPE; + } + + @Override + protected String getHttpAuthConfigPrefix() { + return OZONE_S3G_HTTP_AUTH_CONFIG_PREFIX; + } + + /** + * Servlet for favicon.ico. 
+ */ + public static class IconServlet extends HttpServlet { + private static final long serialVersionUID = -1L; + + @Override + public void doGet(HttpServletRequest request, HttpServletResponse response) + throws IOException { + response.setContentType("image/png"); + response.sendRedirect("/images/ozone.ico"); + } + } +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java index 32c1eb9eb23..6b4ae47db71 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/VirtualHostStyleFilter.java @@ -63,13 +63,6 @@ public class VirtualHostStyleFilter implements ContainerRequestFilter { @Override public void filter(ContainerRequestContext requestContext) throws IOException { - // Skip this filter if the uri is hitting S3Secret generation or - // revocation endpoint. - if (requestContext.getUriInfo().getRequestUri().getPath() - .startsWith("/secret")) { - return; - } - domains = conf.getTrimmedStrings(OZONE_S3G_DOMAIN_NAME); if (domains.length == 0) { diff --git a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/s3g.js b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/Application.java similarity index 75% rename from hadoop-ozone/s3gateway/src/main/resources/webapps/static/s3g.js rename to hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/Application.java index f32b47fd823..07c8f919214 100644 --- a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/s3g.js +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/Application.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -15,7 +15,15 @@ * See the License for the specific language governing permissions and * limitations under the License. */ +package org.apache.hadoop.ozone.s3secret; + +import org.glassfish.jersey.server.ResourceConfig; -window.onload = function () { - document.getElementById('s3gurl').innerHTML = window.location.origin; -}; +/** + * JaxRS resource definition. + */ +public class Application extends ResourceConfig { + public Application() { + packages("org.apache.hadoop.ozone.s3secret"); + } +} diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java index 739dadfb28e..e5ad1c3a57f 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3secret/S3SecretManagementEndpoint.java @@ -38,7 +38,7 @@ /** * Endpoint to manage S3 secret. 
*/ -@Path("/secret") +@Path("/") @S3SecretEnabled @S3AdminEndpoint public class S3SecretManagementEndpoint extends S3SecretEndpointBase { diff --git a/hadoop-ozone/s3gateway/src/main/resources/webapps/s3g-web/WEB-INF/web.xml b/hadoop-ozone/s3gateway/src/main/resources/webapps/s3g-web/WEB-INF/web.xml new file mode 100644 index 00000000000..092c8a41af5 --- /dev/null +++ b/hadoop-ozone/s3gateway/src/main/resources/webapps/s3g-web/WEB-INF/web.xml @@ -0,0 +1,33 @@ + + + + secret + org.glassfish.jersey.servlet.ServletContainer + + javax.ws.rs.Application + org.apache.hadoop.ozone.s3secret.Application + + 1 + + + secret + /secret/* + + + org.jboss.weld.environment.servlet.Listener + + diff --git a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/images/ozone.ico b/hadoop-ozone/s3gateway/src/main/resources/webapps/s3g-web/images/ozone.ico similarity index 100% rename from hadoop-ozone/s3gateway/src/main/resources/webapps/static/images/ozone.ico rename to hadoop-ozone/s3gateway/src/main/resources/webapps/s3g-web/images/ozone.ico diff --git a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html b/hadoop-ozone/s3gateway/src/main/resources/webapps/s3g-web/index.html similarity index 74% rename from hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html rename to hadoop-ozone/s3gateway/src/main/resources/webapps/s3g-web/index.html index 32e02172a64..7489f93b6f7 100644 --- a/hadoop-ozone/s3gateway/src/main/resources/webapps/static/index.html +++ b/hadoop-ozone/s3gateway/src/main/resources/webapps/s3g-web/index.html @@ -21,15 +21,14 @@ - S3 gateway -- Apache Ozone - - - + + + @@ -50,7 +49,7 @@
@@ -63,21 +62,12 @@
    [hunk body garbled during extraction, HTML markup unrecoverable; surviving fragments: the "S3 gateway" heading, the removed landing-page blurb "This is an endpoint of Apache Ozone S3 gateway. Use it with any AWS S3 compatible tool with setting this url as an endpoint", the removed lines "For example with aws-cli:" and "aws s3api --endpoint  create-bucket --bucket=wordcount", and the retained pointer "For more information, please check the documentation."]
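The net effect of this patch's front-end changes: the S3 secret REST endpoints move into the separate s3g-web webapp served by S3GatewayWebAdminServer. The Jersey application scanning org.apache.hadoop.ozone.s3secret is mapped to /secret/* in the new WEB-INF/web.xml (shown just below), and S3GatewayWebAdminServer wraps that path in Hadoop's AuthenticationFilter, so only Kerberos-authenticated callers reach it; the S3 data port consequently loses its /secret special cases in VirtualHostStyleFilter and in the filter tests. A rough usage sketch against the relocated endpoints follows; the host, web-admin port and principal are placeholders, and the POST verb is only inferred from the removed filter-test arguments, so treat the whole sketch as an assumption rather than something this patch documents:

    # obtain a Kerberos ticket for an S3 admin principal (placeholder name)
    kinit testuser@EXAMPLE.COM
    # generate an S3 secret for the authenticated principal via the web-admin server
    curl --negotiate -u : -X POST http://s3g-host:<webadmin-port>/secret/generate
    # revoke that secret again
    curl --negotiate -u : -X POST http://s3g-host:<webadmin-port>/secret/revoke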
- - - + + diff --git a/hadoop-ozone/s3gateway/src/main/resources/webapps/s3gateway/WEB-INF/web.xml b/hadoop-ozone/s3gateway/src/main/resources/webapps/s3gateway/WEB-INF/web.xml index 79bf7b9855c..b3d7a72b2cd 100644 --- a/hadoop-ozone/s3gateway/src/main/resources/webapps/s3gateway/WEB-INF/web.xml +++ b/hadoop-ozone/s3gateway/src/main/resources/webapps/s3gateway/WEB-INF/web.xml @@ -32,22 +32,13 @@ org.apache.hadoop.ozone.s3.EmptyContentTypeFilter - - info-page-redirect - org.apache.hadoop.ozone.s3.RootPageDisplayFilter - - optional-content-type /* - - info-page-redirect - /* - org.jboss.weld.environment.servlet.Listener - \ No newline at end of file + diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java index c42036cb1a3..294d50fe69f 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestAuthorizationFilter.java @@ -207,28 +207,6 @@ void testAuthFilterFailures( DATETIME, "", "/key1" - ), - // S3 secret generation endpoint - arguments( - "POST", - null, - null, - "s3g:9878", - null, - null, - "", - "/secret/generate" - ), - // S3 secret generation endpoint - arguments( - "POST", - null, - null, - "s3g:9878", - null, - null, - "", - "/secret/revoke" ) ); } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java index 3051fb2276e..89679e9f845 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestVirtualHostStyleFilter.java @@ -125,29 +125,6 @@ public void testPathStyle() throws Exception { } - @Test - public void testS3SecretEndpoint() throws Exception { - - VirtualHostStyleFilter virtualHostStyleFilter = - new VirtualHostStyleFilter(); - virtualHostStyleFilter.setConfiguration(conf); - - ContainerRequest containerRequest = createContainerRequest("mybucket" + - ".localhost:9878", "/secret/generate", - null, true); - virtualHostStyleFilter.filter(containerRequest); - URI expected = new URI("http://" + s3HttpAddr + "/secret/generate"); - assertEquals(expected, containerRequest.getRequestUri()); - - containerRequest = createContainerRequest("mybucket" + - ".localhost:9878", "/secret/revoke", - null, true); - virtualHostStyleFilter.filter(containerRequest); - expected = new URI("http://" + s3HttpAddr + "/secret/revoke"); - assertEquals(expected, containerRequest.getRequestUri()); - - } - @Test public void testVirtualHostStyleWithCreateBucketRequest() throws Exception { From f8394cf5b7afa07f4cf1752a82723d49d063bf0b Mon Sep 17 00:00:00 2001 From: Sumit Agrawal Date: Wed, 8 Jan 2025 11:58:29 +0530 Subject: [PATCH 056/168] HDDS-11975. 
wrap TermIndex in ExecutionContext (#7602) --- .../flowcontrol/ExecutionContext.java | 48 +++++++++++++++++++ .../execution/flowcontrol/package-info.java | 22 +++++++++ .../om/ratis/OzoneManagerStateMachine.java | 4 +- .../ozone/om/request/OMClientRequest.java | 8 ++-- .../request/bucket/OMBucketCreateRequest.java | 6 +-- .../request/bucket/OMBucketDeleteRequest.java | 6 +-- .../bucket/OMBucketSetOwnerRequest.java | 6 +-- .../bucket/OMBucketSetPropertyRequest.java | 6 +-- .../bucket/acl/OMBucketAclRequest.java | 6 +-- .../bucket/acl/OMBucketAddAclRequest.java | 6 +-- .../bucket/acl/OMBucketRemoveAclRequest.java | 6 +-- .../bucket/acl/OMBucketSetAclRequest.java | 6 +-- .../file/OMDirectoryCreateRequest.java | 6 +-- .../file/OMDirectoryCreateRequestWithFSO.java | 6 +-- .../om/request/file/OMFileCreateRequest.java | 6 +-- .../file/OMFileCreateRequestWithFSO.java | 6 +-- .../request/file/OMRecoverLeaseRequest.java | 6 +-- .../request/key/OMAllocateBlockRequest.java | 6 +-- .../key/OMAllocateBlockRequestWithFSO.java | 6 +-- .../key/OMDirectoriesPurgeRequestWithFSO.java | 12 ++--- .../om/request/key/OMKeyCommitRequest.java | 6 +-- .../key/OMKeyCommitRequestWithFSO.java | 6 +-- .../om/request/key/OMKeyCreateRequest.java | 6 +-- .../key/OMKeyCreateRequestWithFSO.java | 6 +-- .../om/request/key/OMKeyDeleteRequest.java | 6 +-- .../key/OMKeyDeleteRequestWithFSO.java | 6 +-- .../om/request/key/OMKeyPurgeRequest.java | 8 ++-- .../om/request/key/OMKeyRenameRequest.java | 6 +-- .../key/OMKeyRenameRequestWithFSO.java | 6 +-- .../om/request/key/OMKeySetTimesRequest.java | 6 +-- .../key/OMKeySetTimesRequestWithFSO.java | 6 +-- .../om/request/key/OMKeysDeleteRequest.java | 6 +-- .../om/request/key/OMKeysRenameRequest.java | 6 +-- .../request/key/OMOpenKeysDeleteRequest.java | 6 +-- .../om/request/key/acl/OMKeyAclRequest.java | 6 +-- .../key/acl/OMKeyAclRequestWithFSO.java | 6 +-- .../request/key/acl/OMKeyAddAclRequest.java | 6 +-- .../key/acl/OMKeyAddAclRequestWithFSO.java | 6 +-- .../key/acl/OMKeyRemoveAclRequest.java | 6 +-- .../key/acl/OMKeyRemoveAclRequestWithFSO.java | 6 +-- .../request/key/acl/OMKeySetAclRequest.java | 6 +-- .../key/acl/OMKeySetAclRequestWithFSO.java | 6 +-- .../key/acl/prefix/OMPrefixAclRequest.java | 6 +-- ...S3ExpiredMultipartUploadsAbortRequest.java | 6 +-- .../S3InitiateMultipartUploadRequest.java | 6 +-- ...InitiateMultipartUploadRequestWithFSO.java | 6 +-- .../S3MultipartUploadAbortRequest.java | 6 +-- .../S3MultipartUploadCommitPartRequest.java | 6 +-- .../S3MultipartUploadCompleteRequest.java | 6 +-- .../s3/security/OMSetSecretRequest.java | 6 +-- .../s3/security/S3GetSecretRequest.java | 6 +-- .../s3/security/S3RevokeSecretRequest.java | 4 +- .../tagging/S3DeleteObjectTaggingRequest.java | 6 +-- .../S3DeleteObjectTaggingRequestWithFSO.java | 6 +-- .../s3/tagging/S3PutObjectTaggingRequest.java | 6 +-- .../S3PutObjectTaggingRequestWithFSO.java | 6 +-- .../OMSetRangerServiceVersionRequest.java | 6 +-- .../s3/tenant/OMTenantAssignAdminRequest.java | 6 +-- .../OMTenantAssignUserAccessIdRequest.java | 6 +-- .../s3/tenant/OMTenantCreateRequest.java | 6 +-- .../s3/tenant/OMTenantDeleteRequest.java | 6 +-- .../s3/tenant/OMTenantRevokeAdminRequest.java | 6 +-- .../OMTenantRevokeUserAccessIdRequest.java | 6 +-- .../OMCancelDelegationTokenRequest.java | 6 +-- .../security/OMGetDelegationTokenRequest.java | 6 +-- .../OMRenewDelegationTokenRequest.java | 6 +-- .../snapshot/OMSnapshotCreateRequest.java | 8 ++-- .../snapshot/OMSnapshotDeleteRequest.java | 6 +-- 
.../OMSnapshotMoveDeletedKeysRequest.java | 12 ++--- .../OMSnapshotMoveTableKeysRequest.java | 12 ++--- .../snapshot/OMSnapshotPurgeRequest.java | 10 ++-- .../snapshot/OMSnapshotRenameRequest.java | 9 ++-- .../OMSnapshotSetPropertyRequest.java | 6 +-- .../upgrade/OMCancelPrepareRequest.java | 8 ++-- .../upgrade/OMFinalizeUpgradeRequest.java | 6 +-- .../om/request/upgrade/OMPrepareRequest.java | 10 ++-- .../request/util/OMEchoRPCWriteRequest.java | 4 +- .../request/volume/OMQuotaRepairRequest.java | 6 +-- .../request/volume/OMVolumeCreateRequest.java | 6 +-- .../request/volume/OMVolumeDeleteRequest.java | 6 +-- .../volume/OMVolumeSetOwnerRequest.java | 6 +-- .../volume/OMVolumeSetQuotaRequest.java | 6 +-- .../volume/acl/OMVolumeAclRequest.java | 6 +-- .../volume/acl/OMVolumeAddAclRequest.java | 6 +-- .../volume/acl/OMVolumeRemoveAclRequest.java | 6 +-- .../volume/acl/OMVolumeSetAclRequest.java | 6 +-- ...ManagerProtocolServerSideTranslatorPB.java | 4 +- .../OzoneManagerRequestHandler.java | 11 +++-- .../ozone/protocolPB/RequestHandler.java | 14 +++--- ...zoneManagerDoubleBufferWithOMResponse.java | 10 ++-- 90 files changed, 362 insertions(+), 282 deletions(-) create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/execution/flowcontrol/ExecutionContext.java create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/execution/flowcontrol/package-info.java diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/execution/flowcontrol/ExecutionContext.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/execution/flowcontrol/ExecutionContext.java new file mode 100644 index 00000000000..ba21dec10db --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/execution/flowcontrol/ExecutionContext.java @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om.execution.flowcontrol; + +import org.apache.ratis.server.protocol.TermIndex; + +/** + * Context required for execution of a request. + */ +public final class ExecutionContext { + private final long index; + private final TermIndex termIndex; + + private ExecutionContext(long index, TermIndex termIndex) { + this.index = index; + if (null == termIndex) { + termIndex = TermIndex.valueOf(-1, index); + } + this.termIndex = termIndex; + } + + public static ExecutionContext of(long index, TermIndex termIndex) { + return new ExecutionContext(index, termIndex); + } + + public long getIndex() { + return index; + } + + public TermIndex getTermIndex() { + return termIndex; + } +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/execution/flowcontrol/package-info.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/execution/flowcontrol/package-info.java new file mode 100644 index 00000000000..7818bc628d8 --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/execution/flowcontrol/package-info.java @@ -0,0 +1,22 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.om.execution.flowcontrol; + +/** + * This package contains classes for the execution flow handling. 
+ */ diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java index 6a5274ca01f..62e548f408b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/OzoneManagerStateMachine.java @@ -35,6 +35,7 @@ import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.OzoneManagerPrepareState; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.helpers.OMRatisHelper; import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils; import org.apache.hadoop.ozone.om.lock.OMLockDetails; @@ -555,8 +556,9 @@ public void close() { */ private OMResponse runCommand(OMRequest request, TermIndex termIndex) { try { + ExecutionContext context = ExecutionContext.of(termIndex.getIndex(), termIndex); final OMClientResponse omClientResponse = handler.handleWriteRequest( - request, termIndex, ozoneManagerDoubleBuffer); + request, context, ozoneManagerDoubleBuffer); OMLockDetails omLockDetails = omClientResponse.getOmLockDetails(); OMResponse omResponse = omClientResponse.getOMResponse(); if (omLockDetails != null) { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java index c9c664b303f..2fcb19f39d1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/OMClientRequest.java @@ -23,7 +23,7 @@ import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.ozone.om.helpers.OMAuditLogger; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.OzoneConsts; @@ -140,12 +140,14 @@ public void handleRequestFailure(OzoneManager ozoneManager) { * * @return the response that will be returned to the client. */ - public abstract OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex); + public abstract OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context); /** For testing only. 
*/ @VisibleForTesting public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, long transactionLogIndex) { - return validateAndUpdateCache(ozoneManager, TransactionInfo.getTermIndex(transactionLogIndex)); + ExecutionContext context = ExecutionContext.of(transactionLogIndex, + TransactionInfo.getTermIndex(transactionLogIndex)); + return validateAndUpdateCache(ozoneManager, context); } @VisibleForTesting diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java index 3c21a2a851b..38a4d78b538 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketCreateRequest.java @@ -20,7 +20,7 @@ import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.ClientVersion; @@ -162,8 +162,8 @@ private static void validateMaxBucket(OzoneManager ozoneManager) } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long transactionLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long transactionLogIndex = context.getIndex(); OMMetrics omMetrics = ozoneManager.getMetrics(); omMetrics.incNumBucketCreates(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java index 22e710dc911..c984c66a259 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketDeleteRequest.java @@ -23,7 +23,7 @@ import java.util.Iterator; import java.util.Map; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; @@ -81,8 +81,8 @@ public OMBucketDeleteRequest(OMRequest omRequest) { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long transactionLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long transactionLogIndex = context.getIndex(); OMMetrics omMetrics = ozoneManager.getMetrics(); omMetrics.incNumBucketDeletes(); OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java index 2afab85e9ae..239083a58c8 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetOwnerRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.bucket; import com.google.common.base.Preconditions; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.audit.AuditLogger; @@ -77,8 +77,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long transactionLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long transactionLogIndex = context.getIndex(); SetBucketPropertyRequest setBucketPropertyRequest = getOmRequest().getSetBucketPropertyRequest(); Preconditions.checkNotNull(setBucketPropertyRequest); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java index 941b41ca49b..e76aa0d7093 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/OMBucketSetPropertyRequest.java @@ -25,7 +25,7 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension; import org.apache.hadoop.hdds.client.DefaultReplicationConfig; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; @@ -110,8 +110,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long transactionLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long transactionLogIndex = context.getIndex(); SetBucketPropertyRequest setBucketPropertyRequest = getOmRequest().getSetBucketPropertyRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java index 89d9dbf5dfd..23c92b8ae54 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAclRequest.java @@ -25,7 +25,7 @@ import java.util.function.BiPredicate; import org.apache.commons.lang3.tuple.Pair; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import 
org.apache.hadoop.ozone.audit.AuditLogger; @@ -63,8 +63,8 @@ public OMBucketAclRequest(OMRequest omRequest, } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long transactionLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long transactionLogIndex = context.getIndex(); // protobuf guarantees acls are non-null. List ozoneAcls = getAcls(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAddAclRequest.java index c37f83f0666..4bbf94c6e67 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAddAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketAddAclRequest.java @@ -23,7 +23,7 @@ import java.util.Map; import com.google.common.collect.Lists; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OMMetrics; @@ -133,9 +133,9 @@ void onComplete(boolean operationResult, Exception exception, } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { ozoneManager.getMetrics().incNumAddAcl(); - return super.validateAndUpdateCache(ozoneManager, termIndex); + return super.validateAndUpdateCache(ozoneManager, context); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketRemoveAclRequest.java index 018692d9f0c..0647f8d58bb 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketRemoveAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketRemoveAclRequest.java @@ -22,7 +22,7 @@ import java.util.List; import java.util.Map; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OzoneManager; @@ -132,9 +132,9 @@ void onComplete(boolean operationResult, Exception exception, } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { ozoneManager.getMetrics().incNumRemoveAcl(); - return super.validateAndUpdateCache(ozoneManager, termIndex); + return super.validateAndUpdateCache(ozoneManager, context); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java index 813e5a7db3c..b94fbbc4363 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/bucket/acl/OMBucketSetAclRequest.java @@ -23,7 +23,7 @@ import java.util.List; import java.util.Map; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OzoneManager; @@ -131,9 +131,9 @@ void onComplete(boolean operationResult, Exception exception, } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { ozoneManager.getMetrics().incNumSetAcl(); - return super.validateAndUpdateCache(ozoneManager, termIndex); + return super.validateAndUpdateCache(ozoneManager, context); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java index 732886fa0e6..cf07bc7d4d6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequest.java @@ -26,7 +26,7 @@ import java.util.Map; import com.google.common.base.Preconditions; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; @@ -122,8 +122,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); CreateDirectoryRequest createDirectoryRequest = getOmRequest() .getCreateDirectoryRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java index 8bef8e17928..b8d17621701 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMDirectoryCreateRequestWithFSO.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.file; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -76,8 +76,8 @@ public OMDirectoryCreateRequestWithFSO(OMRequest omRequest, } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + 
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); CreateDirectoryRequest createDirectoryRequest = getOmRequest() .getCreateDirectoryRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java index 08b25718288..f3df379103d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequest.java @@ -29,7 +29,7 @@ import com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OzoneConfigUtil; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -162,8 +162,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest(); KeyArgs keyArgs = createFileRequest.getKeyArgs(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java index c4967d5af1f..f64454d3962 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.om.request.file; import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; @@ -68,8 +68,8 @@ public OMFileCreateRequestWithFSO(OMRequest omRequest, @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest(); KeyArgs keyArgs = createFileRequest.getKeyArgs(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java index 73019af112a..0ae92f806ad 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMRecoverLeaseRequest.java @@ -33,6 +33,7 @@ import org.apache.hadoop.ozone.om.helpers.OmFSOFile; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -53,7 +54,6 @@ import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.util.Time; -import org.apache.ratis.server.protocol.TermIndex; import org.slf4j.Logger; @@ -128,7 +128,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { RecoverLeaseRequest recoverLeaseRequest = getOmRequest() .getRecoverLeaseRequest(); Preconditions.checkNotNull(recoverLeaseRequest); @@ -156,7 +156,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn acquiredLock = getOmLockDetails().isLockAcquired(); validateBucketAndVolume(omMetadataManager, volumeName, bucketName); - RecoverLeaseResponse recoverLeaseResponse = doWork(ozoneManager, termIndex.getIndex()); + RecoverLeaseResponse recoverLeaseResponse = doWork(ozoneManager, context.getIndex()); // Prepare response omResponse.setRecoverLeaseResponse(recoverLeaseResponse).setCmdType(RecoverLease); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java index bbf5ec0afaf..b995f793453 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequest.java @@ -26,7 +26,7 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.QuotaUtil; @@ -150,8 +150,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); OzoneManagerProtocolProtos.AllocateBlockRequest allocateBlockRequest = getOmRequest().getAllocateBlockRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java 
index 9db04d48b57..cba650644ad 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java @@ -20,7 +20,7 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; @@ -73,8 +73,8 @@ public OMAllocateBlockRequestWithFSO(OMRequest omRequest, } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); AllocateBlockRequest allocateBlockRequest = getOmRequest().getAllocateBlockRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java index 29ed5d9fc7b..b24253e6f67 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java @@ -33,8 +33,8 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; -import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -62,7 +62,7 @@ public OMDirectoriesPurgeRequestWithFSO(OMRequest omRequest) { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { OzoneManagerProtocolProtos.PurgeDirectoriesRequest purgeDirsRequest = getOmRequest().getPurgeDirectoriesRequest(); String fromSnapshot = purgeDirsRequest.hasSnapshotTableKey() ? 
@@ -121,7 +121,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn String ozoneDbKey = omMetadataManager.getOzonePathKey(path.getVolumeId(), path.getBucketId(), keyInfo.getParentObjectID(), keyInfo.getFileName()); omMetadataManager.getDirectoryTable().addCacheEntry(new CacheKey<>(ozoneDbKey), - CacheValue.get(termIndex.getIndex())); + CacheValue.get(context.getIndex())); volBucketInfoMap.putIfAbsent(volBucketPair, omBucketInfo); } } @@ -164,15 +164,15 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn String ozoneDbKey = omMetadataManager.getOzonePathKey(path.getVolumeId(), path.getBucketId(), keyInfo.getParentObjectID(), keyInfo.getFileName()); omMetadataManager.getFileTable().addCacheEntry(new CacheKey<>(ozoneDbKey), - CacheValue.get(termIndex.getIndex())); + CacheValue.get(context.getIndex())); volBucketInfoMap.putIfAbsent(volBucketPair, omBucketInfo); } } } if (fromSnapshotInfo != null) { - fromSnapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + fromSnapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(context.getTermIndex()).toByteString()); omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(fromSnapshotInfo.getTableKey()), - CacheValue.get(termIndex.getIndex(), fromSnapshotInfo)); + CacheValue.get(context.getIndex(), fromSnapshotInfo)); } } catch (IOException ex) { // Case of IOException for fromProtobuf will not happen diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java index 87d126de98a..25b09a203ec 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java @@ -29,7 +29,7 @@ import com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.ozone.OzoneManagerVersion; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -133,8 +133,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); CommitKeyRequest commitKeyRequest = getOmRequest().getCommitKeyRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java index 2a712bd2763..c1e686d3ce0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java @@ -23,9 +23,9 @@ import com.google.common.annotations.VisibleForTesting; import 
org.apache.hadoop.ozone.om.helpers.WithMetadata; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.request.util.OmKeyHSyncUtil; import org.apache.hadoop.util.Time; -import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; @@ -76,8 +76,8 @@ public OMKeyCommitRequestWithFSO(OMRequest omRequest, @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); CommitKeyRequest commitKeyRequest = getOmRequest().getCommitKeyRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java index e817901c22e..4ac619a3a47 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequest.java @@ -29,7 +29,7 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.ozone.OzoneManagerVersion; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.OzoneConfigUtil; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -186,8 +186,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); CreateKeyRequest createKeyRequest = getOmRequest().getCreateKeyRequest(); KeyArgs keyArgs = createKeyRequest.getKeyArgs(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java index 87cc151351e..31e9d088f7e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.om.request.key; import org.apache.hadoop.hdds.client.ReplicationConfig; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; @@ -65,8 +65,8 @@ public OMKeyCreateRequestWithFSO(OMRequest omRequest, @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long 
trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); OzoneManagerProtocolProtos.CreateKeyRequest createKeyRequest = getOmRequest().getCreateKeyRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java index db1adc13893..3885c18aff3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java @@ -25,7 +25,7 @@ import org.apache.hadoop.ozone.om.OMPerformanceMetrics; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; @@ -108,8 +108,8 @@ protected KeyArgs resolveBucketAndCheckAcls(OzoneManager ozoneManager, @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); DeleteKeyRequest deleteKeyRequest = getOmRequest().getDeleteKeyRequest(); OzoneManagerProtocolProtos.KeyArgs keyArgs = deleteKeyRequest.getKeyArgs(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java index e8960cd02b1..19fed5d85db 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequestWithFSO.java @@ -20,7 +20,7 @@ import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.audit.AuditLogger; @@ -71,8 +71,8 @@ public OMKeyDeleteRequestWithFSO(OMRequest omRequest, @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); DeleteKeyRequest deleteKeyRequest = getOmRequest().getDeleteKeyRequest(); OzoneManagerProtocolProtos.KeyArgs keyArgs = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java index a5e8cb14525..5d0af563bb2 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java @@ -26,8 +26,8 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; -import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; @@ -61,7 +61,7 @@ public OMKeyPurgeRequest(OMRequest omRequest) { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { PurgeKeysRequest purgeKeysRequest = getOmRequest().getPurgeKeysRequest(); List bucketDeletedKeysList = purgeKeysRequest.getDeletedKeysList(); List keysToUpdateList = purgeKeysRequest.getKeysToUpdateList(); @@ -107,9 +107,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn // services. try { if (fromSnapshotInfo != null) { - fromSnapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + fromSnapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(context.getTermIndex()).toByteString()); omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(fromSnapshotInfo.getTableKey()), - CacheValue.get(termIndex.getIndex(), fromSnapshotInfo)); + CacheValue.get(context.getIndex(), fromSnapshotInfo)); } } catch (IOException e) { return new OMKeyPurgeResponse(createErrorOMResponse(omResponse, e)); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java index 35940f5a770..1c99fc1814a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequest.java @@ -23,7 +23,7 @@ import java.util.Map; import com.google.common.base.Preconditions; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; @@ -121,8 +121,8 @@ protected KeyArgs resolveBucketAndCheckAcls(KeyArgs keyArgs, @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); RenameKeyRequest renameKeyRequest = getOmRequest().getRenameKeyRequest(); OzoneManagerProtocolProtos.KeyArgs keyArgs = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java 
index e57b6d99fd4..5919290062e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyRenameRequestWithFSO.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; @@ -76,8 +76,8 @@ public OMKeyRenameRequestWithFSO(OMRequest omRequest, @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); RenameKeyRequest renameKeyRequest = getOmRequest().getRenameKeyRequest(); KeyArgs keyArgs = renameKeyRequest.getKeyArgs(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java index 78b5c258fa4..7c548029ce0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequest.java @@ -23,7 +23,7 @@ import java.util.LinkedHashMap; import java.util.Map; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneConsts; @@ -173,8 +173,8 @@ protected void apply(OmKeyInfo omKeyInfo) { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); ozoneManager.getMetrics().incNumSetTime(); OmKeyInfo omKeyInfo; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequestWithFSO.java index 5e3a00886b7..630e0987aed 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeySetTimesRequestWithFSO.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.ozone.om.request.key; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; @@ -63,8 +63,8 @@ public OMKeySetTimesRequestWithFSO( } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public 
OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); OmKeyInfo omKeyInfo = null; OzoneManagerProtocolProtos.OMResponse.Builder omResponse = onInit(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java index e090d61afd3..27fcf55ef90 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysDeleteRequest.java @@ -21,7 +21,7 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.util.Time; @@ -88,8 +88,8 @@ public OMKeysDeleteRequest(OMRequest omRequest, BucketLayout bucketLayout) { } @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); DeleteKeysRequest deleteKeyRequest = getOmRequest().getDeleteKeysRequest(); OzoneManagerProtocolProtos.DeleteKeyArgs deleteKeyArgs = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java index 0d105c1d227..64da8241256 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java @@ -19,8 +19,8 @@ package org.apache.hadoop.ozone.om.request.key; import org.apache.commons.lang3.tuple.Pair; -import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.audit.AuditLogger; @@ -81,8 +81,8 @@ public OMKeysRenameRequest(OMRequest omRequest, BucketLayout bucketLayout) { @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); RenameKeysRequest renameKeysRequest = getOmRequest().getRenameKeysRequest(); RenameKeysArgs renameKeysArgs = renameKeysRequest.getRenameKeysArgs(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java index c1bc66ade90..f6f6a0cef23 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMOpenKeysDeleteRequest.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.ozone.om.request.key; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -60,8 +60,8 @@ public OMOpenKeysDeleteRequest(OMRequest omRequest, } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); OMMetrics omMetrics = ozoneManager.getMetrics(); omMetrics.incNumOpenKeyDeleteRequests(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java index dbc94646330..c847caa9481 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java @@ -23,7 +23,7 @@ import java.util.Map; import org.apache.commons.lang3.tuple.Pair; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneManager; @@ -64,8 +64,8 @@ public OMKeyAclRequest(OMRequest omRequest) { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); OmKeyInfo omKeyInfo = null; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java index 5df1c0c0042..8f9cbbc6d8e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequestWithFSO.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key.acl; import org.apache.commons.lang3.tuple.Pair; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.Table; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; @@ -58,8 +58,8 @@ public OMKeyAclRequestWithFSO(OzoneManagerProtocolProtos.OMRequest omReq, } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, 
ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); OmKeyInfo omKeyInfo = null; OzoneManagerProtocolProtos.OMResponse.Builder omResponse = onInit(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java index 90a6dfa31ad..b9269d0c7ab 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequest.java @@ -23,7 +23,7 @@ import java.util.Map; import com.google.common.collect.Lists; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditLogger; @@ -149,9 +149,9 @@ boolean apply(OmKeyInfo omKeyInfo, long trxnLogIndex) { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { ozoneManager.getMetrics().incNumAddAcl(); - return super.validateAndUpdateCache(ozoneManager, termIndex); + return super.validateAndUpdateCache(ozoneManager, context); } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequestWithFSO.java index 854fa60089f..a3edd9ebcb1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAddAclRequestWithFSO.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key.acl; import com.google.common.collect.Lists; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditLogger; @@ -133,9 +133,9 @@ public OMKeyAddAclRequestWithFSO( } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { ozoneManager.getMetrics().incNumAddAcl(); - return super.validateAndUpdateCache(ozoneManager, termIndex); + return super.validateAndUpdateCache(ozoneManager, context); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java index 00af126e1e4..fd5f9ea63f4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequest.java @@ -23,7 +23,7 @@ import java.util.Map; import com.google.common.collect.Lists; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import 
org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditLogger; @@ -150,9 +150,9 @@ boolean apply(OmKeyInfo omKeyInfo, long trxnLogIndex) { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { ozoneManager.getMetrics().incNumRemoveAcl(); - return super.validateAndUpdateCache(ozoneManager, termIndex); + return super.validateAndUpdateCache(ozoneManager, context); } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequestWithFSO.java index e4ba84bf4e9..171b2ed277b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyRemoveAclRequestWithFSO.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key.acl; import com.google.common.collect.Lists; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditLogger; @@ -143,9 +143,9 @@ boolean apply(OmKeyInfo omKeyInfo, long trxnLogIndex) { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { ozoneManager.getMetrics().incNumRemoveAcl(); - return super.validateAndUpdateCache(ozoneManager, termIndex); + return super.validateAndUpdateCache(ozoneManager, context); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java index 2f50fde5cb9..674dca2581b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequest.java @@ -23,7 +23,7 @@ import java.util.Map; import com.google.common.collect.Lists; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditLogger; @@ -146,9 +146,9 @@ boolean apply(OmKeyInfo omKeyInfo, long trxnLogIndex) { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { ozoneManager.getMetrics().incNumSetAcl(); - return super.validateAndUpdateCache(ozoneManager, termIndex); + return super.validateAndUpdateCache(ozoneManager, context); } /** diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequestWithFSO.java index 
2fb2aee0bb7..13c95db5f18 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeySetAclRequestWithFSO.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.key.acl; import com.google.common.collect.Lists; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditLogger; @@ -136,9 +136,9 @@ boolean apply(OmKeyInfo omKeyInfo, long trxnLogIndex) { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { ozoneManager.getMetrics().incNumSetAcl(); - return super.validateAndUpdateCache(ozoneManager, termIndex); + return super.validateAndUpdateCache(ozoneManager, context); } @Override diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java index a8490b11152..0369c0bbbcc 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/prefix/OMPrefixAclRequest.java @@ -23,7 +23,7 @@ import java.util.Map; import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; @@ -53,8 +53,8 @@ public OMPrefixAclRequest(OMRequest omRequest) { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); OmPrefixInfo omPrefixInfo = null; diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java index a2f68a13774..85dc33b18c4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3ExpiredMultipartUploadsAbortRequest.java @@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.audit.OMAction; @@ -73,8 +73,8 @@ public S3ExpiredMultipartUploadsAbortRequest(OMRequest omRequest) { } @Override - public OMClientResponse 
validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); OMMetrics omMetrics = ozoneManager.getMetrics(); omMetrics.incNumExpiredMPUAbortRequests(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java index 0a2703c769e..de9ff1db343 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequest.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneConfigUtil; @@ -111,8 +111,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long transactionLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long transactionLogIndex = context.getIndex(); MultipartInfoInitiateRequest multipartInfoInitiateRequest = getOmRequest().getInitiateMultiPartUploadRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java index d55a7b41918..962ac06f4d7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OzoneConfigUtil; import org.apache.hadoop.ozone.om.OzoneManager; @@ -68,8 +68,8 @@ public S3InitiateMultipartUploadRequestWithFSO(OMRequest omRequest, @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long transactionLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long transactionLogIndex = context.getIndex(); MultipartInfoInitiateRequest multipartInfoInitiateRequest = 
getOmRequest().getInitiateMultiPartUploadRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java index 268c92dbd6e..c44d95492c8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadAbortRequest.java @@ -23,7 +23,7 @@ import java.util.Map; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.QuotaUtil; @@ -100,8 +100,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); MultipartUploadAbortRequest multipartUploadAbortRequest = getOmRequest() .getAbortMultiPartUploadRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java index 16ab458a014..4997af5d7d5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCommitPartRequest.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.om.request.s3.multipart; import com.google.common.annotations.VisibleForTesting; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -105,8 +105,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); MultipartCommitUploadPartRequest multipartCommitUploadPartRequest = getOmRequest().getCommitMultiPartUploadRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java index 2bb77005c95..17b96eaf9d9 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java @@ -33,9 +33,9 @@ import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.ozone.om.OzoneConfigUtil; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.protocolPB.OMPBHelper; -import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneConsts; @@ -140,8 +140,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); MultipartUploadCompleteRequest multipartUploadCompleteRequest = getOmRequest().getCompleteMultiPartUploadRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/OMSetSecretRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/OMSetSecretRequest.java index 58809723125..e9a1c7bb04c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/OMSetSecretRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/OMSetSecretRequest.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.om.request.s3.security; import org.apache.commons.lang3.StringUtils; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -100,7 +100,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { OMClientResponse omClientResponse = null; OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( getOmRequest()); @@ -124,7 +124,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn // Update S3SecretTable cache entry in this case // Set the transactionLogIndex to be used for updating. 
- final S3SecretValue newS3SecretValue = S3SecretValue.of(accessId, secretKey, termIndex.getIndex()); + final S3SecretValue newS3SecretValue = S3SecretValue.of(accessId, secretKey, context.getIndex()); s3SecretManager.updateCache(accessId, newS3SecretValue); // Compose response diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java index 31df897513e..b7f4be1c5e3 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3GetSecretRequest.java @@ -25,7 +25,7 @@ import java.util.concurrent.atomic.AtomicReference; import org.apache.commons.codec.digest.DigestUtils; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.OMMultiTenantManager; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.slf4j.Logger; @@ -127,7 +127,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { OMClientResponse omClientResponse = null; OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( getOmRequest()); @@ -157,7 +157,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn // Not found in S3SecretTable. if (createIfNotExist) { // Add new entry in this case - assignS3SecretValue = S3SecretValue.of(accessId, awsSecret.get(), termIndex.getIndex()); + assignS3SecretValue = S3SecretValue.of(accessId, awsSecret.get(), context.getIndex()); // Add cache entry first. 
s3SecretManager.updateCache(accessId, assignS3SecretValue); } else { diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3RevokeSecretRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3RevokeSecretRequest.java index a1077c0e70e..c2cd011405d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3RevokeSecretRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/security/S3RevokeSecretRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.s3.security; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OzoneManager; @@ -78,7 +78,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { OMClientResponse omClientResponse = null; OMResponse.Builder omResponse = diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java index 6146e1ac105..9199494a4ea 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequest.java @@ -27,6 +27,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.request.OMClientRequestUtils; import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; @@ -38,7 +39,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; -import org.apache.ratis.server.protocol.TermIndex; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -86,8 +86,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); DeleteObjectTaggingRequest deleteObjectTaggingRequest = getOmRequest().getDeleteObjectTaggingRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java index fb0561702a6..f80c79e8728 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3DeleteObjectTaggingRequestWithFSO.java @@ -27,6 +27,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -36,7 +37,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; -import org.apache.ratis.server.protocol.TermIndex; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -59,8 +59,8 @@ public S3DeleteObjectTaggingRequestWithFSO(OMRequest omRequest, } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); DeleteObjectTaggingRequest deleteObjectTaggingRequest = getOmRequest().getDeleteObjectTaggingRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java index aab67830383..4cfbe68a183 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequest.java @@ -28,6 +28,7 @@ import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.request.OMClientRequestUtils; import org.apache.hadoop.ozone.om.request.key.OMKeyRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; @@ -39,7 +40,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingResponse; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType; -import org.apache.ratis.server.protocol.TermIndex; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -87,8 +87,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); PutObjectTaggingRequest putObjectTaggingRequest = getOmRequest().getPutObjectTaggingRequest(); diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java index 2b6ca8601cb..a7b7c363f06 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tagging/S3PutObjectTaggingRequestWithFSO.java @@ -28,6 +28,7 @@ import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.request.file.OMFileRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -37,7 +38,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PutObjectTaggingResponse; -import org.apache.ratis.server.protocol.TermIndex; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -60,8 +60,8 @@ public S3PutObjectTaggingRequestWithFSO(OMRequest omRequest, } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); PutObjectTaggingRequest putObjectTaggingRequest = getOmRequest().getPutObjectTaggingRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMSetRangerServiceVersionRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMSetRangerServiceVersionRequest.java index 189d39e52cb..ff866bb396e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMSetRangerServiceVersionRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMSetRangerServiceVersionRequest.java @@ -18,7 +18,7 @@ */ package org.apache.hadoop.ozone.om.request.s3.tenant; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneConsts; @@ -52,7 +52,7 @@ public OMSetRangerServiceVersionRequest(OMRequest omRequest) { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { OMClientResponse omClientResponse; final OMResponse.Builder omResponse = @@ -65,7 +65,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn omMetadataManager.getMetaTable().addCacheEntry( new CacheKey<>(OzoneConsts.RANGER_OZONE_SERVICE_VERSION_KEY), - CacheValue.get(termIndex.getIndex(), proposedVersionStr)); + CacheValue.get(context.getIndex(), proposedVersionStr)); 
omResponse.setSetRangerServiceVersionResponse( SetRangerServiceVersionResponse.newBuilder().build()); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java index 15292f61c65..ecea3e6cd14 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignAdminRequest.java @@ -20,7 +20,7 @@ import com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneConsts; @@ -150,7 +150,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { @Override @SuppressWarnings("checkstyle:methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { final OMMultiTenantManager multiTenantManager = ozoneManager.getMultiTenantManager(); @@ -205,7 +205,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn .build(); omMetadataManager.getTenantAccessIdTable().addCacheEntry( new CacheKey<>(accessId), - CacheValue.get(termIndex.getIndex(), newOmDBAccessIdInfo)); + CacheValue.get(context.getIndex(), newOmDBAccessIdInfo)); // Update tenant cache multiTenantManager.getCacheOp().assignTenantAdmin(accessId, delegated); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java index aefd056715e..3508ba51f13 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantAssignUserAccessIdRequest.java @@ -20,7 +20,7 @@ import com.google.common.base.Preconditions; import org.apache.commons.codec.digest.DigestUtils; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OmUtils; @@ -192,8 +192,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { @Override @SuppressWarnings("checkstyle:methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long transactionLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long transactionLogIndex = context.getIndex(); final OMMultiTenantManager multiTenantManager = ozoneManager.getMultiTenantManager(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantCreateRequest.java 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantCreateRequest.java index 4cab83c0a33..b68279683a5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantCreateRequest.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.om.request.s3.tenant; import com.google.common.base.Preconditions; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ipc.ProtobufRpcEngine; @@ -212,8 +212,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long transactionLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long transactionLogIndex = context.getIndex(); final OMMultiTenantManager multiTenantManager = ozoneManager.getMultiTenantManager(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java index fa630183006..7cc7f109d43 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantDeleteRequest.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.om.request.s3.tenant; import com.google.common.base.Preconditions; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneConsts; @@ -103,8 +103,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long transactionLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long transactionLogIndex = context.getIndex(); final OMMultiTenantManager multiTenantManager = ozoneManager.getMultiTenantManager(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java index ba82ab12144..21e7cc57671 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeAdminRequest.java @@ -20,7 +20,7 @@ import com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; -import org.apache.ratis.server.protocol.TermIndex; +import 
org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneConsts; @@ -141,7 +141,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { @Override @SuppressWarnings("checkstyle:methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { final OMMultiTenantManager multiTenantManager = ozoneManager.getMultiTenantManager(); @@ -195,7 +195,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn .build(); omMetadataManager.getTenantAccessIdTable().addCacheEntry( new CacheKey<>(accessId), - CacheValue.get(termIndex.getIndex(), newOmDBAccessIdInfo)); + CacheValue.get(context.getIndex(), newOmDBAccessIdInfo)); // Update tenant cache multiTenantManager.getCacheOp().revokeTenantAdmin(accessId); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java index d2b55678490..5787b44f835 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/tenant/OMTenantRevokeUserAccessIdRequest.java @@ -20,7 +20,7 @@ import com.google.common.base.Preconditions; import org.apache.commons.lang3.StringUtils; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneConsts; @@ -152,8 +152,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long transactionLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long transactionLogIndex = context.getIndex(); final OMMultiTenantManager multiTenantManager = ozoneManager.getMultiTenantManager(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMCancelDelegationTokenRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMCancelDelegationTokenRequest.java index 0ee5f6ab6b2..f57b9db5d38 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMCancelDelegationTokenRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMCancelDelegationTokenRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.security; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -85,8 +85,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } @Override - public 
OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long transactionLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long transactionLogIndex = context.getIndex(); OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); Token token = getToken(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMGetDelegationTokenRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMGetDelegationTokenRequest.java index 8e2c56ab2cd..077e2bde28e 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMGetDelegationTokenRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMGetDelegationTokenRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.security; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.io.Text; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditLogger; @@ -129,7 +129,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { UpdateGetDelegationTokenRequest updateGetDelegationTokenRequest = getOmRequest().getUpdateGetDelegationTokenRequest(); @@ -180,7 +180,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn // Update Cache. omMetadataManager.getDelegationTokenTable().addCacheEntry( new CacheKey<>(ozoneTokenIdentifier), - CacheValue.get(termIndex.getIndex(), renewTime)); + CacheValue.get(context.getIndex(), renewTime)); omClientResponse = new OMGetDelegationTokenResponse(ozoneTokenIdentifier, renewTime, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMRenewDelegationTokenRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMRenewDelegationTokenRequest.java index 7985c762d6f..e25bc57ec39 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMRenewDelegationTokenRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/security/OMRenewDelegationTokenRequest.java @@ -22,7 +22,7 @@ import java.nio.file.InvalidPathException; import java.util.Map; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; @@ -127,7 +127,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { UpdateRenewDelegationTokenRequest updateRenewDelegationTokenRequest = getOmRequest().getUpdatedRenewDelegationTokenRequest(); @@ -166,7 +166,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn // Update Cache. 
omMetadataManager.getDelegationTokenTable().addCacheEntry( new CacheKey<>(ozoneTokenIdentifier), - CacheValue.get(termIndex.getIndex(), renewTime)); + CacheValue.get(context.getIndex(), renewTime)); omClientResponse = new OMRenewDelegationTokenResponse(ozoneTokenIdentifier, renewTime, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java index 59cc02b6fdb..94d89c70b5b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotCreateRequest.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.ozone.om.ResolvedBucket; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.RDBStore; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; @@ -131,7 +131,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { OMMetrics omMetrics = ozoneManager.getMetrics(); omMetrics.incNumSnapshotCreates(); @@ -173,7 +173,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn ((RDBStore) omMetadataManager.getStore()).getDb() .getLatestSequenceNumber(); snapshotInfo.setDbTxSequenceNumber(dbLatestSequenceNumber); - snapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + snapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(context.getTermIndex()).toByteString()); // Snapshot referenced size should be bucket's used bytes OmBucketInfo omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); @@ -190,7 +190,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn // pre-replicated key size counter in OmBucketInfo. 
snapshotInfo.setReferencedSize(estimateBucketDataSize(omBucketInfo)); - addSnapshotInfoToSnapshotChainAndCache(omMetadataManager, termIndex.getIndex()); + addSnapshotInfoToSnapshotChainAndCache(omMetadataManager, context.getIndex()); omResponse.setCreateSnapshotResponse( CreateSnapshotResponse.newBuilder() diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java index 95f99c627c4..b6832545ada 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotDeleteRequest.java @@ -20,7 +20,7 @@ import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.ozone.om.ResolvedBucket; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OmUtils; @@ -115,7 +115,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { OMMetrics omMetrics = ozoneManager.getMetrics(); omMetrics.incNumSnapshotDeletes(); @@ -185,7 +185,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn // Update table cache first omMetadataManager.getSnapshotInfoTable().addCacheEntry( new CacheKey<>(tableKey), - CacheValue.get(termIndex.getIndex(), snapshotInfo)); + CacheValue.get(context.getIndex(), snapshotInfo)); omResponse.setDeleteSnapshotResponse( DeleteSnapshotResponse.newBuilder()); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java index 18055bdda40..108128b4a09 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveDeletedKeysRequest.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.SnapshotChainManager; @@ -61,7 +61,7 @@ public OMSnapshotMoveDeletedKeysRequest(OMRequest omRequest) { @Override @DisallowedUntilLayoutVersion(FILESYSTEM_SNAPSHOT) - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); SnapshotChainManager snapshotChainManager = @@ -91,13 +91,13 @@ public 
OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn List movedDirs = moveDeletedKeysRequest.getDeletedDirsToMoveList(); // Update lastTransactionInfo for fromSnapshot and the nextSnapshot. - fromSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + fromSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(context.getTermIndex()).toByteString()); omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(fromSnapshot.getTableKey()), - CacheValue.get(termIndex.getIndex(), fromSnapshot)); + CacheValue.get(context.getIndex(), fromSnapshot)); if (nextSnapshot != null) { - nextSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + nextSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(context.getTermIndex()).toByteString()); omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(nextSnapshot.getTableKey()), - CacheValue.get(termIndex.getIndex(), nextSnapshot)); + CacheValue.get(context.getIndex(), nextSnapshot)); } omClientResponse = new OMSnapshotMoveDeletedKeysResponse( omResponse.build(), fromSnapshot, nextSnapshot, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java index 0eb0d3cd166..ef9c0261d63 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotMoveTableKeysRequest.java @@ -28,6 +28,7 @@ import org.apache.hadoop.ozone.om.SnapshotChainManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -38,7 +39,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveKeyInfos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.SnapshotMoveTableKeysRequest; -import org.apache.ratis.server.protocol.TermIndex; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -143,7 +143,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { @Override @DisallowedUntilLayoutVersion(FILESYSTEM_SNAPSHOT) - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); SnapshotChainManager snapshotChainManager = omMetadataManager.getSnapshotChainManager(); @@ -164,13 +164,13 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn } // Update lastTransactionInfo for fromSnapshot and the nextSnapshot. 
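Note: the hunks above and below all make the same mechanical substitution: request classes stop receiving the raw Ratis TermIndex and instead take an ExecutionContext, reading the transaction log index via context.getIndex() and the full term/index pair via context.getTermIndex(). A minimal sketch of such a wrapper follows; the real class lives in org.apache.hadoop.ozone.om.execution.flowcontrol, and its exact fields and factory method are assumptions inferred only from the call sites visible in these diffs, not the actual implementation.

    import org.apache.ratis.server.protocol.TermIndex;

    // Illustrative sketch only; not the actual Ozone ExecutionContext class.
    public final class ExecutionContextSketch {
      private final long index;          // transaction log index, used for table cache entries
      private final TermIndex termIndex; // full Ratis term/index, used for TransactionInfo

      private ExecutionContextSketch(long index, TermIndex termIndex) {
        this.index = index;
        this.termIndex = termIndex;
      }

      // Mirrors the ExecutionContext.of(index, termIndex) calls seen in the diffs.
      public static ExecutionContextSketch of(long index, TermIndex termIndex) {
        return new ExecutionContextSketch(index, termIndex);
      }

      public long getIndex() {
        return index;
      }

      public TermIndex getTermIndex() {
        return termIndex;
      }
    }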
- fromSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + fromSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(context.getTermIndex()).toByteString()); omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(fromSnapshot.getTableKey()), - CacheValue.get(termIndex.getIndex(), fromSnapshot)); + CacheValue.get(context.getIndex(), fromSnapshot)); if (nextSnapshot != null) { - nextSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + nextSnapshot.setLastTransactionInfo(TransactionInfo.valueOf(context.getTermIndex()).toByteString()); omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(nextSnapshot.getTableKey()), - CacheValue.get(termIndex.getIndex(), nextSnapshot)); + CacheValue.get(context.getIndex(), nextSnapshot)); } omClientResponse = new OMSnapshotMoveTableKeysResponse(omResponse.build(), fromSnapshot, nextSnapshot, moveTableKeysRequest.getDeletedKeysList(), moveTableKeysRequest.getDeletedDirsList(), diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java index 62fbb39417b..af701d361f1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotPurgeRequest.java @@ -22,7 +22,7 @@ import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; @@ -67,10 +67,10 @@ public OMSnapshotPurgeRequest(OMRequest omRequest) { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { OMMetrics omMetrics = ozoneManager.getMetrics(); - final long trxnLogIndex = termIndex.getIndex(); + final long trxnLogIndex = context.getIndex(); OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); @@ -116,9 +116,9 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn } // Update the snapshotInfo lastTransactionInfo. 
for (SnapshotInfo snapshotInfo : updatedSnapshotInfos.values()) { - snapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(termIndex).toByteString()); + snapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(context.getTermIndex()).toByteString()); omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(snapshotInfo.getTableKey()), - CacheValue.get(termIndex.getIndex(), snapshotInfo)); + CacheValue.get(context.getIndex(), snapshotInfo)); } omClientResponse = new OMSnapshotPurgeResponse(omResponse.build(), snapshotDbKeys, updatedSnapshotInfos); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java index 8cf0579647c..0c721c1035f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotRenameRequest.java @@ -37,6 +37,7 @@ import org.apache.hadoop.ozone.om.ResolvedBucket; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -52,7 +53,6 @@ import org.apache.hadoop.ozone.security.acl.OzoneObj; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.Time; -import org.apache.ratis.server.protocol.TermIndex; /** * Changes snapshot name. 
@@ -111,8 +111,7 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, - TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { boolean acquiredBucketLock = false; boolean acquiredSnapshotOldLock = false; boolean acquiredSnapshotNewLock = false; @@ -188,11 +187,11 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, omMetadataManager.getSnapshotInfoTable().addCacheEntry( new CacheKey<>(snapshotOldTableKey), - CacheValue.get(termIndex.getIndex())); + CacheValue.get(context.getIndex())); omMetadataManager.getSnapshotInfoTable().addCacheEntry( new CacheKey<>(snapshotNewTableKey), - CacheValue.get(termIndex.getIndex(), snapshotOldInfo)); + CacheValue.get(context.getIndex(), snapshotOldInfo)); omMetadataManager.getSnapshotChainManager().updateSnapshot(snapshotOldInfo); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java index 53047fd8026..5fb6cb71c36 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/snapshot/OMSnapshotSetPropertyRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.snapshot; import org.apache.hadoop.ozone.om.OMMetrics; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -51,7 +51,7 @@ public OMSnapshotSetPropertyRequest(OMRequest omRequest) { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { OMMetrics omMetrics = ozoneManager.getMetrics(); OMClientResponse omClientResponse; @@ -98,7 +98,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn // Update Table Cache metadataManager.getSnapshotInfoTable().addCacheEntry( new CacheKey<>(snapshotKey), - CacheValue.get(termIndex.getIndex(), updatedSnapInfo)); + CacheValue.get(context.getIndex(), updatedSnapInfo)); omClientResponse = new OMSnapshotSetPropertyResponse( omResponse.build(), updatedSnapInfo); omMetrics.incNumSnapshotSetProperties(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMCancelPrepareRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMCancelPrepareRequest.java index c7b348c06f0..2a334d8c99c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMCancelPrepareRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMCancelPrepareRequest.java @@ -20,8 +20,8 @@ import java.util.HashMap; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.request.OMClientRequest; @@ -53,9 +53,9 @@ public OMCancelPrepareRequest(OMRequest omRequest) { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { - LOG.info("OM {} Received cancel prepare request with log {}", ozoneManager.getOMNodeId(), termIndex); + LOG.info("OM {} Received cancel prepare request with log {}", ozoneManager.getOMNodeId(), context.getTermIndex()); OMRequest omRequest = getOmRequest(); AuditLogger auditLogger = ozoneManager.getAuditLogger(); @@ -87,7 +87,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn ozoneManager.getPrepareState().cancelPrepare(); LOG.info("OM {} prepare state cancelled at log {}. Returning response {}", - ozoneManager.getOMNodeId(), termIndex, omResponse); + ozoneManager.getOMNodeId(), context.getTermIndex(), omResponse); } catch (IOException e) { exception = e; LOG.error("Cancel Prepare Request apply failed in {}. ", diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMFinalizeUpgradeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMFinalizeUpgradeRequest.java index 866e7b6c67f..580d06a147f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMFinalizeUpgradeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMFinalizeUpgradeRequest.java @@ -25,8 +25,8 @@ .UpgradeFinalizationStatus; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; -import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -60,7 +60,7 @@ public OMFinalizeUpgradeRequest(OMRequest omRequest) { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { LOG.trace("Request: {}", getOmRequest()); AuditLogger auditLogger = ozoneManager.getAuditLogger(); OzoneManagerProtocolProtos.UserInfo userInfo = getOmRequest().getUserInfo(); @@ -99,7 +99,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn int lV = ozoneManager.getVersionManager().getMetadataLayoutVersion(); omMetadataManager.getMetaTable().addCacheEntry( new CacheKey<>(LAYOUT_VERSION_KEY), - CacheValue.get(termIndex.getIndex(), String.valueOf(lV))); + CacheValue.get(context.getIndex(), String.valueOf(lV))); FinalizeUpgradeResponse omResponse = FinalizeUpgradeResponse.newBuilder() diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMPrepareRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMPrepareRequest.java index f7c223eae09..654ee55b16d 100644 --- 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMPrepareRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/upgrade/OMPrepareRequest.java @@ -20,7 +20,7 @@ import java.util.HashMap; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer; @@ -66,10 +66,10 @@ public OMPrepareRequest(OMRequest omRequest) { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long transactionLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long transactionLogIndex = context.getIndex(); - LOG.info("OM {} Received prepare request with log {}", ozoneManager.getOMNodeId(), termIndex); + LOG.info("OM {} Received prepare request with log {}", ozoneManager.getOMNodeId(), context.getTermIndex()); OMRequest omRequest = getOmRequest(); AuditLogger auditLogger = ozoneManager.getAuditLogger(); @@ -104,7 +104,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIn // the snapshot index in the prepared state. OzoneManagerDoubleBuffer doubleBuffer = ozoneManager.getOmRatisServer().getOmStateMachine().getOzoneManagerDoubleBuffer(); - doubleBuffer.add(response, termIndex); + doubleBuffer.add(response, context.getTermIndex()); OzoneManagerRatisServer omRatisServer = ozoneManager.getOmRatisServer(); final RaftServer.Division division = omRatisServer.getServerDivision(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/OMEchoRPCWriteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/OMEchoRPCWriteRequest.java index e15782acafd..6d9740f399a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/OMEchoRPCWriteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/util/OMEchoRPCWriteRequest.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.om.request.util; import com.google.protobuf.ByteString; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.util.PayloadUtils; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.request.OMClientRequest; @@ -40,7 +40,7 @@ public OMEchoRPCWriteRequest(OMRequest omRequest) { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { EchoRPCRequest echoRPCRequest = getOmRequest().getEchoRPCRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMQuotaRepairRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMQuotaRepairRequest.java index e307a1f95fd..5eadd47595f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMQuotaRepairRequest.java +++ 
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMQuotaRepairRequest.java @@ -33,6 +33,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.request.OMClientRequest; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.om.response.OMClientResponse; @@ -40,7 +41,6 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.ratis.server.protocol.TermIndex; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -72,8 +72,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { @Override @SuppressWarnings("methodlength") - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long transactionLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long transactionLogIndex = context.getIndex(); OzoneManagerProtocolProtos.QuotaRepairRequest quotaRepairRequest = getOmRequest().getQuotaRepairRequest(); Preconditions.checkNotNull(quotaRepairRequest); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java index a22775107b9..f6cb32a45d5 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeCreateRequest.java @@ -28,8 +28,8 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.ozone.OzoneAcl; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.ratis.server.protocol.TermIndex; import org.apache.hadoop.ozone.OmUtils; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; @@ -98,8 +98,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long transactionLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long transactionLogIndex = context.getIndex(); CreateVolumeRequest createVolumeRequest = getOmRequest().getCreateVolumeRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java index d2db7ed3d4e..9f1ad0f30c7 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeDeleteRequest.java @@ -22,7 +22,7 @@ import java.nio.file.InvalidPathException; import 
com.google.common.base.Preconditions; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; import org.apache.hadoop.ozone.storage.proto.OzoneManagerStorageProtos; import org.slf4j.Logger; @@ -65,8 +65,8 @@ public OMVolumeDeleteRequest(OMRequest omRequest) { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long transactionLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long transactionLogIndex = context.getIndex(); DeleteVolumeRequest deleteVolumeRequest = getOmRequest().getDeleteVolumeRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java index 8481f2201fa..090b0186974 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetOwnerRequest.java @@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.om.request.volume; import com.google.common.base.Preconditions; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.hdds.utils.db.cache.CacheKey; import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.OzoneConsts; @@ -76,8 +76,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long transactionLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long transactionLogIndex = context.getIndex(); SetVolumePropertyRequest setVolumePropertyRequest = getOmRequest().getSetVolumePropertyRequest(); Preconditions.checkNotNull(setVolumePropertyRequest); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java index 7a962a0e2b5..2174acf63e6 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/OMVolumeSetQuotaRequest.java @@ -24,7 +24,7 @@ import java.util.Map; import com.google.common.base.Preconditions; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; @@ -83,8 +83,8 @@ public OMRequest preExecute(OzoneManager ozoneManager) throws IOException { } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long transactionLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) 
{ + final long transactionLogIndex = context.getIndex(); SetVolumePropertyRequest setVolumePropertyRequest = getOmRequest().getSetVolumePropertyRequest(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java index b431d70fa7e..5a83720e0b0 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAclRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.volume.acl; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.audit.AuditLogger; @@ -66,8 +66,8 @@ public interface VolumeAclOp extends } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { - final long trxnLogIndex = termIndex.getIndex(); + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { + final long trxnLogIndex = context.getIndex(); // protobuf guarantees volume and acls are non-null. String volume = getVolumeName(); List ozoneAcls = getAcls(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java index 3eff4da0caf..3fad018ea8d 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeAddAclRequest.java @@ -19,7 +19,7 @@ import com.google.common.base.Preconditions; import com.google.common.collect.Lists; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; @@ -141,8 +141,8 @@ void onComplete(Result result, Exception ex, long trxnLogIndex, } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { ozoneManager.getMetrics().incNumAddAcl(); - return super.validateAndUpdateCache(ozoneManager, termIndex); + return super.validateAndUpdateCache(ozoneManager, context); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java index e0b14b4e2b2..2d862dbad4b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeRemoveAclRequest.java @@ -19,7 +19,7 @@ import com.google.common.base.Preconditions; import com.google.common.collect.Lists; -import org.apache.ratis.server.protocol.TermIndex; +import 
org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; @@ -140,8 +140,8 @@ void onComplete(Result result, Exception ex, long trxnLogIndex, } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { ozoneManager.getMetrics().incNumRemoveAcl(); - return super.validateAndUpdateCache(ozoneManager, termIndex); + return super.validateAndUpdateCache(ozoneManager, context); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java index 687210982f7..53ba7778c89 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/volume/acl/OMVolumeSetAclRequest.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.om.request.volume.acl; import com.google.common.base.Preconditions; -import org.apache.ratis.server.protocol.TermIndex; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.audit.AuditLogger; import org.apache.hadoop.ozone.audit.OMAction; @@ -138,8 +138,8 @@ void onComplete(Result result, Exception ex, long trxnLogIndex, } @Override - public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, TermIndex termIndex) { + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, ExecutionContext context) { ozoneManager.getMetrics().incNumSetAcl(); - return super.validateAndUpdateCache(ozoneManager, termIndex); + return super.validateAndUpdateCache(ozoneManager, context); } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java index 654610f81dc..91d234d4d0b 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java @@ -41,6 +41,7 @@ import org.apache.hadoop.ozone.om.exceptions.OMLeaderNotReadyException; import org.apache.hadoop.ozone.om.helpers.OMAuditLogger; import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer; import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer; import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer.RaftServerStatus; @@ -303,7 +304,8 @@ private OMResponse submitRequestDirectlyToOM(OMRequest request) { throw ex; } final TermIndex termIndex = TransactionInfo.getTermIndex(transactionIndex.incrementAndGet()); - omClientResponse = handler.handleWriteRequest(request, termIndex, ozoneManagerDoubleBuffer); + final ExecutionContext context = ExecutionContext.of(termIndex.getIndex(), termIndex); + omClientResponse = handler.handleWriteRequest(request, 
context, ozoneManagerDoubleBuffer); } } catch (IOException ex) { // As some preExecute returns error. So handle here. diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index ab1f68d9928..09865ace27a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -44,6 +44,7 @@ import org.apache.hadoop.ozone.OzoneAcl; import org.apache.hadoop.ozone.om.helpers.OMAuditLogger; import org.apache.hadoop.ozone.om.helpers.KeyValueUtil; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetObjectTaggingRequest; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.GetObjectTaggingResponse; import org.apache.hadoop.ozone.util.PayloadUtils; @@ -170,7 +171,6 @@ import org.apache.hadoop.ozone.snapshot.ListSnapshotResponse; import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.StatusAndMessages; import org.apache.hadoop.ozone.util.ProtobufUtils; -import org.apache.ratis.server.protocol.TermIndex; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -420,19 +420,20 @@ public OMResponse handleReadRequest(OMRequest request) { } @Override - public OMClientResponse handleWriteRequestImpl(OMRequest omRequest, TermIndex termIndex) throws IOException { + public OMClientResponse handleWriteRequestImpl(OMRequest omRequest, ExecutionContext context) throws IOException { injectPause(); OMClientRequest omClientRequest = OzoneManagerRatisUtils.createClientRequest(omRequest, impl); try { OMClientResponse omClientResponse = captureLatencyNs( impl.getPerfMetrics().getValidateAndUpdateCacheLatencyNs(), - () -> Objects.requireNonNull(omClientRequest.validateAndUpdateCache(getOzoneManager(), termIndex), + () -> Objects.requireNonNull(omClientRequest.validateAndUpdateCache(getOzoneManager(), context), "omClientResponse returned by validateAndUpdateCache cannot be null")); - OMAuditLogger.log(omClientRequest.getAuditBuilder(), termIndex); + OMAuditLogger.log(omClientRequest.getAuditBuilder(), context.getTermIndex()); return omClientResponse; } catch (Throwable th) { - OMAuditLogger.log(omClientRequest.getAuditBuilder(), omClientRequest, getOzoneManager(), termIndex, th); + OMAuditLogger.log(omClientRequest.getAuditBuilder(), omClientRequest, getOzoneManager(), context.getTermIndex(), + th); throw th; } } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java index 76546f2e480..033911364d8 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/RequestHandler.java @@ -18,12 +18,12 @@ package org.apache.hadoop.ozone.protocolPB; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.ratis.OzoneManagerDoubleBuffer; import org.apache.hadoop.ozone.om.response.OMClientResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; import 
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Type; -import org.apache.ratis.server.protocol.TermIndex; import java.io.IOException; @@ -54,15 +54,15 @@ public interface RequestHandler { * In non-HA this will be called from {@link OzoneManagerProtocolServerSideTranslatorPB}. * * @param omRequest the write request - * @param termIndex - ratis transaction term and index + * @param context - context containing ratis term index and index * @param ozoneManagerDoubleBuffer for adding response * @return OMClientResponse */ - default OMClientResponse handleWriteRequest(OMRequest omRequest, TermIndex termIndex, + default OMClientResponse handleWriteRequest(OMRequest omRequest, ExecutionContext context, OzoneManagerDoubleBuffer ozoneManagerDoubleBuffer) throws IOException { - final OMClientResponse response = handleWriteRequestImpl(omRequest, termIndex); + final OMClientResponse response = handleWriteRequestImpl(omRequest, context); if (omRequest.getCmdType() != Type.Prepare) { - ozoneManagerDoubleBuffer.add(response, termIndex); + ozoneManagerDoubleBuffer.add(response, context.getTermIndex()); } return response; } @@ -71,8 +71,8 @@ default OMClientResponse handleWriteRequest(OMRequest omRequest, TermIndex termI * Implementation of {@link #handleWriteRequest}. * * @param omRequest the write request - * @param termIndex - ratis transaction term and index + * @param context - context containing ratis term index and index * @return OMClientResponse */ - OMClientResponse handleWriteRequestImpl(OMRequest omRequest, TermIndex termIndex) throws IOException; + OMClientResponse handleWriteRequestImpl(OMRequest omRequest, ExecutionContext context) throws IOException; } diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java index eb13f97d237..6b9f93e08c9 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerDoubleBufferWithOMResponse.java @@ -33,6 +33,7 @@ import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; import org.apache.hadoop.ozone.om.request.bucket.OMBucketCreateRequest; import org.apache.hadoop.ozone.om.request.bucket.OMBucketDeleteRequest; @@ -316,7 +317,8 @@ private OMClientResponse deleteBucket(String volumeName, String bucketName, new OMBucketDeleteRequest(omRequest); final TermIndex termIndex = TermIndex.valueOf(term, transactionID); - OMClientResponse omClientResponse = omBucketDeleteRequest.validateAndUpdateCache(ozoneManager, termIndex); + final ExecutionContext context = ExecutionContext.of(termIndex.getIndex(), termIndex); + OMClientResponse omClientResponse = omBucketDeleteRequest.validateAndUpdateCache(ozoneManager, context); doubleBuffer.add(omClientResponse, termIndex); return omClientResponse; } @@ -459,7 +461,8 @@ private OMClientResponse createVolume(String volumeName, } final TermIndex termIndex = TransactionInfo.getTermIndex(transactionId); - OMClientResponse omClientResponse = 
omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, termIndex); + final ExecutionContext context = ExecutionContext.of(termIndex.getIndex(), termIndex); + OMClientResponse omClientResponse = omVolumeCreateRequest.validateAndUpdateCache(ozoneManager, context); doubleBuffer.add(omClientResponse, termIndex); return omClientResponse; } @@ -485,7 +488,8 @@ private OMBucketCreateResponse createBucket(String volumeName, } final TermIndex termIndex = TermIndex.valueOf(term, transactionID); - OMClientResponse omClientResponse = omBucketCreateRequest.validateAndUpdateCache(ozoneManager, termIndex); + final ExecutionContext context = ExecutionContext.of(termIndex.getIndex(), termIndex); + OMClientResponse omClientResponse = omBucketCreateRequest.validateAndUpdateCache(ozoneManager, context); doubleBuffer.add(omClientResponse, termIndex); return (OMBucketCreateResponse) omClientResponse; } From 14bba1ee7aee38c46ee9e9a4510c4e52442fac12 Mon Sep 17 00:00:00 2001 From: Rishabh Patel <1607531+ptlrs@users.noreply.github.com> Date: Tue, 7 Jan 2025 23:13:20 -0800 Subject: [PATCH 057/168] HDDS-5045. Create acceptance test for using rclone with s3 protocol (#7571) --- hadoop-ozone/dist/pom.xml | 2 +- .../dist/src/main/smoketest/s3/rclone.robot | 46 +++++++++++++++++++ 2 files changed, 47 insertions(+), 1 deletion(-) create mode 100644 hadoop-ozone/dist/src/main/smoketest/s3/rclone.robot diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml index 82126325f26..0a9079747c9 100644 --- a/hadoop-ozone/dist/pom.xml +++ b/hadoop-ozone/dist/pom.xml @@ -30,7 +30,7 @@ true apache/ozone -rocky - 20241212-1-jdk21 + 20241216-1-jdk21 ghcr.io/apache/ozone-testkrb5:20241129-1 true true diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/rclone.robot b/hadoop-ozone/dist/src/main/smoketest/s3/rclone.robot new file mode 100644 index 00000000000..b3405d3acf8 --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/s3/rclone.robot @@ -0,0 +1,46 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
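Note: this patch adds an rclone acceptance test against the S3 gateway; the robot suite (continuing below) drives rclone entirely through environment variables plus rclone config create, then copies a local directory into the generated bucket. A hand-run sketch of the same flow is shown here for reference; the credential values, endpoint, volume and bucket names are assumptions taken from the suite's defaults rather than required values.

    export AWS_ACCESS_KEY_ID=testuser
    export AWS_SECRET_ACCESS_KEY=testsecret
    export RCLONE_CONFIG=/tmp/rclone.conf
    export RCLONE_VERBOSE=2
    rclone config create ozone s3 env_auth=true provider=Other endpoint=http://s3g:9878
    rclone copy /opt/hadoop/smoketest ozone:/s3v/generated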
+ +*** Settings *** +Documentation S3 gateway test with rclone client +Library OperatingSystem +Library BuiltIn +Resource ./commonawslib.robot +Test Timeout 15 minutes +Suite Setup Setup s3 tests + +*** Variables *** +${ENDPOINT_URL} http://s3g:9878 +${S3_VOLUME} s3v +${BUCKET} generated +${RCLONE_CONFIG_NAME} ozone +${RCLONE_CONFIG_PATH} /tmp/rclone.conf +${RCLONE_VERBOSE_LEVEL} 2 + +*** Keywords *** +# Export access key and secret to the environment +Setup aws credentials + ${accessKey} = Execute aws configure get aws_access_key_id + ${secret} = Execute aws configure get aws_secret_access_key + Set Environment Variable AWS_SECRET_ACCESS_KEY ${secret} + Set Environment Variable AWS_ACCESS_KEY_ID ${accessKey} + +*** Test Cases *** +Rclone Client Test + [Setup] Setup aws credentials + Set Environment Variable RCLONE_CONFIG ${RCLONE_CONFIG_PATH} + Set Environment Variable RCLONE_VERBOSE ${RCLONE_VERBOSE_LEVEL} + ${result} = Execute rclone config create ${RCLONE_CONFIG_NAME} s3 env_auth=true provider=Other endpoint=${ENDPOINT_URL} + ${result} = Execute rclone copy /opt/hadoop/smoketest ${RCLONE_CONFIG_NAME}:/${S3_VOLUME}/${BUCKET} From a1324b66a74e5fffdda7353d1d4ea752e0f69677 Mon Sep 17 00:00:00 2001 From: Tejaskriya <87555809+Tejaskriya@users.noreply.github.com> Date: Wed, 8 Jan 2025 17:23:00 +0530 Subject: [PATCH 058/168] HDDS-11511. Introduce metrics in deletion services of OM (#7377) --- .../TestDirectoryDeletingServiceWithFSO.java | 75 ++++++++ .../ozone/om/DeletingServiceMetrics.java | 163 ++++++++++++++++++ .../apache/hadoop/ozone/om/OzoneManager.java | 6 + .../key/OMDirectoriesPurgeRequestWithFSO.java | 13 ++ .../om/request/key/OMKeyPurgeRequest.java | 8 +- .../service/AbstractKeyDeletingService.java | 7 +- .../ozone/om/service/KeyDeletingService.java | 5 + .../om/request/key/TestOMKeyRequest.java | 4 + 8 files changed, 279 insertions(+), 2 deletions(-) create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DeletingServiceMetrics.java diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java index 049db78cf6e..6cced078488 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java @@ -42,6 +42,7 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.om.DeletingServiceMetrics; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.om.OmSnapshotManager; import org.apache.hadoop.ozone.om.OzoneManager; @@ -109,6 +110,7 @@ public class TestDirectoryDeletingServiceWithFSO { private static String volumeName; private static String bucketName; private static OzoneClient client; + private static DeletingServiceMetrics metrics; @BeforeAll public static void init() throws Exception { @@ -140,6 +142,7 @@ public static void init() throws Exception { conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5); fs = FileSystem.get(conf); + metrics = cluster.getOzoneManager().getDeletionMetrics(); } @AfterAll @@ -185,6 +188,7 @@ public void testDeleteEmptyDirectory() throws Exception { assertSubPathsCount(dirDeletingService::getMovedDirsCount, 0); 
assertSubPathsCount(dirDeletingService::getMovedFilesCount, 0); + metrics.resetDirectoryMetrics(); // Delete the appRoot, empty dir fs.delete(appRoot, true); @@ -197,6 +201,8 @@ public void testDeleteEmptyDirectory() throws Exception { assertSubPathsCount(dirDeletingService::getDeletedDirsCount, 1); assertSubPathsCount(dirDeletingService::getMovedDirsCount, 0); assertSubPathsCount(dirDeletingService::getMovedFilesCount, 0); + assertEquals(1, metrics.getNumDirsPurged()); + assertEquals(1, metrics.getNumDirsSentForPurge()); try (TableIterator> iterator = dirTable.iterator()) { @@ -255,6 +261,7 @@ public void testDeleteWithLargeSubPathsThanBatchSize() throws Exception { long preRunCount = dirDeletingService.getRunCount().get(); + metrics.resetDirectoryMetrics(); // Delete the appRoot fs.delete(appRoot, true); @@ -270,6 +277,14 @@ public void testDeleteWithLargeSubPathsThanBatchSize() throws Exception { assertSubPathsCount(dirDeletingService::getMovedDirsCount, 18); assertSubPathsCount(dirDeletingService::getDeletedDirsCount, 19); + assertEquals(15, metrics.getNumSubFilesSentForPurge()); + assertEquals(15, metrics.getNumSubFilesMovedToDeletedTable()); + assertEquals(19, metrics.getNumDirsPurged()); + assertEquals(19, metrics.getNumDirsSentForPurge()); + assertEquals(18, metrics.getNumSubDirsMovedToDeletedDirTable()); + assertEquals(18, metrics.getNumSubDirsSentForPurge()); + + long elapsedRunCount = dirDeletingService.getRunCount().get() - preRunCount; assertThat(dirDeletingService.getRunCount().get()).isGreaterThan(1); // Ensure dir deleting speed, here provide a backup value for safe CI @@ -308,6 +323,7 @@ public void testDeleteWithMultiLevels() throws Exception { assertSubPathsCount(dirDeletingService::getMovedDirsCount, 0); assertSubPathsCount(dirDeletingService::getDeletedDirsCount, 0); + metrics.resetDirectoryMetrics(); // Delete the rootDir, which should delete all keys. fs.delete(root, true); @@ -321,7 +337,66 @@ public void testDeleteWithMultiLevels() throws Exception { assertSubPathsCount(dirDeletingService::getMovedFilesCount, 3); assertSubPathsCount(dirDeletingService::getMovedDirsCount, 2); assertSubPathsCount(dirDeletingService::getDeletedDirsCount, 5); + assertEquals(5, metrics.getNumDirsSentForPurge()); + assertEquals(5, metrics.getNumDirsPurged()); + assertEquals(4, metrics.getNumSubDirsMovedToDeletedDirTable()); + assertEquals(4, metrics.getNumSubDirsSentForPurge()); + assertEquals(3, metrics.getNumSubFilesSentForPurge()); + assertEquals(3, metrics.getNumSubFilesMovedToDeletedTable()); + + assertThat(dirDeletingService.getRunCount().get()).isGreaterThan(1); + } + + /** + * Test to check the following scenario: + * A subdir gets marked for move in DirectoryDeletingService and + * marked for delete in AbstractKeyDeletingService#optimizeDirDeletesAndSubmitRequest. 
+ */ + @Test + public void testDeleteWithLessDirsButMultipleLevels() throws Exception { + Path root = new Path("/rootDir"); + Path appRoot = new Path(root, "appRoot"); + Path parent = new Path(appRoot, "parentDir"); + fs.mkdirs(parent); + Path child = new Path(parent, "childFile"); + ContractTestUtils.touch(fs, child); + + Table deletedDirTable = + cluster.getOzoneManager().getMetadataManager().getDeletedDirTable(); + Table keyTable = + cluster.getOzoneManager().getMetadataManager().getKeyTable(getFSOBucketLayout()); + Table dirTable = cluster.getOzoneManager().getMetadataManager().getDirectoryTable(); + + DirectoryDeletingService dirDeletingService = + (DirectoryDeletingService) cluster.getOzoneManager().getKeyManager().getDirDeletingService(); + + // Before delete + assertTableRowCount(deletedDirTable, 0); + assertTableRowCount(dirTable, 3); + assertTableRowCount(keyTable, 1); + + assertSubPathsCount(dirDeletingService::getMovedFilesCount, 0); + assertSubPathsCount(dirDeletingService::getMovedDirsCount, 0); + assertSubPathsCount(dirDeletingService::getDeletedDirsCount, 0); + + metrics.resetDirectoryMetrics(); + fs.delete(appRoot, true); + + // After delete + checkPath(appRoot); + assertTableRowCount(deletedDirTable, 0); + assertTableRowCount(keyTable, 0); + assertTableRowCount(dirTable, 1); + assertSubPathsCount(dirDeletingService::getMovedFilesCount, 1); + assertSubPathsCount(dirDeletingService::getDeletedDirsCount, 2); + assertSubPathsCount(dirDeletingService::getMovedDirsCount, 0); + assertEquals(2, metrics.getNumDirsSentForPurge()); + assertEquals(2, metrics.getNumDirsPurged()); + assertEquals(1, metrics.getNumSubDirsMovedToDeletedDirTable()); + assertEquals(1, metrics.getNumSubDirsSentForPurge()); + assertEquals(1, metrics.getNumSubFilesSentForPurge()); + assertEquals(1, metrics.getNumSubFilesMovedToDeletedTable()); assertThat(dirDeletingService.getRunCount().get()).isGreaterThan(1); } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DeletingServiceMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DeletingServiceMetrics.java new file mode 100644 index 00000000000..5597ff0a6aa --- /dev/null +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DeletingServiceMetrics.java @@ -0,0 +1,163 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.ozone.om; + +import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.metrics2.annotation.Metric; +import org.apache.hadoop.metrics2.annotation.Metrics; +import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; +import org.apache.hadoop.metrics2.lib.MutableGaugeLong; +import org.apache.hadoop.ozone.OzoneConsts; + +/** + * Class contains metrics related to the OM Deletion services. + */ +@Metrics(about = "Deletion Service Metrics", context = OzoneConsts.OZONE) +public final class DeletingServiceMetrics { + + public static final String METRICS_SOURCE_NAME = + DeletingServiceMetrics.class.getSimpleName(); + private MetricsRegistry registry; + + private DeletingServiceMetrics() { + this.registry = new MetricsRegistry(METRICS_SOURCE_NAME); + } + + /** + * Creates and returns DeletingServiceMetrics instance. + * + * @return DeletingServiceMetrics + */ + public static DeletingServiceMetrics create() { + return DefaultMetricsSystem.instance().register(METRICS_SOURCE_NAME, + "Metrics tracking the progress of deletion of directories and keys in the OM", + new DeletingServiceMetrics()); + } + /** + * Unregister the metrics instance. + */ + public static void unregister() { + DefaultMetricsSystem.instance().unregisterSource(METRICS_SOURCE_NAME); + } + + + /* + * Total directory deletion metrics across all iterations of DirectoryDeletingService since last restart. + */ + @Metric("Total no. of deleted directories sent for purge") + private MutableGaugeLong numDirsSentForPurge; + @Metric("Total no. of sub-directories sent for purge") + private MutableGaugeLong numSubDirsSentForPurge; + @Metric("Total no. of sub-files sent for purge") + private MutableGaugeLong numSubFilesSentForPurge; + + public void incrNumDirsSentForPurge(long dirDel) { + numDirsSentForPurge.incr(dirDel); + } + + public void incrNumSubDirsSentForPurge(long delta) { + numSubDirsSentForPurge.incr(delta); + } + + public void incrNumSubFilesSentForPurge(long delta) { + numSubFilesSentForPurge.incr(delta); + } + + public void incrementDirectoryDeletionTotalMetrics(long dirDel, long dirMove, long filesMove) { + incrNumDirsSentForPurge(dirDel); + incrNumSubDirsSentForPurge(dirMove); + incrNumSubFilesSentForPurge(filesMove); + } + + public long getNumDirsSentForPurge() { + return numDirsSentForPurge.value(); + } + public long getNumSubDirsSentForPurge() { + return numSubDirsSentForPurge.value(); + } + public long getNumSubFilesSentForPurge() { + return numSubFilesSentForPurge.value(); + } + + /* + * Total key deletion metrics across all iterations of KeyDeletingService since last restart. + */ + @Metric("Total no. of keys processed") + private MutableGaugeLong numKeysProcessed; + @Metric("Total no. of deleted keys sent for purge") + private MutableGaugeLong numKeysSentForPurge; + + public void incrNumKeysProcessed(long keysProcessed) { + this.numKeysProcessed.incr(keysProcessed); + } + + public void incrNumKeysSentForPurge(long keysPurge) { + this.numKeysSentForPurge.incr(keysPurge); + } + + /* + * Directory purge request metrics. + */ + @Metric("Total no. of directories purged") + private MutableGaugeLong numDirsPurged; + @Metric("Total no. of subFiles moved to deletedTable") + private MutableGaugeLong numSubFilesMovedToDeletedTable; + @Metric("Total no. 
of subDirectories moved to deletedDirTable") + private MutableGaugeLong numSubDirsMovedToDeletedDirTable; + + public void incrNumDirPurged(long dirPurged) { + this.numDirsPurged.incr(dirPurged); + } + public void incrNumSubFilesMoved(long subKeys) { + this.numSubFilesMovedToDeletedTable.incr(subKeys); + } + public void incrNumSubDirectoriesMoved(long subDirectories) { + this.numSubDirsMovedToDeletedDirTable.incr(subDirectories); + } + public long getNumDirsPurged() { + return numDirsPurged.value(); + } + public long getNumSubFilesMovedToDeletedTable() { + return numSubFilesMovedToDeletedTable.value(); + } + public long getNumSubDirsMovedToDeletedDirTable() { + return numSubDirsMovedToDeletedDirTable.value(); + } + + /* + * Key purge request metrics. + */ + @Metric("Total no. of keys purged") + private MutableGaugeLong numKeysPurged; + + public void incrNumKeysPurged(long keysPurged) { + this.numKeysPurged.incr(keysPurged); + } + + @VisibleForTesting + public void resetDirectoryMetrics() { + numDirsPurged.set(0); + numSubFilesMovedToDeletedTable.set(0); + numSubDirsMovedToDeletedDirTable.set(0); + numDirsSentForPurge.set(0); + numSubDirsSentForPurge.set(0); + numSubFilesSentForPurge.set(0); + } + +} diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java index d26546e47ee..f1d31d130cf 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java @@ -402,6 +402,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl private OMHAMetrics omhaMetrics; private final ProtocolMessageMetrics omClientProtocolMetrics; + private final DeletingServiceMetrics omDeletionMetrics; private OzoneManagerHttpServer httpServer; private final OMStorage omStorage; private ObjectName omInfoBeanName; @@ -684,6 +685,7 @@ private OzoneManager(OzoneConfiguration conf, StartupOption startupOption) metrics = OMMetrics.create(); perfMetrics = OMPerformanceMetrics.register(); + omDeletionMetrics = DeletingServiceMetrics.create(); // Get admin list omStarterUser = UserGroupInformation.getCurrentUser().getShortUserName(); omAdmins = OzoneAdmins.getOzoneAdmins(omStarterUser, conf); @@ -1652,6 +1654,9 @@ public OMMetrics getMetrics() { public OMPerformanceMetrics getPerfMetrics() { return perfMetrics; } + public DeletingServiceMetrics getDeletionMetrics() { + return omDeletionMetrics; + } /** * Start service. 
@@ -2317,6 +2322,7 @@ public boolean stop() { if (omRatisSnapshotProvider != null) { omRatisSnapshotProvider.close(); } + DeletingServiceMetrics.unregister(); OMPerformanceMetrics.unregister(); RatisDropwizardExports.clear(ratisMetricsMap, ratisReporterList); scmClient.close(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java index b24253e6f67..4758b8cb788 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMDirectoriesPurgeRequestWithFSO.java @@ -33,7 +33,9 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.om.OMMetrics; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; +import org.apache.hadoop.ozone.om.DeletingServiceMetrics; import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; + import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.helpers.BucketLayout; @@ -75,6 +77,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut OmMetadataManagerImpl omMetadataManager = (OmMetadataManagerImpl) ozoneManager.getMetadataManager(); Map openKeyInfoMap = new HashMap<>(); OMMetrics omMetrics = ozoneManager.getMetrics(); + DeletingServiceMetrics deletingServiceMetrics = ozoneManager.getDeletionMetrics(); OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( getOmRequest()); final SnapshotInfo fromSnapshotInfo; @@ -98,6 +101,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut return new OMDirectoriesPurgeResponseWithFSO(createErrorOMResponse(omResponse, e)); } try { + int numSubDirMoved = 0, numSubFilesMoved = 0, numDirsDeleted = 0; for (OzoneManagerProtocolProtos.PurgePathRequest path : purgeRequests) { for (OzoneManagerProtocolProtos.KeyInfo key : path.getMarkDeletedSubDirsList()) { @@ -111,6 +115,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut lockSet.add(volBucketPair); } omMetrics.decNumKeys(); + numSubDirMoved++; OmBucketInfo omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); // bucketInfo can be null in case of delete volume or bucket @@ -153,6 +158,7 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut } omMetrics.decNumKeys(); + numSubFilesMoved++; OmBucketInfo omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName); // bucketInfo can be null in case of delete volume or bucket @@ -168,7 +174,14 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut volBucketInfoMap.putIfAbsent(volBucketPair, omBucketInfo); } } + if (path.hasDeletedDir()) { + numDirsDeleted++; + } } + deletingServiceMetrics.incrNumSubDirectoriesMoved(numSubDirMoved); + deletingServiceMetrics.incrNumSubFilesMoved(numSubFilesMoved); + deletingServiceMetrics.incrNumDirPurged(numDirsDeleted); + if (fromSnapshotInfo != null) { fromSnapshotInfo.setLastTransactionInfo(TransactionInfo.valueOf(context.getTermIndex()).toByteString()); omMetadataManager.getSnapshotInfoTable().addCacheEntry(new CacheKey<>(fromSnapshotInfo.getTableKey()), diff --git 
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java index 5d0af563bb2..b77381b674a 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyPurgeRequest.java @@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.utils.db.cache.CacheValue; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.DeletingServiceMetrics; import org.apache.hadoop.ozone.om.execution.flowcontrol.ExecutionContext; import org.apache.hadoop.ozone.om.snapshot.SnapshotUtils; import org.apache.hadoop.ozone.om.OzoneManager; @@ -93,9 +94,14 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, Execut List keysToBePurgedList = new ArrayList<>(); + int numKeysDeleted = 0; for (DeletedKeys bucketWithDeleteKeys : bucketDeletedKeysList) { - keysToBePurgedList.addAll(bucketWithDeleteKeys.getKeysList()); + List keysList = bucketWithDeleteKeys.getKeysList(); + keysToBePurgedList.addAll(keysList); + numKeysDeleted = numKeysDeleted + keysList.size(); } + DeletingServiceMetrics deletingServiceMetrics = ozoneManager.getDeletionMetrics(); + deletingServiceMetrics.incrNumKeysPurged(numKeysDeleted); if (keysToBePurgedList.isEmpty()) { return new OMKeyPurgeResponse(createErrorOMResponse(omResponse, diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java index e60180938ff..a3d7ccb6618 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java @@ -30,6 +30,7 @@ import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.lock.BootstrapStateHandler; import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; +import org.apache.hadoop.ozone.om.DeletingServiceMetrics; import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OMConfigKeys; import org.apache.hadoop.ozone.om.OMMetadataManager; @@ -72,6 +73,7 @@ public abstract class AbstractKeyDeletingService extends BackgroundService implements BootstrapStateHandler { private final OzoneManager ozoneManager; + private final DeletingServiceMetrics metrics; private final ScmBlockLocationProtocol scmClient; private final ClientId clientId = ClientId.randomId(); private final AtomicLong deletedDirsCount; @@ -92,6 +94,7 @@ public AbstractKeyDeletingService(String serviceName, long interval, this.movedDirsCount = new AtomicLong(0); this.movedFilesCount = new AtomicLong(0); this.runCount = new AtomicLong(0); + this.metrics = ozoneManager.getDeletionMetrics(); } protected int processKeyDeletes(List keyBlocksList, @@ -450,8 +453,9 @@ public long optimizeDirDeletesAndSubmitRequest(long remainNum, } if (dirNum != 0 || subDirNum != 0 || subFileNum != 0) { + long subdirMoved = subDirNum - subdirDelNum; deletedDirsCount.addAndGet(dirNum + subdirDelNum); - movedDirsCount.addAndGet(subDirNum - subdirDelNum); + movedDirsCount.addAndGet(subdirMoved); movedFilesCount.addAndGet(subFileNum); long 
timeTakenInIteration = Time.monotonicNow() - startTime; LOG.info("Number of dirs deleted: {}, Number of sub-dir " + @@ -461,6 +465,7 @@ public long optimizeDirDeletesAndSubmitRequest(long remainNum, " totalRunCount: {}", dirNum, subdirDelNum, subFileNum, (subDirNum - subdirDelNum), limit, timeTakenInIteration, rnCnt); + metrics.incrementDirectoryDeletionTotalMetrics(dirNum + subdirDelNum, subDirNum, subFileNum); } return remainNum; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java index 9a4f74eba59..797b2b9bf9f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/KeyDeletingService.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hdds.utils.db.TableIterator; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.common.BlockGroup; +import org.apache.hadoop.ozone.om.DeletingServiceMetrics; import org.apache.hadoop.ozone.om.KeyManager; import org.apache.hadoop.ozone.om.OmMetadataManagerImpl; import org.apache.hadoop.ozone.om.OmSnapshot; @@ -98,6 +99,7 @@ public class KeyDeletingService extends AbstractKeyDeletingService { private AtomicBoolean isRunningOnAOS; private final boolean deepCleanSnapshots; private final SnapshotChainManager snapshotChainManager; + private DeletingServiceMetrics metrics; public KeyDeletingService(OzoneManager ozoneManager, ScmBlockLocationProtocol scmClient, @@ -121,6 +123,7 @@ public KeyDeletingService(OzoneManager ozoneManager, this.isRunningOnAOS = new AtomicBoolean(false); this.deepCleanSnapshots = deepCleanSnapshots; this.snapshotChainManager = ((OmMetadataManagerImpl)manager.getMetadataManager()).getSnapshotChainManager(); + this.metrics = ozoneManager.getDeletionMetrics(); } /** @@ -222,6 +225,8 @@ public BackgroundTaskResult call() { getOzoneManager().getKeyManager(), pendingKeysDeletion.getKeysToModify(), null, expectedPreviousSnapshotId); deletedKeyCount.addAndGet(delCount); + metrics.incrNumKeysProcessed(keyBlocksList.size()); + metrics.incrNumKeysSentForPurge(delCount); } } catch (IOException e) { LOG.error("Error while running delete keys background task. 
Will " + diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java index c18e1ee7c3f..547cdbe895d 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyRequest.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.hdds.utils.db.BatchOperation; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.om.DeletingServiceMetrics; import org.apache.hadoop.ozone.om.IOmMetadataReader; import org.apache.hadoop.ozone.om.OMPerformanceMetrics; import org.apache.hadoop.ozone.om.OmMetadataReader; @@ -119,6 +120,7 @@ public class TestOMKeyRequest { protected ScmBlockLocationProtocol scmBlockLocationProtocol; protected StorageContainerLocationProtocol scmContainerLocationProtocol; protected OMPerformanceMetrics metrics; + protected DeletingServiceMetrics delMetrics; protected static final long CONTAINER_ID = 1000L; protected static final long LOCAL_ID = 100L; @@ -139,6 +141,7 @@ public void setup() throws Exception { ozoneManager = mock(OzoneManager.class); omMetrics = OMMetrics.create(); metrics = OMPerformanceMetrics.register(); + delMetrics = DeletingServiceMetrics.create(); OzoneConfiguration ozoneConfiguration = getOzoneConfiguration(); ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS, folder.toAbsolutePath().toString()); @@ -150,6 +153,7 @@ public void setup() throws Exception { ozoneManager); when(ozoneManager.getMetrics()).thenReturn(omMetrics); when(ozoneManager.getPerfMetrics()).thenReturn(metrics); + when(ozoneManager.getDeletionMetrics()).thenReturn(delMetrics); when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager); when(ozoneManager.getConfiguration()).thenReturn(ozoneConfiguration); OMLayoutVersionManager lvm = mock(OMLayoutVersionManager.class); From ab29a55eaea71db9f2912d2fc69c2865cad34af8 Mon Sep 17 00:00:00 2001 From: Rishabh Patel <1607531+ptlrs@users.noreply.github.com> Date: Wed, 8 Jan 2025 07:34:07 -0800 Subject: [PATCH 059/168] HDDS-7990. Add acceptance test for HA Proxy with secure Ozone S3 Gateway (#7562) --- .../compose/common/s3-haproxy-secure.yaml | 57 +++++++++++++++++++ .../main/compose/ozone/docker-compose.yaml | 1 + .../compose/ozonesecure/test-haproxy-s3g.sh | 48 ++++++++++++++++ .../src/main/smoketest/s3/objectputget.robot | 6 +- 4 files changed, 109 insertions(+), 3 deletions(-) create mode 100644 hadoop-ozone/dist/src/main/compose/common/s3-haproxy-secure.yaml create mode 100755 hadoop-ozone/dist/src/main/compose/ozonesecure/test-haproxy-s3g.sh diff --git a/hadoop-ozone/dist/src/main/compose/common/s3-haproxy-secure.yaml b/hadoop-ozone/dist/src/main/compose/common/s3-haproxy-secure.yaml new file mode 100644 index 00000000000..9e6fee1ac40 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/common/s3-haproxy-secure.yaml @@ -0,0 +1,57 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +x-s3-worker: + &s3-worker + image: ${OZONE_RUNNER_IMAGE}:${OZONE_RUNNER_VERSION} + volumes: + - ../..:/opt/hadoop + - ../_keytabs:/etc/security/keytabs + - ./krb5.conf:/etc/krb5.conf + env_file: + - docker-config + command: ["ozone","s3g", "-Dozone.om.transport.class=${OZONE_S3_OM_TRANSPORT:-org.apache.hadoop.ozone.om.protocolPB.GrpcOmTransportFactory}"] + +services: + s3g: + image: haproxy:lts-alpine + hostname: s3g + dns_search: . + volumes: + - ../..:/opt/hadoop + - ../common/s3-haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg + ports: + - 9878:9878 + command: ["haproxy", "-f", "/usr/local/etc/haproxy/haproxy.cfg"] + s3g1: + <<: *s3-worker + hostname: s3g1 + dns_search: . + ports: + - 9879:9878 + s3g2: + + <<: *s3-worker + hostname: s3g2 + dns_search: . + ports: + - 9880:9878 + s3g3: + <<: *s3-worker + hostname: s3g3 + dns_search: . + ports: + - 9881:9878 diff --git a/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml index 3aa99da311e..dee24e57002 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml +++ b/hadoop-ozone/dist/src/main/compose/ozone/docker-compose.yaml @@ -77,6 +77,7 @@ services: command: ["ozone","s3g"] recon: <<: *common-config + hostname: recon ports: - 9888:9888 environment: diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure/test-haproxy-s3g.sh b/hadoop-ozone/dist/src/main/compose/ozonesecure/test-haproxy-s3g.sh new file mode 100755 index 00000000000..3318a69cb25 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure/test-haproxy-s3g.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#suite:secure + +COMPOSE_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +export COMPOSE_DIR + +# shellcheck source=/dev/null +source "$COMPOSE_DIR/../testlib.sh" + +export SECURITY_ENABLED=true +export COMPOSE_FILE=docker-compose.yaml:../common/s3-haproxy-secure.yaml + +: ${OZONE_BUCKET_KEY_NAME:=key1} + +start_docker_env + +execute_command_in_container kms hadoop key create ${OZONE_BUCKET_KEY_NAME} + +execute_robot_test scm kinit.robot + +execute_robot_test scm security + +## Exclude virtual-host tests. This is tested separately as it requires additional config. 
+exclude="--exclude virtual-host" +for bucket in encrypted; do + execute_robot_test scm -v BUCKET:${bucket} -N s3-${bucket} ${exclude} s3 + # some tests are independent of the bucket type, only need to be run once + ## Exclude virtual-host.robot + exclude="--exclude virtual-host --exclude no-bucket-type" +done + +execute_robot_test scm spnego diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot index 82a985f1d50..cd5a7c7597c 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot @@ -238,7 +238,7 @@ Create key with custom etag metadata and expect it won't conflict with ETag resp ${file_md5_checksum} Execute md5sum /tmp/small_file | awk '{print $1}' Execute AWSS3CliDebug cp --metadata "ETag=custom-etag-value" /tmp/small_file s3://${BUCKET}/test_file ${result} Execute AWSS3CliDebug cp s3://${BUCKET}/test_file /tmp/test_file_downloaded - ${match} ${ETag} ${etagCustom} Should Match Regexp ${result} HEAD /${BUCKET}/test_file\ .*?Response headers.*?ETag':\ '"(.*?)"'.*?x-amz-meta-etag':\ '(.*?)' flags=DOTALL + ${match} ${ETag} ${etagCustom} Should Match Regexp ${result} HEAD /${BUCKET}/test_file\ .*?Response headers.*?ETag':\ '"(.*?)"'.*?x-amz-meta-etag':\ '(.*?)' flags=DOTALL | IGNORECASE Should Be Equal As Strings ${ETag} ${file_md5_checksum} Should BE Equal As Strings ${etagCustom} custom-etag-value Should Not Be Equal As Strings ${ETag} ${etagCustom} @@ -262,9 +262,9 @@ Create key twice with different content and expect different ETags Execute head -c 1MiB /tmp/file1 Execute head -c 1MiB /tmp/file2 ${file1UploadResult} Execute AWSS3CliDebug cp /tmp/file1 s3://${BUCKET}/test_key_to_check_etag_differences - ${match} ${etag1} Should Match Regexp ${file1UploadResult} PUT /${BUCKET}/test_key_to_check_etag_differences\ .*?Response headers.*?ETag':\ '"(.*?)"' flags=DOTALL + ${match} ${etag1} Should Match Regexp ${file1UploadResult} PUT /${BUCKET}/test_key_to_check_etag_differences\ .*?Response headers.*?ETag':\ '"(.*?)"' flags=DOTALL | IGNORECASE ${file2UploadResult} Execute AWSS3CliDebug cp /tmp/file2 s3://${BUCKET}/test_key_to_check_etag_differences - ${match} ${etag2} Should Match Regexp ${file2UploadResult} PUT /${BUCKET}/test_key_to_check_etag_differences\ .*?Response headers.*?ETag':\ '"(.*?)"' flags=DOTALL + ${match} ${etag2} Should Match Regexp ${file2UploadResult} PUT /${BUCKET}/test_key_to_check_etag_differences\ .*?Response headers.*?ETag':\ '"(.*?)"' flags=DOTALL | IGNORECASE Should Not Be Equal As Strings ${etag1} ${etag2} # clean up Execute AWSS3Cli rm s3://${BUCKET}/test_key_to_check_etag_differences From ae1843f0f565052e1aa3375ce5cbac9f13fcddfb Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Wed, 8 Jan 2025 22:40:43 +0100 Subject: [PATCH 060/168] HDDS-12045. 
S3 secret admin test fails with HAProxy (#7668) --- .../dist/src/main/compose/common/s3-haproxy.cfg | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/hadoop-ozone/dist/src/main/compose/common/s3-haproxy.cfg b/hadoop-ozone/dist/src/main/compose/common/s3-haproxy.cfg index 5af09fa400a..6e8728945f2 100644 --- a/hadoop-ozone/dist/src/main/compose/common/s3-haproxy.cfg +++ b/hadoop-ozone/dist/src/main/compose/common/s3-haproxy.cfg @@ -36,3 +36,13 @@ backend servers server server1 s3g1:9878 maxconn 32 server server2 s3g2:9878 maxconn 32 server server3 s3g3:9878 maxconn 32 + +frontend webadmin + bind *:19878 + default_backend webadmin-servers + +backend webadmin-servers + balance roundrobin + server server1 s3g1:19878 maxconn 32 + server server2 s3g2:19878 maxconn 32 + server server3 s3g3:19878 maxconn 32 From 400e94d543e13373bff1c3abd787fcb0f94b0ab9 Mon Sep 17 00:00:00 2001 From: Chung En Lee Date: Thu, 9 Jan 2025 08:17:59 +0800 Subject: [PATCH 061/168] HDDS-11959. Remove tests for non-Ratis SCM (#7612) --- .../upgrade/TestDatanodeUpgradeToScmHA.java | 604 ------------------ .../hdds/scm/block/TestDeletedBlockLog.java | 2 - ...uration.java => TestSCMConfiguration.java} | 73 +-- .../TestStatefulServiceStateManagerImpl.java | 2 - .../hdds/scm/node/TestSCMNodeManager.java | 1 - ...SCMHAUnfinalizedStateValidationAction.java | 54 +- .../hdds/scm/TestSCMInstallSnapshot.java | 3 +- .../hadoop/hdds/scm/TestSCMSnapshot.java | 1 - .../hdds/scm/TestStorageContainerManager.java | 100 ++- .../TestSCMContainerManagerMetrics.java | 1 + .../scm/storage/TestContainerCommandsEC.java | 1 - .../hadoop/ozone/MiniOzoneClusterImpl.java | 28 +- .../hadoop/ozone/MiniOzoneHAClusterImpl.java | 1 - .../hadoop/ozone/TestSecureOzoneCluster.java | 43 +- .../client/rpc/TestContainerStateMachine.java | 2 + .../rpc/TestDeleteWithInAdequateDN.java | 2 - .../commandhandler/TestBlockDeletion.java | 2 - .../TestDeleteContainerHandler.java | 2 - .../ozone/recon/TestReconScmHASnapshot.java | 65 -- .../recon/TestReconScmNonHASnapshot.java | 64 -- .../hadoop/ozone/recon/TestReconTasks.java | 1 + .../shell/TestDeletedBlocksTxnShell.java | 2 - 22 files changed, 87 insertions(+), 967 deletions(-) delete mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java rename hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/{TestSCMHAConfiguration.java => TestSCMConfiguration.java} (80%) delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmHASnapshot.java delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmNonHASnapshot.java diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java deleted file mode 100644 index d4a27e74cda..00000000000 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToScmHA.java +++ /dev/null @@ -1,604 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.ozone.container.upgrade; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto.State; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.hdds.scm.pipeline.MockPipeline; -import org.apache.hadoop.hdds.scm.pipeline.Pipeline; -import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; -import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.apache.hadoop.ozone.container.common.ScmTestMock; -import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils; -import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher; -import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine; -import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; -import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; -import org.apache.hadoop.ozone.container.replication.ContainerImporter; -import org.apache.hadoop.ozone.container.replication.ContainerReplicationSource; -import org.apache.hadoop.ozone.container.replication.OnDemandContainerReplicationSource; -import org.apache.ozone.test.LambdaTestUtils; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.io.TempDir; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.ValueSource; - -import java.io.File; -import java.io.FileOutputStream; -import java.net.InetSocketAddress; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardCopyOption; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.Future; - -import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION; -import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertTrue; - -/** - * Tests upgrading a single datanode from pre-SCM HA volume format that used - * SCM ID to the post-SCM HA volume format using cluster ID. If SCM HA was - * already being used before the upgrade, there should be no changes. 
- */ -public class TestDatanodeUpgradeToScmHA { - @TempDir - private Path tempFolder; - - private DatanodeStateMachine dsm; - private ContainerDispatcher dispatcher; - private OzoneConfiguration conf; - private static final String CLUSTER_ID = "clusterID"; - private boolean scmHAAlreadyEnabled; - - private RPC.Server scmRpcServer; - private InetSocketAddress address; - private ScmTestMock scmServerImpl; - - private void setScmHAEnabled(boolean enableSCMHA) - throws Exception { - this.scmHAAlreadyEnabled = enableSCMHA; - conf = new OzoneConfiguration(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, scmHAAlreadyEnabled); - setup(); - } - - private void setup() throws Exception { - address = SCMTestUtils.getReuseableAddress(); - conf.setSocketAddr(ScmConfigKeys.OZONE_SCM_NAMES, address); - } - - @AfterEach - public void teardown() throws Exception { - if (scmRpcServer != null) { - scmRpcServer.stop(); - } - - if (dsm != null) { - dsm.close(); - } - } - - @ParameterizedTest(name = "{index}: scmHAAlreadyEnabled={0}") - @ValueSource(booleans = {true, false}) - public void testReadsDuringFinalization(boolean enableSCMHA) - throws Exception { - setScmHAEnabled(enableSCMHA); - // start DN and SCM - startScmServer(); - UpgradeTestHelper.addHddsVolume(conf, tempFolder); - dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, address, - HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); - dispatcher = dsm.getContainer().getDispatcher(); - final Pipeline pipeline = MockPipeline.createPipeline( - Collections.singletonList(dsm.getDatanodeDetails())); - - // Add data to read. - final long containerID = UpgradeTestHelper.addContainer(dispatcher, pipeline); - ContainerProtos.WriteChunkRequestProto writeChunk = - UpgradeTestHelper.putBlock(dispatcher, containerID, pipeline); - UpgradeTestHelper.closeContainer(dispatcher, containerID, pipeline); - - // Create thread to keep reading during finalization. - ExecutorService executor = Executors.newFixedThreadPool(1); - Future readFuture = executor.submit(() -> { - // Layout version check should be thread safe. - while (!dsm.getLayoutVersionManager() - .isAllowed(HDDSLayoutFeature.SCM_HA)) { - UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline); - } - // Make sure we can read after finalizing too. - UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline); - return null; - }); - - dsm.finalizeUpgrade(); - // If there was a failure reading during the upgrade, the exception will - // be thrown here. - readFuture.get(); - } - - @ParameterizedTest(name = "{index}: scmHAAlreadyEnabled={0}") - @ValueSource(booleans = {true, false}) - public void testImportContainer(boolean enableSCMHA) throws Exception { - setScmHAEnabled(enableSCMHA); - // start DN and SCM - startScmServer(); - UpgradeTestHelper.addHddsVolume(conf, tempFolder); - dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, address, - HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); - dispatcher = dsm.getContainer().getDispatcher(); - final Pipeline pipeline = MockPipeline.createPipeline( - Collections.singletonList(dsm.getDatanodeDetails())); - - // Pre-export a container to continuously import and delete. 
- final long exportContainerID = UpgradeTestHelper.addContainer(dispatcher, pipeline); - ContainerProtos.WriteChunkRequestProto exportWriteChunk = - UpgradeTestHelper.putBlock(dispatcher, exportContainerID, pipeline); - UpgradeTestHelper.closeContainer(dispatcher, exportContainerID, pipeline); - File exportedContainerFile = exportContainer(exportContainerID); - UpgradeTestHelper.deleteContainer(dispatcher, exportContainerID, pipeline); - - // Export another container to import while pre-finalized and read - // finalized. - final long exportContainerID2 = UpgradeTestHelper.addContainer(dispatcher, pipeline); - ContainerProtos.WriteChunkRequestProto exportWriteChunk2 = - UpgradeTestHelper.putBlock(dispatcher, exportContainerID2, pipeline); - UpgradeTestHelper.closeContainer(dispatcher, exportContainerID2, pipeline); - File exportedContainerFile2 = exportContainer(exportContainerID2); - UpgradeTestHelper.deleteContainer(dispatcher, exportContainerID2, pipeline); - - // Make sure we can import and read a container pre-finalized. - importContainer(exportContainerID2, exportedContainerFile2); - UpgradeTestHelper.readChunk(dispatcher, exportWriteChunk2, pipeline); - - // Now SCM and enough other DNs finalize to enable SCM HA. This DN is - // restarted with SCM HA config and gets a different SCM ID. - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - changeScmID(); - - dsm = UpgradeTestHelper.restartDatanode(conf, dsm, true, tempFolder, address, - HDDSLayoutFeature.INITIAL_VERSION.layoutVersion(), true); - dispatcher = dsm.getContainer().getDispatcher(); - - // Make sure the existing container can be read. - UpgradeTestHelper.readChunk(dispatcher, exportWriteChunk2, pipeline); - - // Create thread to keep importing containers during the upgrade. - // Since the datanode's MLV is behind SCM's, container creation is not - // allowed. We will keep importing and deleting the same container since - // we cannot create new ones to import here. - ExecutorService executor = Executors.newFixedThreadPool(1); - Future importFuture = executor.submit(() -> { - // Layout version check should be thread safe. - while (!dsm.getLayoutVersionManager() - .isAllowed(HDDSLayoutFeature.SCM_HA)) { - importContainer(exportContainerID, exportedContainerFile); - UpgradeTestHelper.readChunk(dispatcher, exportWriteChunk, pipeline); - UpgradeTestHelper.deleteContainer(dispatcher, exportContainerID, pipeline); - } - // Make sure we can import after finalizing too. - importContainer(exportContainerID, exportedContainerFile); - UpgradeTestHelper.readChunk(dispatcher, exportWriteChunk, pipeline); - return null; - }); - - dsm.finalizeUpgrade(); - // If there was a failure importing during the upgrade, the exception will - // be thrown here. - importFuture.get(); - - // Make sure we can read the container that was imported while - // pre-finalized after finalizing. 
- UpgradeTestHelper.readChunk(dispatcher, exportWriteChunk2, pipeline); - } - - @ParameterizedTest(name = "{index}: scmHAAlreadyEnabled={0}") - @ValueSource(booleans = {true, false}) - public void testFailedVolumeDuringFinalization(boolean enableSCMHA) - throws Exception { - setScmHAEnabled(enableSCMHA); - /// SETUP /// - - startScmServer(); - String originalScmID = scmServerImpl.getScmId(); - File volume = UpgradeTestHelper.addHddsVolume(conf, tempFolder); - dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, address, - HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); - dispatcher = dsm.getContainer().getDispatcher(); - final Pipeline pipeline = MockPipeline.createPipeline( - Collections.singletonList(dsm.getDatanodeDetails())); - - /// PRE-FINALIZED: Write and Read from formatted volume /// - - assertEquals(1, - dsm.getContainer().getVolumeSet().getVolumesList().size()); - assertEquals(0, - dsm.getContainer().getVolumeSet().getFailedVolumesList().size()); - - // Add container with data, make sure it can be read and written. - final long containerID = UpgradeTestHelper.addContainer(dispatcher, pipeline); - ContainerProtos.WriteChunkRequestProto writeChunk = - UpgradeTestHelper.putBlock(dispatcher, containerID, pipeline); - UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline); - - checkPreFinalizedVolumePathID(volume, originalScmID, CLUSTER_ID); - checkContainerPathID(containerID, originalScmID, CLUSTER_ID); - - // FINALIZE: With failed volume /// - - failVolume(volume); - // Since volume is failed, container should be marked unhealthy. - // Finalization should proceed anyways. - UpgradeTestHelper.closeContainer(dispatcher, containerID, pipeline, - ContainerProtos.Result.CONTAINER_FILES_CREATE_ERROR); - State containerState = dsm.getContainer().getContainerSet() - .getContainer(containerID).getContainerState(); - assertEquals(State.UNHEALTHY, containerState); - dsm.finalizeUpgrade(); - LambdaTestUtils.await(2000, 500, - () -> dsm.getLayoutVersionManager() - .isAllowed(HDDSLayoutFeature.SCM_HA)); - - /// FINALIZED: Volume marked failed but gets restored on disk /// - - // Check that volume is marked failed during finalization. - assertEquals(0, - dsm.getContainer().getVolumeSet().getVolumesList().size()); - assertEquals(1, - dsm.getContainer().getVolumeSet().getFailedVolumesList().size()); - - // Since the volume was out during the upgrade, it should maintain its - // original format. - checkPreFinalizedVolumePathID(volume, originalScmID, CLUSTER_ID); - checkContainerPathID(containerID, originalScmID, CLUSTER_ID); - - // Now that we are done finalizing, restore the volume. - restoreVolume(volume); - // After restoring the failed volume, its containers are readable again. - // However, since it is marked as failed no containers can be created or - // imported to it. - // This should log a warning about reading from an unhealthy container - // but otherwise proceed successfully. 
- UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline); - - /// FINALIZED: Restart datanode to upgrade the failed volume /// - - dsm = UpgradeTestHelper.restartDatanode(conf, dsm, true, tempFolder, address, - HDDSLayoutFeature.SCM_HA.layoutVersion(), false); - dispatcher = dsm.getContainer().getDispatcher(); - - assertEquals(1, - dsm.getContainer().getVolumeSet().getVolumesList().size()); - assertEquals(0, - dsm.getContainer().getVolumeSet().getFailedVolumesList().size()); - - checkFinalizedVolumePathID(volume, originalScmID, CLUSTER_ID); - checkContainerPathID(containerID, originalScmID, CLUSTER_ID); - - // Read container from before upgrade. The upgrade required it to be closed. - UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline); - // Write and read container after upgrade. - long newContainerID = UpgradeTestHelper.addContainer(dispatcher, pipeline); - ContainerProtos.WriteChunkRequestProto newWriteChunk = - UpgradeTestHelper.putBlock(dispatcher, newContainerID, pipeline); - UpgradeTestHelper.readChunk(dispatcher, newWriteChunk, pipeline); - // The new container should use cluster ID in its path. - // The volume it is placed on is up to the implementation. - checkContainerPathID(newContainerID, CLUSTER_ID); - } - - @ParameterizedTest(name = "{index}: scmHAAlreadyEnabled={0}") - @ValueSource(booleans = {true, false}) - public void testFormattingNewVolumes(boolean enableSCMHA) throws Exception { - setScmHAEnabled(enableSCMHA); - /// SETUP /// - - startScmServer(); - String originalScmID = scmServerImpl.getScmId(); - File preFinVolume1 = UpgradeTestHelper.addHddsVolume(conf, tempFolder); - dsm = UpgradeTestHelper.startPreFinalizedDatanode(conf, tempFolder, dsm, address, - HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); - dispatcher = dsm.getContainer().getDispatcher(); - final Pipeline pipeline = MockPipeline.createPipeline( - Collections.singletonList(dsm.getDatanodeDetails())); - - /// PRE-FINALIZED: Write and Read from formatted volume /// - - assertEquals(1, - dsm.getContainer().getVolumeSet().getVolumesList().size()); - assertEquals(0, - dsm.getContainer().getVolumeSet().getFailedVolumesList().size()); - - // Add container with data, make sure it can be read and written. - final long containerID = UpgradeTestHelper.addContainer(dispatcher, pipeline); - ContainerProtos.WriteChunkRequestProto writeChunk = - UpgradeTestHelper.putBlock(dispatcher, containerID, pipeline); - UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline); - - checkPreFinalizedVolumePathID(preFinVolume1, originalScmID, CLUSTER_ID); - checkContainerPathID(containerID, originalScmID, CLUSTER_ID); - - /// PRE-FINALIZED: Restart with SCM HA enabled and new SCM ID /// - - // Now SCM and enough other DNs finalize to enable SCM HA. This DN is - // restarted with SCM HA config and gets a different SCM ID. - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - changeScmID(); - // A new volume is added that must be formatted. - File preFinVolume2 = UpgradeTestHelper.addHddsVolume(conf, tempFolder); - - dsm = UpgradeTestHelper.restartDatanode(conf, dsm, true, tempFolder, address, - HDDSLayoutFeature.INITIAL_VERSION.layoutVersion(), true); - dispatcher = dsm.getContainer().getDispatcher(); - - assertEquals(2, - dsm.getContainer().getVolumeSet().getVolumesList().size()); - assertEquals(0, - dsm.getContainer().getVolumeSet().getFailedVolumesList().size()); - - // Because DN mlv would be behind SCM mlv, only reads are allowed. 
- UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline); - - // On restart, there should have been no changes to the paths already used. - checkPreFinalizedVolumePathID(preFinVolume1, originalScmID, CLUSTER_ID); - checkContainerPathID(containerID, originalScmID, CLUSTER_ID); - // No new containers can be created on this volume since SCM MLV is ahead - // of DN MLV at this point. - // cluster ID should always be used for the new volume since SCM HA is now - // enabled. - checkVolumePathID(preFinVolume2, CLUSTER_ID); - - /// FINALIZE /// - - UpgradeTestHelper.closeContainer(dispatcher, containerID, pipeline); - dsm.finalizeUpgrade(); - LambdaTestUtils.await(2000, 500, - () -> dsm.getLayoutVersionManager() - .isAllowed(HDDSLayoutFeature.SCM_HA)); - - /// FINALIZED: Add a new volume and check its formatting /// - - // Add a new volume that should be formatted with cluster ID only, since - // DN has finalized. - File finVolume = UpgradeTestHelper.addHddsVolume(conf, tempFolder); - // Yet another SCM ID is received this time, but it should not matter. - changeScmID(); - - dsm = UpgradeTestHelper.restartDatanode(conf, dsm, true, tempFolder, address, - HDDSLayoutFeature.SCM_HA.layoutVersion(), false); - dispatcher = dsm.getContainer().getDispatcher(); - - assertEquals(3, - dsm.getContainer().getVolumeSet().getVolumesList().size()); - assertEquals(0, - dsm.getContainer().getVolumeSet().getFailedVolumesList().size()); - - checkFinalizedVolumePathID(preFinVolume1, originalScmID, CLUSTER_ID); - checkVolumePathID(preFinVolume2, CLUSTER_ID); - checkContainerPathID(containerID, originalScmID, CLUSTER_ID); - // New volume should have been formatted with cluster ID only, since the - // datanode is finalized. - checkVolumePathID(finVolume, CLUSTER_ID); - - /// FINALIZED: Read old data and write + read new data /// - - // Read container from before upgrade. The upgrade required it to be closed. - UpgradeTestHelper.readChunk(dispatcher, writeChunk, pipeline); - // Write and read container after upgrade. - long newContainerID = UpgradeTestHelper.addContainer(dispatcher, pipeline); - ContainerProtos.WriteChunkRequestProto newWriteChunk = - UpgradeTestHelper.putBlock(dispatcher, newContainerID, pipeline); - UpgradeTestHelper.readChunk(dispatcher, newWriteChunk, pipeline); - // The new container should use cluster ID in its path. - // The volume it is placed on is up to the implementation. - checkContainerPathID(newContainerID, CLUSTER_ID); - } - - /// CHECKS FOR TESTING /// - - public void checkContainerPathID(long containerID, String scmID, - String clusterID) { - if (scmHAAlreadyEnabled) { - checkContainerPathID(containerID, clusterID); - } else { - checkContainerPathID(containerID, scmID); - } - } - - public void checkContainerPathID(long containerID, String expectedID) { - KeyValueContainerData data = - (KeyValueContainerData) dsm.getContainer().getContainerSet() - .getContainer(containerID).getContainerData(); - assertThat(data.getChunksPath()).contains(expectedID); - assertThat(data.getMetadataPath()).contains(expectedID); - } - - public void checkFinalizedVolumePathID(File volume, String scmID, - String clusterID) throws Exception { - - if (scmHAAlreadyEnabled) { - checkVolumePathID(volume, clusterID); - } else { - List subdirs = getHddsSubdirs(volume); - File hddsRoot = getHddsRoot(volume); - - // Volume should have SCM ID and cluster ID directory, where cluster ID - // is a symlink to SCM ID. 
- assertEquals(2, subdirs.size()); - - File scmIDDir = new File(hddsRoot, scmID); - assertThat(subdirs).contains(scmIDDir); - - File clusterIDDir = new File(hddsRoot, CLUSTER_ID); - assertThat(subdirs).contains(clusterIDDir); - assertTrue(Files.isSymbolicLink(clusterIDDir.toPath())); - Path symlinkTarget = Files.readSymbolicLink(clusterIDDir.toPath()); - assertEquals(scmID, symlinkTarget.toString()); - } - } - - public void checkPreFinalizedVolumePathID(File volume, String scmID, - String clusterID) { - - if (scmHAAlreadyEnabled) { - checkVolumePathID(volume, clusterID); - } else { - checkVolumePathID(volume, scmID); - } - - } - - public void checkVolumePathID(File volume, String expectedID) { - List subdirs; - File hddsRoot; - if (dnThinksVolumeFailed(volume)) { - // If the volume is failed, read from the failed location it was - // moved to. - subdirs = getHddsSubdirs(getFailedVolume(volume)); - hddsRoot = getHddsRoot(getFailedVolume(volume)); - } else { - subdirs = getHddsSubdirs(volume); - hddsRoot = getHddsRoot(volume); - } - - // Volume should only have the specified ID directory. - assertEquals(1, subdirs.size()); - File idDir = new File(hddsRoot, expectedID); - assertThat(subdirs).contains(idDir); - } - - public List getHddsSubdirs(File volume) { - File[] subdirsArray = getHddsRoot(volume).listFiles(File::isDirectory); - assertNotNull(subdirsArray); - return Arrays.asList(subdirsArray); - } - - public File getHddsRoot(File volume) { - return new File(HddsVolumeUtil.getHddsRoot(volume.getAbsolutePath())); - } - - /// CLUSTER OPERATIONS /// - - private void startScmServer() throws Exception { - scmServerImpl = new ScmTestMock(CLUSTER_ID); - scmRpcServer = SCMTestUtils.startScmRpcServer(conf, - scmServerImpl, address, 10); - } - - /** - * Updates the SCM ID on the SCM server. Datanode will not be aware of this - * until {@link UpgradeTestHelper#callVersionEndpointTask} is called. - * @return the new scm ID. - */ - private String changeScmID() { - String scmID = UUID.randomUUID().toString(); - scmServerImpl.setScmId(scmID); - return scmID; - } - - /// CONTAINER OPERATIONS /// - - /** - * Exports the specified container to a temporary file and returns the file. - */ - private File exportContainer(long containerId) throws Exception { - final ContainerReplicationSource replicationSource = - new OnDemandContainerReplicationSource( - dsm.getContainer().getController()); - - replicationSource.prepare(containerId); - - File destination = - Files.createFile(tempFolder.resolve("destFile" + containerId)).toFile(); - try (FileOutputStream fos = new FileOutputStream(destination)) { - replicationSource.copyData(containerId, fos, NO_COMPRESSION); - } - return destination; - } - - /** - * Imports the container found in {@code source} to the datanode with the ID - * {@code containerID}. - */ - private void importContainer(long containerID, File source) throws Exception { - ContainerImporter replicator = - new ContainerImporter(dsm.getConf(), - dsm.getContainer().getContainerSet(), - dsm.getContainer().getController(), - dsm.getContainer().getVolumeSet()); - - File tempFile = Files.createFile( - tempFolder.resolve(ContainerUtils.getContainerTarName(containerID))) - .toFile(); - Files.copy(source.toPath(), tempFile.toPath(), - StandardCopyOption.REPLACE_EXISTING); - replicator.importContainer(containerID, tempFile.toPath(), null, - NO_COMPRESSION); - } - - /// VOLUME OPERATIONS /// - - /** - * Renames the specified volume directory so it will appear as failed to - * the datanode. 
- */ - public void failVolume(File volume) { - File failedVolume = getFailedVolume(volume); - assertTrue(volume.renameTo(failedVolume)); - } - - /** - * Convert the specified volume from its failed name back to its original - * name. The File passed should be the original volume path, not the one it - * was renamed to to fail it. - */ - public void restoreVolume(File volume) { - File failedVolume = getFailedVolume(volume); - assertTrue(failedVolume.renameTo(volume)); - } - - /** - * @return The file name that will be used to rename a volume to fail it. - */ - public File getFailedVolume(File volume) { - return new File(volume.getParent(), volume.getName() + "-failed"); - } - - /** - * Checks whether the datanode thinks the volume has failed. - * This could be outdated information if the volume was restored already - * and the datanode has not been restarted since then. - */ - public boolean dnThinksVolumeFailed(File volume) { - return dsm.getContainer().getVolumeSet().getFailedVolumesList().stream() - .anyMatch(v -> - getHddsRoot(v.getStorageDir()).equals(getHddsRoot(volume))); - } -} diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java index 2a012cbe180..4fb323d7451 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerManager; @@ -112,7 +111,6 @@ public class TestDeletedBlockLog { @BeforeEach public void setup() throws Exception { conf = new OzoneConfiguration(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath()); replicationManager = mock(ReplicationManager.class); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMConfiguration.java similarity index 80% rename from hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java rename to hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMConfiguration.java index 75a943ee8da..2d9a18c5a8e 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMHAConfiguration.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMConfiguration.java @@ -18,13 +18,11 @@ package org.apache.hadoop.hdds.scm.ha; import org.apache.hadoop.hdds.HddsConfigKeys; -import org.apache.hadoop.hdds.conf.ConfigurationException; import org.apache.hadoop.hdds.conf.DefaultConfigManager; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.ScmRatisServerConfig; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; -import 
org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.utils.HddsServerUtil; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.ozone.common.Storage; @@ -35,13 +33,10 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.ValueSource; import java.io.File; import java.io.IOException; import java.net.InetSocketAddress; -import java.util.UUID; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_ADDRESS_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY; @@ -63,8 +58,6 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_PORT_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -72,7 +65,7 @@ /** * Test for SCM HA-related configuration. */ -class TestSCMHAConfiguration { +class TestSCMConfiguration { private OzoneConfiguration conf; @TempDir private File tempDir; @@ -85,7 +78,7 @@ void setup() { } @Test - public void testSCMHAConfig() throws Exception { + public void testSCMConfig() throws Exception { String scmServiceId = "scmserviceId"; conf.set(ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY, scmServiceId); @@ -225,7 +218,7 @@ public void testSCMHAConfig() throws Exception { @Test - public void testHAWithSamePortConfig() throws Exception { + public void testSamePortConfig() throws Exception { String scmServiceId = "scmserviceId"; conf.set(ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY, scmServiceId); @@ -301,25 +294,7 @@ public void testHAWithSamePortConfig() throws Exception { } @Test - public void testRatisEnabledDefaultConfigWithoutInitializedSCM() - throws IOException { - SCMStorageConfig scmStorageConfig = mock(SCMStorageConfig.class); - when(scmStorageConfig.getState()).thenReturn(Storage.StorageState.NOT_INITIALIZED); - SCMHANodeDetails.loadSCMHAConfig(conf, scmStorageConfig); - assertEquals(SCMHAUtils.isSCMHAEnabled(conf), - ScmConfigKeys.OZONE_SCM_HA_ENABLE_DEFAULT); - DefaultConfigManager.clearDefaultConfigs(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false); - SCMHANodeDetails.loadSCMHAConfig(conf, scmStorageConfig); - assertFalse(SCMHAUtils.isSCMHAEnabled(conf)); - DefaultConfigManager.clearDefaultConfigs(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); - SCMHANodeDetails.loadSCMHAConfig(conf, scmStorageConfig); - assertTrue(SCMHAUtils.isSCMHAEnabled(conf)); - } - - @Test - public void testRatisEnabledDefaultConfigWithInitializedSCM() + public void testDefaultConfigWithInitializedSCM() throws IOException { SCMStorageConfig scmStorageConfig = mock(SCMStorageConfig.class); when(scmStorageConfig.getState()) @@ -333,44 +308,4 @@ public void testRatisEnabledDefaultConfigWithInitializedSCM() DefaultConfigManager.clearDefaultConfigs(); assertTrue(SCMHAUtils.isSCMHAEnabled(conf)); } - - @Test - public void testRatisEnabledDefaultConflictConfigWithInitializedSCM() { - SCMStorageConfig scmStorageConfig = mock(SCMStorageConfig.class); - when(scmStorageConfig.getState()) - .thenReturn(Storage.StorageState.INITIALIZED); - 
when(scmStorageConfig.isSCMHAEnabled()).thenReturn(true); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false); - assertThrows(ConfigurationException.class, - () -> SCMHANodeDetails.loadSCMHAConfig(conf, scmStorageConfig)); - } - - @ParameterizedTest - @ValueSource(booleans = {true, false}) - void testHAConfig(boolean ratisEnabled) throws IOException { - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, ratisEnabled); - SCMStorageConfig scmStorageConfig = newStorageConfig(ratisEnabled); - StorageContainerManager.scmInit(conf, scmStorageConfig.getClusterID()); - assertEquals(ratisEnabled, DefaultConfigManager.getValue( - ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, !ratisEnabled)); - } - - @Test - void testInvalidHAConfig() throws IOException { - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false); - SCMStorageConfig scmStorageConfig = newStorageConfig(true); - String clusterID = scmStorageConfig.getClusterID(); - assertThrows(ConfigurationException.class, - () -> StorageContainerManager.scmInit(conf, clusterID)); - } - - private SCMStorageConfig newStorageConfig( - boolean ratisEnabled) throws IOException { - final SCMStorageConfig scmStorageConfig = new SCMStorageConfig(conf); - scmStorageConfig.setClusterId(UUID.randomUUID().toString()); - scmStorageConfig.setSCMHAFlag(ratisEnabled); - scmStorageConfig.initialize(); - return scmStorageConfig; - } - } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java index 4e69f46b6e9..33da298423d 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestStatefulServiceStateManagerImpl.java @@ -20,7 +20,6 @@ import com.google.protobuf.ByteString; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.hdds.utils.db.DBStoreBuilder; @@ -48,7 +47,6 @@ public class TestStatefulServiceStateManagerImpl { @BeforeEach void setup(@TempDir File testDir) throws IOException { conf = SCMTestUtils.getConf(testDir); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); dbStore = DBStoreBuilder.createDBStore(conf, SCMDBDefinition.get()); statefulServiceConfig = SCMDBDefinition.STATEFUL_SERVICE_CONFIG.getTable(dbStore); diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java index 568c11c541c..e4e4a57232f 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java @@ -184,7 +184,6 @@ OzoneConfiguration getConf() { TimeUnit.MILLISECONDS); conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false); conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); return conf; } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestSCMHAUnfinalizedStateValidationAction.java 
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestSCMHAUnfinalizedStateValidationAction.java index 8b4bc906e0d..91dfaa1dafb 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestSCMHAUnfinalizedStateValidationAction.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/upgrade/TestSCMHAUnfinalizedStateValidationAction.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hdds.scm.upgrade; -import org.apache.hadoop.hdds.conf.ConfigurationException; import org.apache.hadoop.hdds.conf.DefaultConfigManager; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.HddsTestUtils; @@ -26,19 +25,16 @@ import org.apache.hadoop.hdds.scm.server.StorageContainerManager; import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; import org.apache.hadoop.ozone.OzoneConfigKeys; -import org.apache.hadoop.ozone.upgrade.UpgradeException; import org.apache.hadoop.ozone.upgrade.UpgradeFinalizer; import org.apache.ratis.util.ExitUtils; import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.CsvSource; import java.nio.file.Path; import java.util.UUID; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; /** @@ -62,20 +58,12 @@ public static void setup() { ExitUtils.disableSystemExit(); } - @ParameterizedTest - @CsvSource({ - "true, true", - "true, false", - "false, true", - "false, false", - }) - public void testUpgrade(boolean haEnabledBefore, - boolean haEnabledPreFinalized, @TempDir Path dataPath) throws Exception { + @Test + public void testUpgrade(@TempDir Path dataPath) throws Exception { // Write version file for original version. OzoneConfiguration conf = new OzoneConfiguration(); conf.setInt(ScmConfig.ConfigStrings.HDDS_SCM_INIT_DEFAULT_LAYOUT_VERSION, HDDSLayoutFeature.INITIAL_VERSION.layoutVersion()); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, haEnabledBefore); conf.set(ScmConfigKeys.OZONE_SCM_DB_DIRS, dataPath.toString()); conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, dataPath.toString()); // This init should always succeed, since SCM is not pre-finalized yet. @@ -83,43 +71,17 @@ public void testUpgrade(boolean haEnabledBefore, boolean initResult1 = StorageContainerManager.scmInit(conf, CLUSTER_ID); assertTrue(initResult1); - // Set up new pre-finalized SCM. - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, - haEnabledPreFinalized); - /* Clusters from Ratis SCM -> Non Ratis SCM - Ratis SCM -> Non Ratis SCM not supported - */ - if (haEnabledPreFinalized != haEnabledBefore) { - if (haEnabledBefore) { - assertThrows(ConfigurationException.class, - () -> StorageContainerManager.scmInit(conf, CLUSTER_ID)); - } else { - assertThrows(UpgradeException.class, - () -> StorageContainerManager.scmInit(conf, CLUSTER_ID)); - } - return; - } StorageContainerManager scm = HddsTestUtils.getScm(conf); assertEquals(UpgradeFinalizer.Status.FINALIZATION_REQUIRED, scm.getFinalizationManager().getUpgradeFinalizer().getStatus()); - final boolean shouldFail = !haEnabledBefore && haEnabledPreFinalized; + DefaultConfigManager.clearDefaultConfigs(); - if (shouldFail) { - // Start on its own should fail. 
- assertThrows(UpgradeException.class, scm::start); + boolean initResult2 = StorageContainerManager.scmInit(conf, CLUSTER_ID); + assertTrue(initResult2); + scm.start(); + scm.stop(); - // Init followed by start should both fail. - // Init is not necessary here, but is allowed to be run. - assertThrows(UpgradeException.class, - () -> StorageContainerManager.scmInit(conf, CLUSTER_ID)); - assertThrows(UpgradeException.class, scm::start); - } else { - boolean initResult2 = StorageContainerManager.scmInit(conf, CLUSTER_ID); - assertTrue(initResult2); - scm.start(); - scm.stop(); - } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java index e90c576e8dd..ffdc49fd099 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMInstallSnapshot.java @@ -66,7 +66,6 @@ public class TestSCMInstallSnapshot { @BeforeAll static void setup(@TempDir Path tempDir) throws Exception { conf = new OzoneConfiguration(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, "10s"); conf.setLong(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_THRESHOLD, 1L); conf.set(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_DIR, tempDir.toString()); @@ -105,7 +104,7 @@ private DBCheckpoint downloadSnapshot() throws Exception { pipelineManager.openPipeline(ratisPipeline2.getId()); SCMNodeDetails scmNodeDetails = new SCMNodeDetails.Builder() .setRpcAddress(new InetSocketAddress("0.0.0.0", 0)) - .setGrpcPort(ScmConfigKeys.OZONE_SCM_GRPC_PORT_DEFAULT) + .setGrpcPort(conf.getInt(ScmConfigKeys.OZONE_SCM_GRPC_PORT_KEY, ScmConfigKeys.OZONE_SCM_GRPC_PORT_DEFAULT)) .setSCMNodeId("scm1") .build(); Map peerMap = new HashMap<>(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java index 0375d83baaf..d0ad8222f60 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMSnapshot.java @@ -44,7 +44,6 @@ public class TestSCMSnapshot { @BeforeAll public static void setup() throws Exception { conf = new OzoneConfiguration(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, "10s"); conf.setLong(ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_THRESHOLD, 1L); cluster = MiniOzoneCluster diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java index 14df7670f67..b00c7f8040b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java @@ -188,13 +188,11 @@ public void cleanupDefaults() { public void testRpcPermission() throws Exception { // Test with default configuration OzoneConfiguration defaultConf = new OzoneConfiguration(); - defaultConf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); 
testRpcPermissionWithConf(defaultConf, any -> false, "unknownUser"); // Test with ozone.administrators defined in configuration String admins = "adminUser1, adminUser2"; OzoneConfiguration ozoneConf = new OzoneConfiguration(); - ozoneConf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); ozoneConf.setStrings(OzoneConfigKeys.OZONE_ADMINISTRATORS, admins); // Non-admin user will get permission denied. // Admin user will pass the permission check. @@ -266,7 +264,6 @@ private void verifyPermissionDeniedException(Exception e, String userName) { public void testBlockDeletionTransactions() throws Exception { int numKeys = 5; OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); DatanodeConfiguration datanodeConfiguration = conf.getObject( @@ -358,7 +355,6 @@ public void testBlockDeletionTransactions() throws Exception { @Test public void testOldDNRegistersToReInitialisedSCM() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS); conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 3000, TimeUnit.MILLISECONDS); @@ -405,38 +401,41 @@ public void testOldDNRegistersToReInitialisedSCM() throws Exception { assertThat(scmDnHBDispatcherLog.getOutput()).isEmpty(); assertThat(versionEndPointTaskLog.getOutput()).isEmpty(); // start the new SCM - scm.start(); - // Initially DatanodeStateMachine will be in Running state - assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING, - dsm.getContext().getState()); - // DN heartbeats to new SCM, SCM doesn't recognize the node, sends the - // command to DN to re-register. Wait for SCM to send re-register command - String expectedLog = String.format( - "SCM received heartbeat from an unregistered datanode %s. " - + "Asking datanode to re-register.", - datanode.getDatanodeDetails()); - GenericTestUtils.waitFor( - () -> scmDnHBDispatcherLog.getOutput().contains(expectedLog), 100, - 30000); - ExitUtil.disableSystemExit(); - // As part of processing response for re-register, DN EndpointStateMachine - // goes to GET-VERSION state which checks if there is already existing - // version file on the DN & if the clusterID matches with that of the SCM - // In this case, it won't match and gets InconsistentStorageStateException - // and DN shuts down. - String expectedLog2 = "Received SCM notification to register." - + " Interrupt HEARTBEAT and transit to GETVERSION state."; - GenericTestUtils.waitFor( - () -> heartbeatEndpointTaskLog.getOutput().contains(expectedLog2), - 100, 5000); - GenericTestUtils.waitFor(() -> dsm.getContext().getShutdownOnError(), 100, - 5000); - assertEquals(DatanodeStateMachine.DatanodeStates.SHUTDOWN, - dsm.getContext().getState()); - assertThat(versionEndPointTaskLog.getOutput()).contains( - "org.apache.hadoop.ozone.common" + - ".InconsistentStorageStateException: Mismatched ClusterIDs"); - scm.stop(); + try { + scm.start(); + // Initially DatanodeStateMachine will be in Running state + assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING, + dsm.getContext().getState()); + // DN heartbeats to new SCM, SCM doesn't recognize the node, sends the + // command to DN to re-register. Wait for SCM to send re-register command + String expectedLog = String.format( + "SCM received heartbeat from an unregistered datanode %s. 
" + + "Asking datanode to re-register.", + datanode.getDatanodeDetails()); + GenericTestUtils.waitFor( + () -> scmDnHBDispatcherLog.getOutput().contains(expectedLog), 100, + 30000); + ExitUtil.disableSystemExit(); + // As part of processing response for re-register, DN EndpointStateMachine + // goes to GET-VERSION state which checks if there is already existing + // version file on the DN & if the clusterID matches with that of the SCM + // In this case, it won't match and gets InconsistentStorageStateException + // and DN shuts down. + String expectedLog2 = "Received SCM notification to register." + + " Interrupt HEARTBEAT and transit to GETVERSION state."; + GenericTestUtils.waitFor( + () -> heartbeatEndpointTaskLog.getOutput().contains(expectedLog2), + 100, 5000); + GenericTestUtils.waitFor(() -> dsm.getContext().getShutdownOnError(), 100, + 5000); + assertEquals(DatanodeStateMachine.DatanodeStates.SHUTDOWN, + dsm.getContext().getState()); + assertThat(versionEndPointTaskLog.getOutput()).contains( + "org.apache.hadoop.ozone.common" + + ".InconsistentStorageStateException: Mismatched ClusterIDs"); + } finally { + scm.stop(); + } } } @@ -444,7 +443,6 @@ public void testOldDNRegistersToReInitialisedSCM() throws Exception { public void testBlockDeletingThrottling() throws Exception { int numKeys = 15; OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS); conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, @@ -555,7 +553,6 @@ private Map> createDeleteTXLog( @Test public void testSCMInitialization(@TempDir Path tempDir) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); Path scmPath = tempDir.resolve("scm-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); @@ -577,7 +574,6 @@ public void testSCMInitialization(@TempDir Path tempDir) throws Exception { @Test public void testSCMReinitialization(@TempDir Path tempDir) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); Path scmPath = tempDir.resolve("scm-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); //This will set the cluster id in the version file @@ -639,7 +635,6 @@ public static void validateRatisGroupExists(OzoneConfiguration conf, @Test void testSCMInitializationFailure(@TempDir Path tempDir) { OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); Path scmPath = tempDir.resolve("scm-meta"); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); @@ -652,7 +647,6 @@ public void testScmInfo(@TempDir Path tempDir) throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); Path scmPath = tempDir.resolve("scm-meta"); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); SCMStorageConfig scmStore = new SCMStorageConfig(conf); String clusterId = UUID.randomUUID().toString(); @@ -666,15 +660,19 @@ public void testScmInfo(@TempDir Path tempDir) throws Exception { SCMHANodeDetails.loadSCMHAConfig(conf, scmStore) .getLocalNodeDetails(), conf); StorageContainerManager scm = HddsTestUtils.getScmSimple(conf); - scm.start(); - //Reads the SCM Info from SCM instance - ScmInfo 
scmInfo = scm.getClientProtocolServer().getScmInfo(); - assertEquals(clusterId, scmInfo.getClusterId()); - assertEquals(scmId, scmInfo.getScmId()); - - String expectedVersion = HddsVersionInfo.HDDS_VERSION_INFO.getVersion(); - String actualVersion = scm.getSoftwareVersion(); - assertEquals(expectedVersion, actualVersion); + try { + scm.start(); + //Reads the SCM Info from SCM instance + ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo(); + assertEquals(clusterId, scmInfo.getClusterId()); + assertEquals(scmId, scmInfo.getScmId()); + + String expectedVersion = HddsVersionInfo.HDDS_VERSION_INFO.getVersion(); + String actualVersion = scm.getSoftwareVersion(); + assertEquals(expectedVersion, actualVersion); + } finally { + scm.stop(); + } } /** @@ -684,7 +682,6 @@ public void testScmInfo(@TempDir Path tempDir) throws Exception { public void testScmProcessDatanodeHeartbeat() throws Exception { String rackName = "/rack1"; OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, StaticMapping.class, DNSToSwitchMapping.class); StaticMapping.addNodeToRack(NetUtils.normalizeHostName(HddsUtils.getHostName(conf)), @@ -727,7 +724,6 @@ public void testScmProcessDatanodeHeartbeat() throws Exception { public void testCloseContainerCommandOnRestart() throws Exception { int numKeys = 15; OzoneConfiguration conf = new OzoneConfiguration(); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS); conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java index 14875781b98..84b1f1610a1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java @@ -68,6 +68,7 @@ public void setup() throws Exception { conf.setBoolean(HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false); cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build(); cluster.waitForClusterToBeReady(); + cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000); client = cluster.newClient(); scm = cluster.getStorageContainerManager(); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java index bf40a600e29..ca4e1a896b0 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestContainerCommandsEC.java @@ -171,7 +171,6 @@ public class TestContainerCommandsEC { @BeforeAll public static void init() throws Exception { config = new OzoneConfiguration(); - config.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); config.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 1); config.setTimeDuration(ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); 
config.setBoolean(OzoneConfigKeys.OZONE_ACL_ENABLED, true); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index 30e41764d3f..b3d9f780888 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -40,7 +40,6 @@ import com.amazonaws.regions.Regions; import com.amazonaws.services.s3.AmazonS3; import com.amazonaws.services.s3.AmazonS3ClientBuilder; -import org.apache.commons.lang3.StringUtils; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.client.RatisReplicationConfig; @@ -50,7 +49,6 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.ha.SCMHANodeDetails; -import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; import org.apache.hadoop.hdds.scm.ha.SCMRatisServerImpl; import org.apache.hadoop.hdds.scm.node.NodeStatus; import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException; @@ -191,10 +189,8 @@ protected void setConf(OzoneConfiguration newConf) { public void waitForSCMToBeReady() throws TimeoutException, InterruptedException { - if (SCMHAUtils.isSCMHAEnabled(conf)) { - GenericTestUtils.waitFor(scm::checkLeader, - 1000, waitForClusterToBeReadyTimeout); - } + GenericTestUtils.waitFor(scm::checkLeader, + 1000, waitForClusterToBeReadyTimeout); } public StorageContainerManager getActiveSCM() { @@ -752,18 +748,12 @@ protected void initializeScmStorage(SCMStorageConfig scmStore) scmStore.setClusterId(clusterId); scmStore.setScmId(scmId); scmStore.initialize(); - //TODO: HDDS-6897 - //Disabling Ratis for only of MiniOzoneClusterImpl. 
- //MiniOzoneClusterImpl doesn't work with Ratis enabled SCM - if (StringUtils.isNotEmpty( - conf.get(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY)) - && SCMHAUtils.isSCMHAEnabled(conf)) { - scmStore.setSCMHAFlag(true); - scmStore.persistCurrentState(); - SCMRatisServerImpl.initialize(clusterId, scmId, - SCMHANodeDetails.loadSCMHAConfig(conf, scmStore) - .getLocalNodeDetails(), conf); - } + scmStore.setSCMHAFlag(true); + scmStore.persistCurrentState(); + SCMRatisServerImpl.initialize(clusterId, scmId, + SCMHANodeDetails.loadSCMHAConfig(conf, scmStore) + .getLocalNodeDetails(), conf); + } void initializeOmStorage(OMStorage omStorage) throws IOException { @@ -876,6 +866,8 @@ protected void configureSCM() { localhostWithFreePort()); conf.set(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, "3s"); + conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PORT_KEY, getFreePort()); + conf.setInt(ScmConfigKeys.OZONE_SCM_GRPC_PORT_KEY, getFreePort()); } private void configureOM() { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java index 9df70f1b7c2..15269330282 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneHAClusterImpl.java @@ -562,7 +562,6 @@ protected SCMHAService createSCMService() OzoneConfiguration scmConfig = new OzoneConfiguration(conf); scmConfig.set(OZONE_METADATA_DIRS, metaDirPath); scmConfig.set(ScmConfigKeys.OZONE_SCM_NODE_ID_KEY, nodeId); - scmConfig.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); configureSCM(); if (i == 1) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java index 637e8bd9e4f..d71a4854c9e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java @@ -49,7 +49,6 @@ import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto; import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.ScmConfig; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.ScmInfo; import org.apache.hadoop.hdds.scm.HddsTestUtils; import org.apache.hadoop.hdds.scm.client.ScmTopologyClient; @@ -65,7 +64,6 @@ import org.apache.hadoop.hdds.security.exception.SCMSecurityException; import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.symmetric.ManagedSecretKey; -import org.apache.hadoop.hdds.security.symmetric.SecretKeyManager; import org.apache.hadoop.hdds.security.x509.certificate.authority.CAType; import org.apache.hadoop.hdds.security.x509.certificate.authority.DefaultApprover; import org.apache.hadoop.hdds.security.x509.certificate.authority.profile.DefaultProfile; @@ -117,7 +115,6 @@ import org.apache.commons.lang3.RandomStringUtils; import org.apache.commons.lang3.StringUtils; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION; -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_GRPC_TLS_ENABLED; import 
static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_CA_ROTATION_ACK_TIMEOUT; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_X509_CA_ROTATION_CHECK_INTERNAL; @@ -169,7 +166,6 @@ import static org.apache.ozone.test.GenericTestUtils.PortAllocator.getFreePort; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; @@ -232,7 +228,6 @@ void init() { conf.setInt(OZONE_SCM_GRPC_PORT_KEY, getFreePort()); conf.set(OZONE_OM_ADDRESS_KEY, InetAddress.getLocalHost().getCanonicalHostName() + ":" + getFreePort()); - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, false); DefaultMetricsSystem.setMiniClusterMode(true); ExitUtils.disableSystemExit(); @@ -353,10 +348,17 @@ void testSecureScmStartupSuccess() throws Exception { initSCM(); scm = HddsTestUtils.getScmSimple(conf); //Reads the SCM Info from SCM instance - ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo(); - assertEquals(clusterId, scmInfo.getClusterId()); - assertEquals(scmId, scmInfo.getScmId()); - assertEquals(2, scm.getScmCertificateClient().getTrustChain().size()); + try { + scm.start(); + ScmInfo scmInfo = scm.getClientProtocolServer().getScmInfo(); + assertEquals(clusterId, scmInfo.getClusterId()); + assertEquals(scmId, scmInfo.getScmId()); + assertEquals(2, scm.getScmCertificateClient().getTrustChain().size()); + } finally { + if (scm != null) { + scm.stop(); + } + } } @Test @@ -444,28 +446,6 @@ void testAdminAccessControlException() throws Exception { } } - @Test - void testSecretManagerInitializedNonHASCM() throws Exception { - conf.setBoolean(HDDS_BLOCK_TOKEN_ENABLED, true); - initSCM(); - scm = HddsTestUtils.getScmSimple(conf); - //Reads the SCM Info from SCM instance - try { - scm.start(); - - SecretKeyManager secretKeyManager = scm.getSecretKeyManager(); - boolean inSafeMode = scm.getScmSafeModeManager().getInSafeMode(); - assertFalse(SCMHAUtils.isSCMHAEnabled(conf)); - assertTrue(inSafeMode); - assertNotNull(secretKeyManager); - assertTrue(secretKeyManager.isInitialized()); - } finally { - if (scm != null) { - scm.stop(); - } - } - } - private void initSCM() throws IOException { Path scmPath = new File(tempDir, "scm-meta").toPath(); Files.createDirectories(scmPath); @@ -474,6 +454,7 @@ private void initSCM() throws IOException { SCMStorageConfig scmStore = new SCMStorageConfig(conf); scmStore.setClusterId(clusterId); scmStore.setScmId(scmId); + scmStore.setSCMHAFlag(true); HASecurityUtils.initializeSecurity(scmStore, conf, InetAddress.getLocalHost().getHostName(), true); scmStore.setPrimaryScmNodeId(scmId); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java index dc00b0acc55..7c1c6874c1b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachine.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hdds.client.ReplicationType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; 
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClientTestImpl; @@ -114,6 +115,7 @@ public void setup() throws Exception { .build(); cluster.setWaitForClusterToBeReadyTimeout(300000); cluster.waitForClusterToBeReady(); + cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000); cluster.getOzoneManager().startSecretManager(); //the easiest way to create an open container is creating a key client = OzoneClientFactory.getRpcClient(conf); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java index 2b199306b76..bc7bb36a242 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java @@ -66,7 +66,6 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; @@ -106,7 +105,6 @@ public static void init() throws Exception { conf = new OzoneConfiguration(); - conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, true); conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 100, TimeUnit.MILLISECONDS); conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java index e38312e02e6..df5f3ec0d27 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestBlockDeletion.java @@ -94,7 +94,6 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_HEARTBEAT_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_EXPIRED_CONTAINER_REPLICA_OP_SCRUB_INTERVAL; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; @@ -133,7 +132,6 @@ public void init() throws Exception { GenericTestUtils.setLogLevel(SCMBlockDeletingService.LOG, Level.DEBUG); GenericTestUtils.setLogLevel(ReplicationManager.LOG, Level.DEBUG); - conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, true); 
conf.set("ozone.replication.allowed-configs", "^(RATIS/THREE)|(EC/2-1-256k)$"); conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java index 705ef1e0d86..0006feb858a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java @@ -75,7 +75,6 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -98,7 +97,6 @@ public class TestDeleteContainerHandler { @BeforeAll public static void setup() throws Exception { conf = new OzoneConfiguration(); - conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, true); conf.set(OZONE_SCM_CONTAINER_SIZE, "1GB"); conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, 0, StorageUnit.MB); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmHASnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmHASnapshot.java deleted file mode 100644 index 6006ce67580..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmHASnapshot.java +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.recon; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ozone.MiniOzoneCluster; - -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Timeout; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY; - -/** - * Test Recon SCM HA Snapshot Download implementation. - */ -@Timeout(300) -public class TestReconScmHASnapshot { - private OzoneConfiguration conf; - private MiniOzoneCluster ozoneCluster = null; - - @BeforeEach - public void setup() throws Exception { - conf = new OzoneConfiguration(); - conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, true); - conf.setBoolean( - ReconServerConfigKeys.OZONE_RECON_SCM_SNAPSHOT_ENABLED, true); - conf.setInt(ReconServerConfigKeys.OZONE_RECON_SCM_CONTAINER_THRESHOLD, 0); - conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 5); - ozoneCluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(4) - .includeRecon(true) - .build(); - ozoneCluster.waitForClusterToBeReady(); - } - - @Test - public void testScmHASnapshot() throws Exception { - TestReconScmSnapshot.testSnapshot(ozoneCluster); - } - - @AfterEach - public void shutdown() throws Exception { - if (ozoneCluster != null) { - ozoneCluster.shutdown(); - } - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmNonHASnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmNonHASnapshot.java deleted file mode 100644 index ae342e63e8c..00000000000 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconScmNonHASnapshot.java +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - *
- * http://www.apache.org/licenses/LICENSE-2.0 - *
- * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.ozone.recon; - -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.Timeout; - -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY; - -/** - * Test Recon SCM HA Snapshot Download implementation. - */ -@Timeout(300) -public class TestReconScmNonHASnapshot { - private OzoneConfiguration conf; - private MiniOzoneCluster ozoneCluster = null; - - @BeforeEach - public void setup() throws Exception { - conf = new OzoneConfiguration(); - conf.setBoolean(OZONE_SCM_HA_ENABLE_KEY, false); - conf.setBoolean( - ReconServerConfigKeys.OZONE_RECON_SCM_SNAPSHOT_ENABLED, true); - conf.setInt(ReconServerConfigKeys.OZONE_RECON_SCM_CONTAINER_THRESHOLD, 0); - conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 5); - ozoneCluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(4) - .includeRecon(true) - .build(); - ozoneCluster.waitForClusterToBeReady(); - } - - @Test - public void testScmNonHASnapshot() throws Exception { - TestReconScmSnapshot.testSnapshot(ozoneCluster); - } - - @AfterEach - public void shutdown() throws Exception { - if (ozoneCluster != null) { - ozoneCluster.shutdown(); - } - } -} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java index 4476cbc3e38..e4b81da0203 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasks.java @@ -79,6 +79,7 @@ public void init() throws Exception { cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1) .includeRecon(true).build(); cluster.waitForClusterToBeReady(); + cluster.waitForPipelineTobeReady(ONE, 30000); GenericTestUtils.setLogLevel(SCMDatanodeHeartbeatDispatcher.LOG, Level.DEBUG); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java index 730a2479a51..fd27652791b 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestDeletedBlocksTxnShell.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.block.DeletedBlockLog; import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; import org.apache.hadoop.hdds.scm.container.ContainerID; @@ -93,7 +92,6 @@ public void init() throws Exception { conf = new OzoneConfiguration(); 
scmServiceId = "scm-service-test1"; - conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20); cluster = MiniOzoneCluster.newHABuilder(conf) From e21e724d75071b6e5e8ab84d3a99e8227385a6cd Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Thu, 9 Jan 2025 07:54:04 +0100 Subject: [PATCH 062/168] HDDS-12003. Reduce code duplication related to tracing init (#7658) --- .../org/apache/hadoop/ozone/admin/OzoneAdmin.java | 13 ++----------- .../org/apache/hadoop/ozone/debug/OzoneDebug.java | 5 ++--- .../org/apache/hadoop/ozone/shell/OzoneShell.java | 15 --------------- .../java/org/apache/hadoop/ozone/shell/Shell.java | 6 +++++- .../org/apache/hadoop/ozone/shell/s3/S3Shell.java | 14 -------------- .../hadoop/ozone/shell/tenant/TenantShell.java | 15 +-------------- 6 files changed, 10 insertions(+), 58 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/OzoneAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/OzoneAdmin.java index bc75f6aee90..5b5b8fe8947 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/OzoneAdmin.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/OzoneAdmin.java @@ -19,9 +19,8 @@ import org.apache.hadoop.hdds.cli.AdminSubcommand; import org.apache.hadoop.hdds.cli.ExtensibleParentCommand; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.tracing.TracingUtil; +import org.apache.hadoop.ozone.shell.Shell; import picocli.CommandLine; @@ -33,20 +32,12 @@ description = "Developer tools for Ozone Admin operations", versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) -public class OzoneAdmin extends GenericCli implements ExtensibleParentCommand { +public class OzoneAdmin extends Shell implements ExtensibleParentCommand { public static void main(String[] argv) { new OzoneAdmin().run(argv); } - @Override - public int execute(String[] argv) { - TracingUtil.initTracing("shell", getOzoneConf()); - String spanName = "ozone admin " + String.join(" ", argv); - return TracingUtil.executeInNewSpan(spanName, - () -> super.execute(argv)); - } - @Override public Class subcommandType() { return AdminSubcommand.class; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/OzoneDebug.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/OzoneDebug.java index 164d07f96b4..d626db81f3a 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/OzoneDebug.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/OzoneDebug.java @@ -20,9 +20,8 @@ import org.apache.hadoop.hdds.cli.DebugSubcommand; import org.apache.hadoop.hdds.cli.ExtensibleParentCommand; -import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; - +import org.apache.hadoop.ozone.shell.Shell; import picocli.CommandLine; /** @@ -32,7 +31,7 @@ description = "Developer tools for Ozone Debug operations", versionProvider = HddsVersionProvider.class, mixinStandardHelpOptions = true) -public class OzoneDebug extends GenericCli implements ExtensibleParentCommand { +public class OzoneDebug extends Shell implements ExtensibleParentCommand { public static void main(String[] argv) { new OzoneDebug().run(argv); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneShell.java 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneShell.java index c324618bfe4..782ecd4beb5 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneShell.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneShell.java @@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.shell; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.ozone.shell.bucket.BucketCommands; import org.apache.hadoop.ozone.shell.keys.KeyCommands; import org.apache.hadoop.ozone.shell.prefix.PrefixCommands; @@ -47,21 +46,7 @@ mixinStandardHelpOptions = true) public class OzoneShell extends Shell { - /** - * Main for the ozShell Command handling. - * - * @param argv - System Args Strings[] - */ public static void main(String[] argv) throws Exception { new OzoneShell().run(argv); } - - @Override - public int execute(String[] argv) { - TracingUtil.initTracing("shell", getOzoneConf()); - String spanName = "ozone sh " + String.join(" ", argv); - return TracingUtil.executeInNewSpan(spanName, - () -> super.execute(argv)); - } - } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Shell.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Shell.java index 3291ce87b08..8bca492f042 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Shell.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Shell.java @@ -19,7 +19,9 @@ package org.apache.hadoop.ozone.shell; import org.apache.hadoop.hdds.cli.GenericCli; +import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.ozone.om.exceptions.OMException; + import picocli.CommandLine; import picocli.shell.jline3.PicocliCommands.PicocliCommandsFactory; @@ -85,7 +87,9 @@ public void run(String[] argv) { spec.name(""); // use short name (e.g. "token get" instead of "ozone sh token get") new REPL(this, getCmd(), (PicocliCommandsFactory) getCmd().getFactory()); } else { - super.run(argv); + TracingUtil.initTracing("shell", getOzoneConf()); + String spanName = spec.name() + " " + String.join(" ", argv); + TracingUtil.executeInNewSpan(spanName, () -> super.run(argv)); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/s3/S3Shell.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/s3/S3Shell.java index 53324ba03d6..c0b6c6f2f80 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/s3/S3Shell.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/s3/S3Shell.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.ozone.shell.s3; -import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.ozone.shell.Shell; import picocli.CommandLine.Command; @@ -34,19 +33,6 @@ }) public class S3Shell extends Shell { - @Override - public int execute(String[] argv) { - TracingUtil.initTracing("s3shell", getOzoneConf()); - String spanName = "ozone s3 " + String.join(" ", argv); - return TracingUtil.executeInNewSpan(spanName, - () -> super.execute(argv)); - } - - /** - * Main for the S3Shell Command handling. 
- * - * @param argv - System Args Strings[] - */ public static void main(String[] argv) { new S3Shell().run(argv); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantShell.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantShell.java index c7baf789f19..857f24748df 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantShell.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantShell.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.ozone.shell.tenant; -import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.ozone.shell.Shell; + import picocli.CommandLine.Command; /** @@ -35,19 +35,6 @@ }) public class TenantShell extends Shell { - @Override - public int execute(String[] argv) { - TracingUtil.initTracing("tenant-shell", getOzoneConf()); - String spanName = "ozone tenant " + String.join(" ", argv); - return TracingUtil.executeInNewSpan(spanName, - () -> super.execute(argv)); - } - - /** - * Main for the TenantShell Command handling. - * - * @param argv - System Args Strings[] - */ public static void main(String[] argv) { new TenantShell().run(argv); } From f1f0ec323d3e5cb3b895c082aad3a02dd67303f6 Mon Sep 17 00:00:00 2001 From: Tsz-Wo Nicholas Sze Date: Thu, 9 Jan 2025 00:29:36 -0800 Subject: [PATCH 063/168] HDDS-12007. BlockDataStreamOutput should only send one PutBlock during close. (#7645) --- .../scm/storage/BlockDataStreamOutput.java | 4 ++ .../server/ratis/ContainerStateMachine.java | 40 +----------- .../impl/KeyValueStreamDataChannel.java | 65 ++++--------------- .../impl/TestKeyValueStreamDataChannel.java | 65 ++++++++++++++++++- 4 files changed, 81 insertions(+), 93 deletions(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java index 8c2883a4374..342fcaba9af 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java @@ -410,6 +410,10 @@ public void executePutBlock(boolean close, waitFuturesComplete(); final BlockData blockData = containerBlockData.build(); if (close) { + // HDDS-12007 changed datanodes to ignore the following PutBlock request. + // However, clients still have to send it for maintaining compatibility. + // Otherwise, new clients won't send a PutBlock. + // Then, old datanodes will fail since they expect a PutBlock. 
final ContainerCommandRequestProto putBlockRequest = ContainerProtocolCalls.getPutBlockRequest( xceiverClient.getPipeline(), blockData, true, tokenString); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index 23be4138b60..a0325311621 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -524,21 +524,6 @@ private ContainerCommandResponseProto dispatchCommand( return response; } - private CompletableFuture link( - ContainerCommandRequestProto requestProto, LogEntryProto entry) { - return CompletableFuture.supplyAsync(() -> { - final DispatcherContext context = DispatcherContext - .newBuilder(DispatcherContext.Op.STREAM_LINK) - .setTerm(entry.getTerm()) - .setLogIndex(entry.getIndex()) - .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA) - .setContainer2BCSIDMap(container2BCSIDMap) - .build(); - - return dispatchCommand(requestProto, context); - }, executor); - } - private CompletableFuture writeStateMachineData( ContainerCommandRequestProto requestProto, long entryIndex, long term, long startTime) { @@ -689,29 +674,8 @@ public CompletableFuture link(DataStream stream, LogEntryProto entry) { final KeyValueStreamDataChannel kvStreamDataChannel = (KeyValueStreamDataChannel) dataChannel; - - final ContainerCommandRequestProto request = - kvStreamDataChannel.getPutBlockRequest(); - - return link(request, entry).whenComplete((response, e) -> { - if (e != null) { - LOG.warn("Failed to link logEntry {} for request {}", - TermIndex.valueOf(entry), request, e); - } - if (response != null) { - final ContainerProtos.Result result = response.getResult(); - if (LOG.isDebugEnabled()) { - LOG.debug("{} to link logEntry {} for request {}, response: {}", - result, TermIndex.valueOf(entry), request, response); - } - if (result == ContainerProtos.Result.SUCCESS) { - kvStreamDataChannel.setLinked(); - return; - } - } - // failed to link, cleanup - kvStreamDataChannel.cleanUp(); - }); + kvStreamDataChannel.setLinked(); + return CompletableFuture.completedFuture(null); } private ExecutorService getChunkExecutor(WriteChunkRequestProto req) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java index 7500860229d..52838aff2e2 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java @@ -20,14 +20,11 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; -import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; -import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage; import org.apache.hadoop.hdds.ratis.RatisHelper; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import 
org.apache.hadoop.hdds.scm.storage.BlockDataStreamOutput; import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; import org.apache.hadoop.ozone.container.common.impl.ContainerData; -import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.ratis.thirdparty.io.netty.buffer.ByteBuf; import org.apache.ratis.util.ReferenceCountedObject; import org.slf4j.Logger; @@ -36,9 +33,7 @@ import java.io.File; import java.io.IOException; import java.nio.ByteBuffer; -import java.util.Objects; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.atomic.AtomicReference; /** * This class is used to get the DataChannel for streaming. @@ -53,8 +48,6 @@ interface WriteMethod { private final Buffers buffers = new Buffers( BlockDataStreamOutput.PUT_BLOCK_REQUEST_LENGTH_MAX); - private final AtomicReference putBlockRequest - = new AtomicReference<>(); private final AtomicBoolean closed = new AtomicBoolean(); KeyValueStreamDataChannel(File file, ContainerData containerData, @@ -90,7 +83,7 @@ static int writeBuffers(ReferenceCountedObject src, return src.get().remaining(); } - private static void writeFully(ByteBuffer b, WriteMethod writeMethod) + static void writeFully(ByteBuffer b, WriteMethod writeMethod) throws IOException { while (b.remaining() > 0) { final int written = writeMethod.applyAsInt(b); @@ -100,11 +93,6 @@ private static void writeFully(ByteBuffer b, WriteMethod writeMethod) } } - public ContainerCommandRequestProto getPutBlockRequest() { - return Objects.requireNonNull(putBlockRequest.get(), - () -> "putBlockRequest == null, " + this); - } - void assertOpen() throws IOException { if (closed.get()) { throw new IOException("Already closed: " + this); @@ -115,7 +103,7 @@ void assertOpen() throws IOException { public void close() throws IOException { if (closed.compareAndSet(false, true)) { try { - putBlockRequest.set(closeBuffers(buffers, super::writeFileChannel)); + writeBuffers(); } finally { super.close(); } @@ -130,22 +118,23 @@ protected void cleanupInternal() throws IOException { } } - static ContainerCommandRequestProto closeBuffers( - Buffers buffers, WriteMethod writeMethod) throws IOException { + /** + * Write the data in {@link #buffers} to the channel. + * Note that the PutBlock proto at the end is ignored; see HDDS-12007. + */ + private void writeBuffers() throws IOException { final ReferenceCountedObject ref = buffers.pollAll(); final ByteBuf buf = ref.retain(); - final ContainerCommandRequestProto putBlockRequest; try { - putBlockRequest = readPutBlockRequest(buf); + setEndIndex(buf); // write the remaining data - writeFully(buf.nioBuffer(), writeMethod); + writeFully(buf.nioBuffer(), super::writeFileChannel); } finally { ref.release(); } - return putBlockRequest; } - private static int readProtoLength(ByteBuf b, int lengthIndex) { + static int readProtoLength(ByteBuf b, int lengthIndex) { final int readerIndex = b.readerIndex(); LOG.debug("{}, lengthIndex = {}, readerIndex = {}", b, lengthIndex, readerIndex); @@ -158,8 +147,8 @@ private static int readProtoLength(ByteBuf b, int lengthIndex) { return b.nioBuffer().getInt(); } - static ContainerCommandRequestProto readPutBlockRequest(ByteBuf b) - throws IOException { + /** Set end index to the proto index in order to ignore the proto. 
*/ + static void setEndIndex(ByteBuf b) { // readerIndex protoIndex lengthIndex readerIndex+readableBytes // V V V V // format: |--- data ---|--- proto ---|--- proto length (4 bytes) ---| @@ -168,37 +157,7 @@ static ContainerCommandRequestProto readPutBlockRequest(ByteBuf b) final int protoLength = readProtoLength(b.duplicate(), lengthIndex); final int protoIndex = lengthIndex - protoLength; - final ContainerCommandRequestProto proto; - try { - proto = readPutBlockRequest(b.slice(protoIndex, protoLength).nioBuffer()); - } catch (Throwable t) { - RatisHelper.debug(b, "catch", LOG); - throw new IOException("Failed to readPutBlockRequest from " + b - + ": readerIndex=" + readerIndex - + ", protoIndex=" + protoIndex - + ", protoLength=" + protoLength - + ", lengthIndex=" + lengthIndex, t); - } - // set index for reading data b.writerIndex(protoIndex); - - return proto; - } - - private static ContainerCommandRequestProto readPutBlockRequest(ByteBuffer b) - throws IOException { - RatisHelper.debug(b, "readPutBlockRequest", LOG); - final ByteString byteString = ByteString.copyFrom(b); - - final ContainerCommandRequestProto request = - ContainerCommandRequestMessage.toProto(byteString, null); - - if (!request.hasPutBlock()) { - throw new StorageContainerException( - "Malformed PutBlock request. trace ID: " + request.getTraceID(), - ContainerProtos.Result.MALFORMED_REQUEST); - } - return request; } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestKeyValueStreamDataChannel.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestKeyValueStreamDataChannel.java index e6067e5c560..99793a0201f 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestKeyValueStreamDataChannel.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestKeyValueStreamDataChannel.java @@ -22,10 +22,14 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutBlockRequestProto; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type; import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage; +import org.apache.hadoop.hdds.ratis.RatisHelper; +import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.ClientVersion; import org.apache.hadoop.ozone.container.keyvalue.impl.KeyValueStreamDataChannel.WriteMethod; +import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.ratis.client.api.DataStreamOutput; import org.apache.ratis.io.FilePositionCount; import org.apache.ratis.io.StandardWriteOption; @@ -58,9 +62,8 @@ import static org.apache.hadoop.hdds.scm.storage.BlockDataStreamOutput.PUT_BLOCK_REQUEST_LENGTH_MAX; import static org.apache.hadoop.hdds.scm.storage.BlockDataStreamOutput.executePutBlockClose; import static org.apache.hadoop.hdds.scm.storage.BlockDataStreamOutput.getProtoLength; -import static org.apache.hadoop.ozone.container.keyvalue.impl.KeyValueStreamDataChannel.closeBuffers; -import static org.apache.hadoop.ozone.container.keyvalue.impl.KeyValueStreamDataChannel.readPutBlockRequest; import static 
org.apache.hadoop.ozone.container.keyvalue.impl.KeyValueStreamDataChannel.writeBuffers; +import static org.apache.hadoop.ozone.container.keyvalue.impl.KeyValueStreamDataChannel.writeFully; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -106,6 +109,49 @@ public void testSerialization() throws Exception { assertEquals(PUT_BLOCK_PROTO, proto); } + static ContainerCommandRequestProto readPutBlockRequest(ByteBuf b) throws IOException { + // readerIndex protoIndex lengthIndex readerIndex+readableBytes + // V V V V + // format: |--- data ---|--- proto ---|--- proto length (4 bytes) ---| + final int readerIndex = b.readerIndex(); + final int lengthIndex = readerIndex + b.readableBytes() - 4; + final int protoLength = KeyValueStreamDataChannel.readProtoLength(b.duplicate(), lengthIndex); + final int protoIndex = lengthIndex - protoLength; + + final ContainerCommandRequestProto proto; + try { + proto = readPutBlockRequest(b.slice(protoIndex, protoLength).nioBuffer()); + } catch (Throwable t) { + RatisHelper.debug(b, "catch", LOG); + throw new IOException("Failed to readPutBlockRequest from " + b + + ": readerIndex=" + readerIndex + + ", protoIndex=" + protoIndex + + ", protoLength=" + protoLength + + ", lengthIndex=" + lengthIndex, t); + } + + // set index for reading data + b.writerIndex(protoIndex); + + return proto; + } + + private static ContainerCommandRequestProto readPutBlockRequest(ByteBuffer b) + throws IOException { + RatisHelper.debug(b, "readPutBlockRequest", LOG); + final ByteString byteString = ByteString.copyFrom(b); + + final ContainerCommandRequestProto request = + ContainerCommandRequestMessage.toProto(byteString, null); + + if (!request.hasPutBlock()) { + throw new StorageContainerException( + "Malformed PutBlock request. trace ID: " + request.getTraceID(), + Result.MALFORMED_REQUEST); + } + return request; + } + @Test public void testBuffers() throws Exception { final ExecutorService executor = Executors.newFixedThreadPool(32); @@ -230,6 +276,21 @@ public CompletableFuture closeAsync() { new Reply(true, 0, putBlockRequest)); } + static ContainerCommandRequestProto closeBuffers( + Buffers buffers, WriteMethod writeMethod) throws IOException { + final ReferenceCountedObject ref = buffers.pollAll(); + final ByteBuf buf = ref.retain(); + final ContainerCommandRequestProto putBlockRequest; + try { + putBlockRequest = readPutBlockRequest(buf); + // write the remaining data + writeFully(buf.nioBuffer(), writeMethod); + } finally { + ref.release(); + } + return putBlockRequest; + } + @Override public CompletableFuture writeAsync( FilePositionCount filePositionCount, WriteOption... writeOptions) { From 80dc87a652f6000cac853c5db01f4a1f20c6f4a6 Mon Sep 17 00:00:00 2001 From: Sarveksha Yeshavantha Raju <79865743+sarvekshayr@users.noreply.github.com> Date: Thu, 9 Jan 2025 15:24:53 +0530 Subject: [PATCH 064/168] HDDS-12039. 
Move quota subcommands under ozone repair om (#7662) --- .../ozone/shell/TestOzoneRepairShell.java | 6 +++--- .../apache/hadoop/ozone/repair/om/OMRepair.java | 4 +++- .../ozone/repair/om/SnapshotChainRepair.java | 3 +++ .../ozone/repair/om/TransactionInfoRepair.java | 3 +++ .../repair/{ => om}/quota/QuotaRepair.java | 17 ++++++----------- .../repair/{ => om}/quota/QuotaStatus.java | 6 ++---- .../repair/{ => om}/quota/QuotaTrigger.java | 2 +- .../repair/{ => om}/quota/package-info.java | 4 ++-- 8 files changed, 23 insertions(+), 22 deletions(-) rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/{ => om}/quota/QuotaRepair.java (89%) rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/{ => om}/quota/QuotaStatus.java (92%) rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/{ => om}/quota/QuotaTrigger.java (98%) rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/{ => om}/quota/package-info.java (91%) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java index 6ddde5ffe82..dffc4bd552c 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java @@ -128,14 +128,14 @@ private String[] parseScanOutput(String output) { public void testQuotaRepair() throws Exception { CommandLine cmd = new OzoneRepair().getCmd(); - int exitCode = cmd.execute("quota", "status", "--service-host", conf.get(OZONE_OM_ADDRESS_KEY)); + int exitCode = cmd.execute("om", "quota", "status", "--service-host", conf.get(OZONE_OM_ADDRESS_KEY)); assertEquals(0, exitCode, err); - exitCode = cmd.execute("quota", "start", "--service-host", conf.get(OZONE_OM_ADDRESS_KEY)); + exitCode = cmd.execute("om", "quota", "start", "--service-host", conf.get(OZONE_OM_ADDRESS_KEY)); assertEquals(0, exitCode, err); GenericTestUtils.waitFor(() -> { out.reset(); // verify quota trigger is completed having non-zero lastRunFinishedTime - cmd.execute("quota", "status", "--service-host", conf.get(OZONE_OM_ADDRESS_KEY)); + cmd.execute("om", "quota", "status", "--service-host", conf.get(OZONE_OM_ADDRESS_KEY)); try { return out.get().contains("\"lastRunFinishedTime\":\"\""); } catch (Exception ex) { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java index 9e20f6b9d1f..c8e9f6e9e4b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.repair.om; import org.apache.hadoop.hdds.cli.RepairSubcommand; +import org.apache.hadoop.ozone.repair.om.quota.QuotaRepair; import org.kohsuke.MetaInfServices; import picocli.CommandLine; @@ -29,7 +30,8 @@ subcommands = { FSORepairTool.class, SnapshotRepair.class, - TransactionInfoRepair.class + TransactionInfoRepair.class, + QuotaRepair.class }, description = "Operational tool to repair OM.") @MetaInfServices(RepairSubcommand.class) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotChainRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotChainRepair.java index 
a5d1244f00e..37cf0c5ddbb 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotChainRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotChainRepair.java @@ -83,6 +83,9 @@ public class SnapshotChainRepair extends RepairTool { @Override public void execute() throws Exception { + if (checkIfServiceIsRunning("OM")) { + return; + } List cfHandleList = new ArrayList<>(); List cfDescList = RocksDBUtils.getColumnFamilyDescriptors(dbPath); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/TransactionInfoRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/TransactionInfoRepair.java index e737f0a9138..59ea67138ba 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/TransactionInfoRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/TransactionInfoRepair.java @@ -68,6 +68,9 @@ public class TransactionInfoRepair extends RepairTool { @Override public void execute() throws Exception { + if (checkIfServiceIsRunning("OM")) { + return; + } List cfHandleList = new ArrayList<>(); List cfDescList = RocksDBUtils.getColumnFamilyDescriptors( dbPath); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/quota/QuotaRepair.java similarity index 89% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaRepair.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/quota/QuotaRepair.java index 988b42ceb91..e5cd7b73afa 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/quota/QuotaRepair.java @@ -16,11 +16,12 @@ * limitations under the License. 
*/ -package org.apache.hadoop.ozone.repair.quota; +package org.apache.hadoop.ozone.repair.om.quota; import java.io.IOException; import java.util.Collection; -import org.apache.hadoop.hdds.cli.RepairSubcommand; + +import org.apache.hadoop.hdds.cli.AbstractSubcommand; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; @@ -30,10 +31,8 @@ import org.apache.hadoop.ozone.om.protocolPB.OmTransport; import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB; import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB; -import org.apache.hadoop.ozone.repair.OzoneRepair; import org.apache.hadoop.security.UserGroupInformation; import org.apache.ratis.protocol.ClientId; -import org.kohsuke.MetaInfServices; import picocli.CommandLine; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; @@ -48,18 +47,14 @@ QuotaTrigger.class, }, description = "Operational tool to repair quota in OM DB.") -@MetaInfServices(RepairSubcommand.class) -public class QuotaRepair implements RepairSubcommand { - - @CommandLine.ParentCommand - private OzoneRepair parent; +public class QuotaRepair extends AbstractSubcommand { public OzoneManagerProtocolClientSideTranslatorPB createOmClient( String omServiceID, String omHost, boolean forceHA ) throws Exception { - OzoneConfiguration conf = parent.getOzoneConf(); + OzoneConfiguration conf = getOzoneConf(); if (omHost != null && !omHost.isEmpty()) { omServiceID = null; conf.set(OZONE_OM_ADDRESS_KEY, omHost); @@ -93,7 +88,7 @@ private String getTheOnlyConfiguredOmServiceIdOrThrow() { } private Collection getConfiguredServiceIds() { - OzoneConfiguration conf = parent.getOzoneConf(); + OzoneConfiguration conf = getOzoneConf(); Collection omServiceIds = conf.getTrimmedStringCollection(OZONE_OM_SERVICE_IDS_KEY); return omServiceIds; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaStatus.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/quota/QuotaStatus.java similarity index 92% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaStatus.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/quota/QuotaStatus.java index cd9ef42da8e..879dc06f189 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaStatus.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/quota/QuotaStatus.java @@ -19,7 +19,7 @@ * permissions and * limitations under the License. 
*/ -package org.apache.hadoop.ozone.repair.quota; +package org.apache.hadoop.ozone.repair.om.quota; import java.util.concurrent.Callable; import org.apache.hadoop.hdds.cli.HddsVersionProvider; @@ -35,9 +35,7 @@ mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class ) -public class QuotaStatus implements Callable { - @CommandLine.Spec - private static CommandLine.Model.CommandSpec spec; +public class QuotaStatus implements Callable { @CommandLine.Option( names = {"--service-id", "--om-service-id"}, diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaTrigger.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/quota/QuotaTrigger.java similarity index 98% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaTrigger.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/quota/QuotaTrigger.java index 2930c873563..b490f758eaf 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/QuotaTrigger.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/quota/QuotaTrigger.java @@ -19,7 +19,7 @@ * permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.repair.quota; +package org.apache.hadoop.ozone.repair.om.quota; import java.util.Arrays; import java.util.Collections; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/quota/package-info.java similarity index 91% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/package-info.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/quota/package-info.java index 40c0abcb916..b17a9864723 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/quota/package-info.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/quota/package-info.java @@ -17,6 +17,6 @@ */ /** - * Ozone Quota Repair tools. + * Ozone OM Quota Repair tools. */ -package org.apache.hadoop.ozone.repair.quota; +package org.apache.hadoop.ozone.repair.om.quota; From a4f48211b35522a30e42f6df918409495f0652e2 Mon Sep 17 00:00:00 2001 From: Sadanand Shenoy Date: Thu, 9 Jan 2025 16:42:35 +0530 Subject: [PATCH 065/168] HDDS-11969. getFilechecksum() API fails if checksum type is NONE. (#7656) --- .../hadoop/hdds/scm/OzoneClientConfig.java | 12 +-- .../fs/ozone/AbstractOzoneFileSystemTest.java | 77 ++++++++++++------- .../fs/ozone/BasicOzoneClientAdapterImpl.java | 8 +- .../BasicRootedOzoneClientAdapterImpl.java | 8 +- 4 files changed, 67 insertions(+), 38 deletions(-) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java index a4b53a80a1e..e31a2942cb9 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java @@ -485,14 +485,16 @@ public ChecksumCombineMode getChecksumCombineMode() { try { return ChecksumCombineMode.valueOf(checksumCombineMode); } catch (IllegalArgumentException iae) { - LOG.warn("Bad checksum combine mode: {}. 
Using default {}", - checksumCombineMode, - ChecksumCombineMode.COMPOSITE_CRC.name()); - return ChecksumCombineMode.valueOf( - ChecksumCombineMode.COMPOSITE_CRC.name()); + LOG.warn("Bad checksum combine mode: {}.", + checksumCombineMode); + return null; } } + public void setChecksumCombineMode(String checksumCombineMode) { + this.checksumCombineMode = checksumCombineMode; + } + public void setEcReconstructStripeReadPoolLimit(int poolLimit) { this.ecReconstructStripeReadPoolLimit = poolLimit; } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java index caba8b35199..ee004af1fc4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/AbstractOzoneFileSystemTest.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; @@ -1581,40 +1582,42 @@ public void testCreateKeyShouldUseRefreshedBucketReplicationConfig() Configuration conf = new OzoneConfiguration(cluster.getConf()); conf.set(FS_DEFAULT_NAME_KEY, rootPath); // Set the number of keys to be processed during batch operate. - OzoneFileSystem o3FS = (OzoneFileSystem) FileSystem.get(conf); + try (FileSystem fileSystem = FileSystem.get(conf)) { + OzoneFileSystem o3FS = (OzoneFileSystem) fileSystem; - //Let's reset the clock to control the time. - ((BasicOzoneClientAdapterImpl) (o3FS.getAdapter())).setClock(testClock); + //Let's reset the clock to control the time. + ((BasicOzoneClientAdapterImpl) (o3FS.getAdapter())).setClock(testClock); - createKeyAndAssertKeyType(bucket, o3FS, new Path(rootPath, "key"), - ReplicationType.RATIS); + createKeyAndAssertKeyType(bucket, o3FS, new Path(rootPath, "key"), + ReplicationType.RATIS); - bucket.setReplicationConfig(new ECReplicationConfig("rs-3-2-1024k")); + bucket.setReplicationConfig(new ECReplicationConfig("rs-3-2-1024k")); - //After changing the bucket policy, it should create ec key, but o3fs will - // refresh after some time. So, it will be sill old type. - createKeyAndAssertKeyType(bucket, o3FS, new Path(rootPath, "key1"), - ReplicationType.RATIS); + //After changing the bucket policy, it should create ec key, but o3fs will + // refresh after some time. So, it will be sill old type. + createKeyAndAssertKeyType(bucket, o3FS, new Path(rootPath, "key1"), + ReplicationType.RATIS); - testClock.fastForward(300 * 1000 + 1); + testClock.fastForward(300 * 1000 + 1); - //After client bucket refresh time, it should create new type what is - // available on bucket at that moment. - createKeyAndAssertKeyType(bucket, o3FS, new Path(rootPath, "key2"), - ReplicationType.EC); + //After client bucket refresh time, it should create new type what is + // available on bucket at that moment. + createKeyAndAssertKeyType(bucket, o3FS, new Path(rootPath, "key2"), + ReplicationType.EC); - // Rechecking the same steps with changing to Ratis again to check the - // behavior is consistent. 
- bucket.setReplicationConfig( - RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE)); + // Rechecking the same steps with changing to Ratis again to check the + // behavior is consistent. + bucket.setReplicationConfig(RatisReplicationConfig.getInstance( + HddsProtos.ReplicationFactor.THREE)); - createKeyAndAssertKeyType(bucket, o3FS, new Path(rootPath, "key3"), - ReplicationType.EC); + createKeyAndAssertKeyType(bucket, o3FS, new Path(rootPath, "key3"), + ReplicationType.EC); - testClock.fastForward(300 * 1000 + 1); + testClock.fastForward(300 * 1000 + 1); - createKeyAndAssertKeyType(bucket, o3FS, new Path(rootPath, "key4"), - ReplicationType.RATIS); + createKeyAndAssertKeyType(bucket, o3FS, new Path(rootPath, "key4"), + ReplicationType.RATIS); + } } private void createKeyAndAssertKeyType(OzoneBucket bucket, @@ -1668,9 +1671,11 @@ public void testDeleteRootWithTrash() throws IOException { OzoneConfiguration conf2 = new OzoneConfiguration(cluster.getConf()); conf2.setClass("fs.trash.classname", TrashPolicyDefault.class, TrashPolicy.class); - Trash trashPolicyDefault = new Trash(conf2); - assertThrows(IOException.class, - () -> trashPolicyDefault.moveToTrash(root)); + try (FileSystem fs = FileSystem.get(conf2)) { + Trash trashPolicyDefault = new Trash(fs, conf2); + assertThrows(IOException.class, + () -> trashPolicyDefault.moveToTrash(root)); + } } /** @@ -2275,6 +2280,24 @@ void testFileSystemWithObjectStoreLayout() throws IOException { } } + @Test + public void testGetFileChecksumWithInvalidCombineMode() throws IOException { + final String root = "/root"; + Path rootPath = new Path(fs.getUri().toString() + root); + fs.mkdirs(rootPath); + Path file = new Path(fs.getUri().toString() + root + + "/dummy"); + ContractTestUtils.touch(fs, file); + OzoneClientConfig clientConfig = cluster.getConf().getObject(OzoneClientConfig.class); + clientConfig.setChecksumCombineMode("NONE"); + OzoneConfiguration conf = cluster.getConf(); + conf.setFromObject(clientConfig); + conf.setBoolean("fs.o3fs.impl.disable.cache", true); + try (FileSystem fileSystem = FileSystem.get(conf)) { + assertNull(fileSystem.getFileChecksum(file)); + } + } + private String getCurrentUser() { try { return UserGroupInformation.getCurrentUser().getShortUserName(); diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java index 689e340ff5d..d824abc28fc 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java @@ -647,11 +647,13 @@ public FileChecksum getFileChecksum(String keyName, long length) throws IOException { OzoneClientConfig.ChecksumCombineMode combineMode = config.getObject(OzoneClientConfig.class).getChecksumCombineMode(); - + if (combineMode == null) { + return null; + } return OzoneClientUtils.getFileChecksumWithCombineMode( volume, bucket, keyName, - length, combineMode, ozoneClient.getObjectStore().getClientProxy()); - + length, combineMode, + ozoneClient.getObjectStore().getClientProxy()); } @Override diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java index 9896ab722de..c5cb003a568 100644 --- 
a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java @@ -1298,14 +1298,16 @@ public FileChecksum getFileChecksum(String keyName, long length) throws IOException { OzoneClientConfig.ChecksumCombineMode combineMode = config.getObject(OzoneClientConfig.class).getChecksumCombineMode(); - + if (combineMode == null) { + return null; + } OFSPath ofsPath = new OFSPath(keyName, config); - OzoneVolume volume = objectStore.getVolume(ofsPath.getVolumeName()); OzoneBucket bucket = getBucket(ofsPath, false); return OzoneClientUtils.getFileChecksumWithCombineMode( volume, bucket, ofsPath.getKeyName(), - length, combineMode, ozoneClient.getObjectStore().getClientProxy()); + length, combineMode, + ozoneClient.getObjectStore().getClientProxy()); } From 990b5bf8c9350b1ffe62dd01e6d568f2e39b1a67 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Thu, 9 Jan 2025 18:38:36 +0100 Subject: [PATCH 066/168] HDDS-12038. Bump maven-remote-resources-plugin to 3.3.0 (#7661) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 1f6a07382fd..d018c821d85 100644 --- a/pom.xml +++ b/pom.xml @@ -156,7 +156,7 @@ 3.4.2 3.11.1 1.6.1 - 1.7.0 + 3.3.0 3.3.0 3.6.0 3.21.0 From 93dab91667b57fe9d7cd6be251a20094edcfeaa9 Mon Sep 17 00:00:00 2001 From: Ashish Kumar <117710273+ashishkumar50@users.noreply.github.com> Date: Thu, 9 Jan 2025 23:13:57 +0530 Subject: [PATCH 067/168] HDDS-11816. Ozone stream to support Hsync,Hflush. (#7592) Co-authored-by: ashishk --- .../scm/storage/BlockDataStreamOutput.java | 16 +++++ .../scm/storage/ByteBufferStreamOutput.java | 4 +- .../client/io/BlockDataStreamOutputEntry.java | 12 ++++ .../io/BlockDataStreamOutputEntryPool.java | 29 ++++++++ .../ozone/client/io/KeyDataStreamOutput.java | 20 +++++- .../client/io/OzoneDataStreamOutput.java | 70 +++++++++++++++++-- .../ozone/client/io/OzoneOutputStream.java | 6 ++ .../hadoop/ozone/client/rpc/RpcClient.java | 4 +- .../ozone/client/io/SelectorOutputStream.java | 17 ++++- .../org/apache/hadoop/fs/ozone/TestHSync.java | 29 ++++++++ .../TestOzoneFileSystemWithStreaming.java | 6 +- .../hadoop/fs/ozone/BasicOzoneFileSystem.java | 7 +- .../fs/ozone/BasicRootedOzoneFileSystem.java | 7 +- .../ozone/CapableOzoneFSDataStreamOutput.java | 63 +++++++++++++++++ .../fs/ozone/OzoneFSDataStreamOutput.java | 16 +++++ .../hadoop/fs/ozone/OzoneFileSystem.java | 6 ++ .../fs/ozone/RootedOzoneFileSystem.java | 6 ++ .../hadoop/fs/ozone/OzoneFileSystem.java | 6 ++ .../fs/ozone/RootedOzoneFileSystem.java | 6 ++ .../hadoop/ozone/client/OzoneBucketStub.java | 10 +++ .../client/OzoneDataStreamOutputStub.java | 2 +- 21 files changed, 327 insertions(+), 15 deletions(-) create mode 100644 hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/CapableOzoneFSDataStreamOutput.java diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java index 342fcaba9af..ac2a47ba972 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java @@ -511,6 +511,22 @@ public void flush() throws IOException { } } + @Override + public void hflush() 
throws IOException { + hsync(); + } + + @Override + public void hsync() throws IOException { + try { + if (!isClosed()) { + handleFlush(false); + } + } catch (Exception e) { + + } + } + public void waitFuturesComplete() throws IOException { try { CompletableFuture.allOf(futures.toArray(EMPTY_FUTURE_ARRAY)).get(); diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteBufferStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteBufferStreamOutput.java index b213bb1f4c6..baaff09e6cf 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteBufferStreamOutput.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteBufferStreamOutput.java @@ -18,6 +18,8 @@ package org.apache.hadoop.hdds.scm.storage; +import org.apache.hadoop.fs.Syncable; + import java.io.Closeable; import java.io.IOException; import java.nio.ByteBuffer; @@ -26,7 +28,7 @@ * This interface is similar to {@link java.io.OutputStream} * except that this class support {@link ByteBuffer} instead of byte[]. */ -public interface ByteBufferStreamOutput extends Closeable { +public interface ByteBufferStreamOutput extends Closeable, Syncable { /** * Similar to {@link java.io.OutputStream#write(byte[])}, * except that the parameter of this method is a {@link ByteBuffer}. diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java index 4e5a35a539c..67fc205cbf7 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java @@ -116,6 +116,18 @@ public void flush() throws IOException { } } + @Override + public void hflush() throws IOException { + hsync(); + } + + @Override + public void hsync() throws IOException { + if (this.byteBufferStreamOutput != null) { + this.byteBufferStreamOutput.hsync(); + } + } + @Override public void close() throws IOException { if (this.byteBufferStreamOutput != null) { diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java index 8e80b381041..153d514cfef 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.client.io; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.client.ContainerBlockID; import org.apache.hadoop.hdds.client.ReplicationConfig; import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientFactory; @@ -62,6 +63,7 @@ public class BlockDataStreamOutputEntryPool implements KeyMetadataAware { private final long openID; private final ExcludeList excludeList; private List bufferList; + private ContainerBlockID lastUpdatedBlockId = new ContainerBlockID(-1, -1); @SuppressWarnings({"parameternumber", "squid:S00107"}) public BlockDataStreamOutputEntryPool( @@ -152,6 +154,33 @@ public List getLocationInfoList() { return locationInfoList; } + void hsyncKey(long offset) throws IOException { + if (keyArgs != null) { + // in test, this 
could be null + keyArgs.setDataSize(offset); + keyArgs.setLocationInfoList(getLocationInfoList()); + // When the key is multipart upload part file upload, we should not + // commit the key, as this is not an actual key, this is a just a + // partial key of a large file. + if (keyArgs.getIsMultipartKey()) { + throw new IOException("Hsync is unsupported for multipart keys."); + } else { + if (keyArgs.getLocationInfoList().size() == 0) { + omClient.hsyncKey(keyArgs, openID); + } else { + ContainerBlockID lastBLockId = keyArgs.getLocationInfoList().get(keyArgs.getLocationInfoList().size() - 1) + .getBlockID().getContainerBlockID(); + if (!lastUpdatedBlockId.equals(lastBLockId)) { + omClient.hsyncKey(keyArgs, openID); + lastUpdatedBlockId = lastBLockId; + } + } + } + } else { + LOG.warn("Closing KeyOutputStream, but key args is null"); + } + } + /** * Discards the subsequent pre allocated blocks and removes the streamEntries * from the streamEntries list for the container which is closed. diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java index e5a43819a3c..811435b8489 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java @@ -65,7 +65,7 @@ public class KeyDataStreamOutput extends AbstractDataStreamOutput * Defines stream action while calling handleFlushOrClose. */ enum StreamAction { - FLUSH, CLOSE, FULL + FLUSH, HSYNC, CLOSE, FULL } public static final Logger LOG = @@ -234,6 +234,21 @@ private int writeToDataStreamOutput(BlockDataStreamOutputEntry current, return writeLen; } + @Override + public void hflush() throws IOException { + hsync(); + } + + @Override + public void hsync() throws IOException { + checkNotClosed(); + final long hsyncPos = writeOffset; + handleFlushOrClose(KeyDataStreamOutput.StreamAction.HSYNC); + Preconditions.checkState(offset >= hsyncPos, + "offset = %s < hsyncPos = %s", offset, hsyncPos); + blockDataStreamOutputEntryPool.hsyncKey(hsyncPos); + } + /** * It performs following actions : * a. Updates the committed length at datanode for the current stream in @@ -394,6 +409,9 @@ private void handleStreamAction(BlockDataStreamOutputEntry entry, case FLUSH: entry.flush(); break; + case HSYNC: + entry.hsync(); + break; default: throw new IOException("Invalid Operation"); } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneDataStreamOutput.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneDataStreamOutput.java index c0af1c53010..da61b3e30ef 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneDataStreamOutput.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneDataStreamOutput.java @@ -17,6 +17,7 @@ package org.apache.hadoop.ozone.client.io; import org.apache.hadoop.crypto.CryptoOutputStream; +import org.apache.hadoop.fs.Syncable; import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput; import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo; @@ -24,6 +25,8 @@ import java.io.OutputStream; import java.nio.ByteBuffer; import java.util.Map; +import java.util.Objects; +import java.util.Optional; /** * OzoneDataStreamOutput is used to write data into Ozone. 
@@ -32,14 +35,52 @@ public class OzoneDataStreamOutput extends ByteBufferOutputStream implements KeyMetadataAware { private final ByteBufferStreamOutput byteBufferStreamOutput; + private boolean enableHsync; + private final Syncable syncable; /** - * Constructs OzoneDataStreamOutput with KeyDataStreamOutput. + * Constructs an instance with a {@link Syncable} {@link OutputStream}. * - * @param byteBufferStreamOutput the underlying ByteBufferStreamOutput + * @param outputStream an {@link OutputStream} which is {@link Syncable}. + * @param enableHsync if false, hsync() executes flush() instead. */ - public OzoneDataStreamOutput(ByteBufferStreamOutput byteBufferStreamOutput) { - this.byteBufferStreamOutput = byteBufferStreamOutput; + public OzoneDataStreamOutput(Syncable outputStream, boolean enableHsync) { + this(Optional.of(Objects.requireNonNull(outputStream, + "outputStream == null")) + .filter(s -> s instanceof OzoneDataStreamOutput) + .map(s -> (OzoneDataStreamOutput)s) + .orElseThrow(() -> new IllegalArgumentException( + "The parameter syncable is not an OutputStream")), + outputStream, enableHsync); + } + + /** + * Constructs an instance with a (non-{@link Syncable}) {@link ByteBufferStreamOutput} + * with an optional {@link Syncable} object. + * + * @param byteBufferStreamOutput for writing data. + * @param syncable an optional parameter + * for accessing the {@link Syncable} feature. + */ + public OzoneDataStreamOutput(ByteBufferStreamOutput byteBufferStreamOutput, Syncable syncable) { + this(byteBufferStreamOutput, syncable, false); + } + + /** + * Constructs an instance with a (non-{@link Syncable}) {@link ByteBufferStreamOutput} + * with an optional {@link Syncable} object. + * + * @param byteBufferStreamOutput for writing data. + * @param syncable an optional parameter + * for accessing the {@link Syncable} feature. + * @param enableHsync if false, hsync() executes flush() instead. + */ + public OzoneDataStreamOutput(ByteBufferStreamOutput byteBufferStreamOutput, Syncable syncable, + boolean enableHsync) { + this.byteBufferStreamOutput = Objects.requireNonNull(byteBufferStreamOutput, + "byteBufferStreamOutput == null"); + this.syncable = syncable != null ? syncable : byteBufferStreamOutput; + this.enableHsync = enableHsync; } @Override @@ -93,6 +134,27 @@ public KeyDataStreamOutput getKeyDataStreamOutput() { return null; } + public void hflush() throws IOException { + hsync(); + } + + public void hsync() throws IOException { + // Disable the feature flag restores the prior behavior. 
+ if (!enableHsync) { + byteBufferStreamOutput.flush(); + return; + } + if (syncable != null) { + if (byteBufferStreamOutput != syncable) { + byteBufferStreamOutput.flush(); + } + syncable.hsync(); + } else { + throw new UnsupportedOperationException(byteBufferStreamOutput.getClass() + + " is not " + Syncable.class.getSimpleName()); + } + } + public ByteBufferStreamOutput getByteBufStreamOutput() { return byteBufferStreamOutput; } diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java index bd056185e75..f161d80c834 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneOutputStream.java @@ -105,6 +105,12 @@ public synchronized void close() throws IOException { outputStream.close(); } + @Override + public void hflush() throws IOException { + hsync(); + } + + @Override public void hsync() throws IOException { // Disable the feature flag restores the prior behavior. if (!enableHsync) { diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 93c675d9b90..7ce446d446e 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -1996,7 +1996,7 @@ public OzoneDataStreamOutput createMultipartStreamKey( } else { out = createMultipartOutputStream(openKey, uploadID, partNumber); } - return new OzoneDataStreamOutput(out); + return new OzoneDataStreamOutput(out, out); } @Override @@ -2417,7 +2417,7 @@ private OzoneDataStreamOutput createDataStreamOutput(OpenKeySession openKey) } else { out = createOutputStream(openKey); } - return new OzoneDataStreamOutput(out); + return new OzoneDataStreamOutput(out, out); } private KeyDataStreamOutput.Builder newKeyOutputStreamBuilder() { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java index c7e20fb7e8b..540efe1f88c 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/client/io/SelectorOutputStream.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.client.io; +import org.apache.hadoop.fs.StreamCapabilities; import org.apache.hadoop.fs.Syncable; import org.apache.ratis.util.function.CheckedFunction; @@ -37,7 +38,7 @@ * @param The underlying {@link OutputStream} type. */ public class SelectorOutputStream - extends OutputStream implements Syncable { + extends OutputStream implements Syncable, StreamCapabilities { /** A buffer backed by a byte[]. 
*/ static final class ByteArrayBuffer { private byte[] array; @@ -182,6 +183,20 @@ public void hsync() throws IOException { } } + @Override + public boolean hasCapability(String capability) { + try { + final OUT out = select(); + if (out instanceof StreamCapabilities) { + return ((StreamCapabilities) out).hasCapability(capability); + } else { + return false; + } + } catch (Exception e) { + return false; + } + } + @Override public void close() throws IOException { select().close(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java index efd5f2765e5..b59199f347f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestHSync.java @@ -126,6 +126,8 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_AUTO_THRESHOLD; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_DATASTREAM_ENABLED; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_ROOT; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_DELIMITER; @@ -175,6 +177,7 @@ public class TestHSync { private static final int WAL_HEADER_LEN = 83; private static OpenKeyCleanupService openKeyCleanupService; + private static final int AUTO_THRESHOLD = 0; @BeforeAll public static void init() throws Exception { @@ -1059,6 +1062,32 @@ public void testStreamCapability() throws Exception { testEncryptedStreamCapabilities(false); } + @Test + public void testOzoneStreamCapabilityForHsyncHflush() throws Exception { + final String rootPath = String.format("%s://%s/", + OZONE_OFS_URI_SCHEME, CONF.get(OZONE_OM_ADDRESS_KEY)); + CONF.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + CONF.set(OZONE_FS_DATASTREAM_AUTO_THRESHOLD, AUTO_THRESHOLD + "B"); + CONF.setBoolean(OZONE_FS_DATASTREAM_ENABLED, true); + + final String dir = OZONE_ROOT + bucket.getVolumeName() + + OZONE_URI_DELIMITER + bucket.getName(); + final Path file = new Path(dir, "file"); + + try (FileSystem fs = FileSystem.get(CONF); + FSDataOutputStream os = fs.create(file, true)) { + os.write(100); + // Verify output stream supports hsync() and hflush(). 
+ assertTrue(os.hasCapability(StreamCapabilities.HFLUSH), + "KeyOutputStream should support hflush()!"); + assertTrue(os.hasCapability(StreamCapabilities.HSYNC), + "KeyOutputStream should support hsync()!"); + os.hsync(); + } + + CONF.setBoolean(OZONE_FS_DATASTREAM_ENABLED, false); + } + @Test public void testECStreamCapability() throws Exception { // create EC bucket to be used by OzoneFileSystem diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java index 5f8bf162b4e..08785056758 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithStreaming.java @@ -169,9 +169,9 @@ static void createFile(FileSystem fs, Path path, boolean overwrite, assertNotNull(underlying); LOG.info("underlying after close: {}", underlying.getClass()); if (belowThreshold) { - assertInstanceOf(OzoneFSOutputStream.class, underlying); + assertInstanceOf(CapableOzoneFSOutputStream.class, underlying); } else { - assertEquals(OzoneFSDataStreamOutput.class, underlying.getClass()); + assertEquals(CapableOzoneFSDataStreamOutput.class, underlying.getClass()); } } @@ -184,7 +184,7 @@ static void assertUnderlying(SelectorOutputStream selector, assertNull(underlying); } else { assertNotNull(underlying); - assertEquals(OzoneFSDataStreamOutput.class, + assertEquals(CapableOzoneFSDataStreamOutput.class, underlying.getClass()); } } diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java index ed8d99d67fa..2a3f97038e8 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java @@ -301,7 +301,7 @@ private OutputStream selectOutputStream(String key, short replication, boolean overwrite, boolean recursive, int byteWritten) throws IOException { return isRatisStreamingEnabled && byteWritten > streamingAutoThreshold ? 
- adapter.createStreamFile(key, replication, overwrite, recursive) + createFSDataStreamOutput(adapter.createStreamFile(key, replication, overwrite, recursive)) : createFSOutputStream(adapter.createFile( key, replication, overwrite, recursive)); } @@ -327,6 +327,11 @@ protected OzoneFSOutputStream createFSOutputStream( return outputStream; } + protected OzoneFSDataStreamOutput createFSDataStreamOutput( + OzoneFSDataStreamOutput outputDataStream) { + return outputDataStream; + } + @Override public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException { diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java index 66b0037cf33..2926088e19f 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java @@ -299,7 +299,7 @@ private OutputStream selectOutputStream(String key, short replication, boolean overwrite, boolean recursive, int byteWritten) throws IOException { return isRatisStreamingEnabled && byteWritten > streamingAutoThreshold ? - adapter.createStreamFile(key, replication, overwrite, recursive) + createFSDataStreamOutput(adapter.createStreamFile(key, replication, overwrite, recursive)) : createFSOutputStream(adapter.createFile( key, replication, overwrite, recursive)); } @@ -324,6 +324,11 @@ protected OzoneFSOutputStream createFSOutputStream( return outputStream; } + protected OzoneFSDataStreamOutput createFSDataStreamOutput( + OzoneFSDataStreamOutput outputDataStream) { + return outputDataStream; + } + @Override public FSDataOutputStream append(Path f, int bufferSize, Progressable progress) throws IOException { diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/CapableOzoneFSDataStreamOutput.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/CapableOzoneFSDataStreamOutput.java new file mode 100644 index 00000000000..7f723708e22 --- /dev/null +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/CapableOzoneFSDataStreamOutput.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.fs.ozone; + +import org.apache.hadoop.fs.StreamCapabilities; +import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput; +import org.apache.hadoop.ozone.client.io.KeyDataStreamOutput; +import org.apache.hadoop.util.StringUtils; + + +/** + * This class is used to workaround Hadoop2 compatibility issues. + * + * Hadoop 2 does not support StreamCapabilities, so we create different modules + * for Hadoop2 and Hadoop3 profiles. + * + * The OzoneFileSystem and RootedOzoneFileSystem in Hadoop3 profile uses + * CapableOzoneFSDataStreamOutput which implements StreamCapabilities interface, + * whereas the ones in Hadoop2 profile does not. + */ +public class CapableOzoneFSDataStreamOutput extends OzoneFSDataStreamOutput + implements StreamCapabilities { + private final boolean isHsyncEnabled; + public CapableOzoneFSDataStreamOutput(OzoneFSDataStreamOutput outputStream, + boolean enabled) { + super(outputStream.getByteBufferStreamOutput()); + this.isHsyncEnabled = enabled; + } + + @Override + public boolean hasCapability(String capability) { + ByteBufferStreamOutput os = getByteBufferStreamOutput(); + return hasWrappedCapability(os, capability); + } + + private boolean hasWrappedCapability(ByteBufferStreamOutput os, String capability) { + if (os instanceof KeyDataStreamOutput) { + switch (StringUtils.toLowerCase(capability)) { + case StreamCapabilities.HFLUSH: + case StreamCapabilities.HSYNC: + return isHsyncEnabled; + default: + return false; + } + } + return false; + } +} diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSDataStreamOutput.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSDataStreamOutput.java index dcb917f2f9b..44b838def84 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSDataStreamOutput.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSDataStreamOutput.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs.ozone; import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput; +import org.apache.hadoop.hdds.tracing.TracingUtil; import org.apache.hadoop.ozone.client.io.ByteBufferOutputStream; import java.io.IOException; @@ -77,4 +78,19 @@ public void flush() throws IOException { public void close() throws IOException { byteBufferStreamOutput.close(); } + + @Override + public void hflush() throws IOException { + hsync(); + } + + @Override + public void hsync() throws IOException { + TracingUtil.executeInNewSpan("OzoneFSDataStreamOutput.hsync", + byteBufferStreamOutput::hsync); + } + + protected ByteBufferStreamOutput getByteBufferStreamOutput() { + return byteBufferStreamOutput; + } } diff --git a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java index 65e3145e7d4..62f4d3f0019 100644 --- a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java @@ -122,6 +122,12 @@ protected OzoneFSOutputStream createFSOutputStream( 
return new CapableOzoneFSOutputStream(outputStream, isHsyncEnabled()); } + @Override + protected OzoneFSDataStreamOutput createFSDataStreamOutput( + OzoneFSDataStreamOutput outputDataStream) { + return new CapableOzoneFSDataStreamOutput(outputDataStream, isHsyncEnabled()); + } + @Override public boolean hasPathCapability(final Path path, final String capability) throws IOException { diff --git a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java index bae01eafdea..7d263b8fa73 100644 --- a/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java @@ -120,6 +120,12 @@ protected OzoneFSOutputStream createFSOutputStream( return new CapableOzoneFSOutputStream(outputStream, isHsyncEnabled()); } + @Override + protected OzoneFSDataStreamOutput createFSDataStreamOutput( + OzoneFSDataStreamOutput outputDataStream) { + return new CapableOzoneFSDataStreamOutput(outputDataStream, isHsyncEnabled()); + } + @Override public boolean hasPathCapability(final Path path, final String capability) throws IOException { diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java index 65e3145e7d4..62f4d3f0019 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java @@ -122,6 +122,12 @@ protected OzoneFSOutputStream createFSOutputStream( return new CapableOzoneFSOutputStream(outputStream, isHsyncEnabled()); } + @Override + protected OzoneFSDataStreamOutput createFSDataStreamOutput( + OzoneFSDataStreamOutput outputDataStream) { + return new CapableOzoneFSDataStreamOutput(outputDataStream, isHsyncEnabled()); + } + @Override public boolean hasPathCapability(final Path path, final String capability) throws IOException { diff --git a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java index bed3505fe9b..815113afe17 100644 --- a/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java +++ b/hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzoneFileSystem.java @@ -122,6 +122,12 @@ protected OzoneFSOutputStream createFSOutputStream( return new CapableOzoneFSOutputStream(outputStream, isHsyncEnabled()); } + @Override + protected OzoneFSDataStreamOutput createFSDataStreamOutput( + OzoneFSDataStreamOutput outputDataStream) { + return new CapableOzoneFSDataStreamOutput(outputDataStream, isHsyncEnabled()); + } + @Override public boolean hasPathCapability(final Path path, final String capability) throws IOException { diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java index 21f2414c0a7..b9b63bf9699 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java @@ -742,6 +742,16 @@ public void close() throws IOException { } + @Override + public void hflush() 
{ + + } + + @Override + public void hsync() throws IOException { + + } + @Override public Map getMetadata() { return metadata; diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java index b472320b7fe..858c4236ed7 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneDataStreamOutputStub.java @@ -42,7 +42,7 @@ public class OzoneDataStreamOutputStub extends OzoneDataStreamOutput { public OzoneDataStreamOutputStub( ByteBufferStreamOutput byteBufferStreamOutput, String partName) { - super(byteBufferStreamOutput); + super(byteBufferStreamOutput, byteBufferStreamOutput); this.partName = partName; } From 9670965428ac289a77abc6ed004a9668346b07d2 Mon Sep 17 00:00:00 2001 From: Alexandr Juncevich Date: Thu, 9 Jan 2025 22:24:21 +0300 Subject: [PATCH 068/168] HDDS-11699. Remove unnecessary information about parts when downloading multipart files. (#7558) --- .../hadoop/ozone/OzoneManagerVersion.java | 3 + .../hadoop/ozone/client/rpc/RpcClient.java | 48 ++++-- .../hadoop/ozone/om/helpers/OmKeyArgs.java | 11 +- .../src/main/smoketest/s3/objectputget.robot | 33 ++++ ...TestOzoneClientMultipartUploadWithFSO.java | 157 +++++++++++++++--- .../hadoop/ozone/om/TestKeyManagerImpl.java | 102 +++++++++++- .../s3/awssdk/v1/AbstractS3SDKV1Tests.java | 60 +++++++ .../hadoop/ozone/om/KeyManagerImpl.java | 16 ++ .../OzoneManagerRequestHandler.java | 1 + 9 files changed, 392 insertions(+), 39 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java index 2d0b2bb56fd..d46cdeaf1fd 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneManagerVersion.java @@ -50,6 +50,9 @@ public enum OzoneManagerVersion implements ComponentVersion { S3_OBJECT_TAGGING_API(9, "OzoneManager version that supports S3 object tagging APIs, such as " + "PutObjectTagging, GetObjectTagging, and DeleteObjectTagging"), + S3_PART_AWARE_GET(10, "OzoneManager version that supports S3 get for a specific multipart " + + "upload part number"), + FUTURE_VERSION(-1, "Used internally in the client when the server side is " + " newer and an unknown server version has arrived to the client."); diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java index 7ce446d446e..3d52e3f6972 100644 --- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java +++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java @@ -1765,16 +1765,21 @@ public OzoneKeyDetails getS3KeyDetails(String bucketName, String keyName) @Override public OzoneKeyDetails getS3KeyDetails(String bucketName, String keyName, int partNumber) throws IOException { - OmKeyInfo keyInfo = getS3KeyInfo(bucketName, keyName, false); - List filteredKeyLocationInfo = keyInfo - .getLatestVersionLocations().getBlocksLatestVersionOnly().stream() - .filter(omKeyLocationInfo -> omKeyLocationInfo.getPartNumber() == - partNumber) - .collect(Collectors.toList()); - 
keyInfo.updateLocationInfoList(filteredKeyLocationInfo, false); - keyInfo.setDataSize(filteredKeyLocationInfo.stream() - .mapToLong(OmKeyLocationInfo::getLength) - .sum()); + OmKeyInfo keyInfo; + if (omVersion.compareTo(OzoneManagerVersion.S3_PART_AWARE_GET) >= 0) { + keyInfo = getS3PartKeyInfo(bucketName, keyName, partNumber); + } else { + keyInfo = getS3KeyInfo(bucketName, keyName, false); + List filteredKeyLocationInfo = keyInfo + .getLatestVersionLocations().getBlocksLatestVersionOnly().stream() + .filter(omKeyLocationInfo -> omKeyLocationInfo.getPartNumber() == + partNumber) + .collect(Collectors.toList()); + keyInfo.updateLocationInfoList(filteredKeyLocationInfo, true, true); + keyInfo.setDataSize(filteredKeyLocationInfo.stream() + .mapToLong(OmKeyLocationInfo::getLength) + .sum()); + } return getOzoneKeyDetails(keyInfo); } @@ -1801,6 +1806,29 @@ private OmKeyInfo getS3KeyInfo( return keyInfoWithS3Context.getKeyInfo(); } + @Nonnull + private OmKeyInfo getS3PartKeyInfo( + String bucketName, String keyName, int partNumber) throws IOException { + verifyBucketName(bucketName); + Preconditions.checkNotNull(keyName); + + OmKeyArgs keyArgs = new OmKeyArgs.Builder() + // Volume name is not important, as we call GetKeyInfo with + // assumeS3Context = true, OM will infer the correct s3 volume. + .setVolumeName(OzoneConfigKeys.OZONE_S3_VOLUME_NAME_DEFAULT) + .setBucketName(bucketName) + .setKeyName(keyName) + .setSortDatanodesInPipeline(topologyAwareReadEnabled) + .setLatestVersionLocation(getLatestVersionLocation) + .setForceUpdateContainerCacheFromSCM(false) + .setMultipartUploadPartNumber(partNumber) + .build(); + KeyInfoWithVolumeContext keyInfoWithS3Context = + ozoneManagerClient.getKeyInfo(keyArgs, true); + keyInfoWithS3Context.getUserPrincipal().ifPresent(this::updateS3Principal); + return keyInfoWithS3Context.getKeyInfo(); + } + private OmKeyInfo getKeyInfo( String volumeName, String bucketName, String keyName, boolean forceUpdateContainerCache) throws IOException { diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java index ba28b45a0e5..106ef6a06ab 100644 --- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java +++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyArgs.java @@ -213,6 +213,7 @@ public OmKeyArgs.Builder toBuilder() { if (expectedDataGeneration != null) { builder.setExpectedDataGeneration(expectedDataGeneration); } + return builder; } @@ -227,7 +228,11 @@ public KeyArgs toProtobuf() { .setLatestVersionLocation(getLatestVersionLocation()) .setHeadOp(isHeadOp()) .setForceUpdateContainerCacheFromSCM( - isForceUpdateContainerCacheFromSCM()); + isForceUpdateContainerCacheFromSCM() + ); + if (multipartUploadPartNumber != 0) { + builder.setMultipartNumber(multipartUploadPartNumber); + } if (expectedDataGeneration != null) { builder.setExpectedDataGeneration(expectedDataGeneration); } @@ -308,8 +313,8 @@ public Builder setMultipartUploadID(String uploadID) { return this; } - public Builder setMultipartUploadPartNumber(int partNumber) { - this.multipartUploadPartNumber = partNumber; + public Builder setMultipartUploadPartNumber(int multipartUploadPartNumber) { + this.multipartUploadPartNumber = multipartUploadPartNumber; return this; } diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot index cd5a7c7597c..e204e177d8d 
100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/objectputget.robot @@ -270,3 +270,36 @@ Create key twice with different content and expect different ETags Execute AWSS3Cli rm s3://${BUCKET}/test_key_to_check_etag_differences Execute rm -rf /tmp/file1 Execute rm -rf /tmp/file2 + +Create&Download big file by multipart upload and get file via part numbers + Execute head -c 10000000 /tmp/big_file + ${result} Execute AWSS3CliDebug cp /tmp/big_file s3://${BUCKET}/ + ${get_part_1_response} Execute AWSS3APICli get-object --bucket ${BUCKET} --key big_file /tmp/big_file_1 --part-number 1 + ${part_1_size} = Execute and checkrc echo '${get_part_1_response}' | jq -r '.ContentLength' 0 + Should contain ${get_part_1_response} \"PartsCount\": 2 + ${get_part_2_response} Execute AWSS3APICli get-object --bucket ${BUCKET} --key big_file /tmp/big_file_2 --part-number 2 + ${part_2_size} = Execute and checkrc echo '${get_part_2_response}' | jq -r '.ContentLength' 0 + Should contain ${get_part_2_response} \"PartsCount\": 2 + + Should Be Equal As Integers 10000000 ${${part_1_size} + ${part_2_size}} + + ${get_part_3_response} Execute AWSS3APICli get-object --bucket ${BUCKET} --key big_file /tmp/big_file_3 --part-number 3 + Should contain ${get_part_3_response} \"ContentLength\": 0 + Should contain ${get_part_3_response} \"PartsCount\": 2 + # clean up + Execute AWSS3Cli rm s3://${BUCKET}/big_file + Execute rm -rf /tmp/big_file + Execute rm -rf /tmp/big_file_1 + Execute rm -rf /tmp/big_file_2 + Execute rm -rf /tmp/big_file_3 + +Create&Download big file by multipart upload and get file not existed part number + Execute head -c 10000000 /tmp/big_file + ${result} Execute AWSS3CliDebug cp /tmp/big_file s3://${BUCKET}/ + ${get_part_99_response} Execute AWSS3APICli get-object --bucket ${BUCKET} --key big_file /tmp/big_file_1 --part-number 99 + Should contain ${get_part_99_response} \"ContentLength\": 0 + Should contain ${get_part_99_response} \"PartsCount\": 2 + # clean up + Execute AWSS3Cli rm s3://${BUCKET}/big_file + Execute rm -rf /tmp/big_file + Execute rm -rf /tmp/big_file_1 \ No newline at end of file diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java index b943930f62f..58183e87705 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java @@ -40,6 +40,8 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientFactory; +import org.apache.hadoop.ozone.client.OzoneKeyDetails; +import org.apache.hadoop.ozone.client.OzoneKeyLocation; import org.apache.hadoop.ozone.client.OzoneMultipartUpload; import org.apache.hadoop.ozone.client.OzoneMultipartUploadList; import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts; @@ -179,12 +181,12 @@ public void preTest() throws Exception { @Test public void testInitiateMultipartUploadWithReplicationInformationSet() throws IOException { - String uploadID = initiateMultipartUpload(bucket, keyName, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, ReplicationType.RATIS, ONE); // Call 
initiate multipart upload for the same key again, this should // generate a new uploadID. - String uploadIDNew = initiateMultipartUpload(bucket, keyName, + String uploadIDNew = initiateMultipartUploadWithAsserts(bucket, keyName, ReplicationType.RATIS, ONE); assertNotEquals(uploadIDNew, uploadID); } @@ -216,7 +218,7 @@ public void testInitiateMultipartUploadWithDefaultReplication() throws @Test public void testUploadPartWithNoOverride() throws IOException { String sampleData = "sample Value"; - String uploadID = initiateMultipartUpload(bucket, keyName, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, ReplicationType.RATIS, ONE); OzoneOutputStream ozoneOutputStream = bucket.createMultipartKey(keyName, @@ -235,7 +237,7 @@ public void testUploadPartWithNoOverride() throws IOException { @Test public void testUploadPartOverrideWithRatis() throws Exception { String sampleData = "sample Value"; - String uploadID = initiateMultipartUpload(bucket, keyName, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, ReplicationType.RATIS, THREE); int partNumber = 1; @@ -348,7 +350,7 @@ private OzoneBucket getOzoneECBucket(String myBucket) @Test public void testMultipartUploadWithPartsLessThanMinSize() throws Exception { // Initiate multipart upload - String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, RATIS, ONE); // Upload Parts @@ -371,7 +373,7 @@ public void testMultipartUploadWithPartsLessThanMinSize() throws Exception { public void testMultipartUploadWithDiscardedUnusedPartSize() throws Exception { // Initiate multipart upload - String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, RATIS, ONE); byte[] data = generateData(10000000, (byte) 97); // Upload Parts @@ -402,7 +404,7 @@ public void testMultipartUploadWithDiscardedUnusedPartSize() @Test public void testMultipartUploadWithPartsMisMatchWithListSizeDifferent() throws Exception { - String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, RATIS, ONE); // We have not uploaded any parts, but passing some list it should throw @@ -417,7 +419,7 @@ public void testMultipartUploadWithPartsMisMatchWithListSizeDifferent() @Test public void testMultipartUploadWithPartsMisMatchWithIncorrectPartName() throws Exception { - String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, RATIS, ONE); uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); @@ -432,7 +434,7 @@ public void testMultipartUploadWithPartsMisMatchWithIncorrectPartName() @Test public void testMultipartUploadWithMissingParts() throws Exception { - String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, RATIS, ONE); uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); @@ -447,7 +449,7 @@ public void testMultipartUploadWithMissingParts() throws Exception { @Test public void testMultipartPartNumberExceedingAllowedRange() throws Exception { - String uploadID = initiateMultipartUpload(bucket, keyName, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, RATIS, ONE); byte[] data = "data".getBytes(UTF_8); @@ -469,7 +471,7 @@ public void testMultipartPartNumberExceedingAllowedRange() throws Exception { public 
void testCommitPartAfterCompleteUpload() throws Exception { String parentDir = "a/b/c/d/"; keyName = parentDir + UUID.randomUUID(); - String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, ONE); + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, RATIS, ONE); assertEquals(volume.getBucket(bucketName).getUsedNamespace(), 4); @@ -530,7 +532,7 @@ public void testAbortUploadFailWithInProgressPartUpload() throws Exception { String parentDir = "a/b/c/d/"; keyName = parentDir + UUID.randomUUID(); - String uploadID = initiateMultipartUpload(bucket, keyName, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, RATIS, ONE); // Do not close output stream. @@ -550,7 +552,7 @@ public void testAbortUploadSuccessWithOutAnyParts() throws Exception { String parentDir = "a/b/c/d/"; keyName = parentDir + UUID.randomUUID(); - String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, RATIS, ONE); bucket.abortMultipartUpload(keyName, uploadID); } @@ -567,7 +569,7 @@ public void testAbortUploadSuccessWithParts() throws Exception { ozoneManager.getMetadataManager().getBucketTable().get(buckKey); BucketLayout bucketLayout = buckInfo.getBucketLayout(); - String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, RATIS, ONE); Pair partNameAndETag = uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8)); @@ -599,7 +601,7 @@ public void testListMultipartUploadParts() throws Exception { keyName = parentDir + "file-ABC"; Map partsMap = new TreeMap<>(); - String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, RATIS, ONE); Pair partNameAndETag1 = uploadPart(bucket, keyName, uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); @@ -688,7 +690,7 @@ private String verifyPartNames(Map partsMap, int index, public void testListMultipartUploadPartsWithContinuation() throws Exception { Map partsMap = new TreeMap<>(); - String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, RATIS, ONE); Pair partNameAndETag1 = uploadPart(bucket, keyName, uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); @@ -752,7 +754,7 @@ public void testListPartsWithInvalidInputs(int partNumberMarker, int maxParts, S @Test public void testListPartsWithPartMarkerGreaterThanPartCount() throws Exception { - String uploadID = initiateMultipartUpload(bucket, keyName, RATIS, + String uploadID = initiateMultipartUploadWithAsserts(bucket, keyName, RATIS, ONE); uploadPart(bucket, keyName, uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte)97)); @@ -795,11 +797,11 @@ public void testListMultipartUpload() throws Exception { keys.add(key3); // Initiate multipart upload - String uploadID1 = initiateMultipartUpload(bucket, key1, RATIS, + String uploadID1 = initiateMultipartUploadWithAsserts(bucket, key1, RATIS, ONE); - String uploadID2 = initiateMultipartUpload(bucket, key2, RATIS, + String uploadID2 = initiateMultipartUploadWithAsserts(bucket, key2, RATIS, ONE); - String uploadID3 = initiateMultipartUpload(bucket, key3, RATIS, + String uploadID3 = initiateMultipartUploadWithAsserts(bucket, key3, RATIS, ONE); // Upload Parts @@ -854,6 +856,105 @@ public void testListMultipartUpload() throws Exception { assertEquals(0, expectedList.size()); } 
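// A minimal sketch of the client call exercised by the tests added below, assuming ozClient,
// bucketName and keyName from this test class refer to a key created via a completed multipart
// upload: part number 0 returns the locations of every part, while a part number that was never
// uploaded yields an empty location list.
OzoneKeyDetails partDetails = ozClient.getProxy().getS3KeyDetails(bucketName, keyName, 2);
List<OzoneKeyLocation> partLocations = partDetails.getOzoneKeyLocations();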
+ @Test + void testGetAllPartsWhenZeroPartNumber() throws Exception { + String parentDir = "a/b/c/d/e/f/"; + keyName = parentDir + "file-ABC"; + OzoneVolume s3volume = store.getVolume("s3v"); + s3volume.createBucket(bucketName); + OzoneBucket s3Bucket = s3volume.getBucket(bucketName); + + Map partsMap = new TreeMap<>(); + String uploadID = initiateMultipartUpload(s3Bucket, keyName, RATIS, + ONE); + Pair partNameAndETag1 = uploadPart(s3Bucket, keyName, + uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte) 97)); + partsMap.put(1, partNameAndETag1.getKey()); + + Pair partNameAndETag2 = uploadPart(s3Bucket, keyName, + uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte) 97)); + partsMap.put(2, partNameAndETag2.getKey()); + + Pair partNameAndETag3 = uploadPart(s3Bucket, keyName, + uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte) 97)); + partsMap.put(3, partNameAndETag3.getKey()); + + s3Bucket.completeMultipartUpload(keyName, uploadID, partsMap); + + OzoneKeyDetails s3KeyDetailsWithAllParts = ozClient.getProxy() + .getS3KeyDetails(s3Bucket.getName(), keyName, 0); + List ozoneKeyLocations = s3KeyDetailsWithAllParts.getOzoneKeyLocations(); + assertEquals(6, ozoneKeyLocations.size()); + } + + @Test + void testGetParticularPart() throws Exception { + String parentDir = "a/b/c/d/e/f/"; + keyName = parentDir + "file-ABC"; + OzoneVolume s3volume = store.getVolume("s3v"); + s3volume.createBucket(bucketName); + OzoneBucket s3Bucket = s3volume.getBucket(bucketName); + + Map partsMap = new TreeMap<>(); + String uploadID = initiateMultipartUpload(s3Bucket, keyName, RATIS, + ONE); + Pair partNameAndETag1 = uploadPart(s3Bucket, keyName, + uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte) 97)); + partsMap.put(1, partNameAndETag1.getKey()); + + Pair partNameAndETag2 = uploadPart(s3Bucket, keyName, + uploadID, 2, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte) 97)); + partsMap.put(2, partNameAndETag2.getKey()); + + Pair partNameAndETag3 = uploadPart(s3Bucket, keyName, + uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte) 97)); + partsMap.put(3, partNameAndETag3.getKey()); + + s3Bucket.completeMultipartUpload(keyName, uploadID, partsMap); + +// OzoneKeyLocations size is 2 because part size is 5MB and ozone.scm.block.size in ozone-site.xml +// for integration-test is 4MB + OzoneKeyDetails s3KeyDetailsOneParts = ozClient.getProxy().getS3KeyDetails(bucketName, keyName, 1); + assertEquals(2, s3KeyDetailsOneParts.getOzoneKeyLocations().size()); + + OzoneKeyDetails s3KeyDetailsTwoParts = ozClient.getProxy().getS3KeyDetails(bucketName, keyName, 2); + assertEquals(2, s3KeyDetailsTwoParts.getOzoneKeyLocations().size()); + + OzoneKeyDetails s3KeyDetailsThreeParts = ozClient.getProxy().getS3KeyDetails(bucketName, keyName, 3); + assertEquals(2, s3KeyDetailsThreeParts.getOzoneKeyLocations().size()); + } + + @Test + void testGetNotExistedPart() throws Exception { + String parentDir = "a/b/c/d/e/f/"; + keyName = parentDir + "file-ABC"; + OzoneVolume s3volume = store.getVolume("s3v"); + s3volume.createBucket(bucketName); + OzoneBucket s3Bucket = s3volume.getBucket(bucketName); + + Map partsMap = new TreeMap<>(); + String uploadID = initiateMultipartUpload(s3Bucket, keyName, RATIS, + ONE); + Pair partNameAndETag1 = uploadPart(s3Bucket, keyName, + uploadID, 1, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte) 97)); + partsMap.put(1, partNameAndETag1.getKey()); + + Pair partNameAndETag2 = uploadPart(s3Bucket, keyName, + uploadID, 2, 
generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte) 97)); + partsMap.put(2, partNameAndETag2.getKey()); + + Pair partNameAndETag3 = uploadPart(s3Bucket, keyName, + uploadID, 3, generateData(OzoneConsts.OM_MULTIPART_MIN_SIZE, (byte) 97)); + partsMap.put(3, partNameAndETag3.getKey()); + + s3Bucket.completeMultipartUpload(keyName, uploadID, partsMap); + + OzoneKeyDetails s3KeyDetailsWithNotExistedParts = ozClient.getProxy() + .getS3KeyDetails(s3Bucket.getName(), keyName, 4); + List ozoneKeyLocations = s3KeyDetailsWithNotExistedParts.getOzoneKeyLocations(); + assertEquals(0, ozoneKeyLocations.size()); + } + private String verifyUploadedPart(String uploadID, String partName, OMMetadataManager metadataMgr) throws IOException { OzoneManager ozoneManager = cluster.getOzoneManager(); @@ -891,11 +992,10 @@ private String verifyUploadedPart(String uploadID, String partName, return multipartKey; } - private String initiateMultipartUpload(OzoneBucket oBucket, String kName, - ReplicationType replicationType, ReplicationFactor replicationFactor) - throws IOException { - OmMultipartInfo multipartInfo = oBucket.initiateMultipartUpload(kName, - replicationType, replicationFactor); + private String initiateMultipartUploadWithAsserts( + OzoneBucket oBucket, String kName, ReplicationType replicationType, ReplicationFactor replicationFactor + ) throws IOException { + OmMultipartInfo multipartInfo = oBucket.initiateMultipartUpload(kName, replicationType, replicationFactor); assertNotNull(multipartInfo); String uploadID = multipartInfo.getUploadID(); @@ -907,6 +1007,13 @@ private String initiateMultipartUpload(OzoneBucket oBucket, String kName, return uploadID; } + private String initiateMultipartUpload( + OzoneBucket oBucket, String kName, ReplicationType replicationType, ReplicationFactor replicationFactor + ) throws IOException { + OmMultipartInfo multipartInfo = oBucket.initiateMultipartUpload(kName, replicationType, replicationFactor); + return multipartInfo.getUploadID(); + } + private Pair uploadPart(OzoneBucket oBucket, String kName, String uploadID, int partNumber, byte[] data) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java index e9c9b946c8e..c3ceb0f6209 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java @@ -1518,7 +1518,107 @@ public void testRefreshPipelineException() throws Exception { assertEquals(errorMessage, omEx.getMessage()); } - /** + @Test + void testGetAllPartsWhenZeroPartNumber() throws IOException { + String keyName = RandomStringUtils.randomAlphabetic(5); + + String volume = VOLUME_NAME; + + initKeyTableForMultipartTest(keyName, volume); + + OmKeyArgs keyArgs = new OmKeyArgs.Builder() + .setVolumeName(volume) + .setBucketName(BUCKET_NAME) + .setKeyName(keyName) + .setMultipartUploadPartNumber(0) + .build(); + OmKeyInfo omKeyInfo = keyManager.getKeyInfo(keyArgs, resolvedBucket(), "test"); + assertEquals(keyName, omKeyInfo.getKeyName()); + assertNotNull(omKeyInfo.getLatestVersionLocations()); + + List locationList = omKeyInfo.getLatestVersionLocations().getLocationList(); + assertNotNull(locationList); + assertEquals(5, locationList.size()); + for (int i = 0; i < 5; i++) { + assertEquals(i, locationList.get(i).getPartNumber()); + } + } + + @Test + void testGetParticularPart() 
throws IOException { + String keyName = RandomStringUtils.randomAlphabetic(5); + + String volume = VOLUME_NAME; + + initKeyTableForMultipartTest(keyName, volume); + + OmKeyArgs keyArgs = new OmKeyArgs.Builder() + .setVolumeName(volume) + .setBucketName(BUCKET_NAME) + .setKeyName(keyName) + .setMultipartUploadPartNumber(3) + .build(); + OmKeyInfo omKeyInfo = keyManager.getKeyInfo(keyArgs, resolvedBucket(), "test"); + assertEquals(keyName, omKeyInfo.getKeyName()); + assertNotNull(omKeyInfo.getLatestVersionLocations()); + + List locationList = omKeyInfo.getLatestVersionLocations().getLocationList(); + assertNotNull(locationList); + assertEquals(1, locationList.size()); + assertEquals(3, locationList.get(0).getPartNumber()); + } + + @Test + void testGetNotExistedPart() throws IOException { + String keyName = RandomStringUtils.randomAlphabetic(5); + + String volume = VOLUME_NAME; + + initKeyTableForMultipartTest(keyName, volume); + + OmKeyArgs keyArgs = new OmKeyArgs.Builder() + .setVolumeName(volume) + .setBucketName(BUCKET_NAME) + .setKeyName(keyName) + .setMultipartUploadPartNumber(99) + .build(); + OmKeyInfo omKeyInfo = keyManager.getKeyInfo(keyArgs, resolvedBucket(), "test"); + assertEquals(keyName, omKeyInfo.getKeyName()); + assertNotNull(omKeyInfo.getLatestVersionLocations()); + + List locationList = omKeyInfo.getLatestVersionLocations().getLocationList(); + assertNotNull(locationList); + assertEquals(0, locationList.size()); + } + + private void initKeyTableForMultipartTest(String keyName, String volume) throws IOException { + List locationInfoGroups = new ArrayList<>(); + List locationInfoList = new ArrayList<>(); + for (int i = 0; i < 5; i++) { + OmKeyLocationInfo locationInfo1 = new OmKeyLocationInfo.Builder() + .setBlockID(new BlockID(i, i)) + .setPartNumber(i) + .build(); + locationInfoList.add(locationInfo1); + } + + OmKeyLocationInfoGroup locationInfoGroup = new OmKeyLocationInfoGroup(0, locationInfoList); + locationInfoGroups.add(locationInfoGroup); + locationInfoGroup.setMultipartKey(true); + + OmKeyInfo omKeyInfo = new OmKeyInfo.Builder() + .setKeyName(keyName) + .setBucketName(BUCKET_NAME) + .setVolumeName(volume) + .setReplicationConfig(RatisReplicationConfig.getInstance(THREE)) + .setOmKeyLocationInfos(locationInfoGroups) + .build(); + + String key = String.format("/%s/%s/%s", volume, BUCKET_NAME, keyName); + metadataManager.getKeyTable(BucketLayout.LEGACY).put(key, omKeyInfo); + } + + /** * Get Random pipeline. 
* @return pipeline */ diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java index ab56af670b3..661afaf0772 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/s3/awssdk/v1/AbstractS3SDKV1Tests.java @@ -27,6 +27,7 @@ import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; import com.amazonaws.services.s3.model.CompleteMultipartUploadResult; import com.amazonaws.services.s3.model.CreateBucketRequest; +import com.amazonaws.services.s3.model.GetObjectRequest; import com.amazonaws.services.s3.model.Grantee; import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; import com.amazonaws.services.s3.model.InitiateMultipartUploadResult; @@ -736,6 +737,65 @@ public void testListParts(@TempDir Path tempDir) throws Exception { } } + + @Test + public void testGetParticularPart(@TempDir Path tempDir) throws Exception { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + + s3Client.createBucket(bucketName); + + File multipartUploadFile = Files.createFile(tempDir.resolve("multipartupload.txt")).toFile(); + + createFile(multipartUploadFile, (int) (15 * MB)); + + multipartUpload(bucketName, keyName, multipartUploadFile, 5 * MB, null, null, null); + + GetObjectRequest getObjectRequestAll = new GetObjectRequest(bucketName, keyName); + getObjectRequestAll.setPartNumber(0); + S3Object s3ObjectAll = s3Client.getObject(getObjectRequestAll); + long allPartContentLength = s3ObjectAll.getObjectMetadata().getContentLength(); + + GetObjectRequest getObjectRequestOne = new GetObjectRequest(bucketName, keyName); + getObjectRequestOne.setPartNumber(1); + S3Object s3ObjectOne = s3Client.getObject(getObjectRequestOne); + long partOneContentLength = s3ObjectOne.getObjectMetadata().getContentLength(); + assertEquals(allPartContentLength / 3, partOneContentLength); + + GetObjectRequest getObjectRequestTwo = new GetObjectRequest(bucketName, keyName); + getObjectRequestTwo.setPartNumber(2); + S3Object s3ObjectTwo = s3Client.getObject(getObjectRequestTwo); + long partTwoContentLength = s3ObjectTwo.getObjectMetadata().getContentLength(); + assertEquals(allPartContentLength / 3, partTwoContentLength); + + GetObjectRequest getObjectRequestThree = new GetObjectRequest(bucketName, keyName); + getObjectRequestThree.setPartNumber(3); + S3Object s3ObjectThree = s3Client.getObject(getObjectRequestThree); + long partThreeContentLength = s3ObjectThree.getObjectMetadata().getContentLength(); + assertEquals(allPartContentLength / 3, partThreeContentLength); + + assertEquals(allPartContentLength, (partOneContentLength + partTwoContentLength + partThreeContentLength)); + } + + @Test + public void testGetNotExistedPart(@TempDir Path tempDir) throws Exception { + final String bucketName = getBucketName(); + final String keyName = getKeyName(); + + s3Client.createBucket(bucketName); + + File multipartUploadFile = Files.createFile(tempDir.resolve("multipartupload.txt")).toFile(); + + createFile(multipartUploadFile, (int) (15 * MB)); + + multipartUpload(bucketName, keyName, multipartUploadFile, 5 * MB, null, null, null); + + GetObjectRequest getObjectRequestOne = new GetObjectRequest(bucketName, keyName); + getObjectRequestOne.setPartNumber(4); + S3Object s3ObjectOne =
s3Client.getObject(getObjectRequestOne); + long partOneContentLength = s3ObjectOne.getObjectMetadata().getContentLength(); + assertEquals(0, partOneContentLength); + } + @Test public void testListPartsNotFound() { final String bucketName = getBucketName(); diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java index ccda21efc93..8e3bbb47c3c 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java @@ -509,6 +509,22 @@ private OmKeyInfo readKeyInfo(OmKeyArgs args, BucketLayout bucketLayout) if (args.getLatestVersionLocation()) { slimLocationVersion(value); } + int partNumberParam = args.getMultipartUploadPartNumber(); + if (partNumberParam > 0) { + OmKeyLocationInfoGroup latestLocationVersion = value.getLatestVersionLocations(); + if (latestLocationVersion != null && latestLocationVersion.isMultipartKey()) { + List currentLocations = latestLocationVersion.getBlocksLatestVersionOnly() + .stream() + .filter(it -> it.getPartNumber() == partNumberParam) + .collect(Collectors.toList()); + + value.updateLocationInfoList(currentLocations, true, true); + + value.setDataSize(currentLocations.stream() + .mapToLong(BlockLocationInfo::getLength) + .sum()); + } + } return value; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java index 09865ace27a..a31783f29c4 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java @@ -660,6 +660,7 @@ private GetKeyInfoResponse getKeyInfo(GetKeyInfoRequest request, .setHeadOp(keyArgs.getHeadOp()) .setForceUpdateContainerCacheFromSCM( keyArgs.getForceUpdateContainerCacheFromSCM()) + .setMultipartUploadPartNumber(keyArgs.getMultipartNumber()) .build(); KeyInfoWithVolumeContext keyInfo = impl.getKeyInfo(omKeyArgs, request.getAssumeS3Context()); From 76ac396611e0d650c02ce0a13fd2c75d61f30f26 Mon Sep 17 00:00:00 2001 From: Abhishek Pal <43001336+devabhishekpal@users.noreply.github.com> Date: Fri, 10 Jan 2025 11:01:46 +0530 Subject: [PATCH 069/168] HDDS-12036. 
Add storage indicators when reaching capacity (#7663) --- .../overviewCard/overviewStorageCard.tsx | 6 ++- .../v2/components/storageBar/storageBar.less | 29 +++--------- .../v2/components/storageBar/storageBar.tsx | 46 ++++++++++--------- 3 files changed, 37 insertions(+), 44 deletions(-) diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/overviewCard/overviewStorageCard.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/overviewCard/overviewStorageCard.tsx index d6e29a2f968..51de4669b99 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/overviewCard/overviewStorageCard.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/overviewCard/overviewStorageCard.tsx @@ -55,6 +55,10 @@ const cardStyle: React.CSSProperties = { boxSizing: 'border-box', height: '100%' } +const cardErrorStyle: React.CSSProperties = { + borderColor: '#FF4D4E', + borderWidth: '1.4px' +} const eChartStyle: React.CSSProperties = { width: '280px', height: '200px' @@ -175,7 +179,7 @@ const OverviewStorageCard: React.FC = ({ title='Cluster Capacity' headStyle={cardHeadStyle} bodyStyle={cardBodyStyle} - style={cardStyle}> + style={(usagePercentage > 79) ? {...cardStyle, ...cardErrorStyle} : cardStyle} > = ({ const totalUsed = capacity - remaining; const tooltip = ( <> -

- - Ozone Used ({size(used)}) -
-
- - Non Ozone Used ({size(nonOzoneUsed)}) -
-
- - Remaining ({size(remaining)}) -
-
- - Container Pre-allocated ({size(committed)}) -
+ + + + + + + + + + + + + + + + + + + +
Ozone Used{size(used)}
Non Ozone Used{size(nonOzoneUsed)}
Remaining{size(remaining)}
Container Pre-allocated{size(committed)}
); + const percentage = getCapacityPercent(totalUsed, capacity) + return ( = ({ } + percent={percentage} + strokeColor={(percentage > 80) ? '#FF4D4E' : '#52C41A'} + className={(percentage > 80) ? 'capacity-bar-v2-error' : 'capacity-bar-v2'} strokeWidth={strokeWidth} /> ); } From 49efad97a7f9779c80e541cf632fa8115675bccf Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 10 Jan 2025 09:43:25 +0100 Subject: [PATCH 070/168] HDDS-12012. Defer ozone repair prompt after subcommand validation (#7653) --- .../apache/ozone/test/GenericTestUtils.java | 15 +++++++ .../java/org/apache/ozone/test/IntLambda.java | 43 +++++++++++++++++++ .../ozone/repair/om/TestFSORepairTool.java | 4 +- .../ozone/shell/TestOzoneRepairShell.java | 19 ++++++-- .../hadoop/ozone/repair/OzoneRepair.java | 32 -------------- .../hadoop/ozone/repair/RepairTool.java | 28 ++++++++++++ .../hadoop/ozone/repair/TestOzoneRepair.java | 42 ++++++++++-------- .../repair/om/TestTransactionInfoRepair.java | 15 ++++--- 8 files changed, 137 insertions(+), 61 deletions(-) create mode 100644 hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/IntLambda.java diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java index e42f80b329b..959326e210f 100644 --- a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java +++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/GenericTestUtils.java @@ -20,6 +20,7 @@ import java.io.ByteArrayOutputStream; import java.io.File; +import java.io.InputStream; import java.io.OutputStream; import java.io.PrintStream; import java.io.StringWriter; @@ -34,6 +35,7 @@ import com.google.common.base.Preconditions; import org.apache.commons.io.IOUtils; +import org.apache.commons.io.input.CharSequenceInputStream; import org.apache.commons.lang3.tuple.Pair; import org.apache.log4j.Layout; import org.apache.log4j.Level; @@ -440,6 +442,19 @@ public SystemOutCapturer() { } } + /** + * Replaces {@link System#in} with a stream that provides {@code lines} as input. + * @return an {@code AutoCloseable} to restore the original {@link System#in} stream + */ + public static AutoCloseable supplyOnSystemIn(String... lines) { + final InputStream original = System.in; + final InputStream in = CharSequenceInputStream.builder() + .setCharSequence(String.join("\n", lines)) + .get(); + System.setIn(in); + return () -> System.setIn(original); + } + /** * Prints output to one {@link PrintStream} while copying to the other. *

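// A minimal usage sketch for the System.in test helpers introduced in this patch, assuming a
// JUnit test where "cmd" is a picocli CommandLine and "dbPath" points at an OM RocksDB directory
// (both hypothetical names): supplyOnSystemIn feeds scripted lines to System.in and restores the
// original stream on close, while IntLambda.withTextFromSystemIn does the same around a block
// that returns an exit code.
try (AutoCloseable ignored = GenericTestUtils.supplyOnSystemIn("y")) {
  // code under test that reads the confirmation prompt from System.in sees "y" here
}
int exitCode = IntLambda.withTextFromSystemIn("y")
    .execute(() -> cmd.execute("om", "fso-tree", "--db", dbPath));
assertEquals(0, exitCode);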
diff --git a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/IntLambda.java b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/IntLambda.java new file mode 100644 index 00000000000..912b7b051b2 --- /dev/null +++ b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/IntLambda.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ozone.test; + +import java.util.function.IntSupplier; + +/** Test utilities for working with lambdas returning int value. */ +public interface IntLambda { + + static ToIntExecutable withTextFromSystemIn(String... lines) { + return runnable -> { + try (AutoCloseable ignored = GenericTestUtils.supplyOnSystemIn(lines)) { + return runnable.getAsInt(); + } catch (RuntimeException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + }; + } + + /** Function that takes a block of code returning int, executes it, and returns the value. */ + @FunctionalInterface + interface ToIntExecutable { + int execute(IntSupplier code); + } + +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java index fb6472d7bc7..2969931808a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java @@ -60,6 +60,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import static org.apache.ozone.test.IntLambda.withTextFromSystemIn; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -350,7 +351,8 @@ private int execute(boolean dryRun, String... 
args) { } argList.addAll(Arrays.asList(args)); - return cmd.execute(argList.toArray(new String[0])); + return withTextFromSystemIn("y") + .execute(() -> cmd.execute(argList.toArray(new String[0]))); } private int countTableEntries(Table table) throws Exception { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java index dffc4bd552c..4cc8ecaa030 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java @@ -38,6 +38,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; +import static org.apache.ozone.test.IntLambda.withTextFromSystemIn; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -89,7 +90,11 @@ public void testUpdateTransactionInfoTable() throws Exception { String testTerm = "1111"; String testIndex = "1111"; - int exitCode = cmd.execute("om", "update-transaction", "--db", dbPath, "--term", testTerm, "--index", testIndex); + int exitCode = withTextFromSystemIn("y") + .execute(() -> cmd.execute("om", "update-transaction", + "--db", dbPath, + "--term", testTerm, + "--index", testIndex)); assertEquals(0, exitCode, err); assertThat(out.get()) .contains( @@ -101,8 +106,11 @@ public void testUpdateTransactionInfoTable() throws Exception { String cmdOut2 = scanTransactionInfoTable(dbPath); assertThat(cmdOut2).contains(testTerm + "#" + testIndex); - cmd.execute("om", "update-transaction", "--db", dbPath, "--term", - originalHighestTermIndex[0], "--index", originalHighestTermIndex[1]); + withTextFromSystemIn("y") + .execute(() -> cmd.execute("om", "update-transaction", + "--db", dbPath, + "--term", originalHighestTermIndex[0], + "--index", originalHighestTermIndex[1])); cluster.getOzoneManager().restart(); try (OzoneClient ozoneClient = cluster.newClient()) { ozoneClient.getObjectStore().createVolume("vol1"); @@ -130,8 +138,11 @@ public void testQuotaRepair() throws Exception { int exitCode = cmd.execute("om", "quota", "status", "--service-host", conf.get(OZONE_OM_ADDRESS_KEY)); assertEquals(0, exitCode, err); - exitCode = cmd.execute("om", "quota", "start", "--service-host", conf.get(OZONE_OM_ADDRESS_KEY)); + + exitCode = withTextFromSystemIn("y") + .execute(() -> cmd.execute("om", "quota", "start", "--service-host", conf.get(OZONE_OM_ADDRESS_KEY))); assertEquals(0, exitCode, err); + GenericTestUtils.waitFor(() -> { out.reset(); // verify quota trigger is completed having non-zero lastRunFinishedTime diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/OzoneRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/OzoneRepair.java index 864022da6f3..2518714a8f8 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/OzoneRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/OzoneRepair.java @@ -24,9 +24,6 @@ import org.apache.hadoop.hdds.cli.RepairSubcommand; import picocli.CommandLine; -import java.nio.charset.StandardCharsets; -import java.util.Scanner; - /** * Ozone Repair Command line tool. 
*/ @@ -37,39 +34,10 @@ mixinStandardHelpOptions = true) public class OzoneRepair extends GenericCli implements ExtensibleParentCommand { - public static final String WARNING_SYS_USER_MESSAGE = - "ATTENTION: Running as user %s. Make sure this is the same user used to run the Ozone process." + - " Are you sure you want to continue (y/N)? "; - public static void main(String[] argv) { new OzoneRepair().run(argv); } - @Override - public int execute(String[] argv) { - if (argv.length == 0 || argv[0].equals("--help") || argv[0].equals("-h")) { - return super.execute(argv); - } - - String currentUser = getSystemUserName(); - if (!("y".equalsIgnoreCase(getConsoleReadLineWithFormat(currentUser)))) { - System.out.println("Aborting command."); - return 1; - } - System.out.println("Run as user: " + currentUser); - - return super.execute(argv); - } - - public String getSystemUserName() { - return System.getProperty("user.name"); - } - - public String getConsoleReadLineWithFormat(String currentUser) { - System.err.printf(WARNING_SYS_USER_MESSAGE, currentUser); - return (new Scanner(System.in, StandardCharsets.UTF_8.name())).nextLine().trim(); - } - @Override public Class subcommandType() { return RepairSubcommand.class; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java index 20a30f0b187..a64cacb8b21 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java @@ -21,11 +21,17 @@ import picocli.CommandLine; import java.io.PrintWriter; +import java.nio.charset.StandardCharsets; +import java.util.Scanner; import java.util.concurrent.Callable; /** Parent class for all actionable repair commands. */ public abstract class RepairTool extends AbstractSubcommand implements Callable { + private static final String WARNING_SYS_USER_MESSAGE = + "ATTENTION: Running as user %s. Make sure this is the same user used to run the Ozone process." + + " Are you sure you want to continue (y/N)? 
"; + @CommandLine.Option(names = {"--force"}, description = "Use this flag if you want to bypass the check in false-positive cases.") private boolean force; @@ -35,6 +41,7 @@ public abstract class RepairTool extends AbstractSubcommand implements Callable< @Override public final Void call() throws Exception { + confirmUser(); execute(); return null; } @@ -84,4 +91,25 @@ private String formatMessage(String msg, Object[] args) { return msg; } + protected void confirmUser() { + final String currentUser = getSystemUserName(); + final boolean confirmed = "y".equalsIgnoreCase(getConsoleReadLineWithFormat(currentUser)); + + if (!confirmed) { + throw new IllegalStateException("Aborting command."); + } + + info("Run as user: " + currentUser); + } + + private String getSystemUserName() { + return System.getProperty("user.name"); + } + + private String getConsoleReadLineWithFormat(String currentUser) { + err().printf(WARNING_SYS_USER_MESSAGE, currentUser); + return new Scanner(System.in, StandardCharsets.UTF_8.name()) + .nextLine() + .trim(); + } } diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestOzoneRepair.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestOzoneRepair.java index bf6a9ed00a4..dc3f2d73845 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestOzoneRepair.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestOzoneRepair.java @@ -20,14 +20,20 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; +import picocli.CommandLine; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.PrintStream; +import java.util.List; import static java.nio.charset.StandardCharsets.UTF_8; +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertEquals; /** * Tests the ozone repair command. 
@@ -67,9 +73,9 @@ void testOzoneRepairWhenUserIsRemindedSystemUserAndDeclinesToProceed() throws Ex OzoneRepair ozoneRepair = new OzoneRepair(); System.setIn(new ByteArrayInputStream("N".getBytes(DEFAULT_ENCODING))); - int res = ozoneRepair.execute(new String[]{"om", "fso-tree"}); - assertEquals(1, res); - assertThat(out.toString(DEFAULT_ENCODING)).contains("Aborting command."); + int res = ozoneRepair.execute(new String[]{"om", "fso-tree", "--db", "/dev/null"}); + assertThat(res).isNotEqualTo(CommandLine.ExitCode.OK); + assertThat(err.toString(DEFAULT_ENCODING)).contains("Aborting command."); // prompt should contain the current user name as well assertThat(err.toString(DEFAULT_ENCODING)).contains("ATTENTION: Running as user " + OZONE_USER); } @@ -79,30 +85,30 @@ void testOzoneRepairWhenUserIsRemindedSystemUserAndAgreesToProceed() throws Exce OzoneRepair ozoneRepair = new OzoneRepair(); System.setIn(new ByteArrayInputStream("y".getBytes(DEFAULT_ENCODING))); - ozoneRepair.execute(new String[]{"om", "fso-tree"}); + ozoneRepair.execute(new String[]{"om", "fso-tree", "--db", "/dev/null"}); assertThat(out.toString(DEFAULT_ENCODING)).contains("Run as user: " + OZONE_USER); // prompt should contain the current user name as well assertThat(err.toString(DEFAULT_ENCODING)).contains("ATTENTION: Running as user " + OZONE_USER); } - @Test - void testOzoneRepairSkipsPromptWhenNoSubcommandProvided() throws Exception { - OzoneRepair ozoneRepair = new OzoneRepair(); - - // when no argument is passed, prompt should not be displayed - ozoneRepair.execute(new String[]{}); - assertThat(err.toString(DEFAULT_ENCODING)).doesNotContain("ATTENTION: Running as user " + OZONE_USER); + /** Arguments for which confirmation prompt should not be displayed. */ + static List> skipPromptParams() { + return asList( + emptyList(), + singletonList("om"), + asList("om", "fso-tree"), + asList("om", "fso-tree", "-h"), + asList("om", "fso-tree", "--help") + ); } - @Test - void testOzoneRepairSkipsPromptWhenHelpFlagProvided() throws Exception { + @ParameterizedTest + @MethodSource("skipPromptParams") + void testSkipsPrompt(List args) throws Exception { OzoneRepair ozoneRepair = new OzoneRepair(); - // when --help or -h flag is passed, prompt should not be displayed - ozoneRepair.execute(new String[]{"--help"}); - assertThat(err.toString(DEFAULT_ENCODING)).doesNotContain("ATTENTION: Running as user " + OZONE_USER); + ozoneRepair.execute(args.toArray(new String[0])); - ozoneRepair.execute(new String[]{"-h"}); assertThat(err.toString(DEFAULT_ENCODING)).doesNotContain("ATTENTION: Running as user " + OZONE_USER); } diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/om/TestTransactionInfoRepair.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/om/TestTransactionInfoRepair.java index 7b355cf0c91..3ad1c4f8404 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/om/TestTransactionInfoRepair.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/om/TestTransactionInfoRepair.java @@ -33,6 +33,7 @@ import org.rocksdb.RocksDBException; import picocli.CommandLine; +import static org.apache.ozone.test.IntLambda.withTextFromSystemIn; import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.TRANSACTION_INFO_TABLE; import static org.assertj.core.api.Assertions.assertThat; @@ -125,12 +126,14 @@ private void testCommand(ManagedRocksDB mdb, ColumnFamilyHandle columnFamilyHand 
.thenReturn(transactionInfo2); CommandLine cli = new OzoneRepair().getCmd(); - cli.execute( - "om", - "update-transaction", - "--db", DB_PATH, - "--term", String.valueOf(TEST_TERM), - "--index", String.valueOf(TEST_INDEX)); + withTextFromSystemIn("y") + .execute(() -> cli.execute( + "om", + "update-transaction", + "--db", DB_PATH, + "--term", String.valueOf(TEST_TERM), + "--index", String.valueOf(TEST_INDEX) + )); } } From 7c13de865ee2e3fa34bfda4aa7b917308763d037 Mon Sep 17 00:00:00 2001 From: Sarveksha Yeshavantha Raju <79865743+sarvekshayr@users.noreply.github.com> Date: Fri, 10 Jan 2025 17:21:55 +0530 Subject: [PATCH 071/168] HDDS-12041. Add ozone repair scm cert command and its subcommand (#7671) --- .../hadoop/ozone/repair/scm/SCMRepair.java | 38 +++++++++++++++++++ .../ozone/repair/scm/cert/CertRepair.java | 35 +++++++++++++++++ .../{ => scm/cert}/RecoverSCMCertificate.java | 20 +++++----- .../ozone/repair/scm/cert/package-info.java | 22 +++++++++++ .../hadoop/ozone/repair/scm/package-info.java | 22 +++++++++++ 5 files changed, 126 insertions(+), 11 deletions(-) create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/SCMRepair.java create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/cert/CertRepair.java rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/{ => scm/cert}/RecoverSCMCertificate.java (95%) create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/cert/package-info.java create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/package-info.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/SCMRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/SCMRepair.java new file mode 100644 index 00000000000..d7e61a8ed22 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/SCMRepair.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.repair.scm; + +import org.apache.hadoop.hdds.cli.RepairSubcommand; +import org.apache.hadoop.ozone.repair.scm.cert.CertRepair; +import org.kohsuke.MetaInfServices; +import picocli.CommandLine; + +/** + * Ozone Repair CLI for SCM. 
+ */ +@CommandLine.Command(name = "scm", + description = "Operational tool to repair SCM.", + subcommands = { + CertRepair.class, + } +) +@MetaInfServices(RepairSubcommand.class) +public class SCMRepair implements RepairSubcommand { + +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/cert/CertRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/cert/CertRepair.java new file mode 100644 index 00000000000..c1d687101f0 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/cert/CertRepair.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.repair.scm.cert; + +import picocli.CommandLine; + +/** + * A dedicated subcommand for all certificate related repairs on SCM. + */ + +@CommandLine.Command(name = "cert", + description = "Subcommand for all certificate related repairs on SCM", + subcommands = { + RecoverSCMCertificate.class + } +) +public class CertRepair { + +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RecoverSCMCertificate.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/cert/RecoverSCMCertificate.java similarity index 95% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RecoverSCMCertificate.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/cert/RecoverSCMCertificate.java index b85ccfb1e8b..29b92574b81 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RecoverSCMCertificate.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/cert/RecoverSCMCertificate.java @@ -15,9 +15,8 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.repair; +package org.apache.hadoop.ozone.repair.scm.cert; -import org.apache.hadoop.hdds.cli.RepairSubcommand; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.authority.CAType; @@ -31,7 +30,8 @@ import org.apache.hadoop.ozone.debug.DBDefinitionFactory; import org.apache.hadoop.ozone.debug.RocksDBUtils; import java.security.cert.CertificateFactory; -import org.kohsuke.MetaInfServices; + +import org.apache.hadoop.ozone.repair.RepairTool; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.RocksDBException; @@ -63,21 +63,19 @@ * and private keys of the SCM are intact. 
*/ @CommandLine.Command( - name = "cert-recover", + name = "recover", description = "Recover Deleted SCM Certificate from RocksDB") -@MetaInfServices(RepairSubcommand.class) -public class RecoverSCMCertificate extends RepairTool implements RepairSubcommand { - +public class RecoverSCMCertificate extends RepairTool { @CommandLine.Option(names = {"--db"}, required = true, description = "SCM DB Path") private String dbPath; - @CommandLine.ParentCommand - private OzoneRepair parent; - @Override public void execute() throws Exception { + if (checkIfServiceIsRunning("SCM")) { + return; + } dbPath = removeTrailingSlashIfNeeded(dbPath); String tableName = VALID_SCM_CERTS.getName(); DBDefinition dbDefinition = @@ -96,7 +94,7 @@ public void execute() throws Exception { try (ManagedRocksDB db = ManagedRocksDB.openReadOnly(dbPath, cfDescList, cfHandleList)) { cfHandle = getColumnFamilyHandle(cfHandleList, tableNameBytes); - SecurityConfig securityConfig = new SecurityConfig(parent.getOzoneConf()); + SecurityConfig securityConfig = new SecurityConfig(getOzoneConf()); Map allCerts = getAllCerts(columnFamilyDefinition, cfHandle, db); info("All Certs in DB : %s", allCerts.keySet()); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/cert/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/cert/package-info.java new file mode 100644 index 00000000000..4086badcb7c --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/cert/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * SCM Cert Repair tools. + */ +package org.apache.hadoop.ozone.repair.scm.cert; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/package-info.java new file mode 100644 index 00000000000..76a2b0f5daf --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * SCM related repair tools. + */ +package org.apache.hadoop.ozone.repair.scm; From 9c391bc29effb2f0810bdf864a3e6d49b15e98c0 Mon Sep 17 00:00:00 2001 From: Abhishek Pal <43001336+devabhishekpal@users.noreply.github.com> Date: Fri, 10 Jan 2025 20:04:14 +0530 Subject: [PATCH 072/168] HDDS-12049. Rename OM ID and OM Service ID to Ozone Service ID. (#7679) --- .../src/components/overviewCard/overviewCard.tsx | 2 +- .../recon/ozone-recon-web/src/v2/pages/overview/overview.tsx | 2 +- .../recon/ozone-recon-web/src/views/overview/overview.tsx | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/overviewCard/overviewCard.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/overviewCard/overviewCard.tsx index 977eddb9811..6ccc106d019 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/overviewCard/overviewCard.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/overviewCard/overviewCard.tsx @@ -103,7 +103,7 @@ class OverviewCardWrapper extends React.Component { active: '3' } } - else if (title === 'OM Service') { + else if (title === 'Ozone Service ID') { return { active: '4' } diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/overview/overview.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/overview/overview.tsx index c7f5a4b95d3..e14f134a0e2 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/overview/overview.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/overview/overview.tsx @@ -530,7 +530,7 @@ const Overview: React.FC<{}> = () => { - OM ID:  + Ozone Service ID:  {omServiceId} | diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/overview/overview.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/overview/overview.tsx index a83d12408b3..0e8940a928e 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/overview/overview.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/overview/overview.tsx @@ -438,7 +438,7 @@ export class Overview extends React.Component, IOverviewS } {omServiceId && - + } From da8fa24791b3a2e75b4abab922d8688190ced716 Mon Sep 17 00:00:00 2001 From: Sarveksha Yeshavantha Raju <79865743+sarvekshayr@users.noreply.github.com> Date: Fri, 10 Jan 2025 22:17:36 +0530 Subject: [PATCH 073/168] HDDS-12054. 
Move ozone debug prefix to ozone debug om prefix (#7674) --- .../TestOzoneFileSystemPrefixParser.java | 2 +- .../apache/hadoop/ozone/debug/om/OMDebug.java | 37 +++++++++++++++++++ .../ozone/debug/{ => om}/PrefixParser.java | 12 +----- .../hadoop/ozone/debug/om/package-info.java | 22 +++++++++++ 4 files changed, 62 insertions(+), 11 deletions(-) create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/OMDebug.java rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/{ => om}/PrefixParser.java (95%) create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/package-info.java diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java index 37116f33e27..cc1780a2c1e 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemPrefixParser.java @@ -28,7 +28,7 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.TestDataUtil; import org.apache.hadoop.ozone.client.OzoneClient; -import org.apache.hadoop.ozone.debug.PrefixParser; +import org.apache.hadoop.ozone.debug.om.PrefixParser; import org.apache.hadoop.ozone.om.OMStorage; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.junit.jupiter.api.AfterAll; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/OMDebug.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/OMDebug.java new file mode 100644 index 00000000000..c52e5e7aa82 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/OMDebug.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.debug.om; + +import org.apache.hadoop.hdds.cli.DebugSubcommand; +import org.kohsuke.MetaInfServices; +import picocli.CommandLine; + +/** + * OM debug related commands. 
+ */ +@CommandLine.Command( + name = "om", + description = "Debug commands related to OM.", + subcommands = { + PrefixParser.class + } +) +@MetaInfServices(DebugSubcommand.class) +public class OMDebug implements DebugSubcommand { +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/PrefixParser.java similarity index 95% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/PrefixParser.java index cdda3e5e0f9..3f774ae7676 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/PrefixParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/PrefixParser.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.debug; +package org.apache.hadoop.ozone.debug.om; import java.io.IOException; import java.nio.file.Files; @@ -26,7 +26,6 @@ import java.util.concurrent.Callable; import java.nio.file.Path; -import org.apache.hadoop.hdds.cli.DebugSubcommand; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.utils.MetadataKeyFilters; @@ -40,10 +39,7 @@ import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; import org.apache.hadoop.ozone.om.helpers.WithParentObjectId; import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; -import org.kohsuke.MetaInfServices; import picocli.CommandLine; -import picocli.CommandLine.Model.CommandSpec; -import picocli.CommandLine.Spec; import static org.apache.hadoop.ozone.OzoneConsts.OM_KEY_PREFIX; @@ -53,8 +49,7 @@ @CommandLine.Command( name = "prefix", description = "Parse prefix contents") -@MetaInfServices(DebugSubcommand.class) -public class PrefixParser implements Callable, DebugSubcommand { +public class PrefixParser implements Callable { /** * Types to represent the level or path component type. @@ -70,9 +65,6 @@ public enum Types { private final int[] parserStats = new int[Types.values().length]; - @Spec - private CommandSpec spec; - @CommandLine.Option(names = {"--db"}, required = true, description = "Database File Path") diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/package-info.java new file mode 100644 index 00000000000..c208e807b51 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/om/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * OM debug related commands. 
+ */ +package org.apache.hadoop.ozone.debug.om; From 62f24bd3bdaf43eb668d640d3a603e5733b6ec5c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 11 Jan 2025 14:14:20 +0100 Subject: [PATCH 074/168] HDDS-12066. Bump jetty to 9.4.57.v20241219 (#7683) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index d018c821d85..dd0d9314cb4 100644 --- a/pom.xml +++ b/pom.xml @@ -126,7 +126,7 @@ 1.0-1 1.19.4 2.46 - 9.4.56.v20240826 + 9.4.57.v20241219 1.4.0 3.9.12 3.28.0 From 9b8ef7457374e582de57d1479134d48dde1ea090 Mon Sep 17 00:00:00 2001 From: Abhishek Pal <43001336+devabhishekpal@users.noreply.github.com> Date: Sat, 11 Jan 2025 19:24:12 +0530 Subject: [PATCH 075/168] HDDS-12043. Mark fixed column with disabled checkbox (#7667) --- .../src/v2/components/select/multiSelect.tsx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/select/multiSelect.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/select/multiSelect.tsx index 3dfe19f9b45..03dd12b5698 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/select/multiSelect.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/select/multiSelect.tsx @@ -61,7 +61,8 @@ const Option: React.FC> = (props) => { marginRight: '8px', accentColor: '#1AA57A' }} - onChange={() => null} /> + onChange={() => null} + disabled={props.isDisabled} /> @@ -114,14 +115,13 @@ const MultiSelect: React.FC = ({ isSearchable={false} controlShouldRenderValue={false} classNamePrefix='multi-select' - options={options} + options={options.map((opt) => ({...opt, isDisabled: (opt.value === fixedColumn)}))} components={{ ValueContainer, Option }} placeholder={placeholder} value={selected} - isOptionDisabled={(option) => option.value === fixedColumn} isDisabled={isDisabled} onChange={(selected: ValueType) => { if (selected?.length === options.length) return onChange!(options); From 75bed82a764b6536b5b1516a4527d45953bdc03f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 11 Jan 2025 15:41:40 +0100 Subject: [PATCH 076/168] HDDS-12067. Bump assertj-core to 3.27.2 (#7684) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index dd0d9314cb4..9477d0fb97d 100644 --- a/pom.xml +++ b/pom.xml @@ -34,7 +34,7 @@ 0.16.1 1.14 1.9.7 - 3.27.1 + 3.27.2 1.12.661 0.8.0.RELEASE 1.79 From 1d95f67e8d50967db06b42033bd94dc4b249698f Mon Sep 17 00:00:00 2001 From: Sarveksha Yeshavantha Raju <79865743+sarvekshayr@users.noreply.github.com> Date: Sat, 11 Jan 2025 21:34:41 +0530 Subject: [PATCH 077/168] HDDS-12055. 
Move ozone debug container to ozone debug datanode container (#7675) --- .../compatibility/dn-one-rocksdb.robot | 2 +- .../ozone/debug/datanode/DatanodeDebug.java | 38 +++++++++++++++++++ .../container/ContainerCommands.java | 19 ++-------- .../container/ExportSubcommand.java | 4 +- .../container/InfoSubcommand.java | 6 +-- .../container/InspectSubcommand.java | 4 +- .../container/ListSubcommand.java | 6 +-- .../container/package-info.java | 2 +- .../ozone/debug/datanode/package-info.java | 22 +++++++++++ 9 files changed, 76 insertions(+), 27 deletions(-) create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/DatanodeDebug.java rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/{ => datanode}/container/ContainerCommands.java (93%) rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/{ => datanode}/container/ExportSubcommand.java (96%) rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/{ => datanode}/container/InfoSubcommand.java (88%) rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/{ => datanode}/container/InspectSubcommand.java (96%) rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/{ => datanode}/container/ListSubcommand.java (87%) rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/{ => datanode}/container/package-info.java (93%) create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/package-info.java diff --git a/hadoop-ozone/dist/src/main/smoketest/compatibility/dn-one-rocksdb.robot b/hadoop-ozone/dist/src/main/smoketest/compatibility/dn-one-rocksdb.robot index 9599e319849..282aa8f168f 100644 --- a/hadoop-ozone/dist/src/main/smoketest/compatibility/dn-one-rocksdb.robot +++ b/hadoop-ozone/dist/src/main/smoketest/compatibility/dn-one-rocksdb.robot @@ -25,5 +25,5 @@ Test Timeout 5 minutes Create a container and check container schema version ${output} = Execute ozone admin container create Should not contain ${output} Failed - ${output} = Execute ozone debug container list + ${output} = Execute ozone debug datanode container list Should contain ${output} \"schemaVersion\" : \"3\" diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/DatanodeDebug.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/DatanodeDebug.java new file mode 100644 index 00000000000..a83765fea92 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/DatanodeDebug.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.debug.datanode; + +import org.apache.hadoop.hdds.cli.DebugSubcommand; +import org.apache.hadoop.ozone.debug.datanode.container.ContainerCommands; +import org.kohsuke.MetaInfServices; +import picocli.CommandLine; + +/** + * Datanode debug related commands. + */ +@CommandLine.Command( + name = "datanode", + description = "Debug commands related to Datanode.", + subcommands = { + ContainerCommands.class + } +) +@MetaInfServices(DebugSubcommand.class) +public class DatanodeDebug implements DebugSubcommand { +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ContainerCommands.java similarity index 93% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ContainerCommands.java index 3df8330136c..d3a0960f2df 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ContainerCommands.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ContainerCommands.java @@ -16,11 +16,11 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.debug.container; +package org.apache.hadoop.ozone.debug.datanode.container; import com.google.common.base.Preconditions; +import org.apache.hadoop.hdds.cli.AbstractSubcommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; -import org.apache.hadoop.hdds.cli.DebugSubcommand; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; @@ -42,12 +42,9 @@ import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController; import org.apache.hadoop.ozone.container.ozoneimpl.ContainerReader; import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures; -import org.apache.hadoop.ozone.debug.OzoneDebug; -import org.kohsuke.MetaInfServices; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import picocli.CommandLine.Command; -import picocli.CommandLine.ParentCommand; import java.io.File; import java.io.IOException; @@ -77,25 +74,17 @@ ExportSubcommand.class, InspectSubcommand.class }) -@MetaInfServices(DebugSubcommand.class) -public class ContainerCommands implements DebugSubcommand { +public class ContainerCommands extends AbstractSubcommand { private static final Logger LOG = LoggerFactory.getLogger(ContainerCommands.class); - @ParentCommand - private OzoneDebug parent; - private MutableVolumeSet volumeSet; private ContainerController controller; - OzoneConfiguration getOzoneConf() { - return parent.getOzoneConf(); - } - public void loadContainersFromVolumes() throws IOException { - OzoneConfiguration conf = parent.getOzoneConf(); + OzoneConfiguration conf = getOzoneConf(); ContainerSet containerSet = new ContainerSet(null, 1000, true); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ExportSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ExportSubcommand.java similarity index 96% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ExportSubcommand.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ExportSubcommand.java index d0337f65345..984faec8f76 
100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ExportSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ExportSubcommand.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.debug.container; +package org.apache.hadoop.ozone.debug.datanode.container; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.ozone.container.replication.ContainerReplicationSource; @@ -35,7 +35,7 @@ import static org.apache.hadoop.ozone.container.replication.CopyContainerCompression.NO_COMPRESSION; /** - * Handles {@code ozone debug container export} command. + * Handles {@code ozone debug datanode container export} command. */ @Command( name = "export", diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/InfoSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/InfoSubcommand.java similarity index 88% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/InfoSubcommand.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/InfoSubcommand.java index 0d70d90c5ea..87ebd21cac7 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/InfoSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/InfoSubcommand.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.debug.container; +package org.apache.hadoop.ozone.debug.datanode.container; import org.apache.hadoop.ozone.container.common.interfaces.Container; import picocli.CommandLine; @@ -24,10 +24,10 @@ import java.util.concurrent.Callable; -import static org.apache.hadoop.ozone.debug.container.ContainerCommands.outputContainer; +import static org.apache.hadoop.ozone.debug.datanode.container.ContainerCommands.outputContainer; /** - * Handles {@code ozone debug container info} command. + * Handles {@code ozone debug datanode container info} command. */ @Command( name = "info", diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/InspectSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/InspectSubcommand.java similarity index 96% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/InspectSubcommand.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/InspectSubcommand.java index f924277d27f..63d3cab5232 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/InspectSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/InspectSubcommand.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.debug.container; +package org.apache.hadoop.ozone.debug.datanode.container; import org.apache.hadoop.hdds.cli.AbstractSubcommand; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -33,7 +33,7 @@ import java.util.concurrent.Callable; /** - * {@code ozone debug container inspect}, + * {@code ozone debug datanode container inspect}, * a command to run {@link KeyValueContainerMetadataInspector}. 
*/ @Command( diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ListSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ListSubcommand.java similarity index 87% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ListSubcommand.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ListSubcommand.java index 037d61327b1..ff6cc11a71f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/ListSubcommand.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/ListSubcommand.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.debug.container; +package org.apache.hadoop.ozone.debug.datanode.container; import org.apache.hadoop.ozone.container.common.interfaces.Container; import picocli.CommandLine; @@ -24,10 +24,10 @@ import java.util.concurrent.Callable; -import static org.apache.hadoop.ozone.debug.container.ContainerCommands.outputContainer; +import static org.apache.hadoop.ozone.debug.datanode.container.ContainerCommands.outputContainer; /** - * Handles {@code ozone debug container list} command. + * Handles {@code ozone debug datanode container list} command. */ @Command( name = "list", diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/package-info.java similarity index 93% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/package-info.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/package-info.java index fbfc293d306..d34d4d9dd27 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/container/package-info.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/container/package-info.java @@ -19,4 +19,4 @@ /** * Contains all of the datanode container replica related commands. */ -package org.apache.hadoop.ozone.debug.container; +package org.apache.hadoop.ozone.debug.datanode.container; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/package-info.java new file mode 100644 index 00000000000..9117cc6b22f --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/datanode/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Datanode debug related commands. 
+ */ +package org.apache.hadoop.ozone.debug.datanode; From 19c8136afe34683193f9cdeef0b369a54c1ad045 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Sat, 11 Jan 2025 09:27:03 -0800 Subject: [PATCH 078/168] HDDS-11962. [Docs] Hive Integration (#7596) --- hadoop-hdds/docs/content/integration/Hive.md | 169 ++++++++++++++++++ .../docs/content/integration/_index.md | 26 +++ 2 files changed, 195 insertions(+) create mode 100644 hadoop-hdds/docs/content/integration/Hive.md create mode 100644 hadoop-hdds/docs/content/integration/_index.md diff --git a/hadoop-hdds/docs/content/integration/Hive.md b/hadoop-hdds/docs/content/integration/Hive.md new file mode 100644 index 00000000000..8b43236d567 --- /dev/null +++ b/hadoop-hdds/docs/content/integration/Hive.md @@ -0,0 +1,169 @@ +--- +title: Hive +weight: 4 +menu: + main: + parent: "Application Integrations" +--- + + +Apache Hive has supported Apache Ozone since Hive 4.0. To enable Hive to work with Ozone paths, ensure that the `ozone-filesystem-hadoop3` JAR is added to the Hive classpath. + +## Supported Access Protocols + +Hive supports the following protocols for accessing Ozone data: + +* ofs +* o3fs +* s3a + +## Supported Replication Types + +Hive is compatible with Ozone buckets configured with either: + +* RATIS (Replication) +* Erasure Coding + +## Accessing Ozone Data in Hive + +Hive provides two methods to interact with data in Ozone: + +* Managed Tables +* External Tables + +### Managed Tables +#### Configuring the Hive Warehouse Directory in Ozone +To store managed tables in Ozone, update the following properties in the `hive-site.xml` configuration file: + +```xml + + hive.metastore.warehouse.dir + ofs://ozone1/vol1/bucket1/warehouse/ + +``` + +#### Creating a Managed Table +You can create a managed table with a standard `CREATE TABLE` statement: + +```sql +CREATE TABLE myTable ( + id INT, + name STRING +); +``` + +#### Loading Data into a Managed Table +Data can be loaded into a Hive table from an Ozone location: + +```sql +LOAD DATA INPATH 'ofs://ozone1/vol1/bucket1/table.csv' INTO TABLE myTable; +``` + +#### Specifying a Custom Ozone Path +You can define a custom Ozone path for a database using the `MANAGEDLOCATION` clause: + +```sql +CREATE DATABASE d1 MANAGEDLOCATION 'ofs://ozone1/vol1/bucket1/data'; +``` + +Tables created in the database d1 will be stored under the specified path: +`ofs://ozone1/vol1/bucket1/data` + +#### Verifying the Ozone Path +You can confirm that Hive references the correct Ozone path using: + +```sql +SHOW CREATE DATABASE d1; +``` + +Output Example: + +```text ++----------------------------------------------------+ +| createdb_stmt | ++----------------------------------------------------+ +| CREATE DATABASE `d1` | +| LOCATION | +| 'ofs://ozone1/vol1/bucket1/external/d1.db' | +| MANAGEDLOCATION | +| 'ofs://ozone1/vol1/bucket1/data' | ++----------------------------------------------------+ +``` + +### External Tables + +Hive allows the creation of external tables to query existing data stored in Ozone. + +#### Creating an External Table +```sql +CREATE EXTERNAL TABLE external_table ( + id INT, + name STRING +) +LOCATION 'ofs://ozone1/vol1/bucket1/table1'; +``` + +* With external tables, the data is expected to be created and managed by another tool. +* Hive queries the data as-is. +* Note: Dropping an external table in Hive does not delete the associated data. 
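+
+If the table's data should also be removed when the table is dropped, the external table can opt in to purging. The following is only a sketch and assumes the standard Hive `external.table.purge` table property; verify the behavior against the Hive version in use:
+
+```sql
+-- Illustrative sketch (assumes the external.table.purge property is available):
+-- with purging enabled, DROP TABLE also deletes the table data stored in Ozone.
+ALTER TABLE external_table SET TBLPROPERTIES ('external.table.purge'='true');
+DROP TABLE external_table;
+```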
+ +To set a default path for external tables, configure the following property in the `hive-site.xml` file: +```xml + + hive.metastore.warehouse.external.dir + ofs://ozone1/vol1/bucket1/external/ + +``` +This property specifies the base directory for external tables when no explicit `LOCATION` is provided. + +#### Verifying the External Table Path +To confirm the table's metadata and location, use: + +```sql +SHOW CREATE TABLE external_table; +``` +Output Example: + +```text ++----------------------------------------------------+ +| createtab_stmt | ++----------------------------------------------------+ +| CREATE EXTERNAL TABLE `external_table`( | +| `id` int, | +| `name` string) | +| ROW FORMAT SERDE | +| 'org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe' | +| STORED AS INPUTFORMAT | +| 'org.apache.hadoop.mapred.TextInputFormat' | +| OUTPUTFORMAT | +| 'org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat' | +| LOCATION | +| 'ofs://ozone1/vol1/bucket1/table1' | +| TBLPROPERTIES ( | +| 'bucketing_version'='2', | +| 'transient_lastDdlTime'='1734725573') | ++----------------------------------------------------+ +``` + +## Using the S3A Protocol +In addition to ofs, Hive can access Ozone using the S3 Gateway via the S3A file system. + +For more information, consult: + +* The [S3 Protocol]({{< ref "interface/S3.md">}}) +* The [Hadoop S3A](https://hadoop.apache.org/docs/current/hadoop-aws/tools/hadoop-aws/index.html) documentation. diff --git a/hadoop-hdds/docs/content/integration/_index.md b/hadoop-hdds/docs/content/integration/_index.md new file mode 100644 index 00000000000..87f6a4825b6 --- /dev/null +++ b/hadoop-hdds/docs/content/integration/_index.md @@ -0,0 +1,26 @@ +--- +title: "Application Integrations" +menu: + main: + weight: 5 +--- + + +{{}} +Many applications can be integrated with Ozone through the Hadoop-compatible ofs interface or the S3 interface. +{{}} From 468c35d76c068d5e55fac676dd6d3e4b6e4c3773 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Sat, 11 Jan 2025 09:27:26 -0800 Subject: [PATCH 079/168] HDDS-11947. [Docs] Impala Integration (#7584) --- .../docs/content/integration/Impala.md | 101 ++++++++++++++++++ 1 file changed, 101 insertions(+) create mode 100644 hadoop-hdds/docs/content/integration/Impala.md diff --git a/hadoop-hdds/docs/content/integration/Impala.md b/hadoop-hdds/docs/content/integration/Impala.md new file mode 100644 index 00000000000..3c786d5e15a --- /dev/null +++ b/hadoop-hdds/docs/content/integration/Impala.md @@ -0,0 +1,101 @@ +--- +title: Impala +weight: 4 +menu: + main: + parent: "Application Integrations" +--- + + +Starting with version 4.2.0, Apache Impala provides full support for querying data stored in Apache Ozone. To utilize this functionality, ensure that your Ozone version is 1.4.0 or later. + +## Supported Access Protocols + +Impala supports the following protocols for accessing Ozone data: + +* ofs +* s3a + +Note: The o3fs protocol is **NOT** supported by Impala. + +## Supported Replication Types + +Impala is compatible with Ozone buckets configured with either: + +* RATIS (Replication) +* Erasure Coding + +## Querying Ozone Data with Impala + +Impala provides two approaches to interact with Ozone: + +* Managed Tables +* External Tables + +### Managed Tables + +If the Hive Warehouse Directory is located in Ozone, you can execute Impala queries without any changes, treating the Ozone file system like HDFS. 
For example: + +```sql +CREATE DATABASE d1; +``` + +```sql +CREATE TABLE t1 (x INT, s STRING); +``` + +The data will be stored under the Hive Warehouse Directory path in Ozone. + +#### Specifying a Custom Ozone Path + +You can create managed databases, tables, or partitions at a specific Ozone path using the `LOCATION` clause. Example: + +```sql +CREATE DATABASE d1 LOCATION 'ofs://ozone1/vol1/bucket1/d1.db'; +``` + +```sql +CREATE TABLE t1 LOCATION 'ofs://ozone1/vol1/bucket1/table1'; +``` + +### External Tables + +You can create an external table in Impala to query Ozone data. For example: + +```sql +CREATE EXTERNAL TABLE external_table ( + id INT, + name STRING +) +LOCATION 'ofs://ozone1/vol1/bucket1/table1'; +``` + +* With external tables, the data is expected to be created and managed by another tool. +* Impala queries the data as-is. +* The metadata is stored under the external warehouse directory. +* Note: Dropping an external table in Impala does not delete the associated data. + + +## Using the S3A Protocol + +In addition to ofs, Impala can access Ozone via the S3 Gateway using the S3A file system. For more details, refer to +* The [S3 Protocol]({{< ref "interface/S3.md">}}) +* The [Hadoop S3A](https://hadoop.apache.org/docs/current/hadoop-aws/tools/hadoop-aws/index.html) documentation. + +For additional information, consult the Apache Impala User Documentation +[Using Impala with Apache Ozone Storage](https://impala.apache.org/docs/build/html/topics/impala_ozone.html). From b89b6e021d90e6e1b433aead64ca4cb3261de1f5 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Sat, 11 Jan 2025 20:36:30 +0100 Subject: [PATCH 080/168] HDDS-12058. Use CommandLine out/err in GenericCli subclasses (#7673) --- .../apache/hadoop/hdds/cli/GenericCli.java | 13 +++- .../StorageContainerManagerStarter.java | 4 +- .../hadoop/ozone/conf/OzoneGetConf.java | 26 +------ .../GenerateOzoneRequiredConfigurations.java | 44 ++--------- .../org/apache/hadoop/ozone/shell/Shell.java | 2 +- .../ozone/shell/checknative/CheckNative.java | 8 +- .../hadoop/ozone/conf/TestGetConfOptions.java | 76 ++++++++----------- 7 files changed, 61 insertions(+), 112 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java index c698a9f3d50..3ec9048dfcf 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/GenericCli.java @@ -17,6 +17,7 @@ package org.apache.hadoop.hdds.cli; import java.io.IOException; +import java.io.PrintWriter; import java.util.Map; import com.google.common.base.Strings; @@ -87,9 +88,9 @@ protected void printError(Throwable error) { //message could be null in case of NPE. This is unexpected so we can //print out the stack trace. 
if (verbose || Strings.isNullOrEmpty(error.getMessage())) { - error.printStackTrace(System.err); + error.printStackTrace(cmd.getErr()); } else { - System.err.println(error.getMessage().split("\n")[0]); + cmd.getErr().println(error.getMessage().split("\n")[0]); } } @@ -114,4 +115,12 @@ public CommandLine getCmd() { public boolean isVerbose() { return verbose; } + + protected PrintWriter out() { + return cmd.getOut(); + } + + protected PrintWriter err() { + return cmd.getErr(); + } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java index 1eef7bce14c..353c6c50104 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerStarter.java @@ -92,8 +92,8 @@ public Void call() throws Exception { versionProvider = HddsVersionProvider.class) public void generateClusterId() { commonInit(); - System.out.println("Generating new cluster id:"); - System.out.println(receiver.generateClusterId()); + out().println("Generating new cluster id:"); + out().println(receiver.generateClusterId()); } /** diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/OzoneGetConf.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/OzoneGetConf.java index bdb749d5218..1ae3aa51def 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/OzoneGetConf.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/OzoneGetConf.java @@ -17,8 +17,6 @@ package org.apache.hadoop.ozone.conf; -import java.io.PrintStream; - import org.apache.hadoop.hdds.cli.GenericCli; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -45,31 +43,17 @@ OzoneManagersCommandHandler.class }) public class OzoneGetConf extends GenericCli { - private final PrintStream out; // Stream for printing command output - private final PrintStream err; // Stream for printing error - private OzoneConfiguration conf; - - protected OzoneGetConf(OzoneConfiguration conf) { - this(conf, System.out, System.err); - } - - protected OzoneGetConf(OzoneConfiguration conf, PrintStream out, - PrintStream err) { - this.conf = conf; - this.out = out; - this.err = err; - } void printError(String message) { - err.println(message); + err().println(message); } void printOut(String message) { - out.println(message); + out().println(message); } OzoneConfiguration getConf() { - return this.conf; + return getOzoneConf(); } public static void main(String[] argv) { @@ -79,8 +63,6 @@ public static void main(String[] argv) { .addAppender(new ConsoleAppender(new PatternLayout("%m%n"))); Logger.getLogger(NativeCodeLoader.class).setLevel(Level.ERROR); - OzoneConfiguration conf = new OzoneConfiguration(); - conf.addResource(new OzoneConfiguration()); - new OzoneGetConf(conf).run(argv); + new OzoneGetConf().run(argv); } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java index c88b6b2d698..c29a90ec0d0 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java +++ 
b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/genconf/GenerateOzoneRequiredConfigurations.java @@ -28,7 +28,6 @@ import picocli.CommandLine.Command; import picocli.CommandLine.Option; import picocli.CommandLine.Parameters; -import picocli.CommandLine.PicocliException; import javax.xml.bind.JAXBContext; import javax.xml.bind.JAXBException; @@ -68,50 +67,25 @@ public final class GenerateOzoneRequiredConfigurations extends GenericCli implem "template, update Kerberos principal and keytab file before use.") private boolean genSecurityConf; - /** - * Entry point for using genconf tool. - * - * @param args - * - */ public static void main(String[] args) throws Exception { new GenerateOzoneRequiredConfigurations().run(args); } @Override public Void call() throws Exception { - generateConfigurations(path, genSecurityConf); + generateConfigurations(); return null; } - /** - * Generate ozone-site.xml at specified path. - * @param path - * @throws PicocliException - * @throws JAXBException - */ - public static void generateConfigurations(String path) throws - PicocliException, JAXBException, IOException { - generateConfigurations(path, false); - } - - /** - * Generate ozone-site.xml at specified path. - * @param path - * @param genSecurityConf - * @throws PicocliException - * @throws JAXBException - */ - public static void generateConfigurations(String path, - boolean genSecurityConf) throws - PicocliException, JAXBException, IOException { + private void generateConfigurations() throws + JAXBException, IOException { if (!isValidPath(path)) { - throw new PicocliException("Invalid directory path."); + throw new IllegalArgumentException("Invalid directory path."); } if (!canWrite(path)) { - throw new PicocliException("Insufficient permission."); + throw new IllegalArgumentException("Insufficient permission."); } OzoneConfiguration oc = new OzoneConfiguration(); @@ -171,9 +145,9 @@ public static void generateConfigurations(String path, m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE); m.marshal(generatedConfig, output); - System.out.println("ozone-site.xml has been generated at " + path); + out().println("ozone-site.xml has been generated at " + path); } else { - System.out.printf("ozone-site.xml already exists at %s and " + + out().printf("ozone-site.xml already exists at %s and " + "will not be overwritten%n", path); } @@ -182,21 +156,19 @@ public static void generateConfigurations(String path, /** * Check if the path is valid directory. * - * @param path * @return true, if path is valid directory, else return false */ public static boolean isValidPath(String path) { try { return Files.isDirectory(Paths.get(path)); } catch (InvalidPathException | NullPointerException ex) { - return Boolean.FALSE; + return false; } } /** * Check if user has permission to write in the specified path. * - * @param path * @return true, if the user has permission to write, else returns false */ public static boolean canWrite(String path) { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Shell.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Shell.java index 8bca492f042..515dcec1796 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Shell.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Shell.java @@ -109,7 +109,7 @@ public void printError(Throwable errorArg) { if (omException != null && !isVerbose()) { // In non-verbose mode, reformat OMExceptions as error messages to the // user. 
- System.err.println(String.format("%s %s", omException.getResult().name(), + err().println(String.format("%s %s", omException.getResult().name(), omException.getMessage())); } else { // Prints the stack trace when in verbose mode. diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/checknative/CheckNative.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/checknative/CheckNative.java index b6b5cc989b9..1298811fa40 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/checknative/CheckNative.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/checknative/CheckNative.java @@ -57,10 +57,10 @@ public Void call() throws Exception { isalLoaded = true; } } - System.out.println("Native library checking:"); - System.out.printf("hadoop: %b %s%n", nativeHadoopLoaded, + out().println("Native library checking:"); + out().printf("hadoop: %b %s%n", nativeHadoopLoaded, hadoopLibraryName); - System.out.printf("ISA-L: %b %s%n", isalLoaded, isalDetail); + out().printf("ISA-L: %b %s%n", isalLoaded, isalDetail); // Attempt to load the rocks-tools lib boolean nativeRocksToolsLoaded = NativeLibraryLoader.getInstance().loadLibrary( @@ -70,7 +70,7 @@ public Void call() throws Exception { if (nativeRocksToolsLoaded) { rocksToolsDetail = NativeLibraryLoader.getJniLibraryFileName(); } - System.out.printf("rocks-tools: %b %s%n", nativeRocksToolsLoaded, rocksToolsDetail); + out().printf("rocks-tools: %b %s%n", nativeRocksToolsLoaded, rocksToolsDetail); return null; } } diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/conf/TestGetConfOptions.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/conf/TestGetConfOptions.java index 4f7233f148b..ed16434f900 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/conf/TestGetConfOptions.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/conf/TestGetConfOptions.java @@ -17,82 +17,68 @@ */ package org.apache.hadoop.ozone.conf; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.om.OMConfigKeys; -import static org.junit.jupiter.api.Assertions.assertEquals; +import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Test; -import java.io.ByteArrayOutputStream; -import java.io.PrintStream; -import java.io.UnsupportedEncodingException; - -import static java.nio.charset.StandardCharsets.UTF_8; +import static org.junit.jupiter.api.Assertions.assertEquals; /** * Tests the ozone getconf command. 
*/ public class TestGetConfOptions { - private static OzoneConfiguration conf; - private static ByteArrayOutputStream bout; - private static PrintStream psBackup; - private static final String DEFAULT_ENCODING = UTF_8.name(); + private static GenericTestUtils.PrintStreamCapturer out; + private static OzoneGetConf subject; @BeforeAll - public static void init() throws UnsupportedEncodingException { - conf = new OzoneConfiguration(); - conf.set(OMConfigKeys.OZONE_OM_NODE_ID_KEY, "1"); - conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, "service1"); - conf.set(ScmConfigKeys.OZONE_SCM_NAMES, "localhost"); - psBackup = System.out; - bout = new ByteArrayOutputStream(); - PrintStream psOut = new PrintStream(bout, false, DEFAULT_ENCODING); - System.setOut(psOut); + public static void init() { + out = GenericTestUtils.captureOut(); + subject = new OzoneGetConf(); + subject.getConf().set(OMConfigKeys.OZONE_OM_NODE_ID_KEY, "1"); + subject.getConf().set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, "service1"); + subject.getConf().set(ScmConfigKeys.OZONE_SCM_NAMES, "localhost"); } @AfterEach public void setUp() { - bout.reset(); + out.reset(); } @AfterAll public static void tearDown() { - System.setOut(psBackup); + IOUtils.closeQuietly(out); } @Test - public void testGetConfWithTheOptionConfKey() - throws UnsupportedEncodingException { - new OzoneGetConf(conf) - .run(new String[] {"-confKey", ScmConfigKeys.OZONE_SCM_NAMES}); - assertEquals("localhost\n", bout.toString(DEFAULT_ENCODING)); - bout.reset(); - new OzoneGetConf(conf) - .run(new String[] {"confKey", OMConfigKeys.OZONE_OM_NODE_ID_KEY}); - assertEquals("1\n", bout.toString(DEFAULT_ENCODING)); + public void testGetConfWithTheOptionConfKey() { + subject.run(new String[] {"-confKey", ScmConfigKeys.OZONE_SCM_NAMES}); + assertEquals("localhost\n", out.get()); + out.reset(); + subject.run(new String[] {"confKey", OMConfigKeys.OZONE_OM_NODE_ID_KEY}); + assertEquals("1\n", out.get()); } @Test - public void testGetConfWithTheOptionStorageContainerManagers() - throws UnsupportedEncodingException { - new OzoneGetConf(conf).run(new String[] {"-storagecontainermanagers"}); - assertEquals("localhost\n", bout.toString(DEFAULT_ENCODING)); - bout.reset(); - new OzoneGetConf(conf).run(new String[] {"storagecontainermanagers"}); - assertEquals("localhost\n", bout.toString(DEFAULT_ENCODING)); + public void testGetConfWithTheOptionStorageContainerManagers() { + subject.execute(new String[] {"-storagecontainermanagers"}); + assertEquals("localhost\n", out.get()); + out.reset(); + subject.execute(new String[] {"storagecontainermanagers"}); + assertEquals("localhost\n", out.get()); } @Test - public void testGetConfWithTheOptionOzoneManagers() - throws UnsupportedEncodingException { - new OzoneGetConf(conf).run(new String[] {"-ozonemanagers"}); - assertEquals("", bout.toString(DEFAULT_ENCODING)); - bout.reset(); - new OzoneGetConf(conf).run(new String[] {"ozonemanagers"}); - assertEquals("", bout.toString(DEFAULT_ENCODING)); + public void testGetConfWithTheOptionOzoneManagers() { + subject.execute(new String[] {"-ozonemanagers"}); + assertEquals("", out.get()); + out.reset(); + subject.execute(new String[] {"ozonemanagers"}); + assertEquals("", out.get()); } } From 12def8a0f1a754b0f456948ef4c90edb9292d2fb Mon Sep 17 00:00:00 2001 From: Chung En Lee Date: Sun, 12 Jan 2025 15:24:50 +0800 Subject: [PATCH 081/168] HDDS-11326. 
Speed up TestBlockOutputStreamWithFailures (#7374) --- .../client/rpc/TestBlockOutputStream.java | 7 ++++- .../TestBlockOutputStreamWithFailures.java | 31 ++++++++++++------- 2 files changed, 25 insertions(+), 13 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java index 63692c0dfc7..30cfb15a5dd 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStream.java @@ -84,6 +84,11 @@ class TestBlockOutputStream { static MiniOzoneCluster createCluster() throws IOException, InterruptedException, TimeoutException { + return createCluster(5); + } + + static MiniOzoneCluster createCluster(int datanodes) throws IOException, + InterruptedException, TimeoutException { OzoneConfiguration conf = new OzoneConfiguration(); OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); clientConfig.setChecksumType(ChecksumType.NONE); @@ -126,7 +131,7 @@ static MiniOzoneCluster createCluster() throws IOException, .applyTo(conf); MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(5) + .setNumDatanodes(datanodes) .build(); cluster.waitForClusterToBeReady(); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java index 010bd93834b..e6a6b672229 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java @@ -19,17 +19,18 @@ import org.apache.commons.lang3.ArrayUtils; import org.apache.commons.lang3.RandomUtils; import org.apache.hadoop.hdds.client.ReplicationFactor; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientRatis; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.storage.RatisBlockOutputStream; +import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.io.KeyOutputStream; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.container.TestHelper; -import org.apache.ozone.test.tag.Flaky; import static org.apache.hadoop.hdds.scm.client.HddsClientUtils.checkForException; import static org.apache.hadoop.ozone.client.rpc.TestBlockOutputStream.BLOCK_SIZE; @@ -49,16 +50,18 @@ import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertTrue; +import org.apache.ozone.test.tag.Flaky; import org.apache.ratis.protocol.exceptions.GroupMismatchException; import org.apache.ratis.protocol.exceptions.RaftRetryFailureException; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; import 
org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; +import java.io.IOException; import java.util.stream.Stream; /** @@ -66,16 +69,17 @@ */ @TestInstance(TestInstance.Lifecycle.PER_CLASS) @Timeout(300) +@Flaky("HDDS-11849") class TestBlockOutputStreamWithFailures { private MiniOzoneCluster cluster; - @BeforeEach + @BeforeAll void init() throws Exception { - cluster = createCluster(); + cluster = createCluster(25); } - @AfterEach + @AfterAll void shutdown() { if (cluster != null) { cluster.shutdown(); @@ -187,7 +191,6 @@ private void testWatchForCommitWithCloseContainerException(OzoneClient client) @ParameterizedTest @MethodSource("clientParameters") - @Flaky("HDDS-6113") void testWatchForCommitDatanodeFailure(boolean flushDelay, boolean enablePiggybacking) throws Exception { OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { @@ -246,7 +249,7 @@ void testWatchForCommitDatanodeFailure(boolean flushDelay, boolean enablePiggyba (XceiverClientRatis) blockOutputStream.getXceiverClient(); assertEquals(3, raftClient.getCommitInfoMap().size()); Pipeline pipeline = raftClient.getPipeline(); - cluster.shutdownHddsDatanode(pipeline.getNodes().get(0)); + stopAndRemove(pipeline.getNodes().get(0)); // again write data with more than max buffer limit. This will call // watchForCommit again. Since the commit will happen 2 way, the @@ -272,7 +275,6 @@ void testWatchForCommitDatanodeFailure(boolean flushDelay, boolean enablePiggyba @ParameterizedTest @MethodSource("clientParameters") - @Flaky("HDDS-11849") void test2DatanodesFailure(boolean flushDelay, boolean enablePiggybacking) throws Exception { OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { @@ -329,8 +331,8 @@ void test2DatanodesFailure(boolean flushDelay, boolean enablePiggybacking) throw (XceiverClientRatis) blockOutputStream.getXceiverClient(); assertEquals(3, raftClient.getCommitInfoMap().size()); Pipeline pipeline = raftClient.getPipeline(); - cluster.shutdownHddsDatanode(pipeline.getNodes().get(0)); - cluster.shutdownHddsDatanode(pipeline.getNodes().get(1)); + stopAndRemove(pipeline.getNodes().get(0)); + stopAndRemove(pipeline.getNodes().get(1)); // again write data with more than max buffer limit. This will call // watchForCommit again. 
Since the commit will happen 2 way, the // commitInfoMap will get updated for servers which are alive @@ -579,7 +581,6 @@ private void testWatchForCommitWithSingleNodeRatis(OzoneClient client) @ParameterizedTest @MethodSource("clientParameters") - @Flaky("HDDS-6113") void testDatanodeFailureWithSingleNode(boolean flushDelay, boolean enablePiggybacking) throws Exception { OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay, enablePiggybacking); try (OzoneClient client = newClient(cluster.getConf(), config)) { @@ -761,4 +762,10 @@ void testDatanodeFailureWithPreAllocation(boolean flushDelay, boolean enablePigg } } + private void stopAndRemove(DatanodeDetails dn) throws IOException { + HddsDatanodeService datanode = cluster.getHddsDatanodes().remove(cluster.getHddsDatanodeIndex(dn)); + datanode.stop(); + datanode.join(); + } + } From 67fdb88b95050feea654a00649c54d014331b691 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Sun, 12 Jan 2025 09:47:55 +0100 Subject: [PATCH 082/168] HDDS-12068. Enable sortpom in remaining hdds modules (#7686) --- hadoop-hdds/managed-rocksdb/pom.xml | 32 ++- hadoop-hdds/rocks-native/pom.xml | 125 +++++------ hadoop-hdds/rocksdb-checkpoint-differ/pom.xml | 70 +++---- hadoop-hdds/server-scm/pom.xml | 196 ++++++++---------- hadoop-hdds/test-utils/pom.xml | 47 ++--- hadoop-hdds/tools/pom.xml | 124 +++++------ 6 files changed, 267 insertions(+), 327 deletions(-) diff --git a/hadoop-hdds/managed-rocksdb/pom.xml b/hadoop-hdds/managed-rocksdb/pom.xml index 144d482be12..82b58b5a4db 100644 --- a/hadoop-hdds/managed-rocksdb/pom.xml +++ b/hadoop-hdds/managed-rocksdb/pom.xml @@ -12,9 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -23,26 +21,32 @@ hdds-managed-rocksdb 2.0.0-SNAPSHOT - Apache Ozone Managed RocksDB library - Apache Ozone HDDS Managed RocksDB jar + Apache Ozone HDDS Managed RocksDB + Apache Ozone Managed RocksDB library - true - true + + true + + com.google.guava + guava + + + jakarta.annotation + jakarta.annotation-api + org.apache.ozone hdds-common - org.apache.ratis ratis-common - org.rocksdb rocksdbjni @@ -51,16 +55,6 @@ org.slf4j slf4j-api - - - com.google.guava - guava - - - - jakarta.annotation - jakarta.annotation-api - diff --git a/hadoop-hdds/rocks-native/pom.xml b/hadoop-hdds/rocks-native/pom.xml index c01a4f16651..087dc8c0235 100644 --- a/hadoop-hdds/rocks-native/pom.xml +++ b/hadoop-hdds/rocks-native/pom.xml @@ -12,23 +12,25 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + + 4.0.0 - hdds org.apache.ozone + hdds 2.0.0-SNAPSHOT - 4.0.0 - Apache Ozone HDDS RocksDB Tools hdds-rocks-native - - - true - + Apache Ozone HDDS RocksDB Tools + + com.google.guava + guava + + + org.apache.commons + commons-lang3 + org.apache.ozone hdds-common @@ -37,12 +39,6 @@ org.apache.ozone hdds-managed-rocksdb - - - org.apache.commons - commons-lang3 - - org.eclipse.jetty jetty-io @@ -56,11 +52,6 @@ slf4j-api - - com.google.guava - guava - - org.apache.ozone @@ -108,10 +99,10 @@ get-cpu-count - generate-sources cpu-count + generate-sources system.numCores @@ -140,10 +131,10 @@ set-property - initialize java + initialize org.apache.hadoop.hdds.utils.db.managed.JniLibNamePropertyWriter @@ -159,10 +150,10 @@ read-property-from-file - initialize read-project-properties + initialize ${project.build.directory}/propertyFile.txt @@ -177,10 +168,10 @@ unpack-dependency - initialize unpack + initialize @@ -201,10 +192,10 @@ rocksdb source download - generate-sources wget + generate-sources https://github.com/facebook/rocksdb/archive/refs/tags/v${rocksdb.version}.tar.gz rocksdb-v${rocksdb.version}.tar.gz @@ -225,10 +216,10 @@ patch - process-sources apply + process-sources @@ -238,70 +229,71 @@ unzip-artifact + + run + generate-sources - + - - run - build-rocksjava + + run + generate-resources - - - + + + - - - - - - + + + + + + - - run - build-rocks-tools + + run + process-classes - - - - - - - - - - - - - + + + + + + + + + + + + + - - + + - - run - copy-lib-file + + run + process-classes @@ -310,9 +302,6 @@ - - run - @@ -356,10 +345,10 @@ native-maven-plugin - compile javah + compile ${env.JAVA_HOME}/bin/javah @@ -390,10 +379,10 @@ copy-dependencies - process-sources copy-dependencies + process-sources ${project.build.directory}/dependency runtime diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml index fc8cf910613..cb7ff3acd59 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml +++ b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -25,18 +22,30 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> rocksdb-checkpoint-differ 2.0.0-SNAPSHOT - RocksDB Checkpoint Differ - RocksDB Checkpoint Differ jar - - - true - + RocksDB Checkpoint Differ + RocksDB Checkpoint Differ - org.rocksdb - rocksdbjni + com.github.vlsi.mxgraph + jgraphx + + + com.google.guava + guava + + + com.google.protobuf + protobuf-java + + + commons-collections + commons-collections + + + org.apache.commons + commons-lang3 org.apache.ozone @@ -58,15 +67,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds-rocks-native - - - com.google.guava - guava - - - org.apache.commons - commons-lang3 - org.apache.ratis ratis-common @@ -79,6 +79,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.jgrapht jgrapht-ext + + org.rocksdb + rocksdbjni + org.slf4j slf4j-api @@ -88,20 +92,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> slf4j-reload4j - - com.github.vlsi.mxgraph - jgraphx - - - com.google.protobuf - protobuf-java - - - - commons-collections - commons-collections - - org.apache.ozone @@ -110,15 +100,15 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone - hdds-test-utils + hdds-rocks-native + ${project.version} + test-jar test org.apache.ozone - hdds-rocks-native - ${project.version} + hdds-test-utils test - test-jar @@ -143,7 +133,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> depcheck - + diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index a1da1f4e68c..4137f443c71 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -24,38 +21,97 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-server-scm 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Storage Container Manager Server - Apache Ozone HDDS SCM Server jar + Apache Ozone HDDS SCM Server + Apache Ozone Distributed Data Store Storage Container Manager Server false - true + + com.fasterxml.jackson.core + jackson-annotations + + + com.fasterxml.jackson.core + jackson-core + + + com.fasterxml.jackson.core + jackson-databind + + + com.google.guava + guava + com.google.protobuf protobuf-java compile + + commons-collections + commons-collections + + + commons-io + commons-io + + + info.picocli + picocli + + + io.dropwizard.metrics + metrics-core + + + jakarta.annotation + jakarta.annotation-api + + + javax.servlet + javax.servlet-api + + + org.apache.commons + commons-compress + + + org.apache.commons + commons-lang3 + + + org.apache.commons + commons-text + + + org.apache.hadoop + hadoop-hdfs-client + + + com.squareup.okhttp + okhttp + + + org.apache.ozone - hdds-common + hdds-client org.apache.ozone - hdds-config + hdds-common - org.apache.ozone - hdds-container-service + hdds-config - org.apache.ozone - hdds-client + hdds-container-service org.apache.ozone @@ -77,13 +133,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds-server-framework - - - org.apache.ozone - hdds-docs - provided - - org.apache.ratis ratis-client @@ -106,17 +155,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ratis - ratis-server-api + ratis-server org.apache.ratis - ratis-server + ratis-server-api org.apache.ratis ratis-thirdparty-misc - org.bouncycastle bcpkix-jdk18on @@ -125,35 +173,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.bouncycastle bcprov-jdk18on - - 
io.dropwizard.metrics - metrics-core - - - - org.apache.commons - commons-compress - - - org.apache.commons - commons-lang3 - - - org.apache.commons - commons-text - - - - org.apache.hadoop - hadoop-hdfs-client - - - com.squareup.okhttp - okhttp - - - - org.eclipse.jetty jetty-webapp @@ -162,44 +181,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.slf4j slf4j-api - - - com.fasterxml.jackson.core - jackson-annotations - - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - com.google.guava - guava - - - - info.picocli - picocli - - - jakarta.annotation - jakarta.annotation-api - - - javax.servlet - javax.servlet-api - - - commons-collections - commons-collections - - - commons-io - commons-io + org.apache.ozone + hdds-docs + provided @@ -216,14 +201,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone - hdds-hadoop-dependency-test + hdds-container-service + test-jar test org.apache.ozone - hdds-container-service + hdds-hadoop-dependency-test test - test-jar org.apache.ozone @@ -232,6 +217,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + + ${basedir}/../../hdds/common/src/main/resources + + + ${basedir}/src/test/resources + + org.apache.maven.plugins @@ -260,7 +253,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> maven-enforcer-plugin - ban-annotations + ban-annotations + @@ -281,24 +275,22 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> copy-common-html - prepare-package unpack + prepare-package org.apache.ozone hdds-server-framework - ${project.build.outputDirectory} - + ${project.build.outputDirectory} webapps/static/**/*.* org.apache.ozone hdds-docs - ${project.build.outputDirectory}/webapps/scm - + ${project.build.outputDirectory}/webapps/scm docs/**/*.* @@ -315,13 +307,5 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - - ${basedir}/../../hdds/common/src/main/resources - - - ${basedir}/src/test/resources - - diff --git a/hadoop-hdds/test-utils/pom.xml b/hadoop-hdds/test-utils/pom.xml index 903f01c8269..0c4d5598192 100644 --- a/hadoop-hdds/test-utils/pom.xml +++ b/hadoop-hdds/test-utils/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -24,18 +21,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-test-utils 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Test Utils - Apache Ozone HDDS Test Utils jar - - - true - + Apache Ozone HDDS Test Utils + Apache Ozone Distributed Data Store Test Utils - org.assertj - assertj-core + ch.qos.reload4j + reload4j com.google.guava @@ -50,25 +43,24 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> commons-logging - org.junit.jupiter - junit-jupiter-api + jakarta.annotation + jakarta.annotation-api - org.junit.platform - junit-platform-launcher - provided + org.apache.commons + commons-lang3 - ch.qos.reload4j - reload4j + org.apache.logging.log4j + log4j-api - jakarta.annotation - jakarta.annotation-api + org.assertj + assertj-core - org.apache.commons - commons-lang3 + org.junit.jupiter + junit-jupiter-api org.slf4j @@ -85,10 +77,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - org.apache.logging.log4j - log4j-api - org.apache.logging.log4j log4j-core @@ -99,6 +87,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.jacoco.core provided + + org.junit.platform + junit-platform-launcher + provided + org.mockito mockito-core diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml index 8af514f65b7..6a7dd1e9706 100644 --- a/hadoop-hdds/tools/pom.xml +++ b/hadoop-hdds/tools/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -25,15 +22,55 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-tools 2.0.0-SNAPSHOT - Apache Ozone Distributed Data Store Tools - Apache Ozone HDDS Tools jar - - - true - + Apache Ozone HDDS Tools + Apache Ozone Distributed Data Store Tools + + ch.qos.reload4j + reload4j + + + com.fasterxml.jackson.core + jackson-annotations + + + com.fasterxml.jackson.core + jackson-core + + + com.fasterxml.jackson.core + jackson-databind + + + com.fasterxml.jackson.datatype + jackson-datatype-jsr310 + + + com.google.guava + guava + + + commons-cli + commons-cli + + + commons-io + commons-io + + + info.picocli + picocli + + + org.apache.commons + commons-lang3 + + + org.apache.ozone + hdds-client + org.apache.ozone hdds-common @@ -60,20 +97,15 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone - hdds-client - - - - org.apache.commons - commons-lang3 + hdds-server-scm org.apache.ratis ratis-common - ratis-tools org.apache.ratis + ratis-tools ${ratis.version} @@ -82,22 +114,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - commons-cli - commons-cli - - - ch.qos.reload4j - reload4j - org.kohsuke.metainf-services metainf-services - - org.xerial - sqlite-jdbc - org.slf4j slf4j-api @@ -108,39 +128,13 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ${slf4j.version} - org.apache.ozone - hdds-server-scm - - - - com.fasterxml.jackson.core - jackson-annotations - - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 - - - com.google.guava - guava - - - - info.picocli - picocli + org.xerial + sqlite-jdbc - - commons-io - commons-io + commons-codec + commons-codec + test @@ -153,8 +147,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds-container-service - test test-jar + test org.apache.ozone @@ -166,11 +160,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hdds-test-utils test - - commons-codec - commons-codec - test - @@ -202,7 +191,8 
@@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> maven-enforcer-plugin - ban-annotations + ban-annotations + From c3876565308902cbe96fb9b9ce0b8d9030f6e7c9 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Mon, 13 Jan 2025 15:03:21 +0100 Subject: [PATCH 083/168] HDDS-12063. Speed up TestLeaseRecovery (#7688) --- .../hadoop/fs/ozone/TestLeaseRecovery.java | 237 ++++++++++-------- 1 file changed, 130 insertions(+), 107 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java index d4885300332..f178bf24359 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestLeaseRecovery.java @@ -41,14 +41,22 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler; +import org.apache.hadoop.ozone.om.OzoneManager; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; import org.apache.hadoop.utils.FaultInjectorImpl; import org.apache.ozone.test.GenericTestUtils; +import org.apache.ozone.test.OzoneTestBase; import org.apache.ozone.test.tag.Flaky; +import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.Order; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.api.TestMethodOrder; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; @@ -58,8 +66,10 @@ import java.io.IOException; import java.io.OutputStream; import java.net.ConnectException; +import java.util.LinkedList; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT; import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_READ_TIMEOUT; @@ -75,6 +85,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -83,7 +94,11 @@ */ @Timeout(300) @Flaky("HDDS-11323") -public class TestLeaseRecovery { +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +@TestMethodOrder(MethodOrderer.OrderAnnotation.class) +public class TestLeaseRecovery extends OzoneTestBase { + + private static final AtomicInteger FILE_COUNTER = new AtomicInteger(); private MiniOzoneCluster cluster; private OzoneBucket bucket; @@ -92,6 +107,8 @@ public class TestLeaseRecovery { private final OzoneConfiguration conf = new OzoneConfiguration(); private String dir; private Path file; + private GenericTestUtils.LogCapturer xceiverClientLogs; + private RootedOzoneFileSystem fs; /** * Closing the output stream after lease recovery throws because the key @@ -104,12 +121,15 @@ 
public static void closeIgnoringKeyNotFound(OutputStream stream) { public static void closeIgnoringOMException(OutputStream stream, OMException.ResultCodes expectedResultCode) { try { stream.close(); - } catch (IOException e) { - assertEquals(expectedResultCode, ((OMException)e).getResult()); + } catch (OMException e) { + assertEquals(expectedResultCode, e.getResult()); + } catch (Exception e) { + OMException omException = assertInstanceOf(OMException.class, e.getCause()); + assertEquals(expectedResultCode, omException.getResult()); } } - @BeforeEach + @BeforeAll public void init() throws IOException, InterruptedException, TimeoutException { final int chunkSize = 16 << 10; @@ -120,6 +140,7 @@ public void init() throws IOException, InterruptedException, conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); + conf.setBoolean("fs." + OZONE_OFS_URI_SCHEME + ".impl.disable.cache", true); conf.setBoolean(OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED, true); conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, layout.name()); conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10); @@ -152,10 +173,24 @@ public void init() throws IOException, InterruptedException, final String rootPath = String.format("%s://%s/", OZONE_OFS_URI_SCHEME, conf.get(OZONE_OM_ADDRESS_KEY)); conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); dir = OZONE_ROOT + bucket.getVolumeName() + OZONE_URI_DELIMITER + bucket.getName(); - file = new Path(dir, "file"); + + xceiverClientLogs = GenericTestUtils.LogCapturer.captureLogs(XceiverClientGrpc.getLogger()); + } + + @BeforeEach + void beforeEach() throws Exception { + file = new Path(dir, "file-" + getTestName() + "-" + FILE_COUNTER.incrementAndGet()); + fs = (RootedOzoneFileSystem) FileSystem.get(conf); } @AfterEach + void afterEach() { + IOUtils.closeQuietly(fs); + xceiverClientLogs.clearOutput(); + KeyValueHandler.setInjector(null); + } + + @AfterAll public void tearDown() { IOUtils.closeQuietly(client); if (cluster != null) { @@ -166,8 +201,6 @@ public void tearDown() { @ParameterizedTest @ValueSource(ints = {1 << 17, (1 << 17) + 1, (1 << 17) - 1}) public void testRecovery(int dataSize) throws Exception { - RootedOzoneFileSystem fs = (RootedOzoneFileSystem)FileSystem.get(conf); - final byte[] data = getData(dataSize); final FSDataOutputStream stream = fs.create(file, true); @@ -199,8 +232,6 @@ public void testRecovery(int dataSize) throws Exception { @Test public void testRecoveryWithoutHsyncHflushOnLastBlock() throws Exception { - RootedOzoneFileSystem fs = (RootedOzoneFileSystem)FileSystem.get(conf); - int blockSize = (int) cluster.getOzoneManager().getConfiguration().getStorageSize( OZONE_SCM_BLOCK_SIZE, OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES); @@ -235,24 +266,16 @@ public void testRecoveryWithoutHsyncHflushOnLastBlock() throws Exception { @Test public void testOBSRecoveryShouldFail() throws Exception { - // Set the fs.defaultFS - bucket = TestDataUtil.createVolumeAndBucket(client, + OzoneBucket obsBucket = TestDataUtil.createVolumeAndBucket(client, "vol2", "obs", BucketLayout.OBJECT_STORE); - final String rootPath = String.format("%s://%s/", OZONE_OFS_URI_SCHEME, - conf.get(OZONE_OM_ADDRESS_KEY)); - conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath); + String obsDir = OZONE_ROOT + obsBucket.getVolumeName() + OZONE_URI_DELIMITER + obsBucket.getName(); + Path obsFile = new Path(obsDir, "file" + getTestName() + FILE_COUNTER.incrementAndGet()); - final String directory = 
OZONE_ROOT + bucket.getVolumeName() + - OZONE_URI_DELIMITER + bucket.getName(); - final Path f = new Path(directory, "file"); - - RootedOzoneFileSystem fs = (RootedOzoneFileSystem) FileSystem.get(conf); - assertThrows(IllegalArgumentException.class, () -> fs.recoverLease(f)); + assertThrows(IllegalArgumentException.class, () -> fs.recoverLease(obsFile)); } @Test public void testFinalizeBlockFailure() throws Exception { - RootedOzoneFileSystem fs = (RootedOzoneFileSystem)FileSystem.get(conf); int dataSize = 100; final byte[] data = getData(dataSize); @@ -294,7 +317,6 @@ public void testFinalizeBlockFailure() throws Exception { @Test public void testBlockPipelineClosed() throws Exception { - RootedOzoneFileSystem fs = (RootedOzoneFileSystem)FileSystem.get(conf); int dataSize = 100; final byte[] data = getData(dataSize); @@ -310,8 +332,7 @@ public void testBlockPipelineClosed() throws Exception { // close the pipeline StorageContainerManager scm = cluster.getStorageContainerManager(); - ContainerInfo container = scm.getContainerManager().getContainers().get(0); - OzoneTestUtils.closeContainer(scm, container); + ContainerInfo container = closeLatestContainer(); GenericTestUtils.waitFor(() -> { try { return scm.getPipelineManager().getPipeline(container.getPipelineID()).isClosed(); @@ -338,62 +359,59 @@ public void testBlockPipelineClosed() throws Exception { @ValueSource(booleans = {false, true}) public void testGetCommittedBlockLengthTimeout(boolean forceRecovery) throws Exception { // reduce read timeout - conf.set(OZONE_CLIENT_READ_TIMEOUT, "2s"); + OzoneConfiguration clientConf = new OzoneConfiguration(conf); + clientConf.set(OZONE_CLIENT_READ_TIMEOUT, "2s"); // set force recovery System.setProperty(FORCE_LEASE_RECOVERY_ENV, String.valueOf(forceRecovery)); - RootedOzoneFileSystem fs = (RootedOzoneFileSystem)FileSystem.get(conf); - int dataSize = 100; - final byte[] data = getData(dataSize); + try (RootedOzoneFileSystem fs = (RootedOzoneFileSystem)FileSystem.get(clientConf)) { + int dataSize = 100; + final byte[] data = getData(dataSize); - final FSDataOutputStream stream = fs.create(file, true); - try { - stream.write(data); - stream.hsync(); - assertFalse(fs.isFileClosed(file)); - - // write more data without hsync - stream.write(data); - stream.flush(); - - // close the pipeline and container - ContainerInfo container = cluster.getStorageContainerManager().getContainerManager().getContainers().get(0); - OzoneTestUtils.closeContainer(cluster.getStorageContainerManager(), container); - // pause getCommittedBlockLength handling on all DNs to make sure all getCommittedBlockLength will time out - FaultInjectorImpl injector = new FaultInjectorImpl(); - injector.setType(ContainerProtos.Type.GetCommittedBlockLength); - KeyValueHandler.setInjector(injector); - GenericTestUtils.LogCapturer logs = - GenericTestUtils.LogCapturer.captureLogs(XceiverClientGrpc.getLogger()); - if (!forceRecovery) { - assertThrows(IOException.class, () -> fs.recoverLease(file)); - return; - } else { - fs.recoverLease(file); + final FSDataOutputStream stream = fs.create(file, true); + try { + stream.write(data); + stream.hsync(); + assertFalse(fs.isFileClosed(file)); + + // write more data without hsync + stream.write(data); + stream.flush(); + + // close the pipeline and container + closeLatestContainer(); + // pause getCommittedBlockLength handling on all DNs to make sure all getCommittedBlockLength will time out + FaultInjectorImpl injector = new FaultInjectorImpl(); + 
injector.setType(ContainerProtos.Type.GetCommittedBlockLength); + KeyValueHandler.setInjector(injector); + if (!forceRecovery) { + assertThrows(IOException.class, () -> fs.recoverLease(file)); + return; + } else { + fs.recoverLease(file); + } + assertEquals(3, StringUtils.countMatches(xceiverClientLogs.getOutput(), + "Executing command cmdType: GetCommittedBlockLength")); + + // The lease should have been recovered. + assertTrue(fs.isFileClosed(file), "File should be closed"); + FileStatus fileStatus = fs.getFileStatus(file); + // Since all DNs are out, then the length in OM keyInfo will be used as the final file length + assertEquals(dataSize, fileStatus.getLen()); + } finally { + if (!forceRecovery) { + closeIgnoringOMException(stream, OMException.ResultCodes.KEY_UNDER_LEASE_RECOVERY); + } else { + closeIgnoringKeyNotFound(stream); + } } - assertEquals(3, StringUtils.countMatches(logs.getOutput(), - "Executing command cmdType: GetCommittedBlockLength")); - // The lease should have been recovered. - assertTrue(fs.isFileClosed(file), "File should be closed"); - FileStatus fileStatus = fs.getFileStatus(file); - // Since all DNs are out, then the length in OM keyInfo will be used as the final file length - assertEquals(dataSize, fileStatus.getLen()); - } finally { - if (!forceRecovery) { - closeIgnoringOMException(stream, OMException.ResultCodes.KEY_UNDER_LEASE_RECOVERY); - } else { - closeIgnoringKeyNotFound(stream); - } - KeyValueHandler.setInjector(null); + // open it again, make sure the data is correct + verifyData(data, dataSize, file, fs); } - - // open it again, make sure the data is correct - verifyData(data, dataSize, file, fs); } @Test public void testGetCommittedBlockLengthWithException() throws Exception { - RootedOzoneFileSystem fs = (RootedOzoneFileSystem)FileSystem.get(conf); int dataSize = 100; final byte[] data = getData(dataSize); @@ -408,8 +426,7 @@ public void testGetCommittedBlockLengthWithException() throws Exception { stream.flush(); // close the pipeline and container - ContainerInfo container = cluster.getStorageContainerManager().getContainerManager().getContainers().get(0); - OzoneTestUtils.closeContainer(cluster.getStorageContainerManager(), container); + ContainerInfo container = closeLatestContainer(); // throw exception on first DN getCommittedBlockLength handling FaultInjectorImpl injector = new FaultInjectorImpl(); KeyValueHandler.setInjector(injector); @@ -418,14 +435,13 @@ public void testGetCommittedBlockLengthWithException() throws Exception { ContainerProtos.Result.CONTAINER_NOT_FOUND); injector.setException(sce); - GenericTestUtils.LogCapturer logs = - GenericTestUtils.LogCapturer.captureLogs(XceiverClientGrpc.getLogger()); fs.recoverLease(file); - assertEquals(2, StringUtils.countMatches(logs.getOutput(), - "Executing command cmdType: GetCommittedBlockLength")); - assertEquals(1, StringUtils.countMatches(logs.getOutput(), - "Failed to execute command cmdType: GetCommittedBlockLength")); + String output = xceiverClientLogs.getOutput(); + assertEquals(2, StringUtils.countMatches(output, + "Executing command cmdType: GetCommittedBlockLength"), output); + assertEquals(1, StringUtils.countMatches(output, + "Failed to execute command cmdType: GetCommittedBlockLength"), output); // The lease should have been recovered. 
assertTrue(fs.isFileClosed(file), "File should be closed"); @@ -433,7 +449,6 @@ public void testGetCommittedBlockLengthWithException() throws Exception { assertEquals(dataSize * 2, fileStatus.getLen()); } finally { closeIgnoringKeyNotFound(stream); - KeyValueHandler.setInjector(null); } // open it again, make sure the data is correct @@ -441,29 +456,36 @@ public void testGetCommittedBlockLengthWithException() throws Exception { } @Test + @Order(Integer.MAX_VALUE) public void testOMConnectionFailure() throws Exception { // reduce hadoop RPC retry max attempts - conf.setInt(OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY, 5); - conf.setLong(OZONE_CLIENT_WAIT_BETWEEN_RETRIES_MILLIS_KEY, 100); - RootedOzoneFileSystem fs = (RootedOzoneFileSystem)FileSystem.get(conf); - int dataSize = 100; - final byte[] data = getData(dataSize); - - final FSDataOutputStream stream = fs.create(file, true); - try { - stream.write(data); - stream.hsync(); - assertFalse(fs.isFileClosed(file)); - - // close OM - cluster.getOzoneManager().stop(); - assertThrows(ConnectException.class, () -> fs.recoverLease(file)); - } finally { + OzoneConfiguration clientConf = new OzoneConfiguration(conf); + clientConf.setInt(OzoneConfigKeys.OZONE_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY, 5); + clientConf.setLong(OZONE_CLIENT_WAIT_BETWEEN_RETRIES_MILLIS_KEY, 100); + try (RootedOzoneFileSystem fs = (RootedOzoneFileSystem) FileSystem.get(clientConf)) { + int dataSize = 100; + final byte[] data = getData(dataSize); + + final FSDataOutputStream stream = fs.create(file, true); + OzoneManager om = cluster.getOzoneManager(); try { - stream.close(); - } catch (Throwable e) { + stream.write(data); + stream.hsync(); + assertFalse(fs.isFileClosed(file)); + + // close OM + if (om.stop()) { + om.join(); + } + assertThrows(ConnectException.class, () -> fs.recoverLease(file)); + } finally { + try { + stream.close(); + } catch (Throwable e) { + } } - cluster.getOzoneManager().restart(); + + om.restart(); cluster.waitForClusterToBeReady(); assertTrue(fs.recoverLease(file)); } @@ -473,7 +495,6 @@ public void testOMConnectionFailure() throws Exception { public void testRecoverWrongFile() throws Exception { final Path notExistFile = new Path(dir, "file1"); - RootedOzoneFileSystem fs = (RootedOzoneFileSystem)FileSystem.get(conf); int dataSize = 100; final byte[] data = getData(dataSize); @@ -491,8 +512,6 @@ public void testRecoverWrongFile() throws Exception { @Test public void testRecoveryWithoutBlocks() throws Exception { - RootedOzoneFileSystem fs = (RootedOzoneFileSystem)FileSystem.get(conf); - final FSDataOutputStream stream = fs.create(file, true); try { stream.hsync(); @@ -512,7 +531,6 @@ public void testRecoveryWithoutBlocks() throws Exception { @Test public void testRecoveryWithPartialFilledHsyncBlock() throws Exception { - RootedOzoneFileSystem fs = (RootedOzoneFileSystem)FileSystem.get(conf); int blockSize = (int) cluster.getOzoneManager().getConfiguration().getStorageSize( OZONE_SCM_BLOCK_SIZE, OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES); final byte[] data = getData(blockSize - 1); @@ -524,10 +542,9 @@ public void testRecoveryWithPartialFilledHsyncBlock() throws Exception { stream.hsync(); StorageContainerManager scm = cluster.getStorageContainerManager(); - ContainerInfo container = scm.getContainerManager().getContainers().get(0); // Close container so that new data won't be written into the same block // block1 is partially filled - OzoneTestUtils.closeContainer(scm, container); + ContainerInfo container = closeLatestContainer(); 
GenericTestUtils.waitFor(() -> { try { return scm.getPipelineManager().getPipeline(container.getPipelineID()).isClosed(); @@ -560,9 +577,15 @@ public void testRecoveryWithPartialFilledHsyncBlock() throws Exception { verifyData(data, (blockSize - 1) * 2, file, fs); } + private ContainerInfo closeLatestContainer() throws IOException, TimeoutException, InterruptedException { + StorageContainerManager scm = cluster.getStorageContainerManager(); + ContainerInfo container = new LinkedList<>(scm.getContainerManager().getContainers()).getLast(); + OzoneTestUtils.closeContainer(scm, container); + return container; + } + @Test public void testRecoveryWithSameBlockCountInOpenFileAndFileTable() throws Exception { - RootedOzoneFileSystem fs = (RootedOzoneFileSystem)FileSystem.get(conf); int blockSize = (int) cluster.getOzoneManager().getConfiguration().getStorageSize( OZONE_SCM_BLOCK_SIZE, OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES); final byte[] data = getData(blockSize / 2 - 1); @@ -598,8 +621,8 @@ public void testRecoveryWithSameBlockCountInOpenFileAndFileTable() throws Except verifyData(data, (blockSize / 2 - 1) * 2, file, fs); } - private void verifyData(byte[] data, int dataSize, Path filePath, RootedOzoneFileSystem fs) throws IOException { - try (FSDataInputStream fdis = fs.open(filePath)) { + private void verifyData(byte[] data, int dataSize, Path filePath, FileSystem fileSystem) throws IOException { + try (FSDataInputStream fdis = fileSystem.open(filePath)) { int bufferSize = dataSize > data.length ? dataSize / 2 : dataSize; while (dataSize > 0) { byte[] readData = new byte[bufferSize]; From 2516ea6ebac8c34358730b75bbc6a20b65949360 Mon Sep 17 00:00:00 2001 From: Chia-Chuan Yu Date: Mon, 13 Jan 2025 23:14:23 +0800 Subject: [PATCH 084/168] HDDS-12002. Move up out() and err() to AbstractSubcommand (#7687) --- .../hadoop/hdds/cli/AbstractSubcommand.java | 9 ++++ .../scm/cli/container/UpgradeSubcommand.java | 15 +----- .../ozone/shell/TestOzoneTenantShell.java | 48 +++++++------------ .../hadoop/ozone/debug/ldb/DBScanner.java | 28 ++++------- .../hadoop/ozone/debug/ldb/ValueSchema.java | 26 +++------- .../hadoop/ozone/repair/RepairTool.java | 11 ----- .../apache/hadoop/ozone/shell/Handler.java | 11 +---- .../hadoop/ozone/shell/OzoneAddress.java | 4 +- .../hadoop/ozone/shell/acl/AclOption.java | 8 ++-- .../shell/snapshot/SnapshotDiffHandler.java | 12 ++--- 10 files changed, 57 insertions(+), 115 deletions(-) diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AbstractSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AbstractSubcommand.java index 550a68ae07e..00d907c5ce5 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AbstractSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AbstractSubcommand.java @@ -21,6 +21,7 @@ import org.apache.ratis.util.MemoizedSupplier; import picocli.CommandLine; +import java.io.PrintWriter; import java.util.function.Supplier; /** Base functionality for all Ozone subcommands. 
*/ @@ -77,4 +78,12 @@ public OzoneConfiguration getOzoneConf() { return conf; } } + + protected PrintWriter out() { + return spec().commandLine().getOut(); + } + + protected PrintWriter err() { + return spec().commandLine().getErr(); + } } diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/UpgradeSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/UpgradeSubcommand.java index a94f631b5bc..3aeb7813a09 100644 --- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/UpgradeSubcommand.java +++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/UpgradeSubcommand.java @@ -20,6 +20,7 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Strings; import org.apache.commons.lang3.tuple.Pair; +import org.apache.hadoop.hdds.cli.AbstractSubcommand; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -40,7 +41,6 @@ import java.io.File; import java.io.InputStreamReader; -import java.io.PrintWriter; import java.nio.charset.StandardCharsets; import java.util.Iterator; import java.util.List; @@ -56,14 +56,11 @@ "for this datanode.", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class) -public class UpgradeSubcommand implements Callable { +public class UpgradeSubcommand extends AbstractSubcommand implements Callable { private static final Logger LOG = LoggerFactory.getLogger(UpgradeSubcommand.class); - @CommandLine.Spec - private static CommandLine.Model.CommandSpec spec; - @CommandLine.Option(names = {"--volume"}, required = false, description = "volume path") @@ -194,12 +191,4 @@ private OzoneConfiguration getConfiguration() { } return ozoneConfiguration; } - - private static PrintWriter err() { - return spec.commandLine().getErr(); - } - - private static PrintWriter out() { - return spec.commandLine().getOut(); - } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java index 09770b097f8..409d69e9980 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java @@ -42,17 +42,16 @@ import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.io.TempDir; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.event.Level; import picocli.CommandLine; -import java.io.ByteArrayOutputStream; import java.io.File; import java.io.IOException; -import java.io.PrintStream; +import java.io.PrintWriter; +import java.io.StringWriter; import java.io.UnsupportedEncodingException; import java.nio.file.Path; import java.security.PrivilegedExceptionAction; @@ -102,10 +101,8 @@ public class TestOzoneTenantShell { private static OzoneShell ozoneSh = null; private static TenantShell tenantShell = null; - private final ByteArrayOutputStream out = new ByteArrayOutputStream(); - private final ByteArrayOutputStream err = new ByteArrayOutputStream(); - private static final PrintStream OLD_OUT = System.out; - private static final PrintStream OLD_ERR = System.err; + private final StringWriter 
out = new StringWriter(); + private final StringWriter err = new StringWriter(); private static String omServiceId; private static int numOfOMs; @@ -173,9 +170,10 @@ public static void shutdown() { @BeforeEach public void setup() throws UnsupportedEncodingException { - System.setOut(new PrintStream(out, false, UTF_8.name())); - System.setErr(new PrintStream(err, false, UTF_8.name())); - + tenantShell.getCmd().setOut(new PrintWriter(out)); + tenantShell.getCmd().setErr(new PrintWriter(err)); + ozoneSh.getCmd().setOut(new PrintWriter(out)); + ozoneSh.getCmd().setErr(new PrintWriter(err)); // Suppress OMNotLeaderException in the log GenericTestUtils.setLogLevel(RetryInvocationHandler.LOG, Level.WARN); // Enable debug logging for interested classes @@ -187,27 +185,15 @@ public void setup() throws UnsupportedEncodingException { GenericTestUtils.setLogLevel(OMRangerBGSyncService.LOG, Level.DEBUG); } - @AfterEach - public void reset() { - // reset stream after each unit test - out.reset(); - err.reset(); - - // restore system streams - System.setOut(OLD_OUT); - System.setErr(OLD_ERR); - } - /** * Returns exit code. */ private int execute(GenericCli shell, String[] args) { LOG.info("Executing shell command with args {}", Arrays.asList(args)); CommandLine cmd = shell.getCmd(); - CommandLine.IExecutionExceptionHandler exceptionHandler = (ex, commandLine, parseResult) -> { - new PrintStream(err, true, DEFAULT_ENCODING).println(ex.getMessage()); + commandLine.getErr().println(ex.getMessage()); return commandLine.getCommandSpec().exitCodeOnExecutionException(); }; @@ -310,25 +296,25 @@ private String[] getHASetConfStrings(String[] existingArgs) { /** * Helper function that checks command output AND clears it. */ - private void checkOutput(ByteArrayOutputStream stream, String stringToMatch, + private void checkOutput(StringWriter writer, String stringToMatch, boolean exactMatch) throws IOException { - stream.flush(); - final String str = stream.toString(DEFAULT_ENCODING); + writer.flush(); + final String str = writer.toString(); checkOutput(str, stringToMatch, exactMatch); - stream.reset(); + writer.getBuffer().setLength(0); } - private void checkOutput(ByteArrayOutputStream stream, String stringToMatch, + private void checkOutput(StringWriter writer, String stringToMatch, boolean exactMatch, boolean expectValidJSON) throws IOException { - stream.flush(); - final String str = stream.toString(DEFAULT_ENCODING); + writer.flush(); + final String str = writer.toString(); if (expectValidJSON) { // Verify if the String can be parsed as a valid JSON final ObjectMapper objectMapper = new ObjectMapper(); objectMapper.readTree(str); } checkOutput(str, stringToMatch, exactMatch); - stream.reset(); + writer.getBuffer().setLength(0); } private void checkOutput(String str, String stringToMatch, diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/DBScanner.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/DBScanner.java index 6fbbd1a3083..cb432ab45ab 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/DBScanner.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/DBScanner.java @@ -27,6 +27,7 @@ import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; import com.google.common.annotations.VisibleForTesting; import com.google.common.util.concurrent.ThreadFactoryBuilder; +import org.apache.hadoop.hdds.cli.AbstractSubcommand; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import 
org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.pipeline.PipelineID; @@ -87,14 +88,11 @@ name = "scan", description = "Parse specified metadataTable" ) -public class DBScanner implements Callable { +public class DBScanner extends AbstractSubcommand implements Callable { public static final Logger LOG = LoggerFactory.getLogger(DBScanner.class); private static final String SCHEMA_V3 = "V3"; - @CommandLine.Spec - private static CommandLine.Model.CommandSpec spec; - @CommandLine.ParentCommand private RDBParser parent; @@ -214,14 +212,6 @@ public Void call() throws Exception { return null; } - private static PrintWriter err() { - return spec.commandLine().getErr(); - } - - private static PrintWriter out() { - return spec.commandLine().getOut(); - } - public byte[] getValueObject(DBColumnFamilyDefinition dbColumnFamilyDefinition, String key) { Class keyType = dbColumnFamilyDefinition.getKeyType(); if (keyType.equals(String.class)) { @@ -525,7 +515,7 @@ private boolean checkFilteredObjectCollection(Collection valueObject, Map classFieldList = ValueSchema.getAllFields(clazz); Field classField = null; for (Field f : classFieldList) { @@ -680,12 +670,12 @@ public static ObjectWriter getWriter() { } - private static class Task implements Callable { + private class Task implements Callable { private final DBColumnFamilyDefinition dbColumnFamilyDefinition; private final ArrayList batch; private final LogWriter logWriter; - private static final ObjectWriter WRITER = + private final ObjectWriter writer = JsonSerializationHelper.getWriter(); private final long sequenceId; private final boolean withKey; @@ -758,12 +748,12 @@ public Void call() { } String cid = key.toString().substring(0, index); String blockId = key.toString().substring(index); - sb.append(WRITER.writeValueAsString(LongCodec.get() + sb.append(writer.writeValueAsString(LongCodec.get() .fromPersistedFormat( FixedLengthStringCodec.string2Bytes(cid)) + KEY_SEPARATOR_SCHEMA_V3 + blockId)); } else { - sb.append(WRITER.writeValueAsString(key)); + sb.append(writer.writeValueAsString(key)); } sb.append(": "); } @@ -774,9 +764,9 @@ public Void call() { if (valueFields != null) { Map filteredValue = new HashMap<>(); filteredValue.putAll(getFieldsFilteredObject(o, dbColumnFamilyDefinition.getValueType(), fieldsSplitMap)); - sb.append(WRITER.writeValueAsString(filteredValue)); + sb.append(writer.writeValueAsString(filteredValue)); } else { - sb.append(WRITER.writeValueAsString(o)); + sb.append(writer.writeValueAsString(o)); } results.add(sb.toString()); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/ValueSchema.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/ValueSchema.java index 4b8eb3b3208..0c2fb302be9 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/ValueSchema.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/ValueSchema.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.debug.ldb; +import org.apache.hadoop.hdds.cli.AbstractSubcommand; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.server.JsonUtils; import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; @@ -29,7 +30,6 @@ import picocli.CommandLine; import java.io.IOException; -import java.io.PrintWriter; import java.lang.reflect.Field; import java.lang.reflect.Modifier; import java.lang.reflect.ParameterizedType; @@ -51,16 +51,13 @@ name = "value-schema", description = "Schema of value in 
metadataTable" ) -public class ValueSchema implements Callable { +public class ValueSchema extends AbstractSubcommand implements Callable { @CommandLine.ParentCommand private RDBParser parent; public static final Logger LOG = LoggerFactory.getLogger(ValueSchema.class); - @CommandLine.Spec - private static CommandLine.Model.CommandSpec spec; - @CommandLine.Option(names = {"--column_family", "--column-family", "--cf"}, required = true, description = "Table name") @@ -86,7 +83,7 @@ public Void call() throws Exception { String dbPath = parent.getDbPath(); Map fields = new HashMap<>(); - success = getValueFields(dbPath, fields, depth, tableName, dnDBSchemaVersion); + success = getValueFields(dbPath, fields); out().println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(fields)); @@ -99,8 +96,7 @@ public Void call() throws Exception { return null; } - public static boolean getValueFields(String dbPath, Map valueSchema, int d, String table, - String dnDBSchemaVersion) { + public boolean getValueFields(String dbPath, Map valueSchema) { dbPath = removeTrailingSlashIfNeeded(dbPath); DBDefinitionFactory.setDnDBSchemaVersion(dnDBSchemaVersion); @@ -110,14 +106,14 @@ public static boolean getValueFields(String dbPath, Map valueSch return false; } final DBColumnFamilyDefinition columnFamilyDefinition = - dbDefinition.getColumnFamily(table); + dbDefinition.getColumnFamily(tableName); if (columnFamilyDefinition == null) { - err().print("Error: Table with name '" + table + "' not found"); + err().print("Error: Table with name '" + tableName + "' not found"); return false; } Class c = columnFamilyDefinition.getValueType(); - valueSchema.put(c.getSimpleName(), getFieldsStructure(c, d)); + valueSchema.put(c.getSimpleName(), getFieldsStructure(c, depth)); return true; } @@ -162,14 +158,6 @@ public static List getAllFields(Class clazz) { return result; } - private static PrintWriter err() { - return spec.commandLine().getErr(); - } - - private static PrintWriter out() { - return spec.commandLine().getOut(); - } - private static String removeTrailingSlashIfNeeded(String dbPath) { if (dbPath.endsWith(OzoneConsts.OZONE_URI_DELIMITER)) { dbPath = dbPath.substring(0, dbPath.length() - 1); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java index a64cacb8b21..d873d07645d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java @@ -20,7 +20,6 @@ import org.apache.hadoop.hdds.cli.AbstractSubcommand; import picocli.CommandLine; -import java.io.PrintWriter; import java.nio.charset.StandardCharsets; import java.util.Scanner; import java.util.concurrent.Callable; @@ -74,16 +73,6 @@ protected void error(String msg, Object... 
args) { err().println(formatMessage(msg, args)); } - private PrintWriter out() { - return spec().commandLine() - .getOut(); - } - - private PrintWriter err() { - return spec().commandLine() - .getErr(); - } - private String formatMessage(String msg, Object[] args) { if (args != null && args.length > 0) { msg = String.format(msg, args); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Handler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Handler.java index db7294e2795..36eada9b4f9 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Handler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Handler.java @@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.shell; import java.io.IOException; -import java.io.PrintStream; import java.util.Iterator; import java.util.concurrent.Callable; @@ -97,7 +96,7 @@ protected boolean securityEnabled() { } protected void printObjectAsJson(Object o) throws IOException { - out().println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(o)); + System.out.println(JsonUtils.toJsonStringWithDefaultPrettyPrinter(o)); } /** @@ -123,12 +122,4 @@ protected OzoneConfiguration getConf() { return conf; } - protected PrintStream out() { - return System.out; - } - - protected PrintStream err() { - return System.err; - } - } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java index ae5b5ad566e..0129737e0ea 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.shell; import java.io.IOException; -import java.io.PrintStream; +import java.io.PrintWriter; import java.net.URI; import java.net.URISyntaxException; import java.util.Collection; @@ -452,7 +452,7 @@ private OzoneObj.ResourceType getResourceType() { return null; } - public void print(PrintStream out) { + public void print(PrintWriter out) { if (!volumeName.isEmpty()) { out.printf("Volume Name : %s%n", volumeName); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/acl/AclOption.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/acl/AclOption.java index aa1675d28eb..813c13a1cfe 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/acl/AclOption.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/acl/AclOption.java @@ -24,7 +24,7 @@ import picocli.CommandLine; import java.io.IOException; -import java.io.PrintStream; +import java.io.PrintWriter; import java.util.List; /** @@ -52,7 +52,7 @@ private List getAclList() { return ImmutableList.copyOf(values); } - public void addTo(OzoneObj obj, ObjectStore objectStore, PrintStream out) + public void addTo(OzoneObj obj, ObjectStore objectStore, PrintWriter out) throws IOException { for (OzoneAcl acl : getAclList()) { boolean result = objectStore.addAcl(obj, acl); @@ -65,7 +65,7 @@ public void addTo(OzoneObj obj, ObjectStore objectStore, PrintStream out) } } - public void removeFrom(OzoneObj obj, ObjectStore objectStore, PrintStream out) + public void removeFrom(OzoneObj obj, ObjectStore objectStore, PrintWriter out) throws IOException { for (OzoneAcl acl : getAclList()) { boolean result = objectStore.removeAcl(obj, acl); @@ -78,7 +78,7 @@ public void removeFrom(OzoneObj obj, ObjectStore objectStore, 
PrintStream out) } } - public void setOn(OzoneObj obj, ObjectStore objectStore, PrintStream out) + public void setOn(OzoneObj obj, ObjectStore objectStore, PrintWriter out) throws IOException { objectStore.setAcl(obj, getAclList()); out.println("ACLs set successfully."); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotDiffHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotDiffHandler.java index ebbb9509c94..e11c07dcf3b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotDiffHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotDiffHandler.java @@ -35,7 +35,7 @@ import picocli.CommandLine; import java.io.IOException; -import java.io.PrintStream; +import java.io.PrintWriter; import static org.apache.hadoop.hdds.server.JsonUtils.toJsonStringWithDefaultPrettyPrinter; @@ -117,19 +117,19 @@ private void getSnapshotDiff(ObjectStore store, String volumeName, String bucketName) throws IOException { SnapshotDiffResponse diffResponse = store.snapshotDiff(volumeName, bucketName, fromSnapshot, toSnapshot, token, pageSize, forceFullDiff, diffDisableNativeLibs); - try (PrintStream stream = out()) { + try (PrintWriter writer = out()) { if (json) { - stream.println(toJsonStringWithDefaultPrettyPrinter(getJsonObject(diffResponse))); + writer.println(toJsonStringWithDefaultPrettyPrinter(getJsonObject(diffResponse))); } else { - stream.println(diffResponse); + writer.println(diffResponse); } } } private void cancelSnapshotDiff(ObjectStore store, String volumeName, String bucketName) throws IOException { - try (PrintStream stream = out()) { - stream.println(store.cancelSnapshotDiff(volumeName, bucketName, fromSnapshot, toSnapshot)); + try (PrintWriter writer = out()) { + writer.println(store.cancelSnapshotDiff(volumeName, bucketName, fromSnapshot, toSnapshot)); } } From b024a6bb5185a7865aebe15dd8a7e6fe5b624446 Mon Sep 17 00:00:00 2001 From: Abhishek Pal <43001336+devabhishekpal@users.noreply.github.com> Date: Tue, 14 Jan 2025 15:55:09 +0530 Subject: [PATCH 085/168] HDDS-12042. Fix capacity count for cluster capacity card in new vs old UI (#7666) --- .../src/components/overviewCard/overviewCard.tsx | 7 ++++++- .../src/v2/components/overviewCard/overviewStorageCard.tsx | 2 +- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/overviewCard/overviewCard.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/overviewCard/overviewCard.tsx index 6ccc106d019..9d22a5f380c 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/overviewCard/overviewCard.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/components/overviewCard/overviewCard.tsx @@ -154,7 +154,12 @@ class OverviewCard extends React.Component {

{meta}
- +
); diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/overviewCard/overviewStorageCard.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/overviewCard/overviewStorageCard.tsx index 51de4669b99..60cfbb8f032 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/overviewCard/overviewStorageCard.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/overviewCard/overviewStorageCard.tsx @@ -44,7 +44,7 @@ function getUsagePercentages( ozoneUsedPercentage: Math.floor(used / capacity * 100), nonOzoneUsedPercentage: Math.floor((capacity - remaining - used) / capacity * 100), committedPercentage: Math.floor(committed / capacity * 100), - usagePercentage: Math.floor((capacity - remaining) / capacity * 100) + usagePercentage: Math.round((capacity - remaining) / capacity * 100) } } From 864c8a58b83dbfad21dd89ddad2cab43020b105e Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Tue, 14 Jan 2025 16:31:28 +0100 Subject: [PATCH 086/168] HDDS-10312. Speed up TestOMDbCheckpointServlet (#7691) --- .../ozone/om/TestOMDbCheckpointServlet.java | 176 ++++++++---------- 1 file changed, 81 insertions(+), 95 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java index a94f6ea017f..253c993f1df 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java @@ -50,6 +50,7 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -57,12 +58,14 @@ import com.google.common.collect.Sets; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.DBCheckpoint; import org.apache.hadoop.hdds.utils.db.DBStore; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.TestDataUtil; import org.apache.hadoop.ozone.client.OzoneBucket; +import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.lock.BootstrapStateHandler; import org.apache.hadoop.ozone.om.helpers.SnapshotInfo; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; @@ -92,9 +95,6 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.io.TempDir; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.Arguments; -import org.junit.jupiter.params.provider.MethodSource; import static org.apache.hadoop.ozone.om.OmSnapshotManager.OM_HARDLINK_FILE; import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.DATA_PREFIX; @@ -110,7 +110,6 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static 
org.junit.jupiter.params.provider.Arguments.arguments; import static org.mockito.Mockito.any; import static org.mockito.Mockito.anyBoolean; import static org.mockito.Mockito.anyInt; @@ -135,6 +134,8 @@ public class TestOMDbCheckpointServlet { private File tempFile; private ServletOutputStream servletOutputStream; private MiniOzoneCluster cluster = null; + private OzoneClient client; + private OzoneManager om; private OMMetrics omMetrics = null; private HttpServletRequest requestMock = null; private HttpServletResponse responseMock = null; @@ -144,23 +145,33 @@ public class TestOMDbCheckpointServlet { private String snapshotDirName2; private Path compactionDirPath; private DBCheckpoint dbCheckpoint; - private String method; @TempDir private Path folder; private static final String FABRICATED_FILE_NAME = "fabricatedFile.sst"; + private static final AtomicInteger COUNTER = new AtomicInteger(); - /** - * Create a MiniDFSCluster for testing. - *

- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws Exception - */ @BeforeEach void init() throws Exception { conf = new OzoneConfiguration(); + } + + @AfterEach + void shutdown() { + IOUtils.closeQuietly(client, cluster); + } + + private void setupCluster() throws Exception { + cluster = MiniOzoneCluster.newBuilder(conf) + .setNumDatanodes(1) + .build(); + cluster.waitForClusterToBeReady(); + client = cluster.newClient(); + om = cluster.getOzoneManager(); + omMetrics = om.getMetrics(); + } - final Path tempPath = folder.resolve("temp.tar"); + private void setupMocks() throws Exception { + final Path tempPath = folder.resolve("temp" + COUNTER.incrementAndGet() + ".tar"); tempFile = tempPath.toFile(); servletOutputStream = new ServletOutputStream() { @@ -186,30 +197,12 @@ public void write(int b) throws IOException { fileOutputStream.write(b); } }; - } - - /** - * Shutdown MiniDFSCluster. - */ - @AfterEach - public void shutdown() throws InterruptedException { - if (cluster != null) { - cluster.shutdown(); - } - } - - private void setupCluster() throws Exception { - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(1) - .build(); - cluster.waitForClusterToBeReady(); - omMetrics = cluster.getOzoneManager().getMetrics(); omDbCheckpointServletMock = mock(OMDBCheckpointServlet.class); BootstrapStateHandler.Lock lock = - new OMDBCheckpointServlet.Lock(cluster.getOzoneManager()); + new OMDBCheckpointServlet.Lock(om); doCallRealMethod().when(omDbCheckpointServletMock).init(); assertNull( doCallRealMethod().when(omDbCheckpointServletMock).getDbStore()); @@ -225,7 +218,7 @@ private void setupCluster() throws Exception { .thenReturn(servletContextMock); when(servletContextMock.getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE)) - .thenReturn(cluster.getOzoneManager()); + .thenReturn(om); when(requestMock.getParameter(OZONE_DB_CHECKPOINT_REQUEST_FLUSH)) .thenReturn("true"); @@ -244,17 +237,29 @@ private void setupCluster() throws Exception { anyBoolean()); } - @ParameterizedTest - @MethodSource("getHttpMethods") - public void testEndpoint(String httpMethod) throws Exception { - this.method = httpMethod; - + @Test + void testWithoutACL() throws Exception { conf.setBoolean(OZONE_ACL_ENABLED, false); conf.set(OZONE_ADMINISTRATORS, OZONE_ADMINISTRATORS_WILDCARD); setupCluster(); - final OzoneManager om = cluster.getOzoneManager(); + testBootstrapLocking(); + + testEndpoint("POST"); + testEndpoint("GET"); + testDoPostWithInvalidContentType(); + + prepSnapshotData(); + + testWriteDbDataWithoutOmSnapshot(); + testWriteDbDataToStream(); + testWriteDbDataWithToExcludeFileList(); + } + + private void testEndpoint(String method) throws Exception { + setupMocks(); + doCallRealMethod().when(omDbCheckpointServletMock).initialize( om.getMetadataManager().getStore(), om.getMetrics().getDBCheckpointMetrics(), @@ -270,7 +275,7 @@ public void testEndpoint(String httpMethod) throws Exception { toExcludeList.add("sstFile1.sst"); toExcludeList.add("sstFile2.sst"); - setupHttpMethod(toExcludeList); + setupHttpMethod(method, toExcludeList); when(responseMock.getOutputStream()).thenReturn(servletOutputStream); @@ -278,7 +283,7 @@ public void testEndpoint(String httpMethod) throws Exception { long initialCheckpointCount = omMetrics.getDBCheckpointMetrics().getNumCheckpoints(); - doEndpoint(); + doEndpoint(method); assertThat(tempFile.length()).isGreaterThan(0); assertThat(omMetrics.getDBCheckpointMetrics().getLastCheckpointCreationTimeTaken()) @@ -292,14 +297,8 @@ public void testEndpoint(String 
httpMethod) throws Exception { any(), any(), eq(toExcludeList), any(), any()); } - @Test - public void testDoPostWithInvalidContentType() throws Exception { - conf.setBoolean(OZONE_ACL_ENABLED, false); - conf.set(OZONE_ADMINISTRATORS, OZONE_ADMINISTRATORS_WILDCARD); - - setupCluster(); - - final OzoneManager om = cluster.getOzoneManager(); + private void testDoPostWithInvalidContentType() throws Exception { + setupMocks(); doCallRealMethod().when(omDbCheckpointServletMock).initialize( om.getMetadataManager().getStore(), @@ -322,11 +321,8 @@ public void testDoPostWithInvalidContentType() throws Exception { verify(responseMock).setStatus(HttpServletResponse.SC_BAD_REQUEST); } - @ParameterizedTest - @MethodSource("getHttpMethods") - public void testSpnegoEnabled(String httpMethod) throws Exception { - this.method = httpMethod; - + @Test + void testSpnegoEnabled() throws Exception { conf.setBoolean(OZONE_ACL_ENABLED, true); conf.set(OZONE_ADMINISTRATORS, ""); conf.set(OZONE_OM_HTTP_AUTH_TYPE, "kerberos"); @@ -334,7 +330,13 @@ public void testSpnegoEnabled(String httpMethod) throws Exception { setupCluster(); - final OzoneManager om = cluster.getOzoneManager(); + testSpnegoEnabled("POST"); + testSpnegoEnabled("GET"); + } + + private void testSpnegoEnabled(String method) throws Exception { + setupMocks(); + Collection allowedUsers = new LinkedHashSet<>(om.getOmAdminUsernames()); allowedUsers.add("recon"); @@ -349,9 +351,9 @@ public void testSpnegoEnabled(String httpMethod) throws Exception { omDbCheckpointServletMock.init(); - setupHttpMethod(new ArrayList<>()); + setupHttpMethod(method, new ArrayList<>()); - doEndpoint(); + doEndpoint(method); // Response status should be set to 403 Forbidden since there was no user // principal set in the request @@ -364,7 +366,7 @@ public void testSpnegoEnabled(String httpMethod) throws Exception { when(userPrincipalMock.getName()).thenReturn("dn/localhost@REALM"); when(requestMock.getUserPrincipal()).thenReturn(userPrincipalMock); - doEndpoint(); + doEndpoint(method); // Verify that the Response status is set to 403 again for DN user. verify(responseMock, times(2)).setStatus(HttpServletResponse.SC_FORBIDDEN); @@ -375,22 +377,21 @@ public void testSpnegoEnabled(String httpMethod) throws Exception { when(requestMock.getUserPrincipal()).thenReturn(userPrincipalMock); when(responseMock.getOutputStream()).thenReturn(servletOutputStream); - doEndpoint(); + doEndpoint(method); // Recon user should be able to access the servlet and download the // snapshot assertThat(tempFile.length()).isGreaterThan(0); } - @Test - public void testWriteDbDataToStream() throws Exception { - prepSnapshotData(); + private void testWriteDbDataToStream() throws Exception { + setupMocks(); + // Set http param to include snapshot data. when(requestMock.getParameter(OZONE_DB_CHECKPOINT_INCLUDE_SNAPSHOT_DATA)) .thenReturn("true"); // Create a "spy" dbstore keep track of the checkpoint. 
- OzoneManager om = cluster.getOzoneManager(); DBStore dbStore = om.getMetadataManager().getStore(); DBStore spyDbStore = spy(dbStore); @@ -517,10 +518,9 @@ private static long tmpHardLinkFileCount() throws IOException { } } - @Test - public void testWriteDbDataWithoutOmSnapshot() + private void testWriteDbDataWithoutOmSnapshot() throws Exception { - prepSnapshotData(); + setupMocks(); doCallRealMethod().when(omDbCheckpointServletMock).initialize( any(), any(), anyBoolean(), any(), any(), anyBoolean()); @@ -553,10 +553,9 @@ public void testWriteDbDataWithoutOmSnapshot() assertEquals(initialCheckpointSet, finalCheckpointSet); } - @Test - public void testWriteDbDataWithToExcludeFileList() + private void testWriteDbDataWithToExcludeFileList() throws Exception { - prepSnapshotData(); + setupMocks(); doCallRealMethod().when(omDbCheckpointServletMock).initialize( any(), any(), anyBoolean(), any(), any(), anyBoolean()); @@ -604,7 +603,7 @@ public void testWriteDbDataWithToExcludeFileList() /** * Calls endpoint in regards to parametrized HTTP method. */ - private void doEndpoint() { + private void doEndpoint(String method) { if (method.equals("POST")) { omDbCheckpointServletMock.doPost(requestMock, responseMock); } else { @@ -612,20 +611,13 @@ private void doEndpoint() { } } - /** - * Parametrizes test with HTTP method. - * @return HTTP method. - */ - private static Stream getHttpMethods() { - return Stream.of(arguments("POST"), arguments("GET")); - } - /** * Setups HTTP method details depending on parametrized HTTP method. + * * @param toExcludeList SST file names to be excluded. * @throws IOException */ - private void setupHttpMethod(List toExcludeList) throws IOException { + private void setupHttpMethod(String method, List toExcludeList) throws IOException { if (method.equals("POST")) { setupPostMethod(toExcludeList); } else { @@ -684,11 +676,10 @@ private void setupGetMethod(List toExcludeList) { } private void prepSnapshotData() throws Exception { - setupCluster(); metaDir = OMStorage.getOmDbDir(conf); OzoneBucket bucket = TestDataUtil - .createVolumeAndBucket(cluster.newClient()); + .createVolumeAndBucket(client); // Create dummy keys for snapshotting. 
TestDataUtil.createKey(bucket, UUID.randomUUID().toString(), @@ -730,17 +721,16 @@ private void prepSnapshotData() throws Exception { Path currentLink = Paths.get(compactionDirPath.toString(), "CURRENT"); Files.createLink(currentLink, currentFile); - dbCheckpoint = cluster.getOzoneManager() - .getMetadataManager().getStore() + dbCheckpoint = om.getMetadataManager() + .getStore() .getCheckpoint(true); } private String createSnapshot(String vname, String bname) throws IOException, InterruptedException, TimeoutException { - final OzoneManager om = cluster.getOzoneManager(); String snapshotName = UUID.randomUUID().toString(); - OzoneManagerProtocol writeClient = cluster.newClient().getObjectStore() + OzoneManagerProtocol writeClient = client.getObjectStore() .getClientProxy().getOzoneManagerClient(); writeClient.createSnapshot(vname, bname, snapshotName); @@ -839,22 +829,18 @@ private void checkLine(String shortSnapshotLocation, assertEquals(file0, file1, "hl filenames are the same"); } - @Test - public void testBootstrapLocking() throws Exception { - cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build(); - cluster.waitForClusterToBeReady(); - + private void testBootstrapLocking() throws Exception { // Get the bootstrap state handlers - KeyManager keyManager = cluster.getOzoneManager().getKeyManager(); + KeyManager keyManager = om.getKeyManager(); BootstrapStateHandler keyDeletingService = keyManager.getDeletingService(); BootstrapStateHandler snapshotDeletingService = keyManager.getSnapshotDeletingService(); BootstrapStateHandler sstFilteringService = keyManager.getSnapshotSstFilteringService(); - BootstrapStateHandler differ = - cluster.getOzoneManager().getMetadataManager() - .getStore().getRocksDBCheckpointDiffer(); + BootstrapStateHandler differ = om.getMetadataManager() + .getStore() + .getRocksDBCheckpointDiffer(); ExecutorService executorService = Executors.newCachedThreadPool(); @@ -863,7 +849,7 @@ public void testBootstrapLocking() throws Exception { OMDBCheckpointServlet spyServlet = spy(omDbCheckpointServlet); ServletContext servletContext = mock(ServletContext.class); when(servletContext.getAttribute(OzoneConsts.OM_CONTEXT_ATTRIBUTE)) - .thenReturn(cluster.getOzoneManager()); + .thenReturn(om); doReturn(servletContext).when(spyServlet).getServletContext(); spyServlet.init(); From 147318b049953610984f72a375454e039d04d247 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Tue, 14 Jan 2025 18:16:19 +0100 Subject: [PATCH 087/168] HDDS-12074. Enable sortpom in ozone-insight, s3-secret-store, s3gateway and tools (#7692) --- hadoop-ozone/insight/pom.xml | 78 ++++----- hadoop-ozone/s3-secret-store/pom.xml | 29 ++-- hadoop-ozone/s3gateway/pom.xml | 241 +++++++++++++-------------- hadoop-ozone/tools/pom.xml | 212 ++++++++++++----------- 4 files changed, 269 insertions(+), 291 deletions(-) diff --git a/hadoop-ozone/insight/pom.xml b/hadoop-ozone/insight/pom.xml index 97cdf786502..cdbef482ef3 100644 --- a/hadoop-ozone/insight/pom.xml +++ b/hadoop-ozone/insight/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -24,16 +21,39 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ozone-insight 2.0.0-SNAPSHOT - Apache Ozone Insight Tool - Apache Ozone Insight Tool jar + Apache Ozone Insight Tool + Apache Ozone Insight Tool false - true + + info.picocli + picocli + + + io.dropwizard.metrics + metrics-core + + + jakarta.activation + jakarta.activation-api + + + jakarta.xml.bind + jakarta.xml.bind-api + + + org.apache.httpcomponents + httpclient + + + org.apache.httpcomponents + httpcore + org.apache.ozone hdds-common @@ -56,15 +76,15 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone - ozone-manager + hdds-server-framework org.apache.ozone - ozone-common + hdds-server-scm org.apache.ozone - hdds-server-scm + hdds-tools org.apache.ozone @@ -72,48 +92,24 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone - ozone-filesystem + ozone-common org.apache.ozone - ozone-interface-client + ozone-filesystem org.apache.ozone - hdds-server-framework + ozone-interface-client org.apache.ozone - hdds-tools - - - org.apache.httpcomponents - httpclient - - - org.apache.httpcomponents - httpcore - - - info.picocli - picocli - - - jakarta.xml.bind - jakarta.xml.bind-api + ozone-manager org.glassfish.jaxb jaxb-runtime - - jakarta.activation - jakarta.activation-api - - - io.dropwizard.metrics - metrics-core - @@ -133,8 +129,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> com.github.spotbugs spotbugs-maven-plugin - ${basedir}/dev-support/findbugsExcludeFile.xml - + ${basedir}/dev-support/findbugsExcludeFile.xml true 2048 @@ -160,7 +155,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> maven-enforcer-plugin - ban-annotations + ban-annotations + diff --git a/hadoop-ozone/s3-secret-store/pom.xml b/hadoop-ozone/s3-secret-store/pom.xml index 1dcaa17d560..decc23ba8af 100644 --- a/hadoop-ozone/s3-secret-store/pom.xml +++ b/hadoop-ozone/s3-secret-store/pom.xml @@ -12,9 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -22,36 +20,33 @@ 2.0.0-SNAPSHOT ozone-s3-secret-store - Apache Ozone S3 Secret Store - jar 2.0.0-SNAPSHOT + jar + Apache Ozone S3 Secret Store - UTF-8 true - true + UTF-8 + + com.bettercloud + vault-java-driver + + + jakarta.annotation + jakarta.annotation-api + org.apache.ozone ozone-common compile - org.apache.ozone ozone-manager compile - - - com.bettercloud - vault-java-driver - - - jakarta.annotation - jakarta.annotation-api - org.slf4j slf4j-api diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml index 351db4b61fb..63e21359823 100644 --- a/hadoop-ozone/s3gateway/pom.xml +++ b/hadoop-ozone/s3gateway/pom.xml @@ -12,9 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -22,226 +20,224 @@ 2.0.0-SNAPSHOT ozone-s3gateway - Apache Ozone S3 Gateway - jar 2.0.0-SNAPSHOT + jar + Apache Ozone S3 Gateway false - UTF-8 true - true + UTF-8 - org.apache.ozone - hdds-client + com.fasterxml.jackson.core + jackson-databind - org.apache.ozone - hdds-common + com.fasterxml.jackson.dataformat + jackson-dataformat-xml - org.apache.ozone - hdds-config + com.fasterxml.jackson.module + jackson-module-jaxb-annotations - org.apache.ozone - hdds-interface-server + com.google.guava + guava - org.apache.ozone - ozone-common - compile + commons-codec + commons-codec - org.apache.ozone - ozone-interface-client + commons-io + commons-io - org.javassist - javassist + info.picocli + picocli - org.apache.ozone - hdds-server-framework + io.grpc + grpc-netty - commons-codec - commons-codec + io.grpc + grpc-protobuf + + + com.google.code.findbugs + jsr305 + + - commons-io - commons-io + io.grpc + grpc-stub - org.apache.httpcomponents - httpclient + io.netty + netty-codec-http2 - org.apache.httpcomponents - httpcore + io.netty + netty-transport - org.apache.kerby - kerby-util + io.opentracing + opentracing-api - org.apache.ratis - ratis-common + io.opentracing + opentracing-noop - - org.jboss.weld.servlet - weld-servlet-shaded + io.opentracing + opentracing-util - org.eclipse.jetty - jetty-servlet + jakarta.activation + jakarta.activation-api - org.eclipse.jetty - jetty-webapp + jakarta.annotation + jakarta.annotation-api - org.glassfish.jersey.containers - jersey-container-servlet-core + jakarta.ws.rs + jakarta.ws.rs-api - org.glassfish.jersey.core - jersey-common + jakarta.xml.bind + jakarta.xml.bind-api - org.glassfish.jersey.core - jersey-server + javax.annotation + javax.annotation-api - org.glassfish.jersey.ext.cdi - jersey-cdi1x + javax.enterprise + cdi-api - org.glassfish.jersey.inject - jersey-hk2 + javax.servlet + javax.servlet-api - org.glassfish.jersey.media - jersey-media-jaxb + org.apache.commons + commons-lang3 - org.glassfish.hk2 - hk2-api + org.apache.httpcomponents + httpclient - org.slf4j - slf4j-api + org.apache.httpcomponents + httpcore - com.fasterxml.jackson.core - jackson-databind + org.apache.kerby + kerby-util - com.fasterxml.jackson.dataformat - jackson-dataformat-xml + org.apache.ozone + hdds-client - com.fasterxml.jackson.module - jackson-module-jaxb-annotations + org.apache.ozone + hdds-common - com.google.guava - guava + org.apache.ozone + hdds-config - javax.annotation - javax.annotation-api + org.apache.ozone + hdds-hadoop-dependency-server - javax.enterprise - cdi-api + org.apache.ozone + hdds-interface-server - javax.servlet - javax.servlet-api + org.apache.ozone + hdds-server-framework - jakarta.annotation - jakarta.annotation-api + org.apache.ozone + ozone-client - jakarta.ws.rs - jakarta.ws.rs-api + org.apache.ozone + ozone-common + compile - jakarta.xml.bind - jakarta.xml.bind-api + org.apache.ozone + ozone-interface-client - org.glassfish.jaxb - jaxb-runtime + org.apache.ratis + ratis-common - jakarta.activation - jakarta.activation-api + org.eclipse.jetty + jetty-servlet - info.picocli - picocli + org.eclipse.jetty + jetty-webapp - io.grpc - grpc-netty + org.glassfish.hk2 + hk2-api - io.grpc - grpc-protobuf - - - com.google.code.findbugs - jsr305 - - + org.glassfish.jaxb + jaxb-runtime - io.grpc - grpc-stub + org.glassfish.jersey.containers + jersey-container-servlet-core - io.netty - netty-codec-http2 + org.glassfish.jersey.core + jersey-common - io.netty - netty-transport + org.glassfish.jersey.core + jersey-server 
- io.opentracing - opentracing-api + org.glassfish.jersey.ext.cdi + jersey-cdi1x - io.opentracing - opentracing-noop + org.glassfish.jersey.inject + jersey-hk2 - io.opentracing - opentracing-util + org.glassfish.jersey.media + jersey-media-jaxb - org.apache.ozone - hdds-hadoop-dependency-server + org.javassist + javassist - org.apache.ozone - ozone-client + org.jboss.weld.servlet + weld-servlet-shaded + + + org.slf4j + slf4j-api org.apache.ozone hdds-docs provided - - org.apache.commons - commons-lang3 - @@ -278,7 +274,8 @@ maven-enforcer-plugin - ban-annotations + ban-annotations + @@ -300,25 +297,22 @@ copy-common-html - prepare-package unpack + prepare-package org.apache.ozone hdds-server-framework - ${project.build.outputDirectory} - + ${project.build.outputDirectory} webapps/static/**/*.* org.apache.ozone hdds-docs - - ${project.build.outputDirectory}/webapps/static - + ${project.build.outputDirectory}/webapps/static docs/**/*.* @@ -331,8 +325,7 @@ com.github.spotbugs spotbugs-maven-plugin - ${basedir}/dev-support/findbugsExcludeFile.xml - + ${basedir}/dev-support/findbugsExcludeFile.xml diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml index f3605d358eb..b3ee3ff5793 100644 --- a/hadoop-ozone/tools/pom.xml +++ b/hadoop-ozone/tools/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -24,16 +21,95 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ozone-tools 2.0.0-SNAPSHOT - Apache Ozone Tools - Apache Ozone Tools jar + Apache Ozone Tools + Apache Ozone Tools false - true + + com.amazonaws + aws-java-sdk-core + + + com.amazonaws + aws-java-sdk-s3 + + + com.fasterxml.jackson.core + jackson-annotations + + + com.fasterxml.jackson.core + jackson-core + + + com.fasterxml.jackson.core + jackson-databind + + + com.fasterxml.jackson.datatype + jackson-datatype-jsr310 + + + com.google.guava + guava + + + commons-codec + commons-codec + + + commons-io + commons-io + + + info.picocli + picocli + + + info.picocli + picocli-shell-jline3 + + + io.dropwizard.metrics + metrics-core + + + io.opentracing + opentracing-api + + + io.opentracing + opentracing-util + + + jakarta.activation + jakarta.activation-api + + + jakarta.annotation + jakarta.annotation-api + + + jakarta.xml.bind + jakarta.xml.bind-api + + + org.apache.commons + commons-lang3 + + + org.apache.httpcomponents + httpclient + + + org.apache.httpcomponents + httpcore + org.apache.ozone hdds-client @@ -50,6 +126,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds-container-service + + org.apache.ozone + hdds-hadoop-dependency-server + org.apache.ozone hdds-interface-admin @@ -68,15 +148,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone - hdds-server-scm - - - org.apache.ozone - ozone-manager + hdds-server-framework org.apache.ozone - ozone-common + hdds-server-scm org.apache.ozone @@ -88,12 +164,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone - ozone-filesystem-common + ozone-common org.apache.ozone ozone-filesystem + + org.apache.ozone + ozone-filesystem-common + org.apache.ozone ozone-interface-client @@ -104,7 +184,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone - hdds-server-framework + ozone-manager org.apache.ozone @@ -116,31 +196,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - org.apache.ozone - hdds-hadoop-dependency-server - - - - commons-codec - commons-codec - - - commons-io - 
commons-io - - - org.apache.commons - commons-lang3 - - - org.apache.httpcomponents - httpclient - - - org.apache.httpcomponents - httpcore - org.apache.ratis ratis-client @@ -159,84 +214,23 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ratis - ratis-thirdparty-misc + ratis-shell org.apache.ratis - ratis-tools + ratis-thirdparty-misc org.apache.ratis - ratis-shell - - - - info.picocli - picocli - - - info.picocli - picocli-shell-jline3 - - - org.jline - jline - - - jakarta.xml.bind - jakarta.xml.bind-api + ratis-tools org.glassfish.jaxb jaxb-runtime - jakarta.annotation - jakarta.annotation-api - - - jakarta.activation - jakarta.activation-api - - - io.dropwizard.metrics - metrics-core - - - io.opentracing - opentracing-api - - - io.opentracing - opentracing-util - - - com.amazonaws - aws-java-sdk-core - - - com.amazonaws - aws-java-sdk-s3 - - - com.fasterxml.jackson.core - jackson-annotations - - - com.fasterxml.jackson.core - jackson-core - - - com.fasterxml.jackson.core - jackson-databind - - - com.fasterxml.jackson.datatype - jackson-datatype-jsr310 - - - com.google.guava - guava + org.jline + jline org.jooq @@ -273,8 +267,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> com.github.spotbugs spotbugs-maven-plugin - ${basedir}/dev-support/findbugsExcludeFile.xml - + ${basedir}/dev-support/findbugsExcludeFile.xml true 2048 @@ -309,7 +302,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> maven-enforcer-plugin - ban-annotations + ban-annotations + From 0723902ac927d19faeab735c0d1fcd8339fb58b6 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Tue, 14 Jan 2025 21:54:23 +0100 Subject: [PATCH 088/168] HDDS-12082. CI checks fail with Maven 3.9.9 (#7699) --- .github/workflows/ci.yml | 2 +- hadoop-ozone/dist/src/main/compose/common/s3a-test.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4c6723daff8..2ae4d42f3a5 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -30,7 +30,7 @@ env: FAIL_FAST: ${{ github.event_name == 'pull_request' }} # Minimum required Java version for running Ozone is defined in pom.xml (javac.version). 
TEST_JAVA_VERSION: 21 # JDK version used by CI build and tests; should match the JDK version in apache/ozone-runner image - MAVEN_ARGS: --batch-mode --settings ${{ github.workspace }}/dev-support/ci/maven-settings.xml --show-version + MAVEN_ARGS: --batch-mode --settings ${{ github.workspace }}/dev-support/ci/maven-settings.xml MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 HADOOP_IMAGE: ghcr.io/apache/hadoop OZONE_IMAGE: ghcr.io/apache/ozone diff --git a/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh b/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh index 03600616a76..35ec5f68ed4 100644 --- a/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh +++ b/hadoop-ozone/dist/src/main/compose/common/s3a-test.sh @@ -98,7 +98,7 @@ EOF # - ITestS3AContractDistCp: HDDS-10616 # - ITestS3AContractMkdirWithCreatePerf: HDDS-11662 # - ITestS3AContractRename: HDDS-10665 - mvn ${MAVEN_ARGS:-} --fail-never \ + mvn ${MAVEN_ARGS:-} --fail-never --show-version \ -Dtest='ITestS3AContract*, ITestS3ACommitterMRJob, !ITestS3AContractBulkDelete, !ITestS3AContractCreate#testOverwrite*EmptyDirectory[*], !ITestS3AContractDistCp, !ITestS3AContractMkdirWithCreatePerf, !ITestS3AContractRename' \ clean test From 2f6e229dea0d9ddd2645e488332acb70e9c21eab Mon Sep 17 00:00:00 2001 From: Sarveksha Yeshavantha Raju <79865743+sarvekshayr@users.noreply.github.com> Date: Wed, 15 Jan 2025 13:54:43 +0530 Subject: [PATCH 089/168] HDDS-12029. Move ozone debug recover to ozone admin om lease recover (#7670) --- .../dist/src/main/compose/ozone/test.sh | 2 -- .../lease-recovery.robot} | 10 ++++-- .../main/smoketest/compatibility/read.robot | 2 +- .../main/smoketest/debug/ozone-debug.robot | 5 --- .../om/lease}/TestLeaseRecoverer.java | 2 +- .../apache/hadoop/ozone/admin/om/OMAdmin.java | 4 ++- .../om/lease}/LeaseRecoverer.java | 13 +++---- .../ozone/admin/om/lease/LeaseSubCommand.java | 35 +++++++++++++++++++ .../ozone/admin/om/lease/package-info.java | 22 ++++++++++++ 9 files changed, 74 insertions(+), 21 deletions(-) rename hadoop-ozone/dist/src/main/smoketest/{debug/ozone-debug-lease-recovery.robot => admincli/lease-recovery.robot} (89%) rename hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/{debug => admin/om/lease}/TestLeaseRecoverer.java (99%) rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/{debug => admin/om/lease}/LeaseRecoverer.java (83%) create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/lease/LeaseSubCommand.java create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/lease/package-info.java diff --git a/hadoop-ozone/dist/src/main/compose/ozone/test.sh b/hadoop-ozone/dist/src/main/compose/ozone/test.sh index 6477dbd0979..a580fd83309 100755 --- a/hadoop-ozone/dist/src/main/compose/ozone/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozone/test.sh @@ -48,8 +48,6 @@ execute_robot_test scm freon execute_robot_test scm cli execute_robot_test scm admincli -execute_robot_test scm debug/ozone-debug-lease-recovery.robot - execute_robot_test scm -v USERNAME:httpfs httpfs execute_debug_tests diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-lease-recovery.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/lease-recovery.robot similarity index 89% rename from hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-lease-recovery.robot rename to 
hadoop-ozone/dist/src/main/smoketest/admincli/lease-recovery.robot index 691769dbd72..a44ceacc112 100644 --- a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-lease-recovery.robot +++ b/hadoop-ozone/dist/src/main/smoketest/admincli/lease-recovery.robot @@ -18,7 +18,6 @@ Documentation Test lease recovery of ozone filesystem Library OperatingSystem Resource ../lib/os.robot Resource ../lib/fs.robot -Resource ozone-debug.robot Test Timeout 5 minute Suite Setup Create volume bucket and put key @@ -35,8 +34,13 @@ Create volume bucket and put key Create File ${TEMP_DIR}/${TESTFILE} Execute ozone sh key put /${VOLUME}/${BUCKET}/${TESTFILE} ${TEMP_DIR}/${TESTFILE} +Execute Lease recovery cli + [Arguments] ${KEY_PATH} + ${result} = Execute And Ignore Error ozone admin om lease recover --path=${KEY_PATH} + [Return] ${result} + *** Test Cases *** -Test ozone debug recover for o3fs +Test ozone admin om lease recover for o3fs ${o3fs_path} = Format FS URL o3fs ${VOLUME} ${BUCKET} ${TESTFILE} ${result} = Execute Lease recovery cli ${o3fs_path} Should Contain ${result} Lease recovery SUCCEEDED @@ -44,7 +48,7 @@ Test ozone debug recover for o3fs ${result} = Execute Lease recovery cli ${o3fs_path} Should Contain ${result} not found -Test ozone debug recover for ofs +Test ozone admin om lease recover for ofs ${ofs_path} = Format FS URL ofs ${VOLUME} ${BUCKET} ${TESTFILE} ${result} = Execute Lease recovery cli ${ofs_path} Should Contain ${result} Lease recovery SUCCEEDED diff --git a/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot b/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot index 9061677eae6..b5dfbb9739e 100644 --- a/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot +++ b/hadoop-ozone/dist/src/main/smoketest/compatibility/read.robot @@ -154,4 +154,4 @@ HSync Lease Recover Can Be Used Pass Execution If '${DATA_VERSION}' < '${FSO_VERSION}' Skipped write test case Pass Execution If '${CLIENT_VERSION}' < '${HSYNC_VERSION}' Client does not support HSYNC Pass Execution If '${CLUSTER_VERSION}' < '${HSYNC_VERSION}' Cluster does not support HSYNC - Execute ozone debug recover --path=ofs://om/vol1/fso-bucket-${DATA_VERSION}/dir/subdir/file + Execute ozone admin om lease recover --path=ofs://om/vol1/fso-bucket-${DATA_VERSION}/dir/subdir/file diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug.robot b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug.robot index fa915819eea..e0964e4c160 100644 --- a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug.robot +++ b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug.robot @@ -26,11 +26,6 @@ Execute read-replicas CLI tool File Should Exist ${directory}/${TESTFILE}_manifest [Return] ${directory} -Execute Lease recovery cli - [Arguments] ${KEY_PATH} - ${result} = Execute And Ignore Error ozone debug recover --path=${KEY_PATH} - [Return] ${result} - Read Replicas Manifest ${manifest} = Get File ${DIR}/${TESTFILE}_manifest ${json} = Evaluate json.loads('''${manifest}''') json diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLeaseRecoverer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/admin/om/lease/TestLeaseRecoverer.java similarity index 99% rename from hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLeaseRecoverer.java rename to hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/admin/om/lease/TestLeaseRecoverer.java index 29f91821ebd..004338483bb 100644 --- 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLeaseRecoverer.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/admin/om/lease/TestLeaseRecoverer.java @@ -14,7 +14,7 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.debug; +package org.apache.hadoop.ozone.admin.om.lease; import java.io.IOException; import java.io.PrintWriter; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java index 2e6eedb9420..4d10132a7cb 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/OMAdmin.java @@ -24,6 +24,7 @@ import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ozone.OmUtils; +import org.apache.hadoop.ozone.admin.om.lease.LeaseSubCommand; import org.apache.hadoop.ozone.client.OzoneClientException; import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.protocol.ClientProtocol; @@ -58,7 +59,8 @@ DecommissionOMSubcommand.class, UpdateRangerSubcommand.class, TransferOmLeaderSubCommand.class, - FetchKeySubCommand.class + FetchKeySubCommand.class, + LeaseSubCommand.class }) @MetaInfServices(AdminSubcommand.class) public class OMAdmin implements AdminSubcommand { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/LeaseRecoverer.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/lease/LeaseRecoverer.java similarity index 83% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/LeaseRecoverer.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/lease/LeaseRecoverer.java index 9c3865ae241..c62932747fb 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/LeaseRecoverer.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/lease/LeaseRecoverer.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.debug; +package org.apache.hadoop.ozone.admin.om.lease; import java.net.URI; import java.util.concurrent.Callable; @@ -24,24 +24,21 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.LeaseRecoverable; -import org.apache.hadoop.hdds.cli.DebugSubcommand; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.kohsuke.MetaInfServices; import picocli.CommandLine; import picocli.CommandLine.Model.CommandSpec; import picocli.CommandLine.Spec; /** - * Tool that recover the lease of a specified file. + * CLI to recover the lease of a specified file. */ @CommandLine.Command( name = "recover", - customSynopsis = "ozone debug recover --path=", - description = "recover the lease of a specified file. Make sure to specify " + customSynopsis = "ozone admin om lease recover --path=", + description = "Recover the lease of a specified file. 
Make sure to specify " + "file system scheme if ofs:// is not the default.") -@MetaInfServices(DebugSubcommand.class) -public class LeaseRecoverer implements Callable, DebugSubcommand { +public class LeaseRecoverer implements Callable { @Spec private CommandSpec spec; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/lease/LeaseSubCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/lease/LeaseSubCommand.java new file mode 100644 index 00000000000..001dccaf835 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/lease/LeaseSubCommand.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.admin.om.lease; + +import picocli.CommandLine; + +/** + * Handler of ozone admin om lease command. + */ +@CommandLine.Command( + name = "lease", + description = "Command for all lease related queries.", + subcommands = { + LeaseRecoverer.class + } +) +public class LeaseSubCommand { + +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/lease/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/lease/package-info.java new file mode 100644 index 00000000000..a5cb707dd9a --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/om/lease/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Lease related OM Admin tools. + */ +package org.apache.hadoop.ozone.admin.om.lease; From af36d73823c659b7062d70ed68ccc4d035b9e42c Mon Sep 17 00:00:00 2001 From: Clay Johnson Date: Wed, 15 Jan 2025 07:58:11 -0600 Subject: [PATCH 090/168] HDDS-12083. 
Publish build scans to develocity.apache.org (#7701) --- .github/workflows/ci.yml | 12 ++++++------ .github/workflows/intermittent-test-check.yml | 2 +- .github/workflows/repeat-acceptance.yml | 2 +- .mvn/develocity.xml | 3 ++- .mvn/extensions.xml | 2 +- 5 files changed, 11 insertions(+), 10 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2ae4d42f3a5..7d0f911ed3e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -146,7 +146,7 @@ jobs: - name: Run a full build run: hadoop-ozone/dev-support/checks/build.sh -Pdist -Psrc -Dmaven.javadoc.skip=true ${{ inputs.ratis_args }} env: - DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} + DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }} - name: Store binaries for tests uses: actions/upload-artifact@v4 with: @@ -226,7 +226,7 @@ jobs: run: hadoop-ozone/dev-support/checks/build.sh -Pdist -DskipRecon -Dmaven.javadoc.failOnWarnings=${{ matrix.java != 8 }} -Djavac.version=${{ matrix.java }} ${{ inputs.ratis_args }} env: OZONE_WITH_COVERAGE: false - DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} + DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }} basic: needs: - build-info @@ -274,7 +274,7 @@ jobs: - name: Execute tests run: hadoop-ozone/dev-support/checks/${{ matrix.check }}.sh ${{ inputs.ratis_args }} env: - DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} + DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }} - name: Summary of failures run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ matrix.check }}/summary.txt if: ${{ failure() }} @@ -321,7 +321,7 @@ jobs: - name: Execute tests run: hadoop-ozone/dev-support/checks/${{ github.job }}.sh ${{ inputs.ratis_args }} env: - DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} + DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }} - name: Summary of failures run: hadoop-ozone/dev-support/checks/_summary.sh target/${{ github.job }}/summary.txt if: ${{ failure() }} @@ -641,7 +641,7 @@ jobs: hadoop-ozone/dev-support/checks/integration.sh -P${{ matrix.profile }} ${args} env: - DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} + DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }} - name: Summary of failures run: | if [[ -s "target/${{ github.job }}/summary.md" ]]; then @@ -701,7 +701,7 @@ jobs: env: SONAR_TOKEN: ${{ secrets.SONARCLOUD_TOKEN }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} + DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }} - name: Archive build results uses: actions/upload-artifact@v4 with: diff --git a/.github/workflows/intermittent-test-check.yml b/.github/workflows/intermittent-test-check.yml index cb765f36217..4154d1a9ac3 100644 --- a/.github/workflows/intermittent-test-check.yml +++ b/.github/workflows/intermittent-test-check.yml @@ -203,7 +203,7 @@ jobs: fi continue-on-error: true env: - DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} + DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }} - name: Summary of failures run: hadoop-ozone/dev-support/checks/_summary.sh target/unit/summary.txt if: ${{ !cancelled() }} diff --git a/.github/workflows/repeat-acceptance.yml b/.github/workflows/repeat-acceptance.yml index 1c6fc3797ed..c36a841817e 100644 --- a/.github/workflows/repeat-acceptance.yml +++ b/.github/workflows/repeat-acceptance.yml @@ -110,7 +110,7 @@ jobs: - name: Run a full build run: hadoop-ozone/dev-support/checks/build.sh -Pdist -Psrc -Dmaven.javadoc.skip=true env: - 
DEVELOCITY_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }} + DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }} - name: Store binaries for tests uses: actions/upload-artifact@v4 with: diff --git a/.mvn/develocity.xml b/.mvn/develocity.xml index 30295e9d204..5aa72e6acbb 100644 --- a/.mvn/develocity.xml +++ b/.mvn/develocity.xml @@ -22,8 +22,9 @@ + ozone - https://ge.apache.org + https://develocity.apache.org false diff --git a/.mvn/extensions.xml b/.mvn/extensions.xml index 549a1cddcd3..8ceede33b9c 100644 --- a/.mvn/extensions.xml +++ b/.mvn/extensions.xml @@ -24,7 +24,7 @@ com.gradle develocity-maven-extension - 1.23 + 1.22.2 com.gradle From 85e752123f4a30d109a686b46c6e14336035ed40 Mon Sep 17 00:00:00 2001 From: Sarveksha Yeshavantha Raju <79865743+sarvekshayr@users.noreply.github.com> Date: Wed, 15 Jan 2025 21:16:36 +0530 Subject: [PATCH 091/168] HDDS-12056. Move ozone debug chunkinfo to ozone debug replicas chunk-info (#7702) --- hadoop-ozone/dist/src/main/compose/testlib.sh | 2 +- .../ozone/shell/TestOzoneDebugShell.java | 4 +- .../ozone/debug/replicas/ReplicasDebug.java | 39 +++++++++++++++++++ .../chunk/ChunkDataNodeDetails.java | 2 +- .../{ => replicas}/chunk/ChunkDetails.java | 2 +- .../{ => replicas}/chunk/ChunkKeyHandler.java | 20 +++------- .../debug/{ => replicas}/chunk/ChunkType.java | 2 +- .../chunk/ContainerChunkInfo.java | 2 +- .../debug/replicas/chunk/package-info.java | 22 +++++++++++ .../{chunk => replicas}/package-info.java | 4 +- 10 files changed, 75 insertions(+), 24 deletions(-) create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/ReplicasDebug.java rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/{ => replicas}/chunk/ChunkDataNodeDetails.java (96%) rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/{ => replicas}/chunk/ChunkDetails.java (96%) rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/{ => replicas}/chunk/ChunkKeyHandler.java (94%) rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/{ => replicas}/chunk/ChunkType.java (94%) rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/{ => replicas}/chunk/ContainerChunkInfo.java (98%) create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/package-info.java rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/{chunk => replicas}/package-info.java (90%) diff --git a/hadoop-ozone/dist/src/main/compose/testlib.sh b/hadoop-ozone/dist/src/main/compose/testlib.sh index 8ced94e5007..1ab7533942f 100755 --- a/hadoop-ozone/dist/src/main/compose/testlib.sh +++ b/hadoop-ozone/dist/src/main/compose/testlib.sh @@ -580,7 +580,7 @@ execute_debug_tests() { # get block locations for key local chunkinfo="${key}-blocks-${prefix}" - docker-compose exec -T ${SCM} bash -c "ozone debug chunkinfo ${volume}/${bucket}/${key}" > "$chunkinfo" + docker-compose exec -T ${SCM} bash -c "ozone debug replicas chunk-info ${volume}/${bucket}/${key}" > "$chunkinfo" local host="$(jq -r '.KeyLocations[0][0]["Datanode-HostName"]' ${chunkinfo})" local container="${host%%.*}" diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java index d8315cb427d..b7b23c05280 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java +++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDebugShell.java @@ -206,7 +206,7 @@ private int runChunkInfoCommand(String volumeName, String bucketName, Path.SEPARATOR + volumeName + Path.SEPARATOR + bucketName; String[] args = new String[] { getSetConfStringFromConf(OMConfigKeys.OZONE_OM_ADDRESS_KEY), - "chunkinfo", bucketPath + Path.SEPARATOR + keyName }; + "replicas", "chunk-info", bucketPath + Path.SEPARATOR + keyName }; int exitCode = ozoneDebugShell.execute(args); return exitCode; @@ -218,7 +218,7 @@ private int runChunkInfoAndVerifyPaths(String volumeName, String bucketName, Path.SEPARATOR + volumeName + Path.SEPARATOR + bucketName; String[] args = new String[] { getSetConfStringFromConf(OMConfigKeys.OZONE_OM_ADDRESS_KEY), - "chunkinfo", bucketPath + Path.SEPARATOR + keyName }; + "replicas", "chunk-info", bucketPath + Path.SEPARATOR + keyName }; int exitCode = 1; try (GenericTestUtils.SystemOutCapturer capture = new GenericTestUtils .SystemOutCapturer()) { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/ReplicasDebug.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/ReplicasDebug.java new file mode 100644 index 00000000000..f68da07ca5f --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/ReplicasDebug.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.debug.replicas; + +import org.apache.hadoop.hdds.cli.DebugSubcommand; +import org.apache.hadoop.ozone.debug.replicas.chunk.ChunkKeyHandler; +import org.kohsuke.MetaInfServices; +import picocli.CommandLine; + +/** + * Replicas debug related commands. 
+ */ +@CommandLine.Command( + name = "replicas", + description = "Debug commands for replica-related issues, retrieving replica information from the OM and " + + "performing checks over the network against a running cluster.", + subcommands = { + ChunkKeyHandler.class + } +) +@MetaInfServices(DebugSubcommand.class) +public class ReplicasDebug implements DebugSubcommand { +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkDataNodeDetails.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkDataNodeDetails.java similarity index 96% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkDataNodeDetails.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkDataNodeDetails.java index cf6b7d7a11d..0c2d62aa96a 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkDataNodeDetails.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkDataNodeDetails.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.debug.chunk; +package org.apache.hadoop.ozone.debug.replicas.chunk; /** * Class that gives datanode details on which the chunk is present. */ diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkDetails.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkDetails.java similarity index 96% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkDetails.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkDetails.java index 4e2b5314a06..49d998ee4d4 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkDetails.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkDetails.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.debug.chunk; +package org.apache.hadoop.ozone.debug.replicas.chunk; /** * Class that gives chunkDetails. diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkKeyHandler.java similarity index 94% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkKeyHandler.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkKeyHandler.java index 6944c380493..3f6723b750f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkKeyHandler.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkKeyHandler.java @@ -16,7 +16,7 @@ * limitations under the License. 
*/ -package org.apache.hadoop.ozone.debug.chunk; +package org.apache.hadoop.ozone.debug.replicas.chunk; import java.io.File; import java.io.IOException; @@ -27,7 +27,6 @@ import com.fasterxml.jackson.databind.node.ArrayNode; import com.fasterxml.jackson.databind.node.ObjectNode; -import org.apache.hadoop.hdds.cli.DebugSubcommand; import org.apache.hadoop.hdds.client.ECReplicationConfig; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; import org.apache.hadoop.hdds.protocol.DatanodeDetails; @@ -42,15 +41,12 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; -import org.apache.hadoop.ozone.debug.OzoneDebug; import org.apache.hadoop.ozone.om.helpers.OmKeyArgs; import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; import org.apache.hadoop.ozone.shell.OzoneAddress; import org.apache.hadoop.ozone.shell.keys.KeyHandler; -import org.kohsuke.MetaInfServices; -import picocli.CommandLine; import picocli.CommandLine.Command; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; @@ -58,15 +54,9 @@ /** * Class that gives chunk location given a specific key. */ -@Command(name = "chunkinfo", - description = "returns chunk location" - + " information about an existing key") -@MetaInfServices(DebugSubcommand.class) -public class ChunkKeyHandler extends KeyHandler implements - DebugSubcommand { - - @CommandLine.ParentCommand - private OzoneDebug parent; +@Command(name = "chunk-info", + description = "Returns chunk location information about an existing key") +public class ChunkKeyHandler extends KeyHandler { private String getChunkLocationPath(String containerLocation) { return containerLocation + File.separator + OzoneConsts.STORAGE_DIR_CHUNKS; @@ -75,7 +65,7 @@ private String getChunkLocationPath(String containerLocation) { @Override protected void execute(OzoneClient client, OzoneAddress address) throws IOException { - try (ContainerOperationClient containerOperationClient = new ContainerOperationClient(parent.getOzoneConf()); + try (ContainerOperationClient containerOperationClient = new ContainerOperationClient(getOzoneConf()); XceiverClientManager xceiverClientManager = containerOperationClient.getXceiverClientManager()) { OzoneManagerProtocol ozoneManagerClient = client.getObjectStore().getClientProxy().getOzoneManagerClient(); address.ensureKeyAddress(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkType.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkType.java similarity index 94% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkType.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkType.java index 3af7f810402..56e969b7d1b 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ChunkType.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ChunkType.java @@ -14,7 +14,7 @@ * License for the specific language governing permissions and limitations under * the License. */ -package org.apache.hadoop.ozone.debug.chunk; +package org.apache.hadoop.ozone.debug.replicas.chunk; /** * The type of chunks of an Erasure Coded key. 
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ContainerChunkInfo.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ContainerChunkInfo.java similarity index 98% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ContainerChunkInfo.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ContainerChunkInfo.java index 1c5fc090b0e..a7e2edc1ef6 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/ContainerChunkInfo.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/ContainerChunkInfo.java @@ -16,7 +16,7 @@ * limitations under the License. */ -package org.apache.hadoop.ozone.debug.chunk; +package org.apache.hadoop.ozone.debug.replicas.chunk; import com.fasterxml.jackson.annotation.JsonInclude; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/package-info.java new file mode 100644 index 00000000000..3a2200166a9 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/chunk/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * Command to debug replicas chunk information. + */ +package org.apache.hadoop.ozone.debug.replicas.chunk; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/package-info.java similarity index 90% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/package-info.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/package-info.java index d81f2276a65..7369ab256cf 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/chunk/package-info.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/replicas/package-info.java @@ -17,6 +17,6 @@ */ /** - * Command to debug chunk information. + * Replicas debug related commands. */ -package org.apache.hadoop.ozone.debug.chunk; +package org.apache.hadoop.ozone.debug.replicas; From 6c41a9aa3c35a519f1c536e18ce15b6becf2891b Mon Sep 17 00:00:00 2001 From: Swaminathan Balachandran <47532440+swamirishi@users.noreply.github.com> Date: Wed, 15 Jan 2025 11:12:27 -0800 Subject: [PATCH 092/168] HDDS-12064. Optimize bootstrap logic to reduce loop while checking file links (#7676) * HDDS-12064. Optimize bootstrap logic to reduce loop while checking file links Change-Id: I6871db471adc1790ac3a0ff295a4db6eeb7608ad * HDDS-12064. 
Fix findbugs Change-Id: If6f300d6068c4be2c8da99fdef3ae8495680d5ea * HDDS-12064. Address review comments Change-Id: Ic2b623cdb5ea6cbdcfad2b82ebb11bad62caa6d2 * HDDS-12064. Address review comments Change-Id: I03befbcab5d08add580c44cc7ee52dbfaeb101ba --- .../ozone/om/OMDBCheckpointServlet.java | 46 ++++++++------ .../ozone/om/TestOmSnapshotManager.java | 60 +++++++++++-------- 2 files changed, 63 insertions(+), 43 deletions(-) diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java index c8237b79673..ee8633ae3f1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMDBCheckpointServlet.java @@ -51,6 +51,7 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashSet; @@ -149,7 +150,7 @@ public void writeDbDataToStream(DBCheckpoint checkpoint, // the same. For synchronization purposes, some files are copied // to a temp directory on the leader. In those cases the source // and dest won't be the same. - Map copyFiles = new HashMap<>(); + Map> copyFiles = new HashMap<>(); // Map of link to path. Map hardLinkFiles = new HashMap<>(); @@ -168,12 +169,14 @@ public void writeDbDataToStream(DBCheckpoint checkpoint, differ.getCompactionLogDir()); // Files to be excluded from tarball - Map sstFilesToExclude = normalizeExcludeList(toExcludeList, + Map> sstFilesToExclude = normalizeExcludeList(toExcludeList, checkpoint.getCheckpointLocation(), sstBackupDir); boolean completed = getFilesForArchive(checkpoint, copyFiles, hardLinkFiles, sstFilesToExclude, includeSnapshotData(request), excludedList, sstBackupDir, compactionLogDir); - writeFilesToArchive(copyFiles, hardLinkFiles, archiveOutputStream, + Map flatCopyFiles = copyFiles.values().stream().flatMap(map -> map.entrySet().stream()) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + writeFilesToArchive(flatCopyFiles, hardLinkFiles, archiveOutputStream, completed, checkpoint.getCheckpointLocation()); } catch (Exception e) { LOG.error("got exception writing to archive " + e); @@ -194,14 +197,19 @@ hardLinkFiles, sstFilesToExclude, includeSnapshotData(request), * include sst files.) 
*/ @VisibleForTesting - public static Map normalizeExcludeList( + public static Map> normalizeExcludeList( List toExcludeList, Path checkpointLocation, DirectoryData sstBackupDir) { - Map paths = new HashMap<>(); + Map> paths = new HashMap<>(); Path metaDirPath = getMetaDirPath(checkpointLocation); for (String s : toExcludeList) { + Path fileName = Paths.get(s).getFileName(); + if (fileName == null) { + continue; + } Path destPath = Paths.get(metaDirPath.toString(), s); + Map fileMap = paths.computeIfAbsent(fileName.toString(), (k) -> new HashMap<>()); if (destPath.toString().startsWith( sstBackupDir.getOriginalDir().toString())) { // The source of the sstBackupDir is a temporary directory and needs @@ -210,12 +218,12 @@ public static Map normalizeExcludeList( sstBackupDir.getOriginalDir().toString().length() + 1; Path srcPath = Paths.get(sstBackupDir.getTmpDir().toString(), truncateFileName(truncateLength, destPath)); - paths.put(srcPath, destPath); + fileMap.put(srcPath, destPath); } else if (!s.startsWith(OM_SNAPSHOT_DIR)) { Path fixedPath = Paths.get(checkpointLocation.toString(), s); - paths.put(fixedPath, fixedPath); + fileMap.put(fixedPath, fixedPath); } else { - paths.put(destPath, destPath); + fileMap.put(destPath, destPath); } } return paths; @@ -266,9 +274,9 @@ public File getTmpDir() { @SuppressWarnings("checkstyle:ParameterNumber") private boolean getFilesForArchive(DBCheckpoint checkpoint, - Map copyFiles, + Map> copyFiles, Map hardLinkFiles, - Map sstFilesToExclude, + Map> sstFilesToExclude, boolean includeSnapshotData, List excluded, DirectoryData sstBackupDir, @@ -360,9 +368,9 @@ private void waitForDirToExist(Path dir) throws IOException { } @SuppressWarnings("checkstyle:ParameterNumber") - private boolean processDir(Path dir, Map copyFiles, + private boolean processDir(Path dir, Map> copyFiles, Map hardLinkFiles, - Map sstFilesToExclude, + Map> sstFilesToExclude, Set snapshotPaths, List excluded, AtomicLong copySize, @@ -437,9 +445,9 @@ private boolean processDir(Path dir, Map copyFiles, * @param excluded The list of db files that actually were excluded. */ @VisibleForTesting - public static long processFile(Path file, Map copyFiles, + public static long processFile(Path file, Map> copyFiles, Map hardLinkFiles, - Map sstFilesToExclude, + Map> sstFilesToExclude, List excluded, Path destDir) throws IOException { @@ -458,7 +466,7 @@ public static long processFile(Path file, Map copyFiles, if (destDir != null) { destFile = Paths.get(destDir.toString(), fileName); } - if (sstFilesToExclude.containsKey(file)) { + if (sstFilesToExclude.getOrDefault(fileNamePath.toString(), Collections.emptyMap()).containsKey(file)) { excluded.add(destFile.toString()); } else { if (fileName.endsWith(ROCKSDB_SST_SUFFIX)) { @@ -473,13 +481,13 @@ public static long processFile(Path file, Map copyFiles, hardLinkFiles.put(destFile, linkPath); } else { // Add to tarball. - copyFiles.put(file, destFile); + copyFiles.computeIfAbsent(fileNamePath.toString(), (k) -> new HashMap<>()).put(file, destFile); fileSize = Files.size(file); } } } else { // Not sst file. - copyFiles.put(file, destFile); + copyFiles.computeIfAbsent(fileNamePath.toString(), (k) -> new HashMap<>()).put(file, destFile); } } return fileSize; @@ -494,7 +502,7 @@ public static long processFile(Path file, Map copyFiles, * @param file - File to be linked. * @return dest path of file to be linked to. 
*/ - private static Path findLinkPath(Map files, Path file) + private static Path findLinkPath(Map> files, Path file) throws IOException { // findbugs nonsense Path fileNamePath = file.getFileName(); @@ -503,7 +511,7 @@ private static Path findLinkPath(Map files, Path file) } String fileName = fileNamePath.toString(); - for (Map.Entry entry: files.entrySet()) { + for (Map.Entry entry : files.getOrDefault(fileName, Collections.emptyMap()).entrySet()) { Path srcPath = entry.getKey(); Path destPath = entry.getValue(); if (!srcPath.toString().endsWith(fileName)) { diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java index 1d00ec614cd..3a98b4f6298 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.om; +import com.google.common.collect.ImmutableMap; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils; @@ -72,6 +73,7 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.Mockito.mock; @@ -371,17 +373,17 @@ public void testExcludeUtilities() throws IOException { "backup.sst"); truncateLength = leaderDir.toString().length() + 1; existingSstList.add(truncateFileName(truncateLength, destSstBackup)); - Map normalizedMap = + Map> normalizedMap = OMDBCheckpointServlet.normalizeExcludeList(existingSstList, leaderCheckpointDir.toPath(), sstBackupDir); - Map expectedMap = new TreeMap<>(); + Map> expectedMap = new TreeMap<>(); Path s1 = Paths.get(leaderSnapDir1.toString(), "s1.sst"); Path noLink = Paths.get(leaderSnapDir2.toString(), "noLink.sst"); Path f1 = Paths.get(leaderCheckpointDir.toString(), "f1.sst"); - expectedMap.put(s1, s1); - expectedMap.put(noLink, noLink); - expectedMap.put(f1, f1); - expectedMap.put(srcSstBackup, destSstBackup); + expectedMap.put("s1.sst", ImmutableMap.of(s1, s1)); + expectedMap.put("noLink.sst", ImmutableMap.of(noLink, noLink)); + expectedMap.put("f1.sst", ImmutableMap.of(f1, f1)); + expectedMap.put("backup.sst", ImmutableMap.of(srcSstBackup, destSstBackup)); assertEquals(expectedMap, new TreeMap<>(normalizedMap)); } @@ -396,11 +398,15 @@ void testProcessFileWithNullDestDirParameter(@TempDir File testDir) throws IOExc assertTrue(new File(testDir, "snap2").mkdirs()); Path copyFile = Paths.get(testDir.toString(), "snap1/copyfile.sst"); + Path copyFileName = copyFile.getFileName(); + assertNotNull(copyFileName); Files.write(copyFile, "dummyData".getBytes(StandardCharsets.UTF_8)); long expectedFileSize = Files.size(copyFile); Path excludeFile = Paths.get(testDir.toString(), "snap1/excludeFile.sst"); + Path excludeFileName = excludeFile.getFileName(); + assertNotNull(excludeFileName); Files.write(excludeFile, "dummyData".getBytes(StandardCharsets.UTF_8)); Path linkToExcludedFile = Paths.get(testDir.toString(), @@ -418,10 +424,12 @@ void testProcessFileWithNullDestDirParameter(@TempDir File 
testDir) throws IOExc Files.write(addNonSstToCopiedFiles, "dummyData".getBytes(StandardCharsets.UTF_8)); - Map toExcludeFiles = new HashMap<>(); - toExcludeFiles.put(excludeFile, excludeFile); - Map copyFiles = new HashMap<>(); - copyFiles.put(copyFile, copyFile); + Map> toExcludeFiles = new HashMap<>(); + toExcludeFiles.computeIfAbsent(excludeFileName.toString(), (k) -> new HashMap<>()).put(excludeFile, + excludeFile); + Map> copyFiles = new HashMap<>(); + copyFiles.computeIfAbsent(copyFileName.toString(), (k) -> new HashMap<>()).put(copyFile, + copyFile); List excluded = new ArrayList<>(); Map hardLinkFiles = new HashMap<>(); long fileSize; @@ -461,10 +469,10 @@ void testProcessFileWithNullDestDirParameter(@TempDir File testDir) throws IOExc toExcludeFiles, excluded, null); assertEquals(excluded.size(), 0); assertEquals(copyFiles.size(), 2); - assertEquals(copyFiles.get(addToCopiedFiles), addToCopiedFiles); + assertEquals(copyFiles.get(addToCopiedFiles.getFileName().toString()).get(addToCopiedFiles), addToCopiedFiles); assertEquals(fileSize, expectedFileSize); copyFiles = new HashMap<>(); - copyFiles.put(copyFile, copyFile); + copyFiles.computeIfAbsent(copyFileName.toString(), (k) -> new HashMap<>()).put(copyFile, copyFile); // Confirm the addNonSstToCopiedFiles gets added to list of copied files fileSize = processFile(addNonSstToCopiedFiles, copyFiles, hardLinkFiles, @@ -472,7 +480,7 @@ void testProcessFileWithNullDestDirParameter(@TempDir File testDir) throws IOExc assertEquals(excluded.size(), 0); assertEquals(copyFiles.size(), 2); assertEquals(fileSize, 0); - assertEquals(copyFiles.get(addNonSstToCopiedFiles), + assertEquals(copyFiles.get(addNonSstToCopiedFiles.getFileName().toString()).get(addNonSstToCopiedFiles), addNonSstToCopiedFiles); } @@ -492,6 +500,8 @@ void testProcessFileWithDestDirParameter(@TempDir File testDir) throws IOExcepti // Create test files. Path copyFile = Paths.get(testDir.toString(), "snap1/copyfile.sst"); + Path copyFileName = copyFile.getFileName(); + assertNotNull(copyFileName); Path destCopyFile = Paths.get(destDir.toString(), "snap1/copyfile.sst"); Files.write(copyFile, @@ -505,6 +515,8 @@ void testProcessFileWithDestDirParameter(@TempDir File testDir) throws IOExcepti long expectedFileSize = Files.size(copyFile); Path excludeFile = Paths.get(testDir.toString(), "snap1/excludeFile.sst"); + Path excludeFileName = excludeFile.getFileName(); + assertNotNull(excludeFileName); Path destExcludeFile = Paths.get(destDir.toString(), "snap1/excludeFile.sst"); Files.write(excludeFile, @@ -539,10 +551,10 @@ void testProcessFileWithDestDirParameter(@TempDir File testDir) throws IOExcepti "dummyData".getBytes(StandardCharsets.UTF_8)); // Create test data structures. 
- Map toExcludeFiles = new HashMap<>(); - toExcludeFiles.put(excludeFile, destExcludeFile); - Map copyFiles = new HashMap<>(); - copyFiles.put(copyFile, destCopyFile); + Map> toExcludeFiles = new HashMap<>(); + toExcludeFiles.put(excludeFileName.toString(), ImmutableMap.of(excludeFile, destExcludeFile)); + Map> copyFiles = new HashMap<>(); + copyFiles.computeIfAbsent(copyFileName.toString(), (k) -> new HashMap<>()).put(copyFile, destCopyFile); List excluded = new ArrayList<>(); Map hardLinkFiles = new HashMap<>(); long fileSize; @@ -575,11 +587,11 @@ void testProcessFileWithDestDirParameter(@TempDir File testDir) throws IOExcepti assertEquals(excluded.size(), 0); assertEquals(copyFiles.size(), 2); assertEquals(hardLinkFiles.size(), 0); - assertEquals(copyFiles.get(sameNameAsExcludeFile), + assertEquals(copyFiles.get(sameNameAsExcludeFile.getFileName().toString()).get(sameNameAsExcludeFile), destSameNameAsExcludeFile); assertEquals(fileSize, expectedFileSize); copyFiles = new HashMap<>(); - copyFiles.put(copyFile, destCopyFile); + copyFiles.computeIfAbsent(copyFileName.toString(), (k) -> new HashMap<>()).put(copyFile, destCopyFile); // Confirm the file with same name as copy file gets copied. @@ -588,11 +600,11 @@ void testProcessFileWithDestDirParameter(@TempDir File testDir) throws IOExcepti assertEquals(excluded.size(), 0); assertEquals(copyFiles.size(), 2); assertEquals(hardLinkFiles.size(), 0); - assertEquals(copyFiles.get(sameNameAsCopyFile), + assertEquals(copyFiles.get(sameNameAsCopyFile.getFileName().toString()).get(sameNameAsCopyFile), destSameNameAsCopyFile); assertEquals(fileSize, expectedFileSize); copyFiles = new HashMap<>(); - copyFiles.put(copyFile, destCopyFile); + copyFiles.computeIfAbsent(copyFileName.toString(), (k) -> new HashMap<>()).put(copyFile, destCopyFile); // Confirm the linkToCopiedFile gets added as a link. @@ -611,11 +623,11 @@ void testProcessFileWithDestDirParameter(@TempDir File testDir) throws IOExcepti toExcludeFiles, excluded, destAddToCopiedFiles.getParent()); assertEquals(excluded.size(), 0); assertEquals(copyFiles.size(), 2); - assertEquals(copyFiles.get(addToCopiedFiles), + assertEquals(copyFiles.get(addToCopiedFiles.getFileName().toString()).get(addToCopiedFiles), destAddToCopiedFiles); assertEquals(fileSize, expectedFileSize); copyFiles = new HashMap<>(); - copyFiles.put(copyFile, destCopyFile); + copyFiles.computeIfAbsent(copyFileName.toString(), (k) -> new HashMap<>()).put(copyFile, destCopyFile); // Confirm the addNonSstToCopiedFiles gets added to list of copied files fileSize = processFile(addNonSstToCopiedFiles, copyFiles, hardLinkFiles, @@ -623,7 +635,7 @@ void testProcessFileWithDestDirParameter(@TempDir File testDir) throws IOExcepti assertEquals(excluded.size(), 0); assertEquals(copyFiles.size(), 2); assertEquals(fileSize, 0); - assertEquals(copyFiles.get(addNonSstToCopiedFiles), + assertEquals(copyFiles.get(addNonSstToCopiedFiles.getFileName().toString()).get(addNonSstToCopiedFiles), destAddNonSstToCopiedFiles); } From 7f70729d03875d7bae11433590bc2f97c6a632c6 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 17 Jan 2025 11:21:20 +0100 Subject: [PATCH 093/168] HDDS-12092. 
Enable sortpom in Recon (#7707) --- hadoop-ozone/recon-codegen/pom.xml | 49 +- hadoop-ozone/recon/pom.xml | 696 ++++++++++++++--------------- 2 files changed, 369 insertions(+), 376 deletions(-) diff --git a/hadoop-ozone/recon-codegen/pom.xml b/hadoop-ozone/recon-codegen/pom.xml index 0a279c40ab2..2dfb77e9e97 100644 --- a/hadoop-ozone/recon-codegen/pom.xml +++ b/hadoop-ozone/recon-codegen/pom.xml @@ -12,32 +12,25 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + + 4.0.0 - ozone org.apache.ozone + ozone 2.0.0-SNAPSHOT - 4.0.0 ozone-reconcodegen Apache Ozone Recon CodeGen - true - true + + true - org.apache.ozone - hdds-config - - - org.apache.ozone - ozone-common + com.google.inject + guice - commons-io commons-io @@ -46,18 +39,17 @@ org.apache.derby derby - - org.slf4j - slf4j-api + org.apache.ozone + hdds-config - org.springframework - spring-jdbc + org.apache.ozone + ozone-common - org.springframework - spring-tx + org.jooq + jooq org.jooq @@ -68,12 +60,16 @@ jooq-meta - org.jooq - jooq + org.slf4j + slf4j-api - com.google.inject - guice + org.springframework + spring-jdbc + + + org.springframework + spring-tx @@ -100,7 +96,8 @@ maven-enforcer-plugin - ban-annotations + ban-annotations + diff --git a/hadoop-ozone/recon/pom.xml b/hadoop-ozone/recon/pom.xml index 85d2eac9d2b..1fad8fab076 100644 --- a/hadoop-ozone/recon/pom.xml +++ b/hadoop-ozone/recon/pom.xml @@ -12,292 +12,139 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + + 4.0.0 org.apache.ozone ozone 2.0.0-SNAPSHOT - Apache Ozone Recon - 4.0.0 ozone-recon + Apache Ozone Recon false 8.15.7 - true - - - - src/main/resources - - **/node_modules/** - - - - - - org.apache.maven.plugins - maven-compiler-plugin - - - - org.apache.ozone - hdds-config - ${hdds.version} - - - - org.apache.hadoop.hdds.conf.ConfigFileGenerator - - - - - org.apache.maven.plugins - maven-enforcer-plugin - - - ban-annotations - - - - Only selected annotation processors are enabled, see configuration of maven-compiler-plugin. 
- - org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator - org.apache.hadoop.hdds.scm.metadata.Replicate - org.kohsuke.MetaInfServices - - - - - - - - - org.codehaus.mojo - exec-maven-plugin - - - generate-resources - - java - - - - - java - compile - org.hadoop.ozone.recon.codegen.JooqCodeGenerator - false - - ${project.build.directory}/generated-sources/java - - - - - org.codehaus.mojo - build-helper-maven-plugin - - - add-source - generate-sources - - add-source - - - - ${project.build.directory}/generated-sources/java - - - - - - - com.github.spotbugs - spotbugs-maven-plugin - - ${basedir}/dev-support/findbugsExcludeFile.xml - - - - - com.github.eirslett - frontend-maven-plugin - ${frontend-maven-plugin.version} - - false - target - ${basedir}/src/main/resources/webapps/recon/ozone-recon-web - - - - Install node and npm locally to the project - - install-node-and-npm - - - v${nodejs.version} - - - - set pnpm@${pnpm.version} store path - - npx - - - pnpm@${pnpm.version} config set store-dir ~/.pnpm-store - - - - install frontend dependencies - - npx - - - pnpm@${pnpm.version} install --frozen-lockfile - - - - Build frontend - - npx - - - pnpm@${pnpm.version} run build - - - - - - maven-clean-plugin - ${maven-clean-plugin.version} - - - - ${basedir}/src/main/resources/webapps/recon/ozone-recon-web/build - - ** - - false - - - ${basedir}/src/main/resources/webapps/recon/ozone-recon-web/node_modules - - ** - - false - - - - - - org.apache.maven.plugins - maven-resources-plugin - - - Copy frontend build to target - process-resources - - copy-resources - - - ${project.build.outputDirectory}/webapps/recon - - - ${basedir}/src/main/resources/webapps/recon/ozone-recon-web/build - - static/** - - true - - - - - - Copy frontend static files to target - process-resources - - copy-resources - - - ${project.build.outputDirectory}/webapps/static - - - ${basedir}/src/main/resources/webapps/recon/ozone-recon-web/build/static - true - - - - woff - woff2 - - - - - - - - org.apache.ozone - hdds-common + aopalliance + aopalliance - org.apache.ozone - hdds-config + com.fasterxml.jackson.core + jackson-annotations - org.apache.ozone - hdds-container-service + com.fasterxml.jackson.core + jackson-databind - org.apache.ozone - hdds-interface-client + com.google.guava + guava + + + com.google.inject + guice + + + com.google.inject.extensions + guice-assistedinject + + + com.google.inject.extensions + guice-servlet + + + com.google.protobuf + protobuf-java + + + com.jolbox + bonecp + + + commons-collections + commons-collections + + + commons-io + commons-io + + + info.picocli + picocli + + + jakarta.activation + jakarta.activation-api + + + jakarta.annotation + jakarta.annotation-api + + + jakarta.validation + jakarta.validation-api + + + jakarta.ws.rs + jakarta.ws.rs-api + + + jakarta.xml.bind + jakarta.xml.bind-api + + + javax.inject + javax.inject + + + javax.servlet + javax.servlet-api + + + org.apache.commons + commons-compress + + + org.apache.commons + commons-lang3 + + + org.apache.derby + derby org.apache.ozone - hdds-interface-server + hdds-common org.apache.ozone - hdds-managed-rocksdb + hdds-config org.apache.ozone - hdds-server-framework + hdds-container-service org.apache.ozone - ozone-common + hdds-interface-client org.apache.ozone - ozone-interface-client + hdds-interface-server org.apache.ozone - ozone-interface-storage + hdds-managed-rocksdb org.apache.ozone - ozone-reconcodegen - ${ozone.version} + hdds-server-framework org.apache.ozone - ozone-manager - - - com.sun.jersey - 
* - - + hdds-server-scm @@ -312,24 +159,30 @@ org.apache.ozone - hdds-server-scm + ozone-common - - commons-collections - commons-collections + org.apache.ozone + ozone-interface-client - org.apache.commons - commons-compress + org.apache.ozone + ozone-interface-storage - commons-io - commons-io + org.apache.ozone + ozone-manager + + + com.sun.jersey + * + + - org.apache.commons - commons-lang3 + org.apache.ozone + ozone-reconcodegen + ${ozone.version} org.apache.ratis @@ -339,59 +192,34 @@ org.apache.ratis ratis-proto - - - aopalliance - aopalliance - - com.fasterxml.jackson.core - jackson-annotations - - - com.fasterxml.jackson.core - jackson-databind - - - com.google.guava - guava + org.eclipse.jetty + jetty-servlet - com.google.inject - guice + org.eclipse.jetty + jetty-util - com.google.inject.extensions - guice-assistedinject + org.glassfish.hk2 + guice-bridge - com.google.inject.extensions - guice-servlet + org.glassfish.hk2 + hk2-api - com.google.protobuf - protobuf-java + org.glassfish.hk2.external + jakarta.inject org.glassfish.jersey.containers jersey-container-servlet - - info.picocli - picocli - org.glassfish.jersey.containers jersey-container-servlet-core - - org.glassfish.hk2 - guice-bridge - - - org.glassfish.hk2.external - jakarta.inject - org.glassfish.jersey.core jersey-common @@ -401,48 +229,32 @@ jersey-server - org.glassfish.jersey.media - jersey-media-json-jackson + org.glassfish.jersey.inject + jersey-hk2 org.glassfish.jersey.media jersey-media-jaxb - org.glassfish.jersey.inject - jersey-hk2 - - - org.jooq - jooq + org.glassfish.jersey.media + jersey-media-json-jackson - org.jooq - jooq-meta + org.javassist + javassist org.jooq - jooq-codegen - - - com.jolbox - bonecp - - - org.apache.derby - derby - - - org.eclipse.jetty - jetty-servlet + jooq - org.eclipse.jetty - jetty-util + org.jooq + jooq-codegen - org.glassfish.hk2 - hk2-api + org.jooq + jooq-meta org.reflections @@ -452,10 +264,6 @@ org.rocksdb rocksdbjni - - org.xerial - sqlite-jdbc - org.slf4j slf4j-api @@ -473,36 +281,8 @@ spring-tx - jakarta.activation - jakarta.activation-api - - - jakarta.annotation - jakarta.annotation-api - - - jakarta.validation - jakarta.validation-api - - - jakarta.ws.rs - jakarta.ws.rs-api - - - jakarta.xml.bind - jakarta.xml.bind-api - - - javax.inject - javax.inject - - - javax.servlet - javax.servlet-api - - - org.javassist - javassist + org.xerial + sqlite-jdbc @@ -523,4 +303,220 @@ test + + + + src/main/resources + + **/node_modules/** + + + + + + org.apache.maven.plugins + maven-compiler-plugin + + + + org.apache.ozone + hdds-config + ${hdds.version} + + + + org.apache.hadoop.hdds.conf.ConfigFileGenerator + + + + + org.apache.maven.plugins + maven-enforcer-plugin + + + ban-annotations + + + + + Only selected annotation processors are enabled, see configuration of maven-compiler-plugin. 
+ + org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator + org.apache.hadoop.hdds.scm.metadata.Replicate + org.kohsuke.MetaInfServices + + + + + + + + + org.codehaus.mojo + exec-maven-plugin + + java + compile + org.hadoop.ozone.recon.codegen.JooqCodeGenerator + false + + ${project.build.directory}/generated-sources/java + + + + + + java + + generate-resources + + + + + org.codehaus.mojo + build-helper-maven-plugin + + + add-source + + add-source + + generate-sources + + + ${project.build.directory}/generated-sources/java + + + + + + + com.github.spotbugs + spotbugs-maven-plugin + + ${basedir}/dev-support/findbugsExcludeFile.xml + + + + + com.github.eirslett + frontend-maven-plugin + ${frontend-maven-plugin.version} + + false + target + ${basedir}/src/main/resources/webapps/recon/ozone-recon-web + + + + Install node and npm locally to the project + + install-node-and-npm + + + v${nodejs.version} + + + + set pnpm@${pnpm.version} store path + + npx + + + pnpm@${pnpm.version} config set store-dir ~/.pnpm-store + + + + install frontend dependencies + + npx + + + pnpm@${pnpm.version} install --frozen-lockfile + + + + Build frontend + + npx + + + pnpm@${pnpm.version} run build + + + + + + maven-clean-plugin + ${maven-clean-plugin.version} + + + + ${basedir}/src/main/resources/webapps/recon/ozone-recon-web/build + + ** + + false + + + ${basedir}/src/main/resources/webapps/recon/ozone-recon-web/node_modules + + ** + + false + + + + + + org.apache.maven.plugins + maven-resources-plugin + + + Copy frontend build to target + + copy-resources + + process-resources + + ${project.build.outputDirectory}/webapps/recon + + + ${basedir}/src/main/resources/webapps/recon/ozone-recon-web/build + + static/** + + true + + + + + + Copy frontend static files to target + + copy-resources + + process-resources + + ${project.build.outputDirectory}/webapps/static + + + ${basedir}/src/main/resources/webapps/recon/ozone-recon-web/build/static + true + + + + woff + woff2 + + + + + + + From e76d99041fb2150b402181183a998826f10e6c96 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 17 Jan 2025 21:09:08 +0100 Subject: [PATCH 094/168] HDDS-12088. 
Speed up TestStorageContainerManager (#7706) --- .../hdds/scm/TestStorageContainerManager.java | 470 +++++++----------- 1 file changed, 193 insertions(+), 277 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java index b00c7f8040b..c7e6e96284a 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java @@ -21,9 +21,9 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; import org.apache.commons.lang3.RandomUtils; +import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.conf.DefaultConfigManager; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -89,30 +89,25 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand; -import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode; import org.apache.hadoop.ozone.protocol.commands.DeleteBlocksCommand; import org.apache.hadoop.ozone.protocol.commands.SCMCommand; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ExitUtil; -import org.apache.hadoop.util.Time; import org.apache.log4j.Level; import org.apache.log4j.LogManager; import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.conf.RaftProperties; import org.apache.ratis.protocol.RaftGroupId; import org.apache.ratis.server.RaftServerConfigKeys; -import org.junit.jupiter.api.AfterAll; -import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.api.io.TempDir; -import org.mockito.ArgumentMatcher; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.File; import java.io.IOException; +import java.net.UnknownHostException; import java.nio.file.Path; import java.time.Duration; import java.util.ArrayList; @@ -131,7 +126,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Predicate; import java.util.stream.Stream; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY; @@ -162,55 +156,57 @@ */ @Timeout(900) public class TestStorageContainerManager { + private static final int KEY_COUNT = 5; private static final String LOCALHOST_IP = "127.0.0.1"; - private static XceiverClientManager xceiverClientManager; private static final Logger LOG = LoggerFactory.getLogger( TestStorageContainerManager.class); - @BeforeAll - public static void setup() throws IOException { - xceiverClientManager = new XceiverClientManager(new OzoneConfiguration()); - } + /** This runs most test cases in a single cluster. 
*/ + @Test + void test(@TempDir Path tempDir) throws Exception { + OzoneConfiguration conf = new OzoneConfiguration(); + configureTopology(conf); + configureBlockDeletion(conf); + Path scmPath = tempDir.resolve("scm-meta"); + conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); - @AfterAll - public static void cleanup() { - if (xceiverClientManager != null) { - xceiverClientManager.close(); - } - } + try (MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(1).build()) { + cluster.waitForClusterToBeReady(); - @AfterEach - public void cleanupDefaults() { - DefaultConfigManager.clearDefaultConfigs(); + // non-destructive test cases + testBlockDeletionTransactions(cluster); + testRpcPermission(cluster); + testScmProcessDatanodeHeartbeat(cluster); + + StorageContainerManager scm = cluster.getStorageContainerManager(); + List directories = Arrays.asList( + new File(SCMHAUtils.getRatisStorageDir(scm.getConfiguration())), + scm.getScmMetadataStore().getStore().getDbLocation(), + new File(scm.getScmStorageConfig().getStorageDir()) + ); + + // re-init + testSCMReinitialization(cluster); + + // re-init after delete + directories.forEach(FileUtil::fullyDelete); + testOldDNRegistersToReInitialisedSCM(cluster); + } } - @Test - public void testRpcPermission() throws Exception { + private void testRpcPermission(MiniOzoneCluster cluster) throws Exception { // Test with default configuration - OzoneConfiguration defaultConf = new OzoneConfiguration(); - testRpcPermissionWithConf(defaultConf, any -> false, "unknownUser"); + testRpcPermission(cluster, "anyUser", true); + + // Update ozone.administrators in configuration + cluster.getStorageContainerManager() + .getReconfigurationHandler() + .reconfigureProperty(OzoneConfigKeys.OZONE_ADMINISTRATORS, "adminUser1, adminUser2"); - // Test with ozone.administrators defined in configuration - String admins = "adminUser1, adminUser2"; - OzoneConfiguration ozoneConf = new OzoneConfiguration(); - ozoneConf.setStrings(OzoneConfigKeys.OZONE_ADMINISTRATORS, admins); // Non-admin user will get permission denied. + testRpcPermission(cluster, "unknownUser", true); // Admin user will pass the permission check. - testRpcPermissionWithConf(ozoneConf, admins::contains, - "unknownUser", "adminUser2"); - } - - private void testRpcPermissionWithConf( - OzoneConfiguration ozoneConf, - Predicate isAdmin, - String... 
usernames) throws Exception { - try (MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(ozoneConf).build()) { - cluster.waitForClusterToBeReady(); - for (String username : usernames) { - testRpcPermission(cluster, username, - !isAdmin.test(username)); - } - } // The cluster is automatically closed here + testRpcPermission(cluster, "adminUser2", false); } private void testRpcPermission(MiniOzoneCluster cluster, @@ -260,10 +256,64 @@ private void verifyPermissionDeniedException(Exception e, String userName) { assertEquals(expectedErrorMessage, e.getMessage()); } - @Test - public void testBlockDeletionTransactions() throws Exception { - int numKeys = 5; - OzoneConfiguration conf = new OzoneConfiguration(); + private void testBlockDeletionTransactions(MiniOzoneCluster cluster) throws Exception { + DeletedBlockLog delLog = cluster.getStorageContainerManager() + .getScmBlockManager().getDeletedBlockLog(); + assertEquals(0, delLog.getNumOfValidTransactions()); + + Map keyLocations = TestDataUtil.createKeys(cluster, KEY_COUNT); + // Wait for container report + Thread.sleep(1000); + for (OmKeyInfo keyInfo : keyLocations.values()) { + OzoneTestUtils.closeContainers(keyInfo.getKeyLocationVersions(), + cluster.getStorageContainerManager()); + } + Map> containerBlocks = createDeleteTXLog( + cluster.getStorageContainerManager(), + delLog, keyLocations, cluster); + + // Verify a few TX gets created in the TX log. + assertThat(delLog.getNumOfValidTransactions()).isGreaterThan(0); + + // Once TXs are written into the log, SCM starts to fetch TX + // entries from the log and schedule block deletions in HB interval, + // after sometime, all the TX should be proceed and by then + // the number of containerBlocks of all known containers will be + // empty again. + OzoneTestUtils.waitBlockDeleted(cluster.getStorageContainerManager()); + assertTrue(verifyBlocksWithTxnTable(cluster, containerBlocks)); + // Continue the work, add some TXs that with known container names, + // but unknown block IDs. + for (Long containerID : containerBlocks.keySet()) { + // Add 2 TXs per container. + Map> deletedBlocks = new HashMap<>(); + List blocks = new ArrayList<>(); + blocks.add(RandomUtils.nextLong()); + blocks.add(RandomUtils.nextLong()); + deletedBlocks.put(containerID, blocks); + addTransactions(cluster.getStorageContainerManager(), delLog, + deletedBlocks); + } + + // Verify a few TX gets created in the TX log. + assertThat(delLog.getNumOfValidTransactions()).isGreaterThan(0); + + // These blocks cannot be found in the container, skip deleting them + // eventually these TX will success. + GenericTestUtils.waitFor(() -> { + try { + if (SCMHAUtils.isSCMHAEnabled(cluster.getConf())) { + cluster.getStorageContainerManager().getScmHAManager() + .asSCMHADBTransactionBuffer().flush(); + } + return delLog.getFailedTransactions(-1, 0).size() == 0; + } catch (IOException e) { + return false; + } + }, 1000, 20000); + } + + private static void configureBlockDeletion(OzoneConfiguration conf) { conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS); DatanodeConfiguration datanodeConfiguration = conf.getObject( @@ -289,153 +339,71 @@ public void testBlockDeletionTransactions() throws Exception { conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5); // Reset container provision size, otherwise only one container // is created by default. 
- conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, - numKeys); - - try (MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .build()) { - cluster.waitForClusterToBeReady(); - DeletedBlockLog delLog = cluster.getStorageContainerManager() - .getScmBlockManager().getDeletedBlockLog(); - assertEquals(0, delLog.getNumOfValidTransactions()); - - Map keyLocations = TestDataUtil.createKeys(cluster, numKeys); - // Wait for container report - Thread.sleep(1000); - for (OmKeyInfo keyInfo : keyLocations.values()) { - OzoneTestUtils.closeContainers(keyInfo.getKeyLocationVersions(), - cluster.getStorageContainerManager()); - } - Map> containerBlocks = createDeleteTXLog( - cluster.getStorageContainerManager(), - delLog, keyLocations, cluster, conf); - - // Verify a few TX gets created in the TX log. - assertThat(delLog.getNumOfValidTransactions()).isGreaterThan(0); - - // Once TXs are written into the log, SCM starts to fetch TX - // entries from the log and schedule block deletions in HB interval, - // after sometime, all the TX should be proceed and by then - // the number of containerBlocks of all known containers will be - // empty again. - OzoneTestUtils.waitBlockDeleted(cluster.getStorageContainerManager()); - assertTrue(verifyBlocksWithTxnTable(cluster, conf, containerBlocks)); - // Continue the work, add some TXs that with known container names, - // but unknown block IDs. - for (Long containerID : containerBlocks.keySet()) { - // Add 2 TXs per container. - Map> deletedBlocks = new HashMap<>(); - List blocks = new ArrayList<>(); - blocks.add(RandomUtils.nextLong()); - blocks.add(RandomUtils.nextLong()); - deletedBlocks.put(containerID, blocks); - addTransactions(cluster.getStorageContainerManager(), delLog, - deletedBlocks); - } - - // Verify a few TX gets created in the TX log. - assertThat(delLog.getNumOfValidTransactions()).isGreaterThan(0); - - // These blocks cannot be found in the container, skip deleting them - // eventually these TX will success. 
- GenericTestUtils.waitFor(() -> { - try { - if (SCMHAUtils.isSCMHAEnabled(cluster.getConf())) { - cluster.getStorageContainerManager().getScmHAManager() - .asSCMHADBTransactionBuffer().flush(); - } - return delLog.getFailedTransactions(-1, 0).size() == 0; - } catch (IOException e) { - return false; - } - }, 1000, 20000); - } + conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, 10 * KEY_COUNT); } - @Test - public void testOldDNRegistersToReInitialisedSCM() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, TimeUnit.MILLISECONDS); - conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 3000, TimeUnit.MILLISECONDS); - - - - try (MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(1) - .build()) { - cluster.waitForClusterToBeReady(); - HddsDatanodeService datanode = cluster.getHddsDatanodes().get(0); - StorageContainerManager scm = cluster.getStorageContainerManager(); - File dbDir = scm.getScmMetadataStore().getStore().getDbLocation(); - scm.stop(); - - // re-initialise SCM with new clusterID - - GenericTestUtils.deleteDirectory(new File(SCMHAUtils.getRatisStorageDir(conf))); - GenericTestUtils.deleteDirectory(dbDir); - GenericTestUtils.deleteDirectory( - new File(scm.getScmStorageConfig().getStorageDir())); - String newClusterId = UUID.randomUUID().toString(); - StorageContainerManager.scmInit(scm.getConfiguration(), newClusterId); - scm = HddsTestUtils.getScmSimple(scm.getConfiguration()); - - DatanodeStateMachine dsm = datanode.getDatanodeStateMachine(); - assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING, + // assumes SCM is already stopped + private void testOldDNRegistersToReInitialisedSCM(MiniOzoneCluster cluster) throws Exception { + HddsDatanodeService datanode = cluster.getHddsDatanodes().get(0); + + // re-initialise SCM with new clusterID + String newClusterId = UUID.randomUUID().toString(); + StorageContainerManager.scmInit(cluster.getConf(), newClusterId); + StorageContainerManager scm = HddsTestUtils.getScmSimple(cluster.getConf()); + + DatanodeStateMachine dsm = datanode.getDatanodeStateMachine(); + assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING, + dsm.getContext().getState()); + // DN Endpoint State has already gone through GetVersion and Register, + // so it will be in HEARTBEAT state. + for (EndpointStateMachine endpoint : dsm.getConnectionManager() + .getValues()) { + assertEquals(EndpointStateMachine.EndPointStates.HEARTBEAT, + endpoint.getState()); + } + GenericTestUtils.LogCapturer scmDnHBDispatcherLog = + GenericTestUtils.LogCapturer.captureLogs( + SCMDatanodeHeartbeatDispatcher.LOG); + LogManager.getLogger(HeartbeatEndpointTask.class).setLevel(Level.DEBUG); + GenericTestUtils.LogCapturer heartbeatEndpointTaskLog = + GenericTestUtils.LogCapturer.captureLogs(HeartbeatEndpointTask.LOG); + GenericTestUtils.LogCapturer versionEndPointTaskLog = + GenericTestUtils.LogCapturer.captureLogs(VersionEndpointTask.LOG); + // Initially empty + assertThat(scmDnHBDispatcherLog.getOutput()).isEmpty(); + assertThat(versionEndPointTaskLog.getOutput()).isEmpty(); + // start the new SCM + try { + scm.start(); + // DN heartbeats to new SCM, SCM doesn't recognize the node, sends the + // command to DN to re-register. Wait for SCM to send re-register command + String expectedLog = String.format( + "SCM received heartbeat from an unregistered datanode %s. 
" + + "Asking datanode to re-register.", + datanode.getDatanodeDetails()); + GenericTestUtils.waitFor( + () -> scmDnHBDispatcherLog.getOutput().contains(expectedLog), 100, + 30000); + ExitUtil.disableSystemExit(); + // As part of processing response for re-register, DN EndpointStateMachine + // goes to GET-VERSION state which checks if there is already existing + // version file on the DN & if the clusterID matches with that of the SCM + // In this case, it won't match and gets InconsistentStorageStateException + // and DN shuts down. + String expectedLog2 = "Received SCM notification to register." + + " Interrupt HEARTBEAT and transit to GETVERSION state."; + GenericTestUtils.waitFor( + () -> heartbeatEndpointTaskLog.getOutput().contains(expectedLog2), + 100, 5000); + GenericTestUtils.waitFor(() -> dsm.getContext().getShutdownOnError(), 100, + 5000); + assertEquals(DatanodeStateMachine.DatanodeStates.SHUTDOWN, dsm.getContext().getState()); - // DN Endpoint State has already gone through GetVersion and Register, - // so it will be in HEARTBEAT state. - for (EndpointStateMachine endpoint : dsm.getConnectionManager() - .getValues()) { - assertEquals(EndpointStateMachine.EndPointStates.HEARTBEAT, - endpoint.getState()); - } - GenericTestUtils.LogCapturer scmDnHBDispatcherLog = - GenericTestUtils.LogCapturer.captureLogs( - SCMDatanodeHeartbeatDispatcher.LOG); - LogManager.getLogger(HeartbeatEndpointTask.class).setLevel(Level.DEBUG); - GenericTestUtils.LogCapturer heartbeatEndpointTaskLog = - GenericTestUtils.LogCapturer.captureLogs(HeartbeatEndpointTask.LOG); - GenericTestUtils.LogCapturer versionEndPointTaskLog = - GenericTestUtils.LogCapturer.captureLogs(VersionEndpointTask.LOG); - // Initially empty - assertThat(scmDnHBDispatcherLog.getOutput()).isEmpty(); - assertThat(versionEndPointTaskLog.getOutput()).isEmpty(); - // start the new SCM - try { - scm.start(); - // Initially DatanodeStateMachine will be in Running state - assertEquals(DatanodeStateMachine.DatanodeStates.RUNNING, - dsm.getContext().getState()); - // DN heartbeats to new SCM, SCM doesn't recognize the node, sends the - // command to DN to re-register. Wait for SCM to send re-register command - String expectedLog = String.format( - "SCM received heartbeat from an unregistered datanode %s. " - + "Asking datanode to re-register.", - datanode.getDatanodeDetails()); - GenericTestUtils.waitFor( - () -> scmDnHBDispatcherLog.getOutput().contains(expectedLog), 100, - 30000); - ExitUtil.disableSystemExit(); - // As part of processing response for re-register, DN EndpointStateMachine - // goes to GET-VERSION state which checks if there is already existing - // version file on the DN & if the clusterID matches with that of the SCM - // In this case, it won't match and gets InconsistentStorageStateException - // and DN shuts down. - String expectedLog2 = "Received SCM notification to register." 
- + " Interrupt HEARTBEAT and transit to GETVERSION state."; - GenericTestUtils.waitFor( - () -> heartbeatEndpointTaskLog.getOutput().contains(expectedLog2), - 100, 5000); - GenericTestUtils.waitFor(() -> dsm.getContext().getShutdownOnError(), 100, - 5000); - assertEquals(DatanodeStateMachine.DatanodeStates.SHUTDOWN, - dsm.getContext().getState()); - assertThat(versionEndPointTaskLog.getOutput()).contains( - "org.apache.hadoop.ozone.common" + - ".InconsistentStorageStateException: Mismatched ClusterIDs"); - } finally { - scm.stop(); - } + assertThat(versionEndPointTaskLog.getOutput()).contains( + "org.apache.hadoop.ozone.common" + + ".InconsistentStorageStateException: Mismatched ClusterIDs"); + } finally { + scm.stop(); } } @@ -481,7 +449,7 @@ public void testBlockDeletingThrottling() throws Exception { } createDeleteTXLog(cluster.getStorageContainerManager(), - delLog, keyLocations, cluster, conf); + delLog, keyLocations, cluster); // Verify a few TX gets created in the TX log. assertThat(delLog.getNumOfValidTransactions()).isGreaterThan(0); @@ -508,7 +476,7 @@ public void testBlockDeletingThrottling() throws Exception { private Map> createDeleteTXLog( StorageContainerManager scm, DeletedBlockLog delLog, - Map keyLocations, MiniOzoneCluster cluster, OzoneConfiguration conf) + Map keyLocations, MiniOzoneCluster cluster) throws IOException, TimeoutException { // These keys will be written into a bunch of containers, // gets a set of container names, verify container containerBlocks @@ -527,7 +495,7 @@ private Map> createDeleteTXLog( } assertThat(totalCreatedBlocks).isGreaterThan(0); assertEquals(totalCreatedBlocks, - getAllBlocks(cluster, conf, containerNames).size()); + getAllBlocks(cluster, containerNames).size()); // Create a deletion TX for each key. Map> containerBlocks = Maps.newHashMap(); @@ -571,25 +539,15 @@ public void testSCMInitialization(@TempDir Path tempDir) throws Exception { validateRatisGroupExists(conf, clusterId.toString()); } - @Test - public void testSCMReinitialization(@TempDir Path tempDir) throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - Path scmPath = tempDir.resolve("scm-meta"); - conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString()); - //This will set the cluster id in the version file - + private void testSCMReinitialization(MiniOzoneCluster cluster) throws Exception { + cluster.getStorageContainerManager().stop(); - try (MiniOzoneCluster cluster = - MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build()) { - cluster.waitForClusterToBeReady(); - cluster.getStorageContainerManager().stop(); - final UUID clusterId = UUID.randomUUID(); - // This will initialize SCM - StorageContainerManager.scmInit(conf, clusterId.toString()); - SCMStorageConfig scmStore = new SCMStorageConfig(conf); - assertNotEquals(clusterId.toString(), scmStore.getClusterID()); - assertTrue(scmStore.isSCMHAEnabled()); - } + final UUID clusterId = UUID.randomUUID(); + // This will initialize SCM + StorageContainerManager.scmInit(cluster.getConf(), clusterId.toString()); + SCMStorageConfig scmStore = new SCMStorageConfig(cluster.getConf()); + assertNotEquals(clusterId.toString(), scmStore.getClusterID()); + assertTrue(scmStore.isSCMHAEnabled()); } @VisibleForTesting @@ -678,10 +636,22 @@ public void testScmInfo(@TempDir Path tempDir) throws Exception { /** * Test datanode heartbeat well processed with a 4-layer network topology. 
*/ - @Test - public void testScmProcessDatanodeHeartbeat() throws Exception { + private void testScmProcessDatanodeHeartbeat(MiniOzoneCluster cluster) { + NodeManager nodeManager = cluster.getStorageContainerManager().getScmNodeManager(); + List allNodes = nodeManager.getAllNodes(); + assertEquals(cluster.getHddsDatanodes().size(), allNodes.size()); + + for (DatanodeDetails node : allNodes) { + DatanodeInfo datanodeInfo = assertInstanceOf(DatanodeInfo.class, nodeManager.getNodeByUuid(node.getUuid())); + assertNotNull(datanodeInfo); + assertThat(datanodeInfo.getLastHeartbeatTime()).isPositive(); + assertEquals(datanodeInfo.getUuidString(), datanodeInfo.getNetworkName()); + assertEquals("/rack1", datanodeInfo.getNetworkLocation()); + } + } + + private static void configureTopology(OzoneConfiguration conf) throws UnknownHostException { String rackName = "/rack1"; - OzoneConfiguration conf = new OzoneConfiguration(); conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY, StaticMapping.class, DNSToSwitchMapping.class); StaticMapping.addNodeToRack(NetUtils.normalizeHostName(HddsUtils.getHostName(conf)), @@ -689,34 +659,6 @@ public void testScmProcessDatanodeHeartbeat() throws Exception { // In case of JDK17, the IP address is resolved to localhost mapped to 127.0.0.1 which is not in sync with JDK8 // and hence need to make following entry under HDDS-10132 StaticMapping.addNodeToRack(LOCALHOST_IP, rackName); - - final int datanodeNum = 3; - - try (MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(datanodeNum) - .build()) { - cluster.waitForClusterToBeReady(); - StorageContainerManager scm = cluster.getStorageContainerManager(); - // first sleep 10s - Thread.sleep(10000); - // verify datanode heartbeats are well processed - long heartbeatCheckerIntervalMs = cluster.getConf() - .getTimeDuration(HDDS_HEARTBEAT_INTERVAL, 1000, - TimeUnit.MILLISECONDS); - long start = Time.monotonicNow(); - Thread.sleep(heartbeatCheckerIntervalMs * 2); - - List allNodes = scm.getScmNodeManager().getAllNodes(); - assertEquals(datanodeNum, allNodes.size()); - for (DatanodeDetails node : allNodes) { - DatanodeInfo datanodeInfo = (DatanodeInfo) scm.getScmNodeManager() - .getNodeByUuid(node.getUuidString()); - assertThat(datanodeInfo.getLastHeartbeatTime()).isGreaterThan(start); - assertEquals(datanodeInfo.getUuidString(), - datanodeInfo.getNetworkName()); - assertEquals("/rack1", datanodeInfo.getNetworkLocation()); - } - } } @Test @@ -951,44 +893,18 @@ private void addTransactions(StorageContainerManager scm, } } - private static class CloseContainerCommandMatcher - implements ArgumentMatcher { - - private final CommandForDatanode cmd; - private final UUID uuid; - - CloseContainerCommandMatcher(UUID uuid, CommandForDatanode cmd) { - this.uuid = uuid; - this.cmd = cmd; - } - - @Override - public boolean matches(CommandForDatanode cmdRight) { - CloseContainerCommand left = (CloseContainerCommand) cmd.getCommand(); - CloseContainerCommand right = - (CloseContainerCommand) cmdRight.getCommand(); - return cmdRight.getDatanodeId().equals(uuid) - && left.getContainerID() == right.getContainerID() - && left.getPipelineID().equals(right.getPipelineID()) - && left.getType() == right.getType() - && left.getProto().equals(right.getProto()); - } - } - - public List getAllBlocks(MiniOzoneCluster cluster, OzoneConfiguration conf, Set containerIDs) - throws IOException { + public List getAllBlocks(MiniOzoneCluster cluster, Set containerIDs) throws IOException { List allBlocks = Lists.newArrayList(); for 
(Long containerID : containerIDs) { - allBlocks.addAll(getAllBlocks(cluster, conf, containerID)); + allBlocks.addAll(getAllBlocks(cluster, containerID)); } return allBlocks; } - public List getAllBlocks(MiniOzoneCluster cluster, - OzoneConfiguration conf, Long containerID) throws IOException { + public List getAllBlocks(MiniOzoneCluster cluster, Long containerID) throws IOException { List allBlocks = Lists.newArrayList(); KeyValueContainerData cData = getContainerMetadata(cluster, containerID); - try (DBHandle db = BlockUtils.getDB(cData, conf)) { + try (DBHandle db = BlockUtils.getDB(cData, cluster.getConf())) { List> kvs = db.getStore().getBlockDataTable() @@ -1003,12 +919,12 @@ public List getAllBlocks(MiniOzoneCluster cluster, return allBlocks; } - public boolean verifyBlocksWithTxnTable(MiniOzoneCluster cluster, OzoneConfiguration conf, + public boolean verifyBlocksWithTxnTable(MiniOzoneCluster cluster, Map> containerBlocks) throws IOException { for (Map.Entry> entry : containerBlocks.entrySet()) { KeyValueContainerData cData = getContainerMetadata(cluster, entry.getKey()); - try (DBHandle db = BlockUtils.getDB(cData, conf)) { + try (DBHandle db = BlockUtils.getDB(cData, cluster.getConf())) { DatanodeStore ds = db.getStore(); DatanodeStoreSchemaThreeImpl dnStoreImpl = (DatanodeStoreSchemaThreeImpl) ds; From 185917669964a81dfc93d3969bec08ccde36b8d3 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 18 Jan 2025 12:17:46 +0100 Subject: [PATCH 095/168] HDDS-12101. Bump sqlite-jdbc to 3.48.0.0 (#7712) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 9477d0fb97d..9526f9d6f6f 100644 --- a/pom.xml +++ b/pom.xml @@ -215,7 +215,7 @@ 3.0.1 3.1.12.2 5.3.39 - 3.47.2.0 + 3.48.0.0 4.2.2 false 1200 From 90466e402283fef2dd7beefa07e7d60be6f77bbb Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 18 Jan 2025 14:24:23 +0100 Subject: [PATCH 096/168] HDDS-12102. Bump Bouncy Castle to 1.80 (#7713) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 9526f9d6f6f..8cbaad0942e 100644 --- a/pom.xml +++ b/pom.xml @@ -37,7 +37,7 @@ 3.27.2 1.12.661 0.8.0.RELEASE - 1.79 + 1.80 3.6.0 2.0 9.3 From 81d098255ba67489d1928b8d6a979d9c51d1a7d5 Mon Sep 17 00:00:00 2001 From: Nandakumar Vadivelu Date: Sat, 18 Jan 2025 20:49:45 +0530 Subject: [PATCH 097/168] HDDS-12021. Enable sortpom in hadoop-ozone client, common, csi and datanode. (#7717) --- hadoop-ozone/client/pom.xml | 75 ++++++------- hadoop-ozone/common/pom.xml | 150 ++++++++++++------------- hadoop-ozone/csi/pom.xml | 200 ++++++++++++++++------------------ hadoop-ozone/datanode/pom.xml | 38 +++---- 4 files changed, 221 insertions(+), 242 deletions(-) diff --git a/hadoop-ozone/client/pom.xml b/hadoop-ozone/client/pom.xml index 427237eeaed..b935faae90d 100644 --- a/hadoop-ozone/client/pom.xml +++ b/hadoop-ozone/client/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -24,25 +21,49 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ozone-client 2.0.0-SNAPSHOT - Apache Ozone Client - Apache Ozone Client jar - - true - + Apache Ozone Client + Apache Ozone Client + + + com.fasterxml.jackson.core + jackson-annotations + + + com.github.stephenc.jcip + jcip-annotations + + + com.google.guava + guava + + + + commons-collections + commons-collections + + + jakarta.annotation + jakarta.annotation-api + + + + org.apache.commons + commons-lang3 + org.apache.ozone - hdds-common + hdds-client org.apache.ozone - hdds-config + hdds-common org.apache.ozone - hdds-client + hdds-config org.apache.ozone @@ -60,11 +81,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone ozone-interface-client - - - org.apache.commons - commons-lang3 - org.apache.ratis ratis-common @@ -78,28 +94,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> slf4j-api - - com.fasterxml.jackson.core - jackson-annotations - - - com.github.stephenc.jcip - jcip-annotations - - - com.google.guava - guava - - - - commons-collections - commons-collections - - - jakarta.annotation - jakarta.annotation-api - - org.apache.ozone @@ -155,7 +149,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> maven-enforcer-plugin - ban-annotations + ban-annotations + diff --git a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml index 1084e418069..0f8b0d9f0b1 100644 --- a/hadoop-ozone/common/pom.xml +++ b/hadoop-ozone/common/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -24,55 +21,74 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ozone-common 2.0.0-SNAPSHOT - Apache Ozone Common - Apache Ozone Common jar - - - true - + Apache Ozone Common + Apache Ozone Common + - io.grpc - grpc-netty + com.fasterxml.jackson.core + jackson-annotations - io.netty - netty-codec-http2 + com.fasterxml.jackson.core + jackson-databind - io.netty - netty-handler-proxy + com.github.stephenc.jcip + jcip-annotations - io.netty - netty-tcnative-boringssl-static - runtime + com.google.guava + guava - org.apache.commons - commons-compress + com.google.protobuf + protobuf-java + - org.apache.ozone - hdds-common + io.grpc + grpc-api + + + com.google.code.findbugs + jsr305 + + - org.apache.ozone - hdds-client + io.grpc + grpc-netty - org.apache.ozone - hdds-config + io.grpc + grpc-stub - org.apache.ozone - hdds-interface-client + io.netty + netty-codec-http2 - org.apache.ozone - ozone-interface-client + io.netty + netty-common + + + io.netty + netty-handler + + + io.netty + netty-handler-proxy + + + jakarta.annotation + jakarta.annotation-api + + + org.apache.commons + commons-compress @@ -92,68 +108,45 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> httpcore - org.apache.ratis - ratis-common - - - org.apache.ratis - ratis-proto - - - org.apache.ratis - ratis-thirdparty-misc - - - org.slf4j - slf4j-api + org.apache.ozone + hdds-client - - com.fasterxml.jackson.core - jackson-annotations + org.apache.ozone + hdds-common - com.fasterxml.jackson.core - jackson-databind + org.apache.ozone + hdds-config - com.github.stephenc.jcip - jcip-annotations + org.apache.ozone + hdds-interface-client - com.google.guava - guava + org.apache.ozone + ozone-interface-client - com.google.protobuf - protobuf-java + org.apache.ratis + ratis-common - - io.grpc - grpc-api - - - com.google.code.findbugs - jsr305 - - + org.apache.ratis + ratis-proto - io.grpc - grpc-stub + 
org.apache.ratis + ratis-thirdparty-misc - io.netty - netty-common + org.slf4j + slf4j-api io.netty - netty-handler - - - jakarta.annotation - jakarta.annotation-api + netty-tcnative-boringssl-static + runtime @@ -181,18 +174,18 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + false ${basedir}/src/main/resources ozone-version-info.properties - false + true ${basedir}/src/main/resources ozone-version-info.properties - true @@ -202,10 +195,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> version-info - generate-resources version-info + generate-resources ${basedir}/../ @@ -246,7 +239,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> maven-enforcer-plugin - ban-annotations + ban-annotations + diff --git a/hadoop-ozone/csi/pom.xml b/hadoop-ozone/csi/pom.xml index 2c5bb5d7f96..84b4dd62969 100644 --- a/hadoop-ozone/csi/pom.xml +++ b/hadoop-ozone/csi/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -24,99 +21,94 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> ozone-csi 2.0.0-SNAPSHOT - Apache Ozone CSI service - Apache Ozone CSI service jar + Apache Ozone CSI service + Apache Ozone CSI service false - true - true + + true - org.apache.ozone - hdds-common + ch.qos.reload4j + reload4j + + + com.google.guava + guava + ${guava.version} + + + com.google.protobuf + protobuf-java + ${grpc.protobuf-compile.version} com.google.protobuf protobuf-java-util ${grpc.protobuf-compile.version} - - com.google.protobuf - protobuf-java - - - com.google.j2objc - j2objc-annotations - com.google.code.findbugs jsr305 - - - - org.apache.ozone - hdds-config - - org.apache.hadoop - hadoop-common + com.google.j2objc + j2objc-annotations - org.apache.hadoop - hadoop-hdfs + com.google.protobuf + protobuf-java - - org.apache.ozone - hdds-server-framework - - - org.apache.ozone - ozone-common - commons-io commons-io - com.google.code.findbugs - jsr305 - 3.0.2 - provided - - - com.google.guava - guava - ${guava.version} + info.picocli + picocli - com.google.protobuf - protobuf-java - ${grpc.protobuf-compile.version} + io.grpc + grpc-api + + + com.google.code.findbugs + jsr305 + + io.grpc grpc-netty - io.netty - netty-transport + io.grpc + grpc-protobuf + + + com.google.code.findbugs + jsr305 + + + com.google.protobuf + protobuf-java + + - io.netty - netty-transport-classes-epoll + io.grpc + grpc-stub io.netty - netty-transport-native-epoll - linux-x86_64 + netty-codec-http2 io.netty @@ -124,55 +116,42 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> io.netty - netty-codec-http2 + netty-transport io.netty - netty-transport-native-unix-common - - - ch.qos.reload4j - reload4j + netty-transport-classes-epoll - org.slf4j - slf4j-api + io.netty + netty-transport-native-epoll + linux-x86_64 - org.slf4j - slf4j-reload4j + io.netty + netty-transport-native-unix-common - io.grpc - grpc-api - - - com.google.code.findbugs - jsr305 - - + org.apache.ozone + hdds-common - io.grpc - grpc-protobuf + org.apache.ozone + hdds-config - com.google.protobuf - protobuf-java + org.apache.hadoop + hadoop-common - com.google.code.findbugs - jsr305 + org.apache.hadoop + hadoop-hdfs - io.grpc - grpc-stub - - - info.picocli - picocli + org.apache.ozone + hdds-server-framework org.apache.ozone @@ -188,25 +167,35 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> io.netty - netty-all + netty io.netty - netty + netty-all + + org.apache.ozone + ozone-common + + + org.slf4j + slf4j-api + + + org.slf4j + 
slf4j-reload4j + + + com.google.code.findbugs + jsr305 + 3.0.2 + provided + - - - - kr.motd.maven - os-maven-plugin - ${os-maven-plugin.version} - - com.salesforce.servicelibs @@ -234,9 +223,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> ${protobuf-maven-plugin.version} true - - com.google.protobuf:protoc:${grpc.protobuf-compile.version}:exe:${os.detected.classifier} - + com.google.protobuf:protoc:${grpc.protobuf-compile.version}:exe:${os.detected.classifier} ${basedir}/src/main/proto/ csi.proto @@ -255,9 +242,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> grpc-java - - io.grpc:protoc-gen-grpc-java:${io.grpc.version}:exe:${os.detected.classifier} - + io.grpc:protoc-gen-grpc-java:${io.grpc.version}:exe:${os.detected.classifier} @@ -267,10 +252,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> depcheck - + - ban-annotations + ban-annotations + @@ -290,10 +276,16 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> com.github.spotbugs spotbugs-maven-plugin - ${basedir}/dev-support/findbugsExcludeFile.xml - + ${basedir}/dev-support/findbugsExcludeFile.xml + + + kr.motd.maven + os-maven-plugin + ${os-maven-plugin.version} + + diff --git a/hadoop-ozone/datanode/pom.xml b/hadoop-ozone/datanode/pom.xml index 2c98b3b8500..1c6bef22fc2 100644 --- a/hadoop-ozone/datanode/pom.xml +++ b/hadoop-ozone/datanode/pom.xml @@ -12,9 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -22,18 +20,30 @@ 2.0.0-SNAPSHOT ozone-datanode - Apache Ozone Datanode - jar 2.0.0-SNAPSHOT + jar + Apache Ozone Datanode false - true + + true true - true + + jakarta.activation + jakarta.activation-api + + + jakarta.xml.bind + jakarta.xml.bind-api + + + org.apache.ozone + hdds-container-service + org.apache.ozone hdds-hadoop-dependency-server @@ -45,22 +55,10 @@ - - org.apache.ozone - hdds-container-service - - - jakarta.xml.bind - jakarta.xml.bind-api - org.glassfish.jaxb jaxb-runtime - - jakarta.activation - jakarta.activation-api - @@ -77,10 +75,10 @@ add-classpath-descriptor - prepare-package build-classpath + prepare-package ${project.build.outputDirectory}/${project.artifactId}.classpath $HDDS_LIB_JARS_DIR From c9a530a9ad228ba4a54470d0e7729966dd152c58 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Sun, 19 Jan 2025 06:52:57 +0100 Subject: [PATCH 098/168] HDDS-12104. Enable sortpom in ozonefs modules (#7718) --- hadoop-ozone/ozonefs-common/pom.xml | 101 ++++++++--------- hadoop-ozone/ozonefs-hadoop2/pom.xml | 69 ++++++------ hadoop-ozone/ozonefs-hadoop3-client/pom.xml | 45 ++++---- hadoop-ozone/ozonefs-hadoop3/pom.xml | 22 ++-- hadoop-ozone/ozonefs-shaded/pom.xml | 95 ++++++---------- hadoop-ozone/ozonefs/pom.xml | 118 +++++++++----------- 6 files changed, 200 insertions(+), 250 deletions(-) diff --git a/hadoop-ozone/ozonefs-common/pom.xml b/hadoop-ozone/ozonefs-common/pom.xml index faad52a8f07..2bad40c885c 100644 --- a/hadoop-ozone/ozonefs-common/pom.xml +++ b/hadoop-ozone/ozonefs-common/pom.xml @@ -12,9 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -22,94 +20,77 @@ 2.0.0-SNAPSHOT ozone-filesystem-common - Apache Ozone FileSystem Common - jar 2.0.0-SNAPSHOT + jar + Apache Ozone FileSystem Common UTF-8 - true - true - - - - org.apache.maven.plugins - maven-compiler-plugin - - none - - - - - - org.apache.ozone - hdds-client + com.google.guava + guava - org.apache.ozone - hdds-config + commons-collections + commons-collections - org.apache.ozone - hdds-hadoop-dependency-client + io.opentracing + opentracing-api - org.apache.ozone - hdds-interface-client + io.opentracing + opentracing-util - org.apache.ozone - ozone-client + jakarta.annotation + jakarta.annotation-api - org.apache.ozone - hdds-common + org.apache.commons + commons-lang3 - org.apache.ozone - ozone-common + org.apache.httpcomponents + httpclient - - commons-collections - commons-collections + org.apache.ozone + hdds-client - org.apache.commons - commons-lang3 + org.apache.ozone + hdds-common - org.apache.httpcomponents - httpclient + org.apache.ozone + hdds-config - org.apache.ratis - ratis-common + org.apache.ozone + hdds-hadoop-dependency-client - org.slf4j - slf4j-api + org.apache.ozone + hdds-interface-client - - com.google.guava - guava + org.apache.ozone + ozone-client - - io.opentracing - opentracing-api + org.apache.ozone + ozone-common - io.opentracing - opentracing-util + org.apache.ratis + ratis-common - jakarta.annotation - jakarta.annotation-api + org.slf4j + slf4j-api @@ -119,4 +100,16 @@ test + + + + + org.apache.maven.plugins + maven-compiler-plugin + + none + + + + diff --git a/hadoop-ozone/ozonefs-hadoop2/pom.xml b/hadoop-ozone/ozonefs-hadoop2/pom.xml index 48a64745dae..b9964f547ee 100644 --- a/hadoop-ozone/ozonefs-hadoop2/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop2/pom.xml @@ -12,9 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -22,12 +20,11 @@ 2.0.0-SNAPSHOT ozone-filesystem-hadoop2 - Apache Ozone FS Hadoop 2.x compatibility - jar 2.0.0-SNAPSHOT + jar + Apache Ozone FS Hadoop 2.x compatibility org.apache.hadoop.ozone.shaded - true @@ -35,6 +32,10 @@ ozone-filesystem-shaded true + + org.apache.hadoop + hadoop-annotations + org.apache.hadoop hadoop-common @@ -43,47 +44,24 @@ org.apache.ozone hadoop-hdfs-client - - org.apache.hadoop - hadoop-annotations - - org.apache.hadoop - hadoop-common + ch.qos.reload4j + reload4j provided - ${hadoop2.version} - - - org.apache.hadoop - hadoop-auth - - - org.apache.hadoop - hadoop-annotations - - - org.slf4j - slf4j-log4j12 - - - com.sun.jersey - * - - org.apache.hadoop hadoop-annotations - provided ${hadoop2.version} + provided org.apache.hadoop hadoop-auth - provided ${hadoop2.version} + provided org.slf4j @@ -92,9 +70,28 @@ - ch.qos.reload4j - reload4j + org.apache.hadoop + hadoop-common + ${hadoop2.version} provided + + + com.sun.jersey + * + + + org.apache.hadoop + hadoop-annotations + + + org.apache.hadoop + hadoop-auth + + + org.slf4j + slf4j-log4j12 + + org.slf4j @@ -130,10 +127,10 @@ include-dependencies - prepare-package unpack + prepare-package ${maven.shade.skip} META-INF/versions/**/*.* diff --git a/hadoop-ozone/ozonefs-hadoop3-client/pom.xml b/hadoop-ozone/ozonefs-hadoop3-client/pom.xml index 445c4a3fe54..a6901eccaef 100644 --- a/hadoop-ozone/ozonefs-hadoop3-client/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop3-client/pom.xml @@ -12,25 +12,27 @@ See the License for the specific language governing permissions and limitations under the License. 
See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone ozone 2.0.0-SNAPSHOT - + ozone-filesystem-hadoop3-client - Apache Ozone FS Hadoop shaded 3.x compatibility - jar 2.0.0-SNAPSHOT + jar + Apache Ozone FS Hadoop shaded 3.x compatibility + + + true + org.apache.ozone @@ -38,10 +40,6 @@ true - - true - true - @@ -57,10 +55,10 @@ include-dependencies - prepare-package unpack + prepare-package ${maven.shade.skip} META-INF/versions/**/*.* @@ -81,15 +79,14 @@ maven-shade-plugin - package shade + package ${maven.shade.skip} - + META-INF/BC1024KE.DSA META-INF/BC2048KE.DSA @@ -97,19 +94,15 @@ META-INF/BC2048KE.SF - - + + ozone-default-generated.xml com.google.protobuf - - org.apache.hadoop.shaded.com.google.protobuf - + org.apache.hadoop.shaded.com.google.protobuf com.google.protobuf.* diff --git a/hadoop-ozone/ozonefs-hadoop3/pom.xml b/hadoop-ozone/ozonefs-hadoop3/pom.xml index baf68142c3b..bcaee248d6c 100644 --- a/hadoop-ozone/ozonefs-hadoop3/pom.xml +++ b/hadoop-ozone/ozonefs-hadoop3/pom.xml @@ -12,9 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -22,13 +20,13 @@ 2.0.0-SNAPSHOT ozone-filesystem-hadoop3 - Apache Ozone FS Hadoop 3.x compatibility - jar 2.0.0-SNAPSHOT + jar + Apache Ozone FS Hadoop 3.x compatibility - true + + true org.apache.hadoop.ozone.shaded - true @@ -37,8 +35,8 @@ true - org.apache.hadoop - hadoop-common + ch.qos.reload4j + reload4j provided @@ -52,8 +50,8 @@ provided - ch.qos.reload4j - reload4j + org.apache.hadoop + hadoop-common provided @@ -77,10 +75,10 @@ include-dependencies - prepare-package unpack + prepare-package ${maven.shade.skip} META-INF/versions/**/*.* diff --git a/hadoop-ozone/ozonefs-shaded/pom.xml b/hadoop-ozone/ozonefs-shaded/pom.xml index 0aacb602cb4..5db55f359da 100644 --- a/hadoop-ozone/ozonefs-shaded/pom.xml +++ b/hadoop-ozone/ozonefs-shaded/pom.xml @@ -12,9 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -22,51 +20,55 @@ 2.0.0-SNAPSHOT ozone-filesystem-shaded - Apache Ozone FileSystem Shaded - jar 2.0.0-SNAPSHOT + jar + Apache Ozone FileSystem Shaded - true + + true org.apache.hadoop.ozone.shaded - true + + com.google.protobuf + protobuf-java + org.apache.ozone ozone-filesystem-common - org.apache.hadoop - hadoop-common + ch.qos.reload4j + reload4j - org.apache.hadoop - hadoop-hdfs-client + com.google.protobuf + protobuf-java - org.apache.hadoop - hadoop-hdfs + log4j + log4j org.apache.hadoop hadoop-annotations - org.apache.hadoop.thirdparty - * + org.apache.hadoop + hadoop-common - log4j - log4j + org.apache.hadoop + hadoop-hdfs - ch.qos.reload4j - reload4j + org.apache.hadoop + hadoop-hdfs-client - org.slf4j + org.apache.hadoop.thirdparty * @@ -74,17 +76,11 @@ * - com.google.protobuf - protobuf-java + org.slf4j + * - - com.google.protobuf - protobuf-java - 2.5.0 - compile - @@ -100,15 +96,14 @@ maven-shade-plugin - package shade + package ${maven.shade.skip} - + META-INF/BC1024KE.DSA META-INF/BC2048KE.DSA @@ -116,19 +111,15 @@ META-INF/BC2048KE.SF - - + + ozone-default-generated.xml org - - ${shaded.prefix}.org - + ${shaded.prefix}.org org.yaml.**.* org.sqlite.**.* @@ -151,9 +142,7 @@ com - - ${shaded.prefix}.com - + ${shaded.prefix}.com com.google.common.**.* com.google.gson.**.* @@ -166,27 +155,19 @@ kotlin - - ${shaded.prefix}.kotlin - + ${shaded.prefix}.kotlin picocli - - ${shaded.prefix}.picocli - + ${shaded.prefix}.picocli info - - ${shaded.prefix}.info - + ${shaded.prefix}.info io - - ${shaded.prefix}.io - + ${shaded.prefix}.io io!netty!* @@ -195,15 +176,11 @@ okio - - ${shaded.prefix}.okio - + ${shaded.prefix}.okio okhttp3 - - ${shaded.prefix}.okhttp3 - + ${shaded.prefix}.okhttp3 diff --git a/hadoop-ozone/ozonefs/pom.xml b/hadoop-ozone/ozonefs/pom.xml index 90514ae5b2e..6cb03099d7a 100644 --- a/hadoop-ozone/ozonefs/pom.xml +++ b/hadoop-ozone/ozonefs/pom.xml @@ -12,9 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -22,15 +20,63 @@ 2.0.0-SNAPSHOT ozone-filesystem - Apache Ozone FileSystem - jar 2.0.0-SNAPSHOT + jar + Apache Ozone FileSystem UTF-8 - true - true + + + com.google.guava + guava + + + io.opentracing + opentracing-api + + + io.opentracing + opentracing-util + + + org.apache.ozone + hdds-common + + + org.apache.ozone + hdds-config + + + org.apache.ozone + ozone-client + + + org.apache.ozone + ozone-common + + + org.apache.ozone + ozone-filesystem-common + + + org.apache.ratis + ratis-common + + + org.slf4j + slf4j-api + + + + + org.apache.ozone + hdds-hadoop-dependency-test + test + + + @@ -57,71 +103,17 @@ deplist - compile list + compile - - ${project.basedir}/target/1hadoop-tools-deps/${project.artifactId}.tools-optional.txt - + ${project.basedir}/target/1hadoop-tools-deps/${project.artifactId}.tools-optional.txt - - - - org.apache.ozone - hdds-config - - - org.apache.ozone - hdds-common - - - org.apache.ozone - ozone-client - - - org.apache.ozone - ozone-common - - - org.apache.ozone - ozone-filesystem-common - - - - org.apache.ratis - ratis-common - - - - com.google.guava - guava - - - io.opentracing - opentracing-api - - - io.opentracing - opentracing-util - - - org.slf4j - slf4j-api - - - - - org.apache.ozone - hdds-hadoop-dependency-test - test - - From f90e625eeae6eaf7e521c631f6e2e0f2c117ebe8 Mon Sep 17 00:00:00 2001 From: Abhishek Pal <43001336+devabhishekpal@users.noreply.github.com> Date: Sun, 19 Jan 2025 11:57:54 +0530 Subject: [PATCH 099/168] HDDS-11680. Enhance Recon Metrics For Improved Observability (#7517) --- .../smoketest/recon/recon-taskstatus.robot | 125 +++++++++ .../ozone/TestOzoneConfigurationFields.java | 1 + .../recon/TestReconWithOzoneManager.java | 4 +- .../schema/ReconTaskSchemaDefinition.java | 2 + .../ozone/recon/ReconControllerModule.java | 2 + .../ozone/recon/api/TaskStatusService.java | 5 +- .../ozone/recon/fsck/ContainerHealthTask.java | 23 +- .../ozone/recon/scm/PipelineSyncTask.java | 22 +- .../ozone/recon/scm/ReconDeadNodeHandler.java | 4 +- .../hadoop/ozone/recon/scm/ReconScmTask.java | 44 +-- .../recon/scm/ReconStaleNodeHandler.java | 2 +- .../ReconStorageContainerManagerFacade.java | 34 +-- .../impl/OzoneManagerServiceProviderImpl.java | 115 +++++--- .../recon/tasks/ContainerKeyMapperTask.java | 6 +- .../recon/tasks/ContainerSizeCountTask.java | 61 ++-- .../ozone/recon/tasks/FileSizeCountTask.java | 4 +- .../ozone/recon/tasks/NSSummaryTask.java | 3 + .../ozone/recon/tasks/OMDBUpdatesHandler.java | 9 + .../ozone/recon/tasks/OMUpdateEventBatch.java | 10 +- .../ozone/recon/tasks/OmTableInsightTask.java | 4 +- .../recon/tasks/ReconTaskController.java | 14 +- .../recon/tasks/ReconTaskControllerImpl.java | 263 ++++++++++-------- .../recon/tasks/types/NamedCallableTask.java | 49 ++++ .../tasks/types/TaskExecutionException.java | 35 +++ .../ozone/recon/tasks/types/package-info.java | 22 ++ .../tasks/updater/ReconTaskStatusUpdater.java | 123 ++++++++ .../ReconTaskStatusUpdaterManager.java | 69 +++++ .../recon/tasks/updater/package-info.java | 22 ++ .../recon/upgrade/ReconLayoutFeature.java | 3 +- .../ReconTaskStatusTableUpgradeAction.java | 104 +++++++ .../hadoop/ozone/recon/api/TestEndpoints.java | 2 +- .../recon/api/TestTaskStatusService.java | 21 +- .../recon/api/TestTriggerDBSyncEndpoint.java | 16 +- .../recon/fsck/TestContainerHealthTask.java | 34 ++- .../TestReconInternalSchemaDefinition.java | 6 +- .../TestReconWithDifferentSqlDBs.java | 2 +- .../recon/persistence/TestSqlSchemaSetup.java | 10 +- 
.../TestOzoneManagerServiceProviderImpl.java | 115 ++++---- .../tasks/TestContainerKeyMapperTask.java | 6 +- .../tasks/TestContainerSizeCountTask.java | 22 +- .../recon/tasks/TestFileSizeCountTask.java | 8 +- .../ozone/recon/tasks/TestNSSummaryTask.java | 2 +- .../recon/tasks/TestNSSummaryTaskWithFSO.java | 2 +- .../tasks/TestNSSummaryTaskWithLegacy.java | 2 +- .../TestNSSummaryTaskWithLegacyOBSLayout.java | 2 +- .../recon/tasks/TestNSSummaryTaskWithOBS.java | 2 +- .../recon/tasks/TestOmTableInsightTask.java | 18 +- .../tasks/TestReconTaskControllerImpl.java | 53 +++- .../tasks/TestReconTaskStatusUpdater.java | 101 +++++++ 49 files changed, 1200 insertions(+), 408 deletions(-) create mode 100644 hadoop-ozone/dist/src/main/smoketest/recon/recon-taskstatus.robot create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/types/NamedCallableTask.java create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/types/TaskExecutionException.java create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/types/package-info.java create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/updater/ReconTaskStatusUpdater.java create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/updater/ReconTaskStatusUpdaterManager.java create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/updater/package-info.java create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconTaskStatusTableUpgradeAction.java create mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskStatusUpdater.java diff --git a/hadoop-ozone/dist/src/main/smoketest/recon/recon-taskstatus.robot b/hadoop-ozone/dist/src/main/smoketest/recon/recon-taskstatus.robot new file mode 100644 index 00000000000..3b5b011523e --- /dev/null +++ b/hadoop-ozone/dist/src/main/smoketest/recon/recon-taskstatus.robot @@ -0,0 +1,125 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +*** Settings *** +Documentation Test to validate the recon task status API works properly +Library OperatingSystem +Library String +Library BuiltIn +Library Collections +Resource ../ozone-lib/freon.robot +Resource ../commonlib.robot +Test Timeout 5 minutes + +*** Variables *** +${BASE_URL} http://recon:9888 +${TASK_STATUS_ENDPOINT} ${BASE_URL}/api/v1/task/status +${TRIGGER_SYNC_ENDPOINT} ${BASE_URL}/api/v1/triggerdbsync/om +${TASK_NAME_1} ContainerHealthTask +${TASK_NAME_2} OmDeltaRequest +${BUCKET} testbucket +${VOLUME} testvolume +${KEYPATH} ${VOLUME}/${BUCKET}/testkey + +*** Keywords *** + +Kinit as ozone admin + Run Keyword if '${SECURITY_ENABLED}' == 'true' Kinit test user testuser testuser.keytab + +Sync OM Data + Log To Console Sending CURL request to ${TRIGGER_SYNC_ENDPOINT} + ${result} = Execute curl --negotiate -u : -LSs ${TRIGGER_SYNC_ENDPOINT} + [return] ${result} + +Fetch Task Status + Log To Console Sending CURL request to ${TASK_STATUS_ENDPOINT} + ${result} = Execute curl -H "Accepts: application/json" --negotiate -u : -LSs ${TASK_STATUS_ENDPOINT} + ${parsed_response} = Evaluate json.loads('''${result}''') + ${tasks} = Evaluate [task for task in ${parsed_response}] + [return] ${tasks} + +*** Test Cases *** + +Prepopulate Data and Trigger OM DB Sync + [Documentation] Use Freon to prepopulate the OM DB with data and trigger OM DB sync. + + Kinit as ozone admin + Freon DFSG n=100 path=${KEYPATH} size=100 + + ${result} = Sync OM Data + Should contain ${result} true # Sync should return true if successful + +Validate Task Status After Sync + [Documentation] Validate that task status is updated after triggering the OM DB sync. + + ${tasks} = Fetch Task Status + Should Not Be Empty ${tasks} + + FOR ${task} IN @{tasks} + Dictionary Should Contain Key ${task} taskName + Dictionary Should Contain Key ${task} lastUpdatedSeqNumber + Dictionary Should Contain Key ${task} lastUpdatedTimestamp + Dictionary Should Contain Key ${task} isCurrentTaskRunning + Dictionary Should Contain Key ${task} lastTaskRunStatus + END + +Validate Stats for Specific Task + [Documentation] Validate response for a specific task after OM DB sync. + + ${tasks} = Fetch Task Status + + ${task_list} = Evaluate [task for task in ${tasks} if task["taskName"] == "${TASK_NAME_1}"] + ${list_length} = Get Length ${task_list} + Should Be Equal As Integers ${list_length} 1 + + ${task} = Get From List ${task_list} 0 + + # Validate table fields + Should Be True ${task["lastUpdatedTimestamp"]}!=${None} + Should Be True ${task["lastUpdatedSeqNumber"]}!=${None} + Should Be True ${task["isCurrentTaskRunning"]}!=${None} + Should Be True ${task["lastTaskRunStatus"]}!=${None} + +Validate All Tasks Updated After Sync + [Documentation] Ensure all tasks have been updated after an OM DB sync operation. 
+ + ${tasks} = Fetch Task Status + Should Not Be Empty ${tasks} + + FOR ${task} IN @{tasks} + Should Be True ${task["lastUpdatedTimestamp"]}!=${None} + Should Be True ${task["lastUpdatedSeqNumber"]}!=${None} + END + +Validate Sequence number is updated after sync + Log To Console Triggering OM DB sync for updates + Sync OM Data + ${tasks} = Fetch Task Status + Should Not Be Empty ${tasks} + + ${om_delta_task_list} = Evaluate [task for task in ${tasks} if task["taskName"] == "OmDeltaRequest"] + ${list_length} = Get Length ${om_delta_task_list} + Should Be Equal As Integers ${list_length} 1 + + ${om_delta_task} = Get From List ${om_delta_task_list} 0 + ${om_delta_task_seq_num} = Evaluate int(${om_delta_task["lastUpdatedSeqNumber"]}) + ${om_task_names} = Evaluate ["NSSummaryTask", "ContainerKeyMapperTask", "FileSizeCountTask", "OmTableInsightTask"] + ${om_tasks} = Evaluate [task for task in ${tasks} if task["taskName"] in ${om_task_names}] + + FOR ${task} IN @{om_tasks} + IF ${task["isCurrentTaskRunning"]} == 0 + Should Be Equal As Integers ${task["lastUpdatedSeqNumber"]} ${om_delta_task_seq_num} + END + END diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java index 3b650f1bf51..809c19c972f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java @@ -138,3 +138,4 @@ private void addPropertiesNotInXml() { )); } } + diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java index f51d12a7c53..daf506bc7c3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconWithOzoneManager.java @@ -28,6 +28,7 @@ import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.RECON_OM_DELTA_UPDATE_LOOP_LIMIT; import static org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl.OmSnapshotTaskName.OmDeltaRequest; import static org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl.OmSnapshotTaskName.OmSnapshotRequest; +import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.slf4j.event.Level.INFO; @@ -255,8 +256,7 @@ public void testOmDBSyncing() throws Exception { "lastUpdatedTimestamp"); // verify only Delta updates were added to recon after restart. 
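 As a rough, illustrative sketch of how the enriched task status payload exercised by the smoketest above could be consumed: the snippet below polls the same /api/v1/task/status endpoint and prints the per-task fields the robot test asserts on (taskName, lastUpdatedSeqNumber, isCurrentTaskRunning, lastTaskRunStatus). The Recon URL, the unsecured connection, and the use of Jackson for JSON parsing are assumptions of this sketch. import com.fasterxml.jackson.databind.JsonNode; import com.fasterxml.jackson.databind.ObjectMapper; import java.io.InputStream; import java.net.HttpURLConnection; import java.net.URL; public final class TaskStatusProbe { public static void main(String[] args) throws Exception { // Assumed endpoint; mirrors ${BASE_URL}/api/v1/task/status from the smoketest. URL url = new URL("http://recon:9888/api/v1/task/status"); HttpURLConnection conn = (HttpURLConnection) url.openConnection(); conn.setRequestProperty("Accept", "application/json"); try (InputStream in = conn.getInputStream()) { JsonNode tasks = new ObjectMapper().readTree(in); for (JsonNode task : tasks) { // Fields validated by recon-taskstatus.robot above. System.out.printf("%s seq=%d running=%d lastStatus=%d%n", task.get("taskName").asText(), task.get("lastUpdatedSeqNumber").asLong(), task.get("isCurrentTaskRunning").asInt(), task.get("lastTaskRunStatus").asInt()); } } finally { conn.disconnect(); } } } A lastTaskRunStatus of 0 corresponds to the setLastTaskRunStatus(0) calls added on successful runs in this patch, and -1 to the failure paths. 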
- assertEquals(beforeRestartSnapShotTimeStamp, - afterRestartSnapShotTimeStamp); + assertThat(afterRestartSnapShotTimeStamp).isGreaterThanOrEqualTo(beforeRestartSnapShotTimeStamp); //verify sequence number after Delta Updates assertEquals(omLatestSeqNumber, reconLatestSeqNumber); diff --git a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconTaskSchemaDefinition.java b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconTaskSchemaDefinition.java index dfa76eac4fa..ffa2ec3dbb5 100644 --- a/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconTaskSchemaDefinition.java +++ b/hadoop-ozone/recon-codegen/src/main/java/org/hadoop/ozone/recon/schema/ReconTaskSchemaDefinition.java @@ -64,6 +64,8 @@ private void createReconTaskStatusTable(Connection conn) { .column("task_name", SQLDataType.VARCHAR(766).nullable(false)) .column("last_updated_timestamp", SQLDataType.BIGINT) .column("last_updated_seq_number", SQLDataType.BIGINT) + .column("last_task_run_status", SQLDataType.INTEGER) + .column("is_current_task_running", SQLDataType.INTEGER) .constraint(DSL.constraint("pk_task_name") .primaryKey("task_name")) .execute(); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java index 39f41395bc8..1a974dd4803 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconControllerModule.java @@ -55,6 +55,7 @@ import org.apache.hadoop.ozone.recon.tasks.ReconTaskController; import org.apache.hadoop.ozone.recon.tasks.ReconTaskControllerImpl; import org.apache.hadoop.ozone.recon.tasks.OmTableInsightTask; +import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdaterManager; import org.apache.hadoop.security.UserGroupInformation; import org.apache.ratis.protocol.ClientId; import org.hadoop.ozone.recon.codegen.ReconSqlDbConfig; @@ -110,6 +111,7 @@ protected void configure() { install(new ReconOmTaskBindingModule()); install(new ReconDaoBindingModule()); + bind(ReconTaskStatusUpdaterManager.class).in(Singleton.class); bind(ReconTaskController.class) .to(ReconTaskControllerImpl.class).in(Singleton.class); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/TaskStatusService.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/TaskStatusService.java index f60fc73737d..f2f47cfca78 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/TaskStatusService.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/TaskStatusService.java @@ -40,13 +40,12 @@ public class TaskStatusService { private ReconTaskStatusDao reconTaskStatusDao; /** - * Return the list of Recon Tasks and the last successful timestamp and - * sequence number. + * Return the list of Recon Tasks and their related stats from RECON_TASK_STATUS table. 
* @return {@link Response} */ @GET @Path("status") - public Response getTaskTimes() { + public Response getTaskStats() { List resultSet = reconTaskStatusDao.findAll(); return Response.ok(resultSet).build(); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java index 11af6eaff53..3893123f3a8 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java @@ -45,9 +45,10 @@ import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager; import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; import org.apache.hadoop.ozone.recon.tasks.ReconTaskConfig; +import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdater; +import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdaterManager; import org.apache.hadoop.util.Time; import org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates; -import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; import org.hadoop.ozone.recon.schema.tables.pojos.UnhealthyContainers; import org.hadoop.ozone.recon.schema.tables.records.UnhealthyContainersRecord; import org.jooq.Cursor; @@ -86,17 +87,18 @@ public class ContainerHealthTask extends ReconScmTask { private final OzoneConfiguration conf; + private final ReconTaskStatusUpdater taskStatusUpdater; + @SuppressWarnings("checkstyle:ParameterNumber") public ContainerHealthTask( ContainerManager containerManager, StorageContainerServiceProvider scmClient, - ReconTaskStatusDao reconTaskStatusDao, ContainerHealthSchemaManager containerHealthSchemaManager, PlacementPolicy placementPolicy, ReconTaskConfig reconTaskConfig, ReconContainerMetadataManager reconContainerMetadataManager, - OzoneConfiguration conf) { - super(reconTaskStatusDao); + OzoneConfiguration conf, ReconTaskStatusUpdaterManager taskStatusUpdaterManager) { + super(taskStatusUpdaterManager); this.scmClient = scmClient; this.containerHealthSchemaManager = containerHealthSchemaManager; this.reconContainerMetadataManager = reconContainerMetadataManager; @@ -104,13 +106,14 @@ public ContainerHealthTask( this.containerManager = containerManager; this.conf = conf; interval = reconTaskConfig.getMissingContainerTaskInterval().toMillis(); + this.taskStatusUpdater = getTaskStatusUpdater(); } @Override public void run() { try { while (canRun()) { - triggerContainerHealthCheck(); + initializeAndRunTask(); Thread.sleep(interval); } } catch (Throwable t) { @@ -118,10 +121,13 @@ public void run() { if (t instanceof InterruptedException) { Thread.currentThread().interrupt(); } + taskStatusUpdater.setLastTaskRunStatus(-1); + taskStatusUpdater.recordRunCompletion(); } } - public void triggerContainerHealthCheck() { + @Override + protected void runTask() throws Exception { lock.writeLock().lock(); // Map contains all UNHEALTHY STATES as keys and value is another map // with 3 keys (CONTAINER_COUNT, TOTAL_KEYS, TOTAL_USED_BYTES) and value @@ -144,7 +150,11 @@ public void triggerContainerHealthCheck() { " process {} existing database records.", Time.monotonicNow() - start, existingCount); + start = Time.monotonicNow(); checkAndProcessContainers(unhealthyContainerStateStatsMap, currentTime); + LOG.debug("Container Health Task thread took {} milliseconds to process containers", + Time.monotonicNow() - 
start); + taskStatusUpdater.setLastTaskRunStatus(0); processedContainers.clear(); } finally { lock.writeLock().unlock(); @@ -165,7 +175,6 @@ private void checkAndProcessContainers( .filter(c -> !processedContainers.contains(c)) .forEach(c -> processContainer(c, currentTime, unhealthyContainerStateStatsMap)); - recordSingleRunCompletion(); LOG.debug("Container Health task thread took {} milliseconds for" + " processing {} containers.", Time.monotonicNow() - start, containers.size()); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/PipelineSyncTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/PipelineSyncTask.java index ae3a3531cbf..a2acebff630 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/PipelineSyncTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/PipelineSyncTask.java @@ -22,7 +22,6 @@ import java.io.IOException; import java.util.List; -import java.util.concurrent.TimeoutException; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.stream.Collectors; @@ -34,8 +33,9 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; import org.apache.hadoop.ozone.recon.tasks.ReconTaskConfig; +import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdater; +import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdaterManager; import org.apache.hadoop.util.Time; -import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -55,24 +55,26 @@ public class PipelineSyncTask extends ReconScmTask { private ReadWriteLock lock = new ReentrantReadWriteLock(true); private final long interval; + private final ReconTaskStatusUpdater taskStatusUpdater; public PipelineSyncTask(ReconPipelineManager pipelineManager, ReconNodeManager nodeManager, StorageContainerServiceProvider scmClient, - ReconTaskStatusDao reconTaskStatusDao, - ReconTaskConfig reconTaskConfig) { - super(reconTaskStatusDao); + ReconTaskConfig reconTaskConfig, + ReconTaskStatusUpdaterManager taskStatusUpdaterManager) { + super(taskStatusUpdaterManager); this.scmClient = scmClient; this.reconPipelineManager = pipelineManager; this.nodeManager = nodeManager; this.interval = reconTaskConfig.getPipelineSyncTaskInterval().toMillis(); + this.taskStatusUpdater = getTaskStatusUpdater(); } @Override public void run() { try { while (canRun()) { - triggerPipelineSyncTask(); + initializeAndRunTask(); Thread.sleep(interval); } } catch (Throwable t) { @@ -80,11 +82,13 @@ public void run() { if (t instanceof InterruptedException) { Thread.currentThread().interrupt(); } + taskStatusUpdater.setLastTaskRunStatus(-1); + taskStatusUpdater.recordRunCompletion(); } } - public void triggerPipelineSyncTask() - throws IOException, TimeoutException, NodeNotFoundException { + @Override + protected void runTask() throws IOException, NodeNotFoundException { lock.writeLock().lock(); try { long start = Time.monotonicNow(); @@ -93,7 +97,7 @@ public void triggerPipelineSyncTask() syncOperationalStateOnDeadNodes(); LOG.debug("Pipeline sync Thread took {} milliseconds.", Time.monotonicNow() - start); - recordSingleRunCompletion(); + taskStatusUpdater.setLastTaskRunStatus(0); } finally { lock.writeLock().unlock(); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconDeadNodeHandler.java 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconDeadNodeHandler.java index 828942c8e5a..a5d55526214 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconDeadNodeHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconDeadNodeHandler.java @@ -83,8 +83,8 @@ public void onMessage(final DatanodeDetails datanodeDetails, LOG.warn("Node {} has reached DEAD state, but SCM does not have " + "information about it.", datanodeDetails); } - containerHealthTask.triggerContainerHealthCheck(); - pipelineSyncTask.triggerPipelineSyncTask(); + containerHealthTask.initializeAndRunTask(); + pipelineSyncTask.initializeAndRunTask(); } catch (Exception ioEx) { LOG.error("Error trying to verify Node operational state from SCM.", ioEx); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconScmTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconScmTask.java index 7c070c7d2b9..7127d73038f 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconScmTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconScmTask.java @@ -18,8 +18,8 @@ package org.apache.hadoop.ozone.recon.scm; -import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; -import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus; +import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdater; +import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdaterManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -30,28 +30,20 @@ public abstract class ReconScmTask { private static final Logger LOG = LoggerFactory.getLogger(ReconScmTask.class); private Thread taskThread; - private ReconTaskStatusDao reconTaskStatusDao; private volatile boolean running; + private final ReconTaskStatusUpdater taskStatusUpdater; - protected ReconScmTask(ReconTaskStatusDao reconTaskStatusDao) { - this.reconTaskStatusDao = reconTaskStatusDao; - } - - private void register() { - String taskName = getTaskName(); - if (!reconTaskStatusDao.existsById(taskName)) { - ReconTaskStatus reconTaskStatusRecord = new ReconTaskStatus( - taskName, 0L, 0L); - reconTaskStatusDao.insert(reconTaskStatusRecord); - LOG.info("Registered {} task ", taskName); - } + protected ReconScmTask( + ReconTaskStatusUpdaterManager taskStatusUpdaterManager + ) { + // In case the task is not already present in the DB, table is updated with initial values for task + this.taskStatusUpdater = taskStatusUpdaterManager.getTaskStatusUpdater(getTaskName()); } /** * Start underlying start thread. */ public synchronized void start() { - register(); if (!isRunning()) { LOG.info("Starting {} Thread.", getTaskName()); running = true; @@ -87,11 +79,6 @@ public boolean isRunning() { return true; } - protected void recordSingleRunCompletion() { - reconTaskStatusDao.update(new ReconTaskStatus(getTaskName(), - System.currentTimeMillis(), 0L)); - } - protected boolean canRun() { return running; } @@ -100,5 +87,20 @@ public String getTaskName() { return getClass().getSimpleName(); } + public ReconTaskStatusUpdater getTaskStatusUpdater() { + return this.taskStatusUpdater; + } + protected abstract void run(); + + protected void initializeAndRunTask() throws Exception { + taskStatusUpdater.recordRunStart(); + runTask(); + taskStatusUpdater.recordRunCompletion(); + } + + /** + * Override this method for the actual processing logic in child tasks. 
+ */ + protected abstract void runTask() throws Exception; } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStaleNodeHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStaleNodeHandler.java index 998f0639249..8a3ffb90c41 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStaleNodeHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStaleNodeHandler.java @@ -49,7 +49,7 @@ public void onMessage(final DatanodeDetails datanodeDetails, final EventPublisher publisher) { super.onMessage(datanodeDetails, publisher); try { - pipelineSyncTask.triggerPipelineSyncTask(); + pipelineSyncTask.initializeAndRunTask(); } catch (Exception exp) { LOG.error("Error trying to trigger pipeline sync task..", exp); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java index eff68848a2f..2c9ac7a8c2d 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java @@ -124,10 +124,10 @@ import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode; import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.IncrementalContainerReportFromDatanode; +import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdaterManager; import org.apache.ratis.util.ExitUtils; import org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition; import org.hadoop.ozone.recon.schema.tables.daos.ContainerCountBySizeDao; -import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -184,7 +184,6 @@ public class ReconStorageContainerManagerFacade @SuppressWarnings({"checkstyle:ParameterNumber", "checkstyle:MethodLength"}) public ReconStorageContainerManagerFacade(OzoneConfiguration conf, StorageContainerServiceProvider scmServiceProvider, - ReconTaskStatusDao reconTaskStatusDao, ContainerCountBySizeDao containerCountBySizeDao, UtilizationSchemaDefinition utilizationSchemaDefinition, ContainerHealthSchemaManager containerHealthSchemaManager, @@ -192,7 +191,9 @@ public ReconStorageContainerManagerFacade(OzoneConfiguration conf, ReconUtils reconUtils, ReconSafeModeManager safeModeManager, ReconContext reconContext, - DataSource dataSource) throws IOException { + DataSource dataSource, + ReconTaskStatusUpdaterManager taskStatusUpdaterManager) + throws IOException { reconNodeDetails = reconUtils.getReconNodeDetails(conf); this.threadNamePrefix = reconNodeDetails.threadNamePrefix(); this.eventQueue = new EventQueue(threadNamePrefix); @@ -270,24 +271,15 @@ public ReconStorageContainerManagerFacade(OzoneConfiguration conf, new PipelineActionHandler(pipelineManager, scmContext, conf); ReconTaskConfig reconTaskConfig = conf.getObject(ReconTaskConfig.class); - PipelineSyncTask pipelineSyncTask = new PipelineSyncTask( - pipelineManager, - nodeManager, - scmServiceProvider, - reconTaskStatusDao, - reconTaskConfig); - containerHealthTask = new ContainerHealthTask( - containerManager, scmServiceProvider, reconTaskStatusDao, - containerHealthSchemaManager, containerPlacementPolicy, reconTaskConfig, - reconContainerMetadataManager, conf); - - 
this.containerSizeCountTask = new ContainerSizeCountTask( - containerManager, - scmServiceProvider, - reconTaskStatusDao, - reconTaskConfig, - containerCountBySizeDao, - utilizationSchemaDefinition); + PipelineSyncTask pipelineSyncTask = new PipelineSyncTask(pipelineManager, nodeManager, + scmServiceProvider, reconTaskConfig, taskStatusUpdaterManager); + + containerHealthTask = new ContainerHealthTask(containerManager, scmServiceProvider, + containerHealthSchemaManager, containerPlacementPolicy, + reconTaskConfig, reconContainerMetadataManager, conf, taskStatusUpdaterManager); + + this.containerSizeCountTask = new ContainerSizeCountTask(containerManager, scmServiceProvider, + reconTaskConfig, containerCountBySizeDao, utilizationSchemaDefinition, taskStatusUpdaterManager); this.dataSource = dataSource; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java index d5b7b1cfc91..5a49e55b06b 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/OzoneManagerServiceProviderImpl.java @@ -69,6 +69,8 @@ import org.apache.hadoop.ozone.recon.tasks.OMDBUpdatesHandler; import org.apache.hadoop.ozone.recon.tasks.OMUpdateEventBatch; import org.apache.hadoop.ozone.recon.tasks.ReconTaskController; +import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdater; +import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdaterManager; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.Time; @@ -97,8 +99,6 @@ import static org.apache.hadoop.ozone.recon.ReconUtils.convertNumericToSymbolic; import static org.apache.ratis.proto.RaftProtos.RaftPeerRole.LEADER; -import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; -import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus; import org.rocksdb.RocksDBException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -124,7 +124,6 @@ public class OzoneManagerServiceProviderImpl private ReconOMMetadataManager omMetadataManager; private ReconTaskController reconTaskController; - private ReconTaskStatusDao reconTaskStatusDao; private ReconUtils reconUtils; private OzoneManagerSyncMetrics metrics; @@ -135,6 +134,7 @@ public class OzoneManagerServiceProviderImpl private final String threadNamePrefix; private ThreadFactory threadFactory; private ReconContext reconContext; + private ReconTaskStatusUpdaterManager taskStatusUpdaterManager; /** * OM Snapshot related task names. 
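 The constructor wiring above hands the new ReconTaskStatusUpdaterManager to every Recon SCM task; a minimal, hypothetical subclass sketch of the resulting life-cycle (mirroring PipelineSyncTask and ContainerHealthTask) looks roughly like this, where ExampleSyncTask and its fixed interval are invented for illustration: import org.apache.hadoop.ozone.recon.scm.ReconScmTask; import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdaterManager; public class ExampleSyncTask extends ReconScmTask { private static final long INTERVAL_MS = 60_000L; // hypothetical fixed interval public ExampleSyncTask(ReconTaskStatusUpdaterManager updaterManager) { super(updaterManager); // obtains this task's ReconTaskStatusUpdater by task name } @Override public void run() { try { while (canRun()) { initializeAndRunTask(); // recordRunStart() -> runTask() -> recordRunCompletion() Thread.sleep(INTERVAL_MS); } } catch (Throwable t) { if (t instanceof InterruptedException) { Thread.currentThread().interrupt(); } getTaskStatusUpdater().setLastTaskRunStatus(-1); getTaskStatusUpdater().recordRunCompletion(); } } @Override protected void runTask() { // Actual sync work would go here; the tasks in this patch report success // by calling getTaskStatusUpdater().setLastTaskRunStatus(0). getTaskStatusUpdater().setLastTaskRunStatus(0); } } This keeps the RECON_TASK_STATUS bookkeeping (run start, completion, and last run status) in one place instead of each task writing ReconTaskStatus rows directly. 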
@@ -145,13 +145,15 @@ public enum OmSnapshotTaskName { } @Inject + @SuppressWarnings("checkstyle:ParameterNumber") public OzoneManagerServiceProviderImpl( OzoneConfiguration configuration, ReconOMMetadataManager omMetadataManager, ReconTaskController reconTaskController, ReconUtils reconUtils, OzoneManagerProtocol ozoneManagerClient, - ReconContext reconContext) { + ReconContext reconContext, + ReconTaskStatusUpdaterManager taskStatusUpdaterManager) { int connectionTimeout = (int) configuration.getTimeDuration( OZONE_RECON_OM_CONNECTION_TIMEOUT, @@ -212,7 +214,6 @@ public OzoneManagerServiceProviderImpl( this.reconUtils = reconUtils; this.omMetadataManager = omMetadataManager; this.reconTaskController = reconTaskController; - this.reconTaskStatusDao = reconTaskController.getReconTaskStatusDao(); this.ozoneManagerClient = ozoneManagerClient; this.configuration = configuration; this.metrics = OzoneManagerSyncMetrics.create(); @@ -225,28 +226,7 @@ public OzoneManagerServiceProviderImpl( new ThreadFactoryBuilder().setNameFormat(threadNamePrefix + "SyncOM-%d") .build(); this.reconContext = reconContext; - } - - public void registerOMDBTasks() { - ReconTaskStatus reconTaskStatusRecord = new ReconTaskStatus( - OmSnapshotTaskName.OmDeltaRequest.name(), - System.currentTimeMillis(), getCurrentOMDBSequenceNumber()); - if (!reconTaskStatusDao.existsById( - OmSnapshotTaskName.OmDeltaRequest.name())) { - reconTaskStatusDao.insert(reconTaskStatusRecord); - LOG.info("Registered {} task ", - OmSnapshotTaskName.OmDeltaRequest.name()); - } - - reconTaskStatusRecord = new ReconTaskStatus( - OmSnapshotTaskName.OmSnapshotRequest.name(), - System.currentTimeMillis(), getCurrentOMDBSequenceNumber()); - if (!reconTaskStatusDao.existsById( - OmSnapshotTaskName.OmSnapshotRequest.name())) { - reconTaskStatusDao.insert(reconTaskStatusRecord); - LOG.info("Registered {} task ", - OmSnapshotTaskName.OmSnapshotRequest.name()); - } + this.taskStatusUpdaterManager = taskStatusUpdaterManager; } @Override @@ -258,7 +238,6 @@ public OMMetadataManager getOMMetadataManagerInstance() { public void start() { LOG.info("Starting Ozone Manager Service Provider."); scheduler = Executors.newScheduledThreadPool(1, threadFactory); - registerOMDBTasks(); try { omMetadataManager.start(configuration); } catch (IOException ioEx) { @@ -304,7 +283,7 @@ private void startSyncDataFromOM(long initialDelay) { LOG.info("Last known sequence number before sync: {}", getCurrentOMDBSequenceNumber()); boolean isSuccess = syncDataFromOM(); if (!isSuccess) { - LOG.debug("OM DB sync is already running."); + LOG.debug("OM DB sync is already running, or encountered an error while trying to sync data."); } LOG.info("Sequence number after sync: {}", getCurrentOMDBSequenceNumber()); } catch (Throwable t) { @@ -495,6 +474,8 @@ void getAndApplyDeltaUpdatesFromOM( inLoopStartSequenceNumber = inLoopLatestSequenceNumber; loopCount++; } + + omdbUpdatesHandler.setLatestSequenceNumber(getCurrentOMDBSequenceNumber()); LOG.info("Delta updates received from OM : {} loops, {} records", loopCount, getCurrentOMDBSequenceNumber() - fromSequenceNumber ); @@ -551,12 +532,21 @@ boolean innerGetAndApplyDeltaUpdatesFromOM(long fromSequenceNumber, } /** - * Based on current state of Recon's OM DB, we either get delta updates or - * full snapshot from Ozone Manager. + * This method performs the syncing of data from OM. + *

 + *
                                                                              • Initially it will fetch a snapshot of OM DB.
                                                                              • L710› + *
                                                                              • If we already have data synced it will try to fetch delta updates.
                                                                              • L711› + *
                                                                              • If the sync is completed successfully it will trigger other OM tasks to process events
                                                                              • L712› + *
                                                                              • If there is any exception while trying to fetch delta updates, it will fall back to full snapshot update
                                                                              • L713› + *
                                                                              • If there is any exception in full snapshot update it will do nothing, and return true.
                                                                              • L714› + *
                                                                              • In case of an interrupt signal (irrespective of delta or snapshot sync), + * it will catch and mark the task as interrupted, and return false i.e. sync failed status.
                                                                              • L715› + *
                                                                              
* @return true or false if sync operation between Recon and OM was successful or failed. */ @VisibleForTesting public boolean syncDataFromOM() { + ReconTaskStatusUpdater reconTaskUpdater; if (isSyncDataFromOMRunning.compareAndSet(false, true)) { try { LOG.info("Syncing data from Ozone Manager."); @@ -567,25 +557,43 @@ public boolean syncDataFromOM() { if (currentSequenceNumber <= 0) { fullSnapshot = true; } else { + reconTaskUpdater = taskStatusUpdaterManager.getTaskStatusUpdater( + OmSnapshotTaskName.OmDeltaRequest.name()); + try (OMDBUpdatesHandler omdbUpdatesHandler = new OMDBUpdatesHandler(omMetadataManager)) { LOG.info("Obtaining delta updates from Ozone Manager"); - // Get updates from OM and apply to local Recon OM DB. + + // If interrupt was previously signalled, + // we should check for it before starting delta update sync. + if (Thread.currentThread().isInterrupted()) { + throw new InterruptedException("Thread interrupted during delta update."); + } + + // Get updates from OM and apply to local Recon OM DB and update task status in table + reconTaskUpdater.recordRunStart(); getAndApplyDeltaUpdatesFromOM(currentSequenceNumber, omdbUpdatesHandler); - // Update timestamp of successful delta updates query. - ReconTaskStatus reconTaskStatusRecord = new ReconTaskStatus( - OmSnapshotTaskName.OmDeltaRequest.name(), - System.currentTimeMillis(), getCurrentOMDBSequenceNumber()); - reconTaskStatusDao.update(reconTaskStatusRecord); + reconTaskUpdater.setLastTaskRunStatus(0); + reconTaskUpdater.setLastUpdatedSeqNumber(getCurrentOMDBSequenceNumber()); + reconTaskUpdater.recordRunCompletion(); // Pass on DB update events to tasks that are listening. reconTaskController.consumeOMEvents(new OMUpdateEventBatch( - omdbUpdatesHandler.getEvents()), omMetadataManager); + omdbUpdatesHandler.getEvents(), omdbUpdatesHandler.getLatestSequenceNumber()), omMetadataManager); } catch (InterruptedException intEx) { + LOG.error("OM DB Delta update sync thread was interrupted."); + // We are updating the table even if it didn't run i.e. got interrupted beforehand + // to indicate that a task was supposed to run, but it didn't. + reconTaskUpdater.setLastTaskRunStatus(-1); + reconTaskUpdater.recordRunCompletion(); Thread.currentThread().interrupt(); + // Since thread is interrupted, we do not fall back to snapshot sync. Return with sync failed status. + return false; } catch (Exception e) { metrics.incrNumDeltaRequestsFailed(); + reconTaskUpdater.setLastTaskRunStatus(-1); + reconTaskUpdater.recordRunCompletion(); LOG.warn("Unable to get and apply delta updates from OM.", e.getMessage()); fullSnapshot = true; @@ -593,19 +601,26 @@ public boolean syncDataFromOM() { } if (fullSnapshot) { + reconTaskUpdater = taskStatusUpdaterManager.getTaskStatusUpdater( + OmSnapshotTaskName.OmSnapshotRequest.name()); try { metrics.incrNumSnapshotRequests(); LOG.info("Obtaining full snapshot from Ozone Manager"); + + // Similarly if the interrupt was signalled in between, + // we should check before starting snapshot sync. + if (Thread.currentThread().isInterrupted()) { + throw new InterruptedException("Thread interrupted during snapshot sync."); + } + // Update local Recon OM DB to new snapshot. + reconTaskUpdater.recordRunStart(); boolean success = updateReconOmDBWithNewSnapshot(); // Update timestamp of successful delta updates query. 
if (success) { - ReconTaskStatus reconTaskStatusRecord = - new ReconTaskStatus( - OmSnapshotTaskName.OmSnapshotRequest.name(), - System.currentTimeMillis(), - getCurrentOMDBSequenceNumber()); - reconTaskStatusDao.update(reconTaskStatusRecord); + reconTaskUpdater.setLastUpdatedSeqNumber(getCurrentOMDBSequenceNumber()); + reconTaskUpdater.setLastTaskRunStatus(0); + reconTaskUpdater.recordRunCompletion(); // Reinitialize tasks that are listening. LOG.info("Calling reprocess on Recon tasks."); @@ -616,14 +631,23 @@ public boolean syncDataFromOM() { reconContext.getErrors().remove(ReconContext.ErrorCode.GET_OM_DB_SNAPSHOT_FAILED); } else { metrics.incrNumSnapshotRequestsFailed(); + reconTaskUpdater.setLastTaskRunStatus(-1); + reconTaskUpdater.recordRunCompletion(); // Update health status in ReconContext reconContext.updateHealthStatus(new AtomicBoolean(false)); reconContext.updateErrors(ReconContext.ErrorCode.GET_OM_DB_SNAPSHOT_FAILED); } } catch (InterruptedException intEx) { + LOG.error("OM DB Snapshot update sync thread was interrupted."); + reconTaskUpdater.setLastTaskRunStatus(-1); + reconTaskUpdater.recordRunCompletion(); Thread.currentThread().interrupt(); + // Mark sync status as failed. + return false; } catch (Exception e) { metrics.incrNumSnapshotRequestsFailed(); + reconTaskUpdater.setLastTaskRunStatus(-1); + reconTaskUpdater.recordRunCompletion(); LOG.error("Unable to update Recon's metadata with new OM DB. ", e); // Update health status in ReconContext reconContext.updateHealthStatus(new AtomicBoolean(false)); @@ -699,7 +723,8 @@ public void checkAndValidateReconDbPermissions() { * Get OM RocksDB's latest sequence number. * @return latest sequence number. */ - private long getCurrentOMDBSequenceNumber() { + @VisibleForTesting + public long getCurrentOMDBSequenceNumber() { return omMetadataManager.getLastSequenceNumberFromDB(); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java index bf34c9f8930..3202f6aa8bb 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerKeyMapperTask.java @@ -208,7 +208,7 @@ public Pair process(OMUpdateEventBatch events) { Map containerKeyCountMap = new HashMap<>(); // List of the deleted (container, key) pair's List deletedKeyCountList = new ArrayList<>(); - + long startTime = System.currentTimeMillis(); while (eventIterator.hasNext()) { OMDBUpdateEvent omdbUpdateEvent = eventIterator.next(); // Filter event inside process method to avoid duping @@ -258,8 +258,8 @@ public Pair process(OMUpdateEventBatch events) { LOG.error("Unable to write Container Key Prefix data in Recon DB.", e); return new ImmutablePair<>(getTaskName(), false); } - LOG.debug("{} successfully processed {} OM DB update event(s).", - getTaskName(), eventCount); + LOG.debug("{} successfully processed {} OM DB update event(s) in {} milliseconds.", + getTaskName(), eventCount, (System.currentTimeMillis() - startTime)); return new ImmutablePair<>(getTaskName(), true); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountTask.java index f95bc3a8555..de55fd4fe99 100644 --- 
a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ContainerSizeCountTask.java @@ -19,16 +19,18 @@ package org.apache.hadoop.ozone.recon.tasks; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.hdds.scm.container.ContainerID; import org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.ozone.recon.ReconUtils; import org.apache.hadoop.ozone.recon.scm.ReconScmTask; import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; +import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdater; +import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdaterManager; import org.hadoop.ozone.recon.schema.ContainerSchemaDefinition; import org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition; import org.hadoop.ozone.recon.schema.tables.daos.ContainerCountBySizeDao; -import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; import org.hadoop.ozone.recon.schema.tables.pojos.ContainerCountBySize; import org.jooq.DSLContext; import org.jooq.Record1; @@ -65,20 +67,22 @@ public class ContainerSizeCountTask extends ReconScmTask { private Map> unhealthyContainerStateStatsMap; private ReadWriteLock lock = new ReentrantReadWriteLock(true); + private final ReconTaskStatusUpdater taskStatusUpdater; public ContainerSizeCountTask( ContainerManager containerManager, StorageContainerServiceProvider scmClient, - ReconTaskStatusDao reconTaskStatusDao, ReconTaskConfig reconTaskConfig, ContainerCountBySizeDao containerCountBySizeDao, - UtilizationSchemaDefinition utilizationSchemaDefinition) { - super(reconTaskStatusDao); + UtilizationSchemaDefinition utilizationSchemaDefinition, + ReconTaskStatusUpdaterManager taskStatusUpdaterManager) { + super(taskStatusUpdaterManager); this.scmClient = scmClient; this.containerManager = containerManager; this.containerCountBySizeDao = containerCountBySizeDao; this.dslContext = utilizationSchemaDefinition.getDSLContext(); interval = reconTaskConfig.getContainerSizeCountTaskInterval().toMillis(); + this.taskStatusUpdater = getTaskStatusUpdater(); } @@ -93,21 +97,8 @@ protected synchronized void run() { while (canRun()) { wait(interval); long startTime, endTime, duration, durationMilliseconds; - final List containers = containerManager.getContainers(); - if (processedContainers.isEmpty()) { - try { - int execute = - dslContext.truncate(CONTAINER_COUNT_BY_SIZE).execute(); - LOG.debug("Deleted {} records from {}", execute, - CONTAINER_COUNT_BY_SIZE); - } catch (Exception e) { - LOG.error("An error occurred while truncating the table {}: {}", - CONTAINER_COUNT_BY_SIZE, e.getMessage(), e); - return; - } - } startTime = System.nanoTime(); - process(containers); + initializeAndRunTask(); endTime = System.nanoTime(); duration = endTime - startTime; durationMilliseconds = duration / 1_000_000; @@ -119,11 +110,13 @@ protected synchronized void run() { if (t instanceof InterruptedException) { Thread.currentThread().interrupt(); } + taskStatusUpdater.setLastTaskRunStatus(-1); + taskStatusUpdater.recordRunCompletion(); } } private void process(ContainerInfo container, - Map map) { + Map map) { final ContainerID id = container.containerID(); final long usedBytes = container.getUsedBytes(); final long currentSize; @@ -143,8 +136,18 @@ private void process(ContainerInfo container, 
incrementContainerSizeCount(currentSize, map); } + @Override + protected void runTask() throws Exception { + final List containers = containerManager.getContainers(); + if (processedContainers.isEmpty()) { + int execute = dslContext.truncate(CONTAINER_COUNT_BY_SIZE).execute(); + LOG.debug("Deleted {} records from {}", execute, CONTAINER_COUNT_BY_SIZE); + } + processContainers(containers); + } + /** - * The process() function is responsible for updating the counts of + * The processContainers() function is responsible for updating the counts of * containers being tracked in a containerSizeCountMap based on the * ContainerInfo objects in the list containers. It then iterates through * the list of containers and does the following for each container: @@ -168,8 +171,11 @@ private void process(ContainerInfo container, * size counts. Finally, the counts in the containerSizeCountMap are written * to the database using the writeCountsToDB() function. */ - public void process(List containers) { + @VisibleForTesting + public void processContainers(List containers) { lock.writeLock().lock(); + boolean processingFailed = false; + try { final Map containerSizeCountMap = new HashMap<>(); @@ -186,6 +192,7 @@ public void process(List containers) { try { process(container, containerSizeCountMap); } catch (Exception e) { + processingFailed = true; // FIXME: it is a bug if there is an exception. LOG.error("FIXME: Failed to process " + container, e); } @@ -198,6 +205,7 @@ public void process(List containers) { writeCountsToDB(false, containerSizeCountMap); containerSizeCountMap.clear(); LOG.debug("Completed a 'process' run of ContainerSizeCountTask."); + taskStatusUpdater.setLastTaskRunStatus(processingFailed ? -1 : 0); } finally { lock.writeLock().unlock(); } @@ -258,11 +266,6 @@ private void writeCountsToDB(boolean isDbTruncated, containerCountBySizeDao.update(updateInDb); } - @Override - public String getTaskName() { - return "ContainerSizeCountTask"; - } - /** * * Handles the deletion of containers by updating the tracking of processed containers @@ -305,7 +308,7 @@ private void handleContainerDeleteOperations( * @param containerSize to calculate the upperSizeBound */ private static void incrementContainerSizeCount(long containerSize, - Map containerSizeCountMap) { + Map containerSizeCountMap) { updateContainerSizeCount(containerSize, 1, containerSizeCountMap); } @@ -326,12 +329,12 @@ private static void incrementContainerSizeCount(long containerSize, * @param containerSize to calculate the upperSizeBound */ private static void decrementContainerSizeCount(long containerSize, - Map containerSizeCountMap) { + Map containerSizeCountMap) { updateContainerSizeCount(containerSize, -1, containerSizeCountMap); } private static void updateContainerSizeCount(long containerSize, int delta, - Map containerSizeCountMap) { + Map containerSizeCountMap) { ContainerSizeCountKey key = getContainerSizeCountKey(containerSize); containerSizeCountMap.compute(key, (k, previous) -> previous != null ? 
previous + delta : delta); diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java index cbbbe6c3732..6906d4bbb82 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/FileSizeCountTask.java @@ -149,6 +149,7 @@ public Pair process(OMUpdateEventBatch events) { Map fileSizeCountMap = new HashMap<>(); final Collection taskTables = getTaskTables(); + long startTime = System.currentTimeMillis(); while (eventIterator.hasNext()) { OMDBUpdateEvent omdbUpdateEvent = eventIterator.next(); // Filter event inside process method to avoid duping @@ -198,7 +199,8 @@ public Pair process(OMUpdateEventBatch events) { } } writeCountsToDB(false, fileSizeCountMap); - LOG.debug("Completed a 'process' run of FileSizeCountTask."); + LOG.debug("{} successfully processed in {} milliseconds", + getTaskName(), (System.currentTimeMillis() - startTime)); return new ImmutablePair<>(getTaskName(), true); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java index 256e0c687ba..5048050f9d4 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/NSSummaryTask.java @@ -100,6 +100,7 @@ public String getTaskName() { @Override public Pair process(OMUpdateEventBatch events) { + long startTime = System.currentTimeMillis(); boolean success = nsSummaryTaskWithFSO.processWithFSO(events); if (!success) { LOG.error("processWithFSO failed."); @@ -112,6 +113,8 @@ public Pair process(OMUpdateEventBatch events) { if (!success) { LOG.error("processWithOBS failed."); } + LOG.debug("{} successfully processed in {} milliseconds", + getTaskName(), (System.currentTimeMillis() - startTime)); return new ImmutablePair<>(getTaskName(), success); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java index d1f98c49bdc..d586718ffb4 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMDBUpdatesHandler.java @@ -51,12 +51,21 @@ public class OMDBUpdatesHandler extends ManagedWriteBatch.Handler { private Map> omdbLatestUpdateEvents = new HashMap<>(); private final OMDBDefinition omdbDefinition = OMDBDefinition.get(); private final OmUpdateEventValidator omUpdateEventValidator = new OmUpdateEventValidator(omdbDefinition); + private long batchSequenceNumber; // Store the current sequence number for the batch public OMDBUpdatesHandler(OMMetadataManager metadataManager) { omMetadataManager = metadataManager; tablesNames = metadataManager.getStore().getTableNames(); } + public void setLatestSequenceNumber(long sequenceNumber) { + this.batchSequenceNumber = sequenceNumber; + } + + public long getLatestSequenceNumber() { + return this.batchSequenceNumber; + } + @Override public void put(int cfIndex, byte[] keyBytes, byte[] valueBytes) { try { diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMUpdateEventBatch.java 
b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMUpdateEventBatch.java index 3ed50a4dd05..1a724162ebd 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMUpdateEventBatch.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OMUpdateEventBatch.java @@ -27,9 +27,11 @@ public class OMUpdateEventBatch { private final List events; + private final long batchSequenceNumber; - public OMUpdateEventBatch(List e) { + public OMUpdateEventBatch(List e, long batchSequenceNumber) { events = e; + this.batchSequenceNumber = batchSequenceNumber; } /** @@ -37,11 +39,7 @@ public OMUpdateEventBatch(List e) { * @return Event Info instance. */ long getLastSequenceNumber() { - if (events.isEmpty()) { - return -1; - } else { - return events.get(events.size() - 1).getSequenceNumber(); - } + return this.batchSequenceNumber; } /** diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java index 3dc91d0dfc9..37a0e16e934 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/OmTableInsightTask.java @@ -164,6 +164,7 @@ public Pair process(OMUpdateEventBatch events) { final Collection taskTables = getTaskTables(); // Process each update event + long startTime = System.currentTimeMillis(); while (eventIterator.hasNext()) { OMDBUpdateEvent omdbUpdateEvent = eventIterator.next(); String tableName = omdbUpdateEvent.getTable(); @@ -208,7 +209,8 @@ public Pair process(OMUpdateEventBatch events) { if (!replicatedSizeMap.isEmpty()) { writeDataToDB(replicatedSizeMap); } - LOG.debug("Completed a 'process' run of OmTableInsightTask."); + LOG.debug("{} successfully processed in {} milliseconds", + getTaskName(), (System.currentTimeMillis() - startTime)); return new ImmutablePair<>(getTaskName(), true); } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java index 1a514ceb90b..8e0c6f4b1c5 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskController.java @@ -22,7 +22,6 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; -import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; /** * Controller used by Recon to manage Tasks that are waiting on Recon events. @@ -38,18 +37,15 @@ public interface ReconTaskController { /** * Pass on a set of OM DB update events to the registered tasks. * @param events set of events - * @throws InterruptedException InterruptedException */ void consumeOMEvents(OMUpdateEventBatch events, - OMMetadataManager omMetadataManager) - throws InterruptedException; + OMMetadataManager omMetadataManager); /** * Pass on the handle to a new OM DB instance to the registered tasks. * @param omMetadataManager OM Metadata Manager instance */ - void reInitializeTasks(ReconOMMetadataManager omMetadataManager) - throws InterruptedException; + void reInitializeTasks(ReconOMMetadataManager omMetadataManager); /** * Get set of registered tasks. 
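The OMDBUpdatesHandler and OMUpdateEventBatch changes above make the batch carry its last sequence number explicitly instead of deriving it from the final event. A rough sketch of the intended wiring, assuming the handler's existing getEvents() accessor and a latestSequenceNumber value obtained from the OM delta response (both assumptions, not shown in these hunks):

  OMDBUpdatesHandler handler = new OMDBUpdatesHandler(omMetadataManager);
  // ... replay the OM delta updates through the handler ...
  handler.setLatestSequenceNumber(latestSequenceNumber);
  OMUpdateEventBatch batch =
      new OMUpdateEventBatch(handler.getEvents(), handler.getLatestSequenceNumber());
  long lastSeq = batch.getLastSequenceNumber();  // defined even when no event passed the table filter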
@@ -57,12 +53,6 @@ void reInitializeTasks(ReconOMMetadataManager omMetadataManager) */ Map getRegisteredTasks(); - /** - * Get instance of ReconTaskStatusDao. - * @return instance of ReconTaskStatusDao - */ - ReconTaskStatusDao getReconTaskStatusDao(); - /** * Start the task scheduler. */ diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskControllerImpl.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskControllerImpl.java index 64405095855..32b440badb7 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskControllerImpl.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/ReconTaskControllerImpl.java @@ -28,19 +28,23 @@ import java.util.Map; import java.util.Set; import java.util.concurrent.Callable; -import java.util.concurrent.ExecutionException; +import java.util.concurrent.CancellationException; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; -import java.util.concurrent.Future; import java.util.concurrent.atomic.AtomicInteger; +import java.util.stream.Collectors; import com.google.common.util.concurrent.ThreadFactoryBuilder; import org.apache.commons.lang3.tuple.Pair; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; -import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; -import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus; +import org.apache.hadoop.ozone.recon.tasks.types.NamedCallableTask; +import org.apache.hadoop.ozone.recon.tasks.types.TaskExecutionException; +import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdater; +import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdaterManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -59,16 +63,16 @@ public class ReconTaskControllerImpl implements ReconTaskController { private final int threadCount; private Map taskFailureCounter = new HashMap<>(); private static final int TASK_FAILURE_THRESHOLD = 2; - private ReconTaskStatusDao reconTaskStatusDao; + private final ReconTaskStatusUpdaterManager taskStatusUpdaterManager; @Inject public ReconTaskControllerImpl(OzoneConfiguration configuration, - ReconTaskStatusDao reconTaskStatusDao, - Set tasks) { + Set tasks, + ReconTaskStatusUpdaterManager taskStatusUpdaterManager) { reconOmTasks = new HashMap<>(); threadCount = configuration.getInt(OZONE_RECON_TASK_THREAD_COUNT_KEY, OZONE_RECON_TASK_THREAD_COUNT_DEFAULT); - this.reconTaskStatusDao = reconTaskStatusDao; + this.taskStatusUpdaterManager = taskStatusUpdaterManager; for (ReconOmTask task : tasks) { registerTask(task); } @@ -83,12 +87,6 @@ public void registerTask(ReconOmTask task) { reconOmTasks.put(taskName, task); // Store Task in Task failure tracker. taskFailureCounter.put(taskName, new AtomicInteger(0)); - // Create DB record for the task. - ReconTaskStatus reconTaskStatusRecord = new ReconTaskStatus(taskName, - 0L, 0L); - if (!reconTaskStatusDao.existsById(taskName)) { - reconTaskStatusDao.insert(reconTaskStatusRecord); - } } /** @@ -97,55 +95,46 @@ public void registerTask(ReconOmTask task) { * reprocess call more than 2 times across events, it is unregistered * (ignored). 
* @param events set of events - * @throws InterruptedException */ @Override - public synchronized void consumeOMEvents(OMUpdateEventBatch events, - OMMetadataManager omMetadataManager) - throws InterruptedException { + public synchronized void consumeOMEvents(OMUpdateEventBatch events, OMMetadataManager omMetadataManager) { + if (!events.isEmpty()) { + Collection>> tasks = new ArrayList<>(); + List failedTasks = new ArrayList<>(); + for (Map.Entry taskEntry : + reconOmTasks.entrySet()) { + ReconOmTask task = taskEntry.getValue(); + ReconTaskStatusUpdater taskStatusUpdater = taskStatusUpdaterManager.getTaskStatusUpdater(task.getTaskName()); + taskStatusUpdater.recordRunStart(); + // events passed to process method is no longer filtered + tasks.add(new NamedCallableTask<>(task.getTaskName(), () -> task.process(events))); + } + processTasks(tasks, events, failedTasks); - try { - if (!events.isEmpty()) { - Collection>> tasks = new ArrayList<>(); - for (Map.Entry taskEntry : - reconOmTasks.entrySet()) { - ReconOmTask task = taskEntry.getValue(); + // Retry processing failed tasks + List retryFailedTasks = new ArrayList<>(); + if (!failedTasks.isEmpty()) { + tasks.clear(); + for (String taskName : failedTasks) { + ReconOmTask task = reconOmTasks.get(taskName); // events passed to process method is no longer filtered - tasks.add(() -> task.process(events)); - } - - List>> results = - executorService.invokeAll(tasks); - List failedTasks = processTaskResults(results, events); - - // Retry - List retryFailedTasks = new ArrayList<>(); - if (!failedTasks.isEmpty()) { - tasks.clear(); - for (String taskName : failedTasks) { - ReconOmTask task = reconOmTasks.get(taskName); - // events passed to process method is no longer filtered - tasks.add(() -> task.process(events)); - } - results = executorService.invokeAll(tasks); - retryFailedTasks = processTaskResults(results, events); + tasks.add(new NamedCallableTask<>(task.getTaskName(), + () -> task.process(events))); } + processTasks(tasks, events, retryFailedTasks); + } - // Reprocess the failed tasks. - if (!retryFailedTasks.isEmpty()) { - tasks.clear(); - for (String taskName : failedTasks) { - ReconOmTask task = reconOmTasks.get(taskName); - tasks.add(() -> task.reprocess(omMetadataManager)); - } - results = executorService.invokeAll(tasks); - List reprocessFailedTasks = - processTaskResults(results, events); - ignoreFailedTasks(reprocessFailedTasks); + // Reprocess the failed tasks. 
+ if (!retryFailedTasks.isEmpty()) { + tasks.clear(); + for (String taskName : failedTasks) { + ReconOmTask task = reconOmTasks.get(taskName); + tasks.add(new NamedCallableTask<>(task.getTaskName(), () -> task.reprocess(omMetadataManager))); } + List reprocessFailedTasks = new ArrayList<>(); + processTasks(tasks, events, reprocessFailedTasks); + ignoreFailedTasks(reprocessFailedTasks); } - } catch (ExecutionException e) { - LOG.error("Unexpected error : ", e); } } @@ -166,45 +155,57 @@ private void ignoreFailedTasks(List failedTasks) { } @Override - public synchronized void reInitializeTasks( - ReconOMMetadataManager omMetadataManager) throws InterruptedException { - try { - Collection>> tasks = new ArrayList<>(); - for (Map.Entry taskEntry : - reconOmTasks.entrySet()) { - ReconOmTask task = taskEntry.getValue(); - tasks.add(() -> task.reprocess(omMetadataManager)); - } - List>> results = - executorService.invokeAll(tasks); - for (Future> f : results) { - String taskName = f.get().getLeft(); - if (!f.get().getRight()) { - LOG.info("Init failed for task {}.", taskName); - } else { - //store the timestamp for the task - ReconTaskStatus reconTaskStatusRecord = new ReconTaskStatus(taskName, - System.currentTimeMillis(), - omMetadataManager.getLastSequenceNumberFromDB()); - reconTaskStatusDao.update(reconTaskStatusRecord); - } - } - } catch (ExecutionException e) { - LOG.error("Unexpected error : ", e); + public synchronized void reInitializeTasks(ReconOMMetadataManager omMetadataManager) { + Collection>> tasks = new ArrayList<>(); + for (Map.Entry taskEntry : + reconOmTasks.entrySet()) { + ReconOmTask task = taskEntry.getValue(); + ReconTaskStatusUpdater taskStatusUpdater = taskStatusUpdaterManager.getTaskStatusUpdater(task.getTaskName()); + taskStatusUpdater.recordRunStart(); + tasks.add(new NamedCallableTask<>(task.getTaskName(), () -> task.reprocess(omMetadataManager))); } - } - /** - * Store the last completed event sequence number and timestamp to the DB - * for that task. - * @param taskName taskname to be updated. - * @param lastSequenceNumber contains the new sequence number. 
- */ - private void storeLastCompletedTransaction( - String taskName, long lastSequenceNumber) { - ReconTaskStatus reconTaskStatusRecord = new ReconTaskStatus(taskName, - System.currentTimeMillis(), lastSequenceNumber); - reconTaskStatusDao.update(reconTaskStatusRecord); + try { + CompletableFuture.allOf(tasks.stream() + .map(task -> CompletableFuture.supplyAsync(() -> { + try { + return task.call(); + } catch (Exception e) { + if (e instanceof InterruptedException) { + Thread.currentThread().interrupt(); + } + // Wrap the exception with the task name + throw new TaskExecutionException(task.getTaskName(), e); + } + }, executorService).thenAccept(result -> { + String taskName = result.getLeft(); + ReconTaskStatusUpdater taskStatusUpdater = taskStatusUpdaterManager.getTaskStatusUpdater(taskName); + if (!result.getRight()) { + LOG.error("Init failed for task {}.", taskName); + taskStatusUpdater.setLastTaskRunStatus(-1); + } else { + taskStatusUpdater.setLastTaskRunStatus(0); + taskStatusUpdater.setLastUpdatedSeqNumber(omMetadataManager.getLastSequenceNumberFromDB()); + } + taskStatusUpdater.recordRunCompletion(); + }).exceptionally(ex -> { + LOG.error("Task failed with exception: ", ex); + if (ex.getCause() instanceof TaskExecutionException) { + TaskExecutionException taskEx = (TaskExecutionException) ex.getCause(); + String taskName = taskEx.getTaskName(); + LOG.error("The above error occurred while trying to execute task: {}", taskName); + + ReconTaskStatusUpdater taskStatusUpdater = taskStatusUpdaterManager.getTaskStatusUpdater(taskName); + taskStatusUpdater.setLastTaskRunStatus(-1); + taskStatusUpdater.recordRunCompletion(); + } + return null; + })).toArray(CompletableFuture[]::new)).join(); + } catch (CompletionException ce) { + LOG.error("Completing all tasks failed with exception ", ce); + } catch (CancellationException ce) { + LOG.error("Some tasks were cancelled with exception", ce); + } } @Override @@ -212,11 +213,6 @@ public Map getRegisteredTasks() { return reconOmTasks; } - @Override - public ReconTaskStatusDao getReconTaskStatusDao() { - return reconTaskStatusDao; - } - @Override public synchronized void start() { LOG.info("Starting Recon Task Controller."); @@ -234,28 +230,59 @@ public synchronized void stop() { } /** - * Wait on results of all tasks. - * @param results Set of Futures. - * @param events Events. - * @return List of failed task names - * @throws ExecutionException execution Exception - * @throws InterruptedException Interrupted Exception + * For a given list of {@link Callable} tasks process them and add any failed task to the provided list. + * The tasks are executed in parallel, but will wait for the tasks to complete i.e. the longest + * time taken by this method will be the time taken by the longest task in the list. + * @param tasks A list of tasks to execute. + * @param events A batch of {@link OMUpdateEventBatch} events to fetch sequence number of last event in batch. 
+ * @param failedTasks Reference of the list to which we want to add the failed tasks for retry/reprocessing */ - private List processTaskResults(List>> - results, - OMUpdateEventBatch events) - throws ExecutionException, InterruptedException { - List failedTasks = new ArrayList<>(); - for (Future> f : results) { - String taskName = f.get().getLeft(); - if (!f.get().getRight()) { - LOG.info("Failed task : {}", taskName); - failedTasks.add(f.get().getLeft()); - } else { - taskFailureCounter.get(taskName).set(0); - storeLastCompletedTransaction(taskName, events.getLastSequenceNumber()); - } + private void processTasks(Collection>> tasks, + OMUpdateEventBatch events, List failedTasks) { + List> futures = tasks.stream() + .map(task -> CompletableFuture.supplyAsync(() -> { + try { + return task.call(); + } catch (Exception e) { + if (e instanceof InterruptedException) { + Thread.currentThread().interrupt(); + } + // Wrap the exception with the task name + throw new TaskExecutionException(task.getTaskName(), e); + } + }, executorService).thenAccept(result -> { + String taskName = result.getLeft(); + ReconTaskStatusUpdater taskStatusUpdater = taskStatusUpdaterManager.getTaskStatusUpdater(taskName); + if (!result.getRight()) { + LOG.error("Task {} failed", taskName); + failedTasks.add(result.getLeft()); + taskStatusUpdater.setLastTaskRunStatus(-1); + } else { + taskFailureCounter.get(taskName).set(0); + taskStatusUpdater.setLastTaskRunStatus(0); + taskStatusUpdater.setLastUpdatedSeqNumber(events.getLastSequenceNumber()); + } + taskStatusUpdater.recordRunCompletion(); + }).exceptionally(ex -> { + LOG.error("Task failed with exception: ", ex); + if (ex.getCause() instanceof TaskExecutionException) { + TaskExecutionException taskEx = (TaskExecutionException) ex.getCause(); + String taskName = taskEx.getTaskName(); + LOG.error("The above error occurred while trying to execute task: {}", taskName); + + ReconTaskStatusUpdater taskStatusUpdater = taskStatusUpdaterManager.getTaskStatusUpdater(taskName); + taskStatusUpdater.setLastTaskRunStatus(-1); + taskStatusUpdater.recordRunCompletion(); + } + return null; + })).collect(Collectors.toList()); + + try { + CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join(); + } catch (CompletionException ce) { + LOG.error("Completing all tasks failed with exception ", ce); + } catch (CancellationException ce) { + LOG.error("Some tasks were cancelled with exception", ce); } - return failedTasks; } } diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/types/NamedCallableTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/types/NamedCallableTask.java new file mode 100644 index 00000000000..f91ab5255de --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/types/NamedCallableTask.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
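Condensing the new consumeOMEvents flow above into one place (illustrative only; the loop mirrors the patch, the trailing comments summarise the later passes):

  List<String> failed = new ArrayList<>();
  Collection<NamedCallableTask<Pair<String, Boolean>>> batch = new ArrayList<>();
  for (ReconOmTask task : reconOmTasks.values()) {
    batch.add(new NamedCallableTask<>(task.getTaskName(), () -> task.process(events)));
  }
  processTasks(batch, events, failed);   // pass 1: process(events) for every registered task, in parallel
  // pass 2: process(events) is retried once for the task names collected in 'failed'
  // pass 3: tasks that still fail are given task.reprocess(omMetadataManager);
  //         anything failing that as well goes to ignoreFailedTasks(...) and is unregistered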
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks.types; + +import java.util.concurrent.Callable; + +/** + * This class is a wrapper over the {@link java.util.concurrent.Callable} interface. + *
+ * If there is any runtime exception which occurs during execution of a task via the executor, + * we lose all data regarding the task, so we do not have information on which task actually failed. + *
+ * This class is useful, as it will associate a background task to a task name so that we can efficiently + * record any failure. + */ +public class NamedCallableTask implements Callable { + private final String taskName; + private final Callable task; + + public NamedCallableTask(String taskName, Callable task) { + this.taskName = taskName; + this.task = task; + } + + public String getTaskName() { + return this.taskName; + } + + @Override + public V call() throws Exception { + return task.call(); + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/types/TaskExecutionException.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/types/TaskExecutionException.java new file mode 100644 index 00000000000..a83e643c5b7 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/types/TaskExecutionException.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks.types; + +/** + * Wrapper over {@link RuntimeException} to associate an exception to a task name. + */ +public class TaskExecutionException extends RuntimeException { + private final String taskName; + + public TaskExecutionException(String taskName, Throwable cause) { + super(cause); + this.taskName = taskName; + } + + public String getTaskName() { + return this.taskName; + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/types/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/types/package-info.java new file mode 100644 index 00000000000..68f107ddf1e --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/types/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
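A minimal usage sketch for the two wrapper types just added (not part of the patch; the executor, task name and failing body are assumed, and the java.util.concurrent imports are taken as given):

  ExecutorService pool = Executors.newSingleThreadExecutor();
  NamedCallableTask<Boolean> task =
      new NamedCallableTask<>("DummyTask", () -> { throw new IllegalStateException("boom"); });
  CompletableFuture.supplyAsync(() -> {
    try {
      return task.call();
    } catch (Exception e) {
      // Re-throw with the task name attached so the failure can be attributed later.
      throw new TaskExecutionException(task.getTaskName(), e);
    }
  }, pool).exceptionally(ex -> {
    TaskExecutionException wrapped = (TaskExecutionException) ex.getCause();
    System.err.println("Task " + wrapped.getTaskName() + " failed: " + wrapped.getCause());
    return null;
  }).join();
  pool.shutdown();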
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * The classes in this package contains a few Wrapper classes for better task tracking. + */ + +package org.apache.hadoop.ozone.recon.tasks.types; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/updater/ReconTaskStatusUpdater.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/updater/ReconTaskStatusUpdater.java new file mode 100644 index 00000000000..8708fe5bc3d --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/updater/ReconTaskStatusUpdater.java @@ -0,0 +1,123 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks.updater; + +import com.google.common.annotations.VisibleForTesting; +import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; +import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus; +import org.jooq.exception.DataAccessException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * This class provides utilities to update/modify Recon Task related data + * like updating table, incrementing counter etc. + */ +public class ReconTaskStatusUpdater { + + private static final Logger LOG = LoggerFactory.getLogger(ReconTaskStatusUpdater.class); + + private ReconTaskStatus reconTaskStatus; + + private ReconTaskStatusDao reconTaskStatusDao; + + private String taskName; + + @VisibleForTesting + public ReconTaskStatusUpdater(ReconTaskStatusDao reconTaskStatusDao, + String taskName) { + this.taskName = taskName; + this.reconTaskStatusDao = reconTaskStatusDao; + this.reconTaskStatus = new ReconTaskStatus(taskName, 0L, 0L, 0, 0); + } + + public ReconTaskStatusUpdater(ReconTaskStatusDao reconTaskStatusDao, ReconTaskStatus task) { + this.taskName = task.getTaskName(); + this.reconTaskStatusDao = reconTaskStatusDao; + this.reconTaskStatus = new ReconTaskStatus(taskName, task.getLastUpdatedTimestamp(), + task.getLastUpdatedSeqNumber(), task.getLastTaskRunStatus(), task.getIsCurrentTaskRunning()); + } + + public void setTaskName(String taskName) { + this.taskName = taskName; + this.reconTaskStatus.setTaskName(taskName); + } + + public void setLastUpdatedSeqNumber(long lastUpdatedSeqNumber) { + this.reconTaskStatus.setLastUpdatedSeqNumber(lastUpdatedSeqNumber); + } + + public void setLastUpdatedTimestamp(long lastUpdatedTimestamp) { + this.reconTaskStatus.setLastUpdatedTimestamp(lastUpdatedTimestamp); + } + + public void setLastTaskRunStatus(int lastTaskRunStatus) { + this.reconTaskStatus.setLastTaskRunStatus(lastTaskRunStatus); + } + + public void setIsCurrentTaskRunning(int isCurrentTaskRunning) { + this.reconTaskStatus.setIsCurrentTaskRunning(isCurrentTaskRunning); + } + + /** + * Helper function to update TASK_STATUS table with task start values. + * Set the isCurrentTaskRunning as true, update the timestamp. + * Call this function before the actual task processing starts to update table in DB. + */ + public void recordRunStart() { + try { + this.reconTaskStatus.setIsCurrentTaskRunning(1); + this.reconTaskStatus.setLastUpdatedTimestamp(System.currentTimeMillis()); + updateDetails(); + } catch (DataAccessException e) { + LOG.error("Failed to update table for start of task: {}", this.reconTaskStatus.getTaskName()); + } + } + + /** + * Helper function to update TASK_STATUS table with task end values. + * Set isCurrentTaskRunning as false, update the timestamp. + * Call this function after the actual task processing ends to update table in DB. + * It is expected that the task status result (successful/0, failure/-1) is already set + * before calling. 
+ */ + public void recordRunCompletion() { + try { + this.reconTaskStatus.setIsCurrentTaskRunning(0); + this.reconTaskStatus.setLastUpdatedTimestamp(System.currentTimeMillis()); + updateDetails(); + } catch (DataAccessException e) { + LOG.error("Failed to update table for task: {}", this.reconTaskStatus.getTaskName()); + } + } + + /** + * Utility function to update table with task details and update the counter if needed. + */ + public void updateDetails() { + if (!reconTaskStatusDao.existsById(this.taskName)) { + // First time getting the task, so insert value + reconTaskStatusDao.insert(this.reconTaskStatus); + LOG.info("Registered Task: {}", this.taskName); + } else { + // We already have row for the task in the table, update the row + reconTaskStatusDao.update(this.reconTaskStatus); + } + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/updater/ReconTaskStatusUpdaterManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/updater/ReconTaskStatusUpdaterManager.java new file mode 100644 index 00000000000..1e7ee75f248 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/updater/ReconTaskStatusUpdaterManager.java @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
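For orientation, a sketch of the lifecycle the ReconTaskStatusUpdater above is meant to support; the DAO, the task body and the sequence number are placeholders, not taken from the patch:

  ReconTaskStatusUpdater updater = new ReconTaskStatusUpdater(reconTaskStatusDao, "DummyTask");
  updater.recordRunStart();                       // is_current_task_running = 1, timestamp refreshed
  boolean ok;
  try {
    ok = runTheActualTask();                      // hypothetical task body
  } catch (Exception e) {
    ok = false;
  }
  updater.setLastTaskRunStatus(ok ? 0 : -1);      // 0 = success, -1 = failure
  updater.setLastUpdatedSeqNumber(lastSeenSequenceNumber);
  updater.recordRunCompletion();                  // is_current_task_running = 0, timestamp refreshed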
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks.updater; + +import com.google.inject.Inject; +import com.google.inject.Singleton; +import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; +import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; + +/** + * This class provides caching for ReconTaskStatusUpdater instances. + * For each task we maintain a map of updater instance and provide it to consumers + * to update. + * Here we also make a single call to the TASK_STATUS_TABLE to check if previous values are present + * for a task in the DB to avoid overwrite to initial state + */ +@Singleton +public class ReconTaskStatusUpdaterManager { + private final ReconTaskStatusDao reconTaskStatusDao; + // Act as a cache for the task updater instancesF + private final Map updaterCache; + + @Inject + public ReconTaskStatusUpdaterManager( + ReconTaskStatusDao reconTaskStatusDao + ) { + this.reconTaskStatusDao = reconTaskStatusDao; + this.updaterCache = new ConcurrentHashMap<>(); + + // Fetch the tasks present in the DB already + List tasks = reconTaskStatusDao.findAll(); + for (ReconTaskStatus task: tasks) { + updaterCache.put(task.getTaskName(), + new ReconTaskStatusUpdater(reconTaskStatusDao, task)); + } + } + + /** + * Gets the updater for the provided task name and updates DB with initial values + * if the task is not already present in DB. + * @param taskName The name of the task for which we want to get instance of the updater + * @return An instance of {@link ReconTaskStatusUpdater} for the provided task name. + */ + public ReconTaskStatusUpdater getTaskStatusUpdater(String taskName) { + // If the task is not already present in the DB then we can initialize using initial values + return updaterCache.computeIfAbsent(taskName, (name) -> + new ReconTaskStatusUpdater(reconTaskStatusDao, name)); + } +} diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/updater/package-info.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/updater/package-info.java new file mode 100644 index 00000000000..1aeedb88a9f --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/updater/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
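A small sketch of the caching behaviour the manager's class comment describes (task name chosen for illustration): repeated lookups return the same updater instance, and names not yet in the table are initialised lazily with default values:

  ReconTaskStatusUpdaterManager manager = new ReconTaskStatusUpdaterManager(reconTaskStatusDao);
  ReconTaskStatusUpdater first = manager.getTaskStatusUpdater("NSSummaryTask");
  ReconTaskStatusUpdater second = manager.getTaskStatusUpdater("NSSummaryTask");
  // first == second: one shared updater per task name, so all callers update the same row state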
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + * The classes in this package contains the utility methods and classes to update various task status. + */ +package org.apache.hadoop.ozone.recon.tasks.updater; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutFeature.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutFeature.java index 52739efe1a6..05393237913 100644 --- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutFeature.java +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconLayoutFeature.java @@ -32,7 +32,8 @@ */ public enum ReconLayoutFeature { // Represents the starting point for Recon's layout versioning system. - INITIAL_VERSION(0, "Recon Layout Versioning Introduction"); + INITIAL_VERSION(0, "Recon Layout Versioning Introduction"), + TASK_STATUS_STATISTICS(1, "Recon Task Status Statistics Tracking Introduced"); private final int version; private final String description; diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconTaskStatusTableUpgradeAction.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconTaskStatusTableUpgradeAction.java new file mode 100644 index 00000000000..f613b216724 --- /dev/null +++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/upgrade/ReconTaskStatusTableUpgradeAction.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *
+ * http://www.apache.org/licenses/LICENSE-2.0 + *
+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.upgrade; + +import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade; +import org.jooq.DSLContext; +import org.jooq.exception.DataAccessException; +import org.jooq.impl.DSL; +import org.jooq.impl.SQLDataType; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +import javax.sql.DataSource; +import java.sql.Connection; +import java.sql.SQLException; + +import static org.hadoop.ozone.recon.codegen.SqlDbUtils.TABLE_EXISTS_CHECK; +import static org.hadoop.ozone.recon.schema.ReconTaskSchemaDefinition.RECON_TASK_STATUS_TABLE_NAME; + + +/** + * Upgrade action for TASK_STATUS_STATISTICS feature layout change, which adds + * last_task_run_status and current_task_run_status columns to + * {@link org.hadoop.ozone.recon.schema.ReconTaskSchemaDefinition} in case it is missing . + */ +@UpgradeActionRecon(feature = ReconLayoutFeature.TASK_STATUS_STATISTICS, + type = ReconUpgradeAction.UpgradeActionType.FINALIZE) +public class ReconTaskStatusTableUpgradeAction implements ReconUpgradeAction { + + public static final Logger LOG = LoggerFactory.getLogger(ReconTaskStatusTableUpgradeAction.class); + + /** + * Utility function to add provided column to RECON_TASK_STATUS table as INTEGER type. + * @param dslContext Stores {@link DSLContext} to perform alter operations + * @param columnName Name of the column to be inserted to the table + */ + private void addColumnToTable(DSLContext dslContext, String columnName) { + //Column is set as nullable to avoid any errors. + dslContext.alterTable(RECON_TASK_STATUS_TABLE_NAME) + .addColumn(columnName, SQLDataType.INTEGER.nullable(true)).execute(); + } + + /** + * Utility function to set the provided column as Non-Null to enforce constraints in RECON_TASK_STATUS table. + * @param dslContext Stores {@link DSLContext} to perform alter operations + * @param columnName Name of the column to set as non-null + */ + private void setColumnAsNonNullable(DSLContext dslContext, String columnName) { + dslContext.alterTable(RECON_TASK_STATUS_TABLE_NAME) + .alterColumn(columnName).setNotNull().execute(); + } + + @Override + public void execute(ReconStorageContainerManagerFacade scmFacade) throws DataAccessException { + DataSource dataSource = scmFacade.getDataSource(); + try (Connection conn = dataSource.getConnection()) { + if (!TABLE_EXISTS_CHECK.test(conn, RECON_TASK_STATUS_TABLE_NAME)) { + return; + } + + DSLContext dslContext = DSL.using(conn); + // JOOQ doesn't support Derby DB officially, there is no way to run 'ADD COLUMN' command in single call + // for multiple columns. Hence, we run it as two separate steps. 
+ LOG.info("Adding 'last_task_run_status' column to task status table"); + addColumnToTable(dslContext, "last_task_run_status"); + LOG.info("Adding 'is_current_task_running' column to task status table"); + addColumnToTable(dslContext, "is_current_task_running"); + + //Handle previous table values with new columns default values + int updatedRowCount = dslContext.update(DSL.table(RECON_TASK_STATUS_TABLE_NAME)) + .set(DSL.field("last_task_run_status", SQLDataType.INTEGER), 0) + .set(DSL.field("is_current_task_running", SQLDataType.INTEGER), 0) + .execute(); + LOG.info("Updated {} rows with default value for new columns", updatedRowCount); + + // Now we will set the column as not-null to enforce constraints + setColumnAsNonNullable(dslContext, "last_task_run_status"); + setColumnAsNonNullable(dslContext, "is_current_task_running"); + } catch (SQLException | DataAccessException ex) { + LOG.error("Error while upgrading RECON_TASK_STATUS table.", ex); + } + } + + @Override public UpgradeActionType getType() { + return UpgradeActionType.FINALIZE; + } +} diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java index f1dafa2c75b..5b6d22bf267 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java @@ -942,7 +942,7 @@ public void testGetContainerCounts() throws Exception { containers.add(omContainerInfo2); // Process the list of containers through the container size count task - containerSizeCountTask.process(containers); + containerSizeCountTask.processContainers(containers); // Test fetching all container counts Response response = utilizationEndpoint.getContainerCounts(0L); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java index 741dcf3be4c..b75fde08c48 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java @@ -18,21 +18,23 @@ package org.apache.hadoop.ozone.recon.api; -import static org.junit.jupiter.api.Assertions.assertEquals; import com.google.inject.AbstractModule; import com.google.inject.Injector; - import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest; import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus; import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import javax.ws.rs.core.Response; import java.util.ArrayList; import java.util.List; +import static org.junit.jupiter.api.Assertions.assertEquals; + + /** * Test for Task Status Service. 
*/ @@ -44,7 +46,7 @@ public TestTaskStatusService() { } @BeforeEach - public void setUp() { + public void setUp() throws Exception { Injector parentInjector = getInjector(); parentInjector.createChildInjector(new AbstractModule() { @Override @@ -55,18 +57,19 @@ protected void configure() { }); } - @Test - public void testGetTaskTimes() { + @ParameterizedTest + @ValueSource(ints = {0, 1, -1}) + public void testTaskTableValues(int lastTaskRunStatus) { ReconTaskStatusDao reconTaskStatusDao = getDao(ReconTaskStatusDao.class); ReconTaskStatus reconTaskStatusRecord = new ReconTaskStatus( - "Dummy_Task", System.currentTimeMillis(), 0L); + "Dummy_Task", System.currentTimeMillis(), 0L, lastTaskRunStatus, 0); reconTaskStatusDao.insert(reconTaskStatusRecord); List resultList = new ArrayList<>(); resultList.add(reconTaskStatusRecord); - Response response = taskStatusService.getTaskTimes(); + Response response = taskStatusService.getTaskStats(); List responseList = (List) response.getEntity(); @@ -76,6 +79,8 @@ public void testGetTaskTimes() { assertEquals(reconTaskStatusRecord.getTaskName(), r.getTaskName()); assertEquals(reconTaskStatusRecord.getLastUpdatedTimestamp(), r.getLastUpdatedTimestamp()); + assertEquals(reconTaskStatusRecord.getLastTaskRunStatus(), r.getLastTaskRunStatus()); + assertEquals(reconTaskStatusRecord.getIsCurrentTaskRunning(), r.getIsCurrentTaskRunning()); } } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTriggerDBSyncEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTriggerDBSyncEndpoint.java index 7ad5dcc58cd..480d1348c17 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTriggerDBSyncEndpoint.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTriggerDBSyncEndpoint.java @@ -38,6 +38,8 @@ import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl; import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl; import org.apache.hadoop.ozone.recon.tasks.ReconTaskController; +import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdater; +import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdaterManager; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; import org.junit.jupiter.api.BeforeEach; @@ -102,6 +104,12 @@ public void setUp() throws IOException, AuthenticationException { ReconUtils reconUtilsMock = mock(ReconUtils.class); + + ReconTaskStatusDao reconTaskStatusDaoMock = mock(ReconTaskStatusDao.class); + ReconTaskStatusUpdaterManager taskStatusUpdaterManagerMock = mock(ReconTaskStatusUpdaterManager.class); + when(taskStatusUpdaterManagerMock.getTaskStatusUpdater(anyString())).thenReturn(new ReconTaskStatusUpdater( + reconTaskStatusDaoMock, "dummyTaskManager")); + DBCheckpoint checkpoint = omMetadataManager.getStore() .getCheckpoint(true); File tarFile = createTarFile(checkpoint.getCheckpointLocation()); @@ -116,13 +124,11 @@ public void setUp() throws IOException, AuthenticationException { commonUtils.getReconNodeDetails()); ReconTaskController reconTaskController = mock(ReconTaskController.class); - when(reconTaskController.getReconTaskStatusDao()) - .thenReturn(mock(ReconTaskStatusDao.class)); - OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = new OzoneManagerServiceProviderImpl(configuration, - reconOMMetadataManager, reconTaskController, reconUtilsMock, - 
ozoneManagerProtocol, new ReconContext(configuration, reconUtilsMock)); + reconOMMetadataManager, reconTaskController, + reconUtilsMock, ozoneManagerProtocol, new ReconContext(configuration, reconUtilsMock), + taskStatusUpdaterManagerMock); ozoneManagerServiceProvider.start(); reconTestInjector = diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java index 46e4506a5ef..8c8b72ea451 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java @@ -26,6 +26,7 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.times; import static org.mockito.Mockito.any; @@ -65,6 +66,8 @@ import org.apache.hadoop.ozone.recon.spi.ReconContainerMetadataManager; import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; import org.apache.hadoop.ozone.recon.tasks.ReconTaskConfig; +import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdater; +import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdaterManager; import org.apache.ozone.test.LambdaTestUtils; import org.hadoop.ozone.recon.schema.ContainerSchemaDefinition; import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest; @@ -189,10 +192,9 @@ public void testRun() throws Exception { // Start container health task ContainerHealthTask containerHealthTask = new ContainerHealthTask(scmMock.getContainerManager(), - scmMock.getScmServiceProvider(), - reconTaskStatusDao, containerHealthSchemaManager, - placementMock, reconTaskConfig, - reconContainerMetadataManager, new OzoneConfiguration()); + scmMock.getScmServiceProvider(), containerHealthSchemaManager, + placementMock, reconTaskConfig, reconContainerMetadataManager, + new OzoneConfiguration(), getMockTaskStatusUpdaterManager()); containerHealthTask.start(); // Ensure unhealthy container count in DB matches expected @@ -362,10 +364,9 @@ public void testDeletedContainer() throws Exception { 1L)).thenReturn(5L); ContainerHealthTask containerHealthTask = new ContainerHealthTask(scmMock.getContainerManager(), - scmMock.getScmServiceProvider(), - reconTaskStatusDao, containerHealthSchemaManager, - placementMock, reconTaskConfig, - reconContainerMetadataManager, new OzoneConfiguration()); + scmMock.getScmServiceProvider(), containerHealthSchemaManager, + placementMock, reconTaskConfig, reconContainerMetadataManager, + new OzoneConfiguration(), getMockTaskStatusUpdaterManager()); containerHealthTask.start(); LambdaTestUtils.await(6000, 1000, () -> (unHealthyContainersTableHandle.count() == 1)); @@ -543,15 +544,13 @@ public void testMissingAndEmptyMissingContainerDeletion() throws Exception { when(reconContainerMetadataManager.getKeyCountForContainer(2L)).thenReturn(0L); // Start the container health task - ReconTaskStatusDao reconTaskStatusDao = getDao(ReconTaskStatusDao.class); ReconTaskConfig reconTaskConfig = new ReconTaskConfig(); reconTaskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(2)); ContainerHealthTask containerHealthTask = new ContainerHealthTask(scmMock.getContainerManager(), - 
scmMock.getScmServiceProvider(), - reconTaskStatusDao, containerHealthSchemaManager, - placementMock, reconTaskConfig, - reconContainerMetadataManager, new OzoneConfiguration()); + scmMock.getScmServiceProvider(), containerHealthSchemaManager, + placementMock, reconTaskConfig, reconContainerMetadataManager, + new OzoneConfiguration(), getMockTaskStatusUpdaterManager()); containerHealthTask.start(); @@ -566,6 +565,15 @@ public void testMissingAndEmptyMissingContainerDeletion() throws Exception { }); } + private ReconTaskStatusUpdaterManager getMockTaskStatusUpdaterManager() { + ReconTaskStatusUpdaterManager reconTaskStatusUpdaterManager = mock(ReconTaskStatusUpdaterManager.class); + when(reconTaskStatusUpdaterManager.getTaskStatusUpdater(anyString())).thenAnswer(inv -> { + String taskName = inv.getArgument(0); + return new ReconTaskStatusUpdater(getDao(ReconTaskStatusDao.class), taskName); + }); + return reconTaskStatusUpdaterManager; + } + private Set getMockReplicas( long containerId, State...states) { Set replicas = new HashSet<>(); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java index 1230a9ba543..5cc2ec0c1aa 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java @@ -60,6 +60,10 @@ public void testSchemaCreated() throws Exception { Types.BIGINT)); expectedPairs.add(new ImmutablePair<>("last_updated_seq_number", Types.BIGINT)); + expectedPairs.add(new ImmutablePair<>("last_task_run_status", + Types.INTEGER)); + expectedPairs.add(new ImmutablePair<>("is_current_task_running", + Types.INTEGER)); List> actualPairs = new ArrayList<>(); @@ -69,7 +73,7 @@ public void testSchemaCreated() throws Exception { resultSet.getInt("DATA_TYPE"))); } - assertEquals(3, actualPairs.size()); + assertEquals(5, actualPairs.size()); assertEquals(expectedPairs, actualPairs); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconWithDifferentSqlDBs.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconWithDifferentSqlDBs.java index 18053688468..4c598601612 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconWithDifferentSqlDBs.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconWithDifferentSqlDBs.java @@ -72,7 +72,7 @@ public void testSchemaSetup(Provider provider) assertNotNull(reconSqlDB.getDao(dao)); }); ReconTaskStatusDao dao = reconSqlDB.getDao(ReconTaskStatusDao.class); - dao.insert(new ReconTaskStatus("TestTask", 1L, 2L)); + dao.insert(new ReconTaskStatus("TestTask", 1L, 2L, 1, 0)); assertEquals(1, dao.findAll().size()); int numRows = reconSqlDB.getDslContext(). 
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestSqlSchemaSetup.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestSqlSchemaSetup.java index 4c6ae91998c..983ceb1d8af 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestSqlSchemaSetup.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestSqlSchemaSetup.java @@ -26,7 +26,8 @@ import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus; -import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; /** * Class to test basic SQL schema setup. @@ -41,8 +42,9 @@ public TestSqlSchemaSetup() { * Make sure schema was created correctly. * @throws SQLException */ - @Test - public void testSchemaSetup() throws SQLException { + @ParameterizedTest + @ValueSource(ints = {0, 1, -1}) + public void testSchemaSetup(int lastTaskRunStatus) throws SQLException { assertNotNull(getInjector()); assertNotNull(getConfiguration()); assertNotNull(getDslContext()); @@ -51,7 +53,7 @@ public void testSchemaSetup() throws SQLException { assertNotNull(getDao(dao)); }); ReconTaskStatusDao dao = getDao(ReconTaskStatusDao.class); - dao.insert(new ReconTaskStatus("TestTask", 1L, 2L)); + dao.insert(new ReconTaskStatus("TestTask", 1L, 2L, lastTaskRunStatus, 0)); assertEquals(1, dao.findAll().size()); } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java index 2700034aaed..0d5050a934e 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestOzoneManagerServiceProviderImpl.java @@ -75,8 +75,9 @@ import org.apache.hadoop.ozone.recon.tasks.OMUpdateEventBatch; import org.apache.hadoop.ozone.recon.tasks.ReconTaskController; +import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdater; +import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdaterManager; import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; -import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; @@ -138,8 +139,8 @@ public void testUpdateReconOmDBWithNewSnapshot( OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = new OzoneManagerServiceProviderImpl(configuration, - reconOMMetadataManager, reconTaskController, reconUtilsMock, - ozoneManagerProtocol, reconContext); + reconOMMetadataManager, reconTaskController, reconUtilsMock, ozoneManagerProtocol, + reconContext, getMockTaskStatusUpdaterManager()); assertNull(reconOMMetadataManager.getKeyTable(getBucketLayout()) .get("/sampleVol/bucketOne/key_one")); @@ -179,8 +180,8 @@ public void testUpdateReconOmDBWithNewSnapshotFailure( OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = new OzoneManagerServiceProviderImpl(configuration, - reconOMMetadataManager, reconTaskController, reconUtilsMock, - ozoneManagerProtocol, reconContext); + reconOMMetadataManager, reconTaskController, reconUtilsMock, ozoneManagerProtocol, + reconContext, 
getMockTaskStatusUpdaterManager()); assertFalse(ozoneManagerServiceProvider.updateReconOmDBWithNewSnapshot()); @@ -216,8 +217,8 @@ public void testUpdateReconOmDBWithNewSnapshotSuccess( OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = new OzoneManagerServiceProviderImpl(configuration, - reconOMMetadataManager, reconTaskController, reconUtilsMock, - ozoneManagerProtocol, reconContext); + reconOMMetadataManager, reconTaskController, reconUtilsMock, ozoneManagerProtocol, + reconContext, getMockTaskStatusUpdaterManager()); assertTrue(reconContext.getErrors().contains(ReconContext.ErrorCode.GET_OM_DB_SNAPSHOT_FAILED)); assertTrue(ozoneManagerServiceProvider.updateReconOmDBWithNewSnapshot()); @@ -259,8 +260,8 @@ public void testReconOmDBCloseAndOpenNewSnapshotDb( OzoneManagerServiceProviderImpl ozoneManagerServiceProvider1 = new OzoneManagerServiceProviderImpl(configuration, - reconOMMetadataManager, reconTaskController, reconUtilsMock, - ozoneManagerProtocol, reconContext); + reconOMMetadataManager, reconTaskController, reconUtilsMock, ozoneManagerProtocol, + reconContext, getMockTaskStatusUpdaterManager()); assertTrue(ozoneManagerServiceProvider1.updateReconOmDBWithNewSnapshot()); HttpURLConnection httpURLConnectionMock2 = mock(HttpURLConnection.class); @@ -269,8 +270,8 @@ public void testReconOmDBCloseAndOpenNewSnapshotDb( .thenReturn(httpURLConnectionMock2); OzoneManagerServiceProviderImpl ozoneManagerServiceProvider2 = new OzoneManagerServiceProviderImpl(configuration, - reconOMMetadataManager, reconTaskController, reconUtilsMock, - ozoneManagerProtocol, reconContext); + reconOMMetadataManager, reconTaskController, reconUtilsMock, ozoneManagerProtocol, + reconContext, getMockTaskStatusUpdaterManager()); assertTrue(ozoneManagerServiceProvider2.updateReconOmDBWithNewSnapshot()); } @@ -315,8 +316,8 @@ public void testGetOzoneManagerDBSnapshot(@TempDir File dirReconMetadata) ReconTaskController reconTaskController = getMockTaskController(); OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = new OzoneManagerServiceProviderImpl(configuration, - reconOMMetadataManager, reconTaskController, reconUtilsMock, - ozoneManagerProtocol, reconContext); + reconOMMetadataManager, reconTaskController, reconUtilsMock, ozoneManagerProtocol, + reconContext, getMockTaskStatusUpdaterManager()); DBCheckpoint checkpoint = ozoneManagerServiceProvider .getOzoneManagerDBSnapshot(); @@ -365,8 +366,8 @@ public void testGetAndApplyDeltaUpdatesFromOM( OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = new OzoneManagerServiceProviderImpl(configuration, getTestReconOmMetadataManager(omMetadataManager, dirReconMetadata), - getMockTaskController(), new ReconUtils(), - getMockOzoneManagerClient(dbUpdatesWrapper), reconContext); + getMockTaskController(), new ReconUtils(), getMockOzoneManagerClient(dbUpdatesWrapper), + reconContext, getMockTaskStatusUpdaterManager()); OMDBUpdatesHandler updatesHandler = new OMDBUpdatesHandler(omMetadataManager); @@ -436,7 +437,8 @@ public void testGetAndApplyDeltaUpdatesFromOMWithLimit( getTestReconOmMetadataManager(omMetadataManager, dirReconMetadata), getMockTaskController(), new ReconUtils(), getMockOzoneManagerClientWith4Updates(dbUpdatesWrapper[0], - dbUpdatesWrapper[1], dbUpdatesWrapper[2], dbUpdatesWrapper[3]), reconContext); + dbUpdatesWrapper[1], dbUpdatesWrapper[2], dbUpdatesWrapper[3]), + reconContext, getMockTaskStatusUpdaterManager()); assertTrue(dbUpdatesWrapper[0].isDBUpdateSuccess()); assertTrue(dbUpdatesWrapper[1].isDBUpdateSuccess()); @@ 
-479,20 +481,15 @@ public void testSyncDataFromOMFullSnapshot( // Empty OM DB to start with. ReconOMMetadataManager omMetadataManager = getTestReconOmMetadataManager( initializeEmptyOmMetadataManager(dirOmMetadata), dirReconMetadata); - ReconTaskStatusDao reconTaskStatusDaoMock = - mock(ReconTaskStatusDao.class); - doNothing().when(reconTaskStatusDaoMock) - .update(any(ReconTaskStatus.class)); ReconTaskController reconTaskControllerMock = getMockTaskController(); - when(reconTaskControllerMock.getReconTaskStatusDao()) - .thenReturn(reconTaskStatusDaoMock); doNothing().when(reconTaskControllerMock) .reInitializeTasks(omMetadataManager); + ReconTaskStatusUpdaterManager reconTaskStatusUpdaterManager = getMockTaskStatusUpdaterManager(); OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = - new MockOzoneServiceProvider(configuration, omMetadataManager, - reconTaskControllerMock, new ReconUtils(), ozoneManagerProtocol, reconContext); + new MockOzoneServiceProvider(configuration, omMetadataManager, reconTaskControllerMock, + new ReconUtils(), ozoneManagerProtocol, reconContext, reconTaskStatusUpdaterManager); OzoneManagerSyncMetrics metrics = ozoneManagerServiceProvider.getMetrics(); assertEquals(0, metrics.getNumSnapshotRequests()); @@ -500,11 +497,9 @@ public void testSyncDataFromOMFullSnapshot( // Should trigger full snapshot request. ozoneManagerServiceProvider.syncDataFromOM(); - ArgumentCaptor captor = - ArgumentCaptor.forClass(ReconTaskStatus.class); - verify(reconTaskStatusDaoMock, times(1)) - .update(captor.capture()); - assertEquals(OmSnapshotRequest.name(), captor.getValue().getTaskName()); + ArgumentCaptor taskNameCaptor = ArgumentCaptor.forClass(String.class); + verify(reconTaskStatusUpdaterManager).getTaskStatusUpdater(taskNameCaptor.capture()); + assertEquals(OmSnapshotRequest.name(), taskNameCaptor.getValue()); verify(reconTaskControllerMock, times(1)) .reInitializeTasks(omMetadataManager); assertEquals(1, metrics.getNumSnapshotRequests()); @@ -518,32 +513,26 @@ public void testSyncDataFromOMDeltaUpdates( // Non-Empty OM DB to start with. ReconOMMetadataManager omMetadataManager = getTestReconOmMetadataManager( initializeNewOmMetadataManager(dirOmMetadata), dirReconMetadata); - ReconTaskStatusDao reconTaskStatusDaoMock = - mock(ReconTaskStatusDao.class); - doNothing().when(reconTaskStatusDaoMock) - .update(any(ReconTaskStatus.class)); ReconTaskController reconTaskControllerMock = getMockTaskController(); - when(reconTaskControllerMock.getReconTaskStatusDao()) - .thenReturn(reconTaskStatusDaoMock); doNothing().when(reconTaskControllerMock) .consumeOMEvents(any(OMUpdateEventBatch.class), any(OMMetadataManager.class)); + ReconTaskStatusUpdaterManager reconTaskStatusUpdaterManager = getMockTaskStatusUpdaterManager(); OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = - new OzoneManagerServiceProviderImpl(configuration, omMetadataManager, - reconTaskControllerMock, new ReconUtils(), ozoneManagerProtocol, reconContext); + new OzoneManagerServiceProviderImpl(configuration, omMetadataManager, reconTaskControllerMock, + new ReconUtils(), ozoneManagerProtocol, reconContext, reconTaskStatusUpdaterManager); OzoneManagerSyncMetrics metrics = ozoneManagerServiceProvider.getMetrics(); // Should trigger delta updates. 
ozoneManagerServiceProvider.syncDataFromOM(); - ArgumentCaptor captor = - ArgumentCaptor.forClass(ReconTaskStatus.class); - verify(reconTaskStatusDaoMock, times(1)) - .update(captor.capture()); - assertEquals(OmDeltaRequest.name(), captor.getValue().getTaskName()); + ArgumentCaptor captor = + ArgumentCaptor.forClass(String.class); + verify(reconTaskStatusUpdaterManager).getTaskStatusUpdater(captor.capture()); + assertEquals(OmDeltaRequest.name(), captor.getValue()); verify(reconTaskControllerMock, times(1)) .consumeOMEvents(any(OMUpdateEventBatch.class), @@ -559,32 +548,26 @@ public void testSyncDataFromOMFullSnapshotForSNNFE( // Non-Empty OM DB to start with. ReconOMMetadataManager omMetadataManager = getTestReconOmMetadataManager( initializeNewOmMetadataManager(dirOmMetadata), dirReconMetadata); - ReconTaskStatusDao reconTaskStatusDaoMock = - mock(ReconTaskStatusDao.class); - doNothing().when(reconTaskStatusDaoMock) - .update(any(ReconTaskStatus.class)); ReconTaskController reconTaskControllerMock = getMockTaskController(); - when(reconTaskControllerMock.getReconTaskStatusDao()) - .thenReturn(reconTaskStatusDaoMock); doNothing().when(reconTaskControllerMock) .reInitializeTasks(omMetadataManager); + ReconTaskStatusUpdaterManager reconTaskStatusUpdaterManager = getMockTaskStatusUpdaterManager(); OzoneManagerProtocol protocol = getMockOzoneManagerClientWithThrow(); OzoneManagerServiceProviderImpl ozoneManagerServiceProvider = - new MockOzoneServiceProvider(configuration, omMetadataManager, - reconTaskControllerMock, new ReconUtils(), protocol, reconContext); + new MockOzoneServiceProvider(configuration, omMetadataManager, reconTaskControllerMock, + new ReconUtils(), protocol, reconContext, reconTaskStatusUpdaterManager); OzoneManagerSyncMetrics metrics = ozoneManagerServiceProvider.getMetrics(); // Should trigger full snapshot request. 
ozoneManagerServiceProvider.syncDataFromOM(); - ArgumentCaptor captor = - ArgumentCaptor.forClass(ReconTaskStatus.class); - verify(reconTaskStatusDaoMock, times(1)) - .update(captor.capture()); - assertEquals(OmSnapshotRequest.name(), captor.getValue().getTaskName()); + ArgumentCaptor captor = + ArgumentCaptor.forClass(String.class); + verify(reconTaskStatusUpdaterManager).getTaskStatusUpdater(captor.capture()); + assertEquals(OmSnapshotRequest.name(), captor.getValue()); verify(reconTaskControllerMock, times(1)) .reInitializeTasks(omMetadataManager); assertEquals(1, metrics.getNumSnapshotRequests()); @@ -632,6 +615,15 @@ private OzoneManagerProtocol getMockOzoneManagerClientWith4Updates( return ozoneManagerProtocolMock; } + private ReconTaskStatusUpdaterManager getMockTaskStatusUpdaterManager() { + ReconTaskStatusUpdaterManager reconTaskStatusUpdaterManager = mock(ReconTaskStatusUpdaterManager.class); + when(reconTaskStatusUpdaterManager.getTaskStatusUpdater(anyString())).thenAnswer(inv -> new ReconTaskStatusUpdater( + mock(ReconTaskStatusDao.class), (String) inv.getArgument(0))); + when(reconTaskStatusUpdaterManager.getTaskStatusUpdater(anyString())).thenAnswer(inv -> + new ReconTaskStatusUpdater(mock(ReconTaskStatusDao.class), (String) inv.getArgument(0))); + return reconTaskStatusUpdaterManager; + } + private BucketLayout getBucketLayout() { return BucketLayout.DEFAULT; } @@ -648,13 +640,20 @@ class MockOzoneServiceProvider extends OzoneManagerServiceProviderImpl { ReconTaskController reconTaskController, ReconUtils reconUtils, OzoneManagerProtocol ozoneManagerClient, - ReconContext reconContext) { - super(configuration, omMetadataManager, reconTaskController, reconUtils, - ozoneManagerClient, reconContext); + ReconContext reconContext, + ReconTaskStatusUpdaterManager taskStatusUpdaterManager) { + super(configuration, omMetadataManager, reconTaskController, reconUtils, ozoneManagerClient, + reconContext, taskStatusUpdaterManager); } @Override public boolean updateReconOmDBWithNewSnapshot() { return true; } + + // Override to trigger full snapshot + @Override + public long getCurrentOMDBSequenceNumber() { + return 0; + } } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java index 67f962f1869..2790a485990 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerKeyMapperTask.java @@ -315,7 +315,7 @@ public void testKeyTableProcess() throws IOException { ArrayList() {{ add(keyEvent1); add(keyEvent2); - }}); + }}, 0L); ContainerKeyMapperTask containerKeyMapperTask = new ContainerKeyMapperTask(reconContainerMetadataManager, @@ -427,7 +427,7 @@ public void testFileTableProcess() throws Exception { add(keyEvent1); add(keyEvent2); } - }); + }, 0L); // Process PUT event for both the keys containerKeyMapperTask.process(omUpdateEventBatch); @@ -460,7 +460,7 @@ public void testFileTableProcess() throws Exception { { add(keyEvent3); } - }); + }, 0L); // Process DELETE event for key2 containerKeyMapperTask.process(omUpdateEventBatch2); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java index a996f167a1b..314176e3c62 100644 --- 
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestContainerSizeCountTask.java @@ -25,8 +25,10 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSING; import static org.hadoop.ozone.recon.schema.tables.ContainerCountBySizeTable.CONTAINER_COUNT_BY_SIZE; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.BDDMockito.given; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import java.time.Duration; import java.util.ArrayList; @@ -37,6 +39,8 @@ import org.apache.hadoop.hdds.scm.container.ContainerManager; import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest; import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider; +import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdater; +import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdaterManager; import org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition; import org.hadoop.ozone.recon.schema.tables.daos.ContainerCountBySizeDao; import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; @@ -52,8 +56,8 @@ public class TestContainerSizeCountTask extends AbstractReconSqlDBTest { private ContainerManager containerManager; private StorageContainerServiceProvider scmClient; - private ReconTaskStatusDao reconTaskStatusDao; private ReconTaskConfig reconTaskConfig; + private ReconTaskStatusUpdaterManager reconTaskStatusUpdaterManager; private ContainerCountBySizeDao containerCountBySizeDao; private UtilizationSchemaDefinition utilizationSchemaDefinition; private ContainerSizeCountTask task; @@ -69,18 +73,20 @@ public void setUp() { getSchemaDefinition(UtilizationSchemaDefinition.class); dslContext = utilizationSchemaDefinition.getDSLContext(); containerCountBySizeDao = getDao(ContainerCountBySizeDao.class); - reconTaskStatusDao = getDao(ReconTaskStatusDao.class); reconTaskConfig = new ReconTaskConfig(); reconTaskConfig.setContainerSizeCountTaskInterval(Duration.ofSeconds(1)); + reconTaskStatusUpdaterManager = mock(ReconTaskStatusUpdaterManager.class); + when(reconTaskStatusUpdaterManager.getTaskStatusUpdater(anyString())).thenReturn(new ReconTaskStatusUpdater( + getDao(ReconTaskStatusDao.class), "mockedTask-" + System.currentTimeMillis())); containerManager = mock(ContainerManager.class); scmClient = mock(StorageContainerServiceProvider.class); task = new ContainerSizeCountTask( containerManager, scmClient, - reconTaskStatusDao, reconTaskConfig, containerCountBySizeDao, - utilizationSchemaDefinition); + utilizationSchemaDefinition, + reconTaskStatusUpdaterManager); // Truncate table before running each test dslContext.truncate(CONTAINER_COUNT_BY_SIZE); } @@ -111,7 +117,7 @@ public void testProcess() { containers.add(omContainerInfo1); containers.add(omContainerInfo2); - task.process(containers); + task.processContainers(containers); // Verify 3 containers are in correct bins. 
assertEquals(3, containerCountBySizeDao.count()); @@ -143,7 +149,7 @@ public void testProcess() { given(omContainerInfo2.containerID()).willReturn(new ContainerID(2)); given(omContainerInfo2.getUsedBytes()).willReturn(50000L); // 50KB - task.process(containers); + task.processContainers(containers); // Total size groups added to the database assertEquals(5, containerCountBySizeDao.count()); @@ -166,7 +172,7 @@ public void testProcess() { // Remove the container having size 1.5GB and upperbound 2147483648L containers.remove(omContainerInfo1); - task.process(containers); + task.processContainers(containers); recordToFind.value1(2147483648L); assertEquals(0, containerCountBySizeDao .findById(recordToFind.value1()) @@ -222,7 +228,7 @@ public void testProcessDeletedAndNegativeSizedContainers() { containers.add(negativeSizeDeletedContainer); containers.add(validSizeContainer); - task.process(containers); + task.processContainers(containers); // Verify that only the valid containers are counted assertEquals(3, containerCountBySizeDao.count()); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java index 3572f5813ef..e2e6f5c600c 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java @@ -195,7 +195,7 @@ public void testProcess() { .build(); OMUpdateEventBatch omUpdateEventBatch = - new OMUpdateEventBatch(Arrays.asList(event, event2)); + new OMUpdateEventBatch(Arrays.asList(event, event2), 0L); fileSizeCountTask.process(omUpdateEventBatch); // Verify 2 keys are in correct bins. @@ -250,7 +250,7 @@ public void testProcess() { .build(); omUpdateEventBatch = new OMUpdateEventBatch( - Arrays.asList(updateEvent, putEvent, deleteEvent)); + Arrays.asList(updateEvent, putEvent, deleteEvent), 0L); fileSizeCountTask.process(omUpdateEventBatch); assertEquals(4, fileCountBySizeDao.count()); @@ -388,7 +388,7 @@ public void testProcessAtScale() { } OMUpdateEventBatch omUpdateEventBatch = - new OMUpdateEventBatch(omDbEventList); + new OMUpdateEventBatch(omDbEventList, 0L); fileSizeCountTask.process(omUpdateEventBatch); // Verify 2 keys are in correct bins. 
@@ -464,7 +464,7 @@ public void testProcessAtScale() { } } - omUpdateEventBatch = new OMUpdateEventBatch(omDbEventList); + omUpdateEventBatch = new OMUpdateEventBatch(omDbEventList, 0L); fileSizeCountTask.process(omUpdateEventBatch); assertEquals(10000, fileCountBySizeDao.count()); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java index 485804240d5..22f74bc79fb 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTask.java @@ -267,7 +267,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException { .setAction(OMDBUpdateEvent.OMDBUpdateAction.DELETE) .build(); - return new OMUpdateEventBatch(Arrays.asList(keyEvent1, keyEvent2)); + return new OMUpdateEventBatch(Arrays.asList(keyEvent1, keyEvent2), 0L); } @Test diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java index ba2e7497417..8a7bbb85e2d 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithFSO.java @@ -424,7 +424,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException { return new OMUpdateEventBatch(Arrays.asList( keyEvent1, keyEvent2, keyEvent3, keyEvent4, keyEvent5, keyEvent6, keyEvent7 - )); + ), 0L); } @Test diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacy.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacy.java index 5ffd03cbb88..7fca7bc924b 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacy.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacy.java @@ -432,7 +432,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException { return new OMUpdateEventBatch(Arrays.asList( keyEvent1, keyEvent2, keyEvent3, keyEvent4, keyEvent5, keyEvent6, keyEvent7 - )); + ), 0L); } @Test diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyOBSLayout.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyOBSLayout.java index db480367639..cef2b5ec7aa 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyOBSLayout.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithLegacyOBSLayout.java @@ -326,7 +326,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException { .build(); return new OMUpdateEventBatch( - Arrays.asList(keyEvent1, keyEvent2, keyEvent3, keyEvent4)); + Arrays.asList(keyEvent1, keyEvent2, keyEvent3, keyEvent4), 0L); } @Test diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java index 8f9d6b2990a..6cf81063ce2 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java +++ 
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestNSSummaryTaskWithOBS.java @@ -321,7 +321,7 @@ private OMUpdateEventBatch processEventBatch() throws IOException { .build(); return new OMUpdateEventBatch( - Arrays.asList(keyEvent1, keyEvent2, keyEvent3, keyEvent4)); + Arrays.asList(keyEvent1, keyEvent2, keyEvent3, keyEvent4), 0L); } @Test diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java index 56d8fe21315..8bb4c1f724b 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestOmTableInsightTask.java @@ -320,7 +320,7 @@ public void testProcessForDeletedDirectoryTable() throws IOException { getOmKeyInfo("vol1", "bucket1", DIR_ONE, (i + 1), false), DELETED_DIR_TABLE, PUT, null)); } - OMUpdateEventBatch putEventBatch = new OMUpdateEventBatch(putEvents); + OMUpdateEventBatch putEventBatch = new OMUpdateEventBatch(putEvents, 0L); omTableInsightTask.process(putEventBatch); assertEquals(5, getCountForTable(DELETED_DIR_TABLE)); @@ -334,7 +334,7 @@ public void testProcessForDeletedDirectoryTable() throws IOException { deleteEvents.add(getOMUpdateEvent(paths.get(2), getOmKeyInfo("vol1", "bucket1", DIR_ONE, 3L, false), DELETED_DIR_TABLE, DELETE, null)); - OMUpdateEventBatch deleteEventBatch = new OMUpdateEventBatch(deleteEvents); + OMUpdateEventBatch deleteEventBatch = new OMUpdateEventBatch(deleteEvents, 0L); omTableInsightTask.process(deleteEventBatch); assertEquals(3, getCountForTable(DELETED_DIR_TABLE)); } @@ -470,7 +470,7 @@ public void testProcessForCount() { } // Processing the initial batch of events - OMUpdateEventBatch initialBatch = new OMUpdateEventBatch(initialEvents); + OMUpdateEventBatch initialBatch = new OMUpdateEventBatch(initialEvents, 0L); omTableInsightTask.process(initialBatch); // Verifying the count in each table @@ -499,7 +499,7 @@ public void testProcessForCount() { // Processing the additional events OMUpdateEventBatch additionalBatch = - new OMUpdateEventBatch(additionalEvents); + new OMUpdateEventBatch(additionalEvents, 0L); omTableInsightTask.process(additionalBatch); // Verifying the final count in each table for (String tableName : omTableInsightTask.getTaskTables()) { @@ -528,7 +528,7 @@ public void testProcessForOpenKeyTableAndOpenFileTable() { putEvents.add(getOMUpdateEvent("item" + i, omKeyInfo, table, PUT, null)); } - OMUpdateEventBatch putEventBatch = new OMUpdateEventBatch(putEvents); + OMUpdateEventBatch putEventBatch = new OMUpdateEventBatch(putEvents, 0L); omTableInsightTask.process(putEventBatch); // After 5 PUTs, size should be 5 * 1000 = 5000 @@ -546,7 +546,7 @@ public void testProcessForOpenKeyTableAndOpenFileTable() { deleteEvents.add( getOMUpdateEvent("item0", omKeyInfo, OPEN_FILE_TABLE, DELETE, null)); - OMUpdateEventBatch deleteEventBatch = new OMUpdateEventBatch(deleteEvents); + OMUpdateEventBatch deleteEventBatch = new OMUpdateEventBatch(deleteEvents, 0L); omTableInsightTask.process(deleteEventBatch); // After deleting "item0", size should be 4 * 1000 = 4000 @@ -569,7 +569,7 @@ public void testProcessForOpenKeyTableAndOpenFileTable() { getOMUpdateEvent("item1", newKeyInfo, tableName, UPDATE, omKeyInfo)); } - OMUpdateEventBatch updateEventBatch = new OMUpdateEventBatch(updateEvents); + OMUpdateEventBatch updateEventBatch = new 
OMUpdateEventBatch(updateEvents, 0L); omTableInsightTask.process(updateEventBatch); // After updating "item1", size should be 4000 - 1000 + 2000 = 5000 @@ -606,7 +606,7 @@ public void testProcessForDeletedTable() { getOMUpdateEvent("item" + i, repeatedOmKeyInfo, DELETED_TABLE, PUT, null)); } - OMUpdateEventBatch putEventBatch = new OMUpdateEventBatch(putEvents); + OMUpdateEventBatch putEventBatch = new OMUpdateEventBatch(putEvents, 0L); omTableInsightTask.process(putEventBatch); // Each of the 5 RepeatedOmKeyInfo object has 5 OmKeyInfo obj, // so total deleted keys should be 5 * 5 = 25 @@ -622,7 +622,7 @@ public void testProcessForDeletedTable() { deleteEvents.add( getOMUpdateEvent("item0", repeatedOmKeyInfo, DELETED_TABLE, DELETE, null)); - OMUpdateEventBatch deleteEventBatch = new OMUpdateEventBatch(deleteEvents); + OMUpdateEventBatch deleteEventBatch = new OMUpdateEventBatch(deleteEvents, 0L); omTableInsightTask.process(deleteEventBatch); // After deleting "item0" total deleted keys should be 20 assertEquals(20L, getCountForTable(DELETED_TABLE)); diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java index b5e82a48a87..477a5cace73 100644 --- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java @@ -21,11 +21,12 @@ import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertSame; -import static org.mockito.Mockito.any; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; -import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.verify; import java.util.HashSet; @@ -34,6 +35,8 @@ import org.apache.hadoop.ozone.om.OMMetadataManager; import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest; import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager; +import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdater; +import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdaterManager; import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus; import org.junit.jupiter.api.BeforeEach; @@ -55,8 +58,14 @@ public TestReconTaskControllerImpl() { public void setUp() { OzoneConfiguration ozoneConfiguration = new OzoneConfiguration(); reconTaskStatusDao = getDao(ReconTaskStatusDao.class); - reconTaskController = new ReconTaskControllerImpl(ozoneConfiguration, - reconTaskStatusDao, new HashSet<>()); + ReconTaskStatusUpdaterManager reconTaskStatusUpdaterManagerMock = mock(ReconTaskStatusUpdaterManager.class); + when(reconTaskStatusUpdaterManagerMock.getTaskStatusUpdater(anyString())) + .thenAnswer(i -> { + String taskName = i.getArgument(0); + return new ReconTaskStatusUpdater(reconTaskStatusDao, taskName); + }); + reconTaskController = new ReconTaskControllerImpl(ozoneConfiguration, new HashSet<>(), + reconTaskStatusUpdaterManagerMock); reconTaskController.start(); } @@ -90,7 +99,6 @@ public void testConsumeOMEvents() throws Exception { .process(any()); long endTime 
= System.currentTimeMillis(); - reconTaskStatusDao = getDao(ReconTaskStatusDao.class); ReconTaskStatus reconTaskStatus = reconTaskStatusDao.findById("MockTask"); long taskTimeStamp = reconTaskStatus.getLastUpdatedTimestamp(); long seqNumber = reconTaskStatus.getLastUpdatedSeqNumber(); @@ -99,6 +107,38 @@ public void testConsumeOMEvents() throws Exception { assertEquals(seqNumber, omUpdateEventBatchMock.getLastSequenceNumber()); } + @Test + public void testTaskRecordsFailureOnException() throws Exception { + ReconOmTask reconOmTaskMock = getMockTask("MockTask"); + OMUpdateEventBatch omUpdateEventBatchMock = mock(OMUpdateEventBatch.class); + + // Throw exception when trying to run task + when(reconOmTaskMock.process(any(OMUpdateEventBatch.class))) + .thenThrow(new RuntimeException("Mock Failure")); + reconTaskController.registerTask(reconOmTaskMock); + when(omUpdateEventBatchMock.getLastSequenceNumber()).thenReturn(100L); + when(omUpdateEventBatchMock.isEmpty()).thenReturn(false); + + long startTime = System.currentTimeMillis(); + reconTaskController.consumeOMEvents( + omUpdateEventBatchMock, + mock(OMMetadataManager.class)); + + verify(reconOmTaskMock, times(1)) + .process(any()); + long endTime = System.currentTimeMillis(); + + ReconTaskStatus reconTaskStatus = reconTaskStatusDao.findById("MockTask"); + long taskTimeStamp = reconTaskStatus.getLastUpdatedTimestamp(); + long seqNumber = reconTaskStatus.getLastUpdatedSeqNumber(); + int taskStatus = reconTaskStatus.getLastTaskRunStatus(); + + assertThat(taskTimeStamp).isGreaterThanOrEqualTo(startTime).isLessThanOrEqualTo(endTime); + // Task failed so seqNumber should not be updated, and last task status should be -1 + assertEquals(seqNumber, 0); + assertEquals(taskStatus, -1); + } + @Test public void testFailedTaskRetryLogic() throws Exception { String taskName = "Dummy_" + System.currentTimeMillis(); @@ -149,6 +189,7 @@ public void testBadBehavedTaskIsIgnored() throws Exception { } //Should be ignored now. + Long startTime = System.currentTimeMillis(); reconTaskController.consumeOMEvents(omUpdateEventBatchMock, omMetadataManagerMock); assertThat(reconTaskController.getRegisteredTasks()).isEmpty(); @@ -157,7 +198,7 @@ public void testBadBehavedTaskIsIgnored() throws Exception { ReconTaskStatus dbRecord = reconTaskStatusDao.findById(taskName); assertEquals(taskName, dbRecord.getTaskName()); - assertEquals(Long.valueOf(0L), dbRecord.getLastUpdatedTimestamp()); + assertThat(dbRecord.getLastUpdatedTimestamp()).isGreaterThanOrEqualTo(startTime); assertEquals(Long.valueOf(0L), dbRecord.getLastUpdatedSeqNumber()); } diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskStatusUpdater.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskStatusUpdater.java new file mode 100644 index 00000000000..379732d2c8e --- /dev/null +++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskStatusUpdater.java @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + *

+ * http://www.apache.org/licenses/LICENSE-2.0 + *

+ * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.recon.tasks; + +import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest; +import org.apache.hadoop.ozone.recon.tasks.updater.ReconTaskStatusUpdater; +import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao; +import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +/** + * This class contains tests to validate {@link ReconTaskStatusUpdater} class. + */ +public class TestReconTaskStatusUpdater extends AbstractReconSqlDBTest { + private ReconTaskStatusDao reconTaskStatusDaoMock; + private ReconTaskStatusUpdater updater; + + @BeforeEach + void setup() { + this.reconTaskStatusDaoMock = mock(ReconTaskStatusDao.class); + this.updater = mock(ReconTaskStatusUpdater.class); + + doAnswer(inv -> { + if (!reconTaskStatusDaoMock.existsById(anyString())) { + // First time getting the task, so insert value + reconTaskStatusDaoMock.insert(new ReconTaskStatus("task1", 0L, 0L, 0, 0)); + } else { + // We already have row for the task in the table, update the row + reconTaskStatusDaoMock.update(new ReconTaskStatus("task1", 0L, 0L, 0, 0)); + } + return null; + }).when(updater).updateDetails(); + doNothing().when(updater).setLastUpdatedSeqNumber(anyLong()); + doNothing().when(updater).setLastUpdatedTimestamp(anyLong()); + doNothing().when(updater).setLastTaskRunStatus(anyInt()); + doNothing().when(updater).setIsCurrentTaskRunning(anyInt()); + } + + @Test + void testUpdateDetailsFirstTime() { + when(reconTaskStatusDaoMock.existsById(anyString())).thenReturn(false); + + updater.updateDetails(); + + verify(reconTaskStatusDaoMock, times(1)).insert(any(ReconTaskStatus.class)); + verify(reconTaskStatusDaoMock, never()).update(any(ReconTaskStatus.class)); + } + + @Test + void testUpdateDetailsUpdateExisting() { + when(reconTaskStatusDaoMock.existsById(anyString())).thenReturn(true); + + updater.setLastTaskRunStatus(1); // Task success + updater.updateDetails(); + + verify(reconTaskStatusDaoMock, times(1)).update(any(ReconTaskStatus.class)); + } + + @Test + void testSetters() { + updater.setLastUpdatedSeqNumber(100L); + updater.setLastUpdatedTimestamp(200L); + updater.setLastTaskRunStatus(1); + updater.setIsCurrentTaskRunning(0); + + // Verify the fields are updated without throwing exceptions + verify(updater, times(1)).setLastUpdatedSeqNumber(anyLong()); + verify(updater, times(1)).setLastUpdatedTimestamp(anyLong()); + verify(updater, times(1)).setLastTaskRunStatus(anyInt()); + verify(updater, times(1)).setIsCurrentTaskRunning(anyInt()); + } +} From 3a6b05efbccfc205d44293796fb7ae48b8f68e81 Mon Sep 17 00:00:00 2001 From: Nandakumar Vadivelu 
Date: Sun, 19 Jan 2025 13:18:37 +0530 Subject: [PATCH 100/168] HDDS-12105. Enable sortpom in ozone-manager and httpfsgateway. (#7720) --- hadoop-ozone/httpfsgateway/pom.xml | 175 +++++++------ hadoop-ozone/ozone-manager/pom.xml | 392 ++++++++++++++--------------- 2 files changed, 279 insertions(+), 288 deletions(-) diff --git a/hadoop-ozone/httpfsgateway/pom.xml b/hadoop-ozone/httpfsgateway/pom.xml index e21bc166817..f7b5b455c29 100644 --- a/hadoop-ozone/httpfsgateway/pom.xml +++ b/hadoop-ozone/httpfsgateway/pom.xml @@ -14,10 +14,7 @@ --> - + 4.0.0 org.apache.ozone @@ -33,39 +30,27 @@ false + ${maven.build.timestamp} REPO NOT AVAIL REVISION NOT AVAIL yyyy-MM-dd'T'HH:mm:ssZ - ${maven.build.timestamp} - true - true + + true - org.apache.ozone - hdds-common - - - org.apache.ozone - hdds-config - - - org.apache.ozone - hdds-server-framework - - - org.apache.ozone - ozone-filesystem-common + ch.qos.reload4j + reload4j + compile - org.apache.ozone - ozone-filesystem - runtime + com.fasterxml.jackson.core + jackson-databind - org.apache.hadoop - hadoop-hdfs-client + com.google.guava + guava com.googlecode.json-simple @@ -79,10 +64,42 @@ + + jakarta.ws.rs + jakarta.ws.rs-api + + + jakarta.xml.bind + jakarta.xml.bind-api + javax.servlet javax.servlet-api + + org.apache.commons + commons-lang3 + + + org.apache.hadoop + hadoop-hdfs-client + + + org.apache.ozone + hdds-common + + + org.apache.ozone + hdds-config + + + org.apache.ozone + hdds-server-framework + + + org.apache.ozone + ozone-filesystem-common + org.eclipse.jetty jetty-server @@ -92,18 +109,20 @@ jetty-webapp - commons-codec - commons-codec - runtime + org.glassfish.hk2 + hk2-api - org.apache.commons - commons-lang3 + org.glassfish.jaxb + jaxb-runtime - ch.qos.reload4j - reload4j - compile + org.glassfish.jersey.core + jersey-server + + + org.glassfish.jersey.inject + jersey-hk2 org.slf4j @@ -111,8 +130,8 @@ compile - org.slf4j - slf4j-reload4j + commons-codec + commons-codec runtime @@ -122,22 +141,14 @@ - - org.apache.commons - commons-math - - - org.apache.yetus - audience-annotations - - - org.slf4j - slf4j-log4j12 - com.sun.mail javax.mail + + io.netty + * + jline jline @@ -147,57 +158,43 @@ log4j - io.netty - * + org.apache.commons + commons-math + + + org.apache.yetus + audience-annotations + + + org.slf4j + slf4j-log4j12 - com.fasterxml.jackson.core - jackson-databind - - - com.google.guava - guava - - - jakarta.ws.rs - jakarta.ws.rs-api - - - jakarta.xml.bind - jakarta.xml.bind-api - - - org.glassfish.hk2 - hk2-api - - - org.glassfish.jersey.core - jersey-server - - - org.glassfish.jersey.inject - jersey-hk2 + org.apache.ozone + ozone-filesystem + runtime - org.glassfish.jaxb - jaxb-runtime + org.slf4j + slf4j-reload4j + runtime - src/main/resources true + src/main/resources httpfs.properties - src/main/resources false + src/main/resources httpfs.properties @@ -205,12 +202,12 @@ - ${basedir}/src/test/resources false + ${basedir}/src/test/resources - ${basedir}/src/test/resources true + ${basedir}/src/test/resources @@ -283,31 +280,29 @@ create-web-xmls - generate-test-resources run + generate-test-resources - + - + site - site run + site - + @@ -344,10 +339,10 @@ dist - package single + package ${project.artifactId}-${project.version} false diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml index 0547ec4c2cf..64e0704bf0a 100644 --- a/hadoop-ozone/ozone-manager/pom.xml +++ b/hadoop-ozone/ozone-manager/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and 
limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -24,96 +21,90 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ozone-manager 2.0.0-SNAPSHOT - Apache Ozone Manager Server - Apache Ozone Manager Server jar + Apache Ozone Manager Server + Apache Ozone Manager Server false - true + - org.apache.ozone - hdds-annotation-processing - test + com.fasterxml.jackson.core + jackson-annotations - org.apache.ozone - hdds-client + com.fasterxml.jackson.core + jackson-databind - org.apache.ozone - hdds-common + com.google.guava + guava - org.apache.ozone - hdds-config + com.google.protobuf + protobuf-java - org.apache.ozone - hdds-interface-server + com.sun.jersey + jersey-client + - org.apache.ozone - hdds-managed-rocksdb + commons-codec + commons-codec - org.apache.ozone - ozone-interface-client + commons-io + commons-io - - org.aspectj - aspectjrt - ${aspectj.version} + info.picocli + picocli - - org.aspectj - aspectjweaver - ${aspectj.version} + io.grpc + grpc-api + + + com.google.code.findbugs + jsr305 + + - - org.apache.ozone - ozone-common + io.grpc + grpc-netty - - org.apache.ozone - ozone-client + io.grpc + grpc-stub - - org.apache.ozone - hdds-docs - provided + io.netty + netty-common - - - org.apache.ozone - hdds-interface-client + io.netty + netty-handler - - org.apache.ozone - ozone-interface-storage + io.netty + netty-transport - - org.apache.ozone - hdds-server-framework + jakarta.annotation + jakarta.annotation-api - org.apache.ozone - hdds-hadoop-dependency-server + jakarta.xml.bind + jakarta.xml.bind-api - org.apache.ozone - rocksdb-checkpoint-differ + javax.servlet + javax.servlet-api @@ -133,141 +124,71 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> log4j-api - org.apache.ratis - ratis-common - - - org.apache.ratis - ratis-grpc - - - org.apache.ratis - ratis-netty - - - org.apache.ratis - ratis-proto + org.apache.ozone + hdds-client - org.apache.ratis - ratis-server-api + org.apache.ozone + hdds-common - org.apache.ratis - ratis-server + org.apache.ozone + hdds-config - org.apache.ratis - ratis-thirdparty-misc + org.apache.ozone + hdds-hadoop-dependency-server + - org.bouncycastle - bcprov-jdk18on - - - io.grpc - grpc-api - - - com.google.code.findbugs - jsr305 - - - - - io.grpc - grpc-netty - - - io.grpc - grpc-stub - - - io.netty - netty-common + org.apache.ozone + hdds-interface-client - io.netty - netty-handler + org.apache.ozone + hdds-interface-server - io.netty - netty-tcnative-boringssl-static - runtime + org.apache.ozone + hdds-managed-rocksdb - io.netty - netty-transport + org.apache.ozone + hdds-rocks-native - org.reflections - reflections + org.apache.ozone + hdds-server-framework - com.fasterxml.jackson.core - jackson-annotations - - - com.fasterxml.jackson.core - jackson-databind - - - com.google.guava - guava - - - com.google.protobuf - protobuf-java - - - com.sun.jersey - jersey-client - - - info.picocli - picocli - - - jakarta.annotation - jakarta.annotation-api + org.apache.ozone + ozone-client + - jakarta.xml.bind - jakarta.xml.bind-api + org.apache.ozone + ozone-common - javax.servlet - javax.servlet-api + org.apache.ozone + ozone-interface-client - - org.codehaus.jackson - jackson-core-asl - ${jackson1.version} + org.apache.ozone + ozone-interface-storage - org.codehaus.jackson - jackson-mapper-asl - ${jackson1.version} + org.apache.ozone + rocksdb-checkpoint-differ - org.codehaus.jackson - jackson-jaxrs - ${jackson-jaxr.version} - - - org.codehaus.jackson - jackson-core-asl - - - org.codehaus.jackson - 
jackson-mapper-asl - - + org.apache.ranger + ranger-intg + ${ranger.version} @@ -282,6 +203,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ch.qos.logback logback-classic + + com.amazonaws + aws-java-sdk-bundle + com.google.cloud.bigdataoss gcs-connector @@ -295,55 +220,114 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> jersey-bundle - org.apache.lucene - * + net.minidev + json-smart - org.elasticsearch - * + org.apache.hive + hive-storage-api - org.elasticsearch.client - * + org.apache.kafka + kafka-clients - org.elasticsearch.plugin + org.apache.lucene * - com.amazonaws - aws-java-sdk-bundle + org.apache.solr + solr-solrj - org.apache.hive - hive-storage-api + org.elasticsearch + * - org.apache.kafka - kafka-clients + org.elasticsearch.client + * - org.apache.solr - solr-solrj + org.elasticsearch.plugin + * org.opensearch.client opensearch-rest-client - - net.minidev - json-smart - - org.apache.ranger - ranger-intg - ${ranger.version} + org.apache.ratis + ratis-common - org.apache.ozone - hdds-rocks-native + org.apache.ratis + ratis-grpc + + + org.apache.ratis + ratis-netty + + + org.apache.ratis + ratis-proto + + + org.apache.ratis + ratis-server + + + org.apache.ratis + ratis-server-api + + + org.apache.ratis + ratis-thirdparty-misc + + + + org.aspectj + aspectjrt + ${aspectj.version} + + + + org.aspectj + aspectjweaver + ${aspectj.version} + + + + org.bouncycastle + bcprov-jdk18on + + + + + org.codehaus.jackson + jackson-core-asl + ${jackson1.version} + + + org.codehaus.jackson + jackson-jaxrs + ${jackson-jaxr.version} + + + org.codehaus.jackson + jackson-core-asl + + + org.codehaus.jackson + jackson-mapper-asl + + + + + org.codehaus.jackson + jackson-mapper-asl + ${jackson1.version} org.eclipse.jetty @@ -353,6 +337,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.eclipse.jetty jetty-webapp + + + org.reflections + reflections + org.rocksdb rocksdbjni @@ -363,12 +352,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - commons-codec - commons-codec + org.apache.ozone + hdds-docs + provided - commons-io - commons-io + io.netty + netty-tcnative-boringssl-static + runtime @@ -377,6 +368,11 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> compile-testing test + + org.apache.ozone + hdds-annotation-processing + test + org.apache.ozone hdds-common @@ -391,8 +387,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone hdds-server-scm - test test-jar + test org.apache.ozone @@ -401,6 +397,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> + + + ${basedir}/../../hdds/common/src/main/resources + + + ${basedir}/src/test/resources + + org.apache.maven.plugins @@ -429,7 +433,8 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> maven-enforcer-plugin - ban-annotations + ban-annotations + @@ -450,17 +455,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> copy-common-html - prepare-package unpack + prepare-package org.apache.ozone hdds-server-framework - ${project.build.outputDirectory} - + ${project.build.outputDirectory} webapps/static/**/*.* @@ -503,13 +507,5 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - - - ${basedir}/../../hdds/common/src/main/resources - - - ${basedir}/src/test/resources - - From c006c572c546a3c60b7e87947d2fc9ea4dffd7e4 Mon Sep 17 00:00:00 2001 From: Abhishek Pal <43001336+devabhishekpal@users.noreply.github.com> Date: Sun, 19 Jan 2025 13:34:17 +0530 Subject: [PATCH 101/168] HDDS-12016. 
Fixed duplicate entries when changing path in DU page (#7657) --- .../ozone-recon-web/src/utils/common.tsx | 29 ++ .../v2/components/duMetadata/duMetadata.tsx | 316 +++++++++--------- .../src/v2/pages/overview/overview.tsx | 25 +- 3 files changed, 181 insertions(+), 189 deletions(-) diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/utils/common.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/utils/common.tsx index f641b8797d9..9b1f9e09eaf 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/utils/common.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/utils/common.tsx @@ -18,6 +18,7 @@ import moment from 'moment'; import { notification } from 'antd'; +import { CanceledError } from 'axios'; export const getCapacityPercent = (used: number, total: number) => Math.round((used / total) * 100); @@ -80,3 +81,31 @@ export const nullAwareLocaleCompare = (a: string, b: string) => { return a.localeCompare(b); }; + +export function removeDuplicatesAndMerge(origArr: T[], updateArr: T[], mergeKey: string): T[] { + return Array.from([...origArr, ...updateArr].reduce( + (accumulator, curr) => accumulator.set(curr[mergeKey as keyof T], curr), + new Map + ).values()); +} + +export const checkResponseError = (responses: Awaited>[]) => { + const responseError = responses.filter( + (resp) => resp.status === 'rejected' + ); + + if (responseError.length !== 0) { + responseError.forEach((err) => { + if (err.reason.toString().includes("CanceledError")) { + throw new CanceledError('canceled', "ERR_CANCELED"); + } + else { + const reqMethod = err.reason.config.method; + const reqURL = err.reason.config.url + showDataFetchError( + `Failed to ${reqMethod} URL ${reqURL}\n${err.reason.toString()}` + ); + } + }) + } +} \ No newline at end of file diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duMetadata/duMetadata.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duMetadata/duMetadata.tsx index e46282f1856..5cae2fbc87e 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duMetadata/duMetadata.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duMetadata/duMetadata.tsx @@ -18,11 +18,11 @@ import React, { useRef, useState } from 'react'; import moment from 'moment'; -import { AxiosError } from 'axios'; +import axios, { AxiosError } from 'axios'; import { Table } from 'antd'; -import { AxiosGetHelper, cancelRequests } from '@/utils/axiosRequestHelper'; -import { byteToSize, showDataFetchError } from '@/utils/common'; +import { AxiosGetHelper, cancelRequests, PromiseAllSettledGetHelper } from '@/utils/axiosRequestHelper'; +import { byteToSize, checkResponseError, removeDuplicatesAndMerge, showDataFetchError } from '@/utils/common'; import { Acl } from '@/v2/types/acl.types'; @@ -115,9 +115,9 @@ type MetadataProps = { }; type MetadataState = { - keys: string[]; - values: (string | number | boolean | null)[]; -}; + key: string, + value: string | number | boolean | null +}[]; // ------------- Component -------------- // @@ -125,18 +125,12 @@ const DUMetadata: React.FC = ({ path = '/' }) => { const [loading, setLoading] = useState(false); - const [state, setState] = useState({ - keys: [], - values: [] - }); - const cancelSummarySignal = useRef(); + const [state, setState] = useState([]); const keyMetadataSummarySignal = 
useRef(); - const cancelQuotaSignal = useRef(); + const cancelMetadataSignal = useRef(); const getObjectInfoMapping = React.useCallback((summaryResponse) => { - - const keys: string[] = []; - const values: (string | number | boolean | null)[] = []; + const data: MetadataState = []; /** * We are creating a specific set of keys under Object Info response * which do not require us to modify anything @@ -154,226 +148,216 @@ const DUMetadata: React.FC = ({ // The following regex will match abcDef and produce Abc Def let keyName = key.replace(/([a-z0-9])([A-Z])/g, '$1 $2'); keyName = keyName.charAt(0).toUpperCase() + keyName.slice(1); - keys.push(keyName); - values.push(objectInfo[key as keyof ObjectInfo]); + data.push({ + key: keyName as string, + value: objectInfo[key as keyof ObjectInfo] + }); } }); if (objectInfo?.creationTime !== undefined && objectInfo?.creationTime !== -1) { - keys.push('Creation Time'); - values.push(moment(objectInfo.creationTime).format('ll LTS')); + data.push({ + key: 'Creation Time', + value: moment(objectInfo.creationTime).format('ll LTS') + }); } if (objectInfo?.usedBytes !== undefined && objectInfo?.usedBytes !== -1 && objectInfo!.usedBytes !== null) { - keys.push('Used Bytes'); - values.push(byteToSize(objectInfo.usedBytes, 3)); + data.push({ + key: 'Used Bytes', + value: byteToSize(objectInfo.usedBytes, 3) + }); } if (objectInfo?.dataSize !== undefined && objectInfo?.dataSize !== -1) { - keys.push('Data Size'); - values.push(byteToSize(objectInfo.dataSize, 3)); + data.push({ + key: 'Data Size', + value: byteToSize(objectInfo.dataSize, 3) + }); } if (objectInfo?.modificationTime !== undefined && objectInfo?.modificationTime !== -1) { - keys.push('Modification Time'); - values.push(moment(objectInfo.modificationTime).format('ll LTS')); + data.push({ + key: 'Modification Time', + value: moment(objectInfo.modificationTime).format('ll LTS') + }); } if (objectInfo?.quotaInNamespace !== undefined && objectInfo?.quotaInNamespace !== -1) { - keys.push('Quota In Namespace'); - values.push(byteToSize(objectInfo.quotaInNamespace, 3)); + data.push({ + key: 'Quota In Namespace', + value: byteToSize(objectInfo.quotaInNamespace, 3) + }); } if (summaryResponse.objectInfo?.replicationConfig?.replicationFactor !== undefined) { - keys.push('Replication Factor'); - values.push(summaryResponse.objectInfo.replicationConfig.replicationFactor); + data.push({ + key: 'Replication Factor', + value: summaryResponse.objectInfo.replicationConfig.replicationFactor + }); } if (summaryResponse.objectInfo?.replicationConfig?.replicationType !== undefined) { - keys.push('Replication Type'); - values.push(summaryResponse.objectInfo.replicationConfig.replicationType); + data.push({ + key: 'Replication Type', + value: summaryResponse.objectInfo.replicationConfig.replicationType + }); } if (summaryResponse.objectInfo?.replicationConfig?.requiredNodes !== undefined && summaryResponse.objectInfo?.replicationConfig?.requiredNodes !== -1) { - keys.push('Replication Required Nodes'); - values.push(summaryResponse.objectInfo.replicationConfig.requiredNodes); + data.push({ + key: 'Replication Required Nodes', + value: summaryResponse.objectInfo.replicationConfig.requiredNodes + }); } - return { keys, values } + return data; }, [path]); - function loadMetadataSummary(path: string) { - cancelRequests([ - cancelSummarySignal.current!, - keyMetadataSummarySignal.current! 
- ]); - const keys: string[] = []; - const values: (string | number | boolean | null)[] = []; - - const { request, controller } = AxiosGetHelper( + function loadData(path: string) { + const { requests, controller } = PromiseAllSettledGetHelper([ `/api/v1/namespace/summary?path=${path}`, - cancelSummarySignal.current - ); - cancelSummarySignal.current = controller; - - request.then(response => { - const summaryResponse: SummaryResponse = response.data; - keys.push('Entity Type'); - values.push(summaryResponse.type); - + `/api/v1/namespace/quota?path=${path}` + ], cancelMetadataSignal.current); + cancelMetadataSignal.current = controller; + + requests.then(axios.spread(( + nsSummaryResponse: Awaited>, + quotaApiResponse: Awaited>, + ) => { + checkResponseError([nsSummaryResponse, quotaApiResponse]); + const summaryResponse: SummaryResponse = nsSummaryResponse.value?.data ?? {}; + const quotaResponse = quotaApiResponse.value?.data ?? {}; + let data: MetadataState = []; + let summaryResponsePresent = true; + let quotaResponsePresent = true; + + // Error checks if (summaryResponse.status === 'INITIALIZING') { + summaryResponsePresent = false; showDataFetchError(`The metadata is currently initializing. Please wait a moment and try again later`); - return; } - if (summaryResponse.status === 'PATH_NOT_FOUND') { + if (summaryResponse.status === 'PATH_NOT_FOUND' || quotaResponse.status === 'PATH_NOT_FOUND') { + summaryResponsePresent = false; + quotaResponsePresent = false; showDataFetchError(`Invalid Path: ${path}`); - return; } - // If the entity is a Key then fetch the Key metadata only - if (summaryResponse.type === 'KEY') { - const { request: metadataRequest, controller: metadataNewController } = AxiosGetHelper( - `/api/v1/namespace/du?path=${path}&replica=true`, - keyMetadataSummarySignal.current - ); - keyMetadataSummarySignal.current = metadataNewController; - metadataRequest.then(response => { - keys.push('File Size'); - values.push(byteToSize(response.data.size, 3)); - keys.push('File Size With Replication'); - values.push(byteToSize(response.data.sizeWithReplica, 3)); - keys.push("Creation Time"); - values.push(moment(summaryResponse.objectInfo.creationTime).format('ll LTS')); - keys.push("Modification Time"); - values.push(moment(summaryResponse.objectInfo.modificationTime).format('ll LTS')); - - setState({ - keys: keys, - values: values - }); - }).catch(error => { - showDataFetchError(error.toString()); + if (summaryResponsePresent) { + // Summary Response data section + data.push({ + key: 'Entity Type', + value: summaryResponse.type }); - return; - } - /** - * Will iterate over the keys of the countStats to avoid multiple if blocks - * and check from the map for the respective key name / title to insert - */ - const countStats: CountStats = summaryResponse.countStats ?? 
{}; - const keyToNameMap: Record = { - numVolume: 'Volumes', - numBucket: 'Buckets', - numDir: 'Total Directories', - numKey: 'Total Keys' - } - Object.keys(countStats).forEach((key: string) => { - if (countStats[key as keyof CountStats] !== undefined - && countStats[key as keyof CountStats] !== -1) { - keys.push(keyToNameMap[key]); - values.push(countStats[key as keyof CountStats]); + // If the entity is a Key then fetch the Key metadata only + if (summaryResponse.type === 'KEY') { + const { request: metadataRequest, controller: metadataNewController } = AxiosGetHelper( + `/api/v1/namespace/du?path=${path}&replica=true`, + keyMetadataSummarySignal.current + ); + keyMetadataSummarySignal.current = metadataNewController; + metadataRequest.then(response => { + data.push(...[{ + key: 'File Size', + value: byteToSize(response.data.size, 3) + }, { + key: 'File Size With Replication', + value: byteToSize(response.data.sizeWithReplica, 3) + }, { + key: 'Creation Time', + value: moment(summaryResponse.objectInfo.creationTime).format('ll LTS') + }, { + key: 'Modification Time', + value: moment(summaryResponse.objectInfo.modificationTime).format('ll LTS') + }]) + setState(data); + }).catch(error => { + showDataFetchError(error.toString()); + }); + return; } - }) - - const { - keys: objectInfoKeys, - values: objectInfoValues - } = getObjectInfoMapping(summaryResponse); - - keys.push(...objectInfoKeys); - values.push(...objectInfoValues); - setState({ - keys: keys, - values: values - }); - }).catch(error => { - showDataFetchError((error as AxiosError).toString()); - }); - } - - function loadQuotaSummary(path: string) { - cancelRequests([ - cancelQuotaSignal.current! - ]); - - const { request, controller } = AxiosGetHelper( - `/api/v1/namespace/quota?path=${path}`, - cancelQuotaSignal.current - ); - cancelQuotaSignal.current = controller; - - request.then(response => { - const quotaResponse = response.data; + data = removeDuplicatesAndMerge(data, getObjectInfoMapping(summaryResponse), 'key'); + + /** + * Will iterate over the keys of the countStats to avoid multiple if blocks + * and check from the map for the respective key name / title to insert + */ + const countStats: CountStats = summaryResponse.countStats ?? {}; + const keyToNameMap: Record = { + numVolume: 'Volumes', + numBucket: 'Buckets', + numDir: 'Total Directories', + numKey: 'Total Keys' + } + Object.keys(countStats).forEach((key: string) => { + if (countStats[key as keyof CountStats] !== undefined + && countStats[key as keyof CountStats] !== -1) { + data.push({ + key: keyToNameMap[key], + value: countStats[key as keyof CountStats] + }); + } + }) + } - if (quotaResponse.status === 'INITIALIZING') { - return; + if (quotaResponse.state === 'INITIALIZING') { + quotaResponsePresent = false; + showDataFetchError(`The quota is currently initializing. 
Please wait a moment and try again later`); } + if (quotaResponse.status === 'TYPE_NOT_APPLICABLE') { - return; - } - if (quotaResponse.status === 'PATH_NOT_FOUND') { - showDataFetchError(`Invalid Path: ${path}`); - return; + quotaResponsePresent = false; } - const keys: string[] = []; - const values: (string | number | boolean | null)[] = []; - // Append quota information - // In case the object's quota isn't set - if (quotaResponse.allowed !== undefined && quotaResponse.allowed !== -1) { - keys.push('Quota Allowed'); - values.push(byteToSize(quotaResponse.allowed, 3)); - } + if (quotaResponsePresent) { + // Quota Response section + // In case the object's quota isn't set, we should not populate the values + if (quotaResponse.allowed !== undefined && quotaResponse.allowed !== -1) { + data.push({ + key: 'Quota Allowed', + value: byteToSize(quotaResponse.allowed, 3) + }); + } - if (quotaResponse.used !== undefined && quotaResponse.used !== -1) { - keys.push('Quota Used'); - values.push(byteToSize(quotaResponse.used, 3)); + if (quotaResponse.used !== undefined && quotaResponse.used !== -1) { + data.push({ + key: 'Quota Used', + value: byteToSize(quotaResponse.used, 3) + }) + } } - setState((prevState) => ({ - keys: [...prevState.keys, ...keys], - values: [...prevState.values, ...values] - })); - }).catch(error => { - showDataFetchError(error.toString()); + setState(data); + })).catch(error => { + showDataFetchError((error as AxiosError).toString()); }); } React.useEffect(() => { setLoading(true); - loadMetadataSummary(path); - loadQuotaSummary(path); + loadData(path); setLoading(false); return (() => { cancelRequests([ - cancelSummarySignal.current!, - keyMetadataSummarySignal.current!, - cancelQuotaSignal.current! + cancelMetadataSignal.current!, ]); }) }, [path]); - const content = []; - for (const [i, v] of state.keys.entries()) { - content.push({ - key: v, - value: state.values[i] - }); - } - return ( diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/overview/overview.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/overview/overview.tsx index e14f134a0e2..6014577f90a 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/overview/overview.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/overview/overview.tsx @@ -19,7 +19,7 @@ import React, { useEffect, useRef, useState } from 'react'; import moment from 'moment'; import filesize from 'filesize'; -import axios, { CanceledError } from 'axios'; +import axios from 'axios'; import { Row, Col, Button } from 'antd'; import { CheckCircleFilled, @@ -33,7 +33,7 @@ import OverviewStorageCard from '@/v2/components/overviewCard/overviewStorageCar import OverviewSimpleCard from '@/v2/components/overviewCard/overviewSimpleCard'; import { AutoReloadHelper } from '@/utils/autoReloadHelper'; -import { showDataFetchError } from '@/utils/common'; +import { checkResponseError, showDataFetchError } from '@/utils/common'; import { AxiosGetHelper, cancelRequests, PromiseAllSettledGetHelper } from '@/utils/axiosRequestHelper'; import { ClusterStateResponse, OverviewState, StorageReport } from '@/v2/types/overview.types'; @@ -73,27 +73,6 @@ const getHealthIcon = (value: string): React.ReactElement => { ) } -const checkResponseError = (responses: Awaited>[]) => { - const responseError = responses.filter( - (resp) => resp.status === 'rejected' - ); - - if (responseError.length !== 0) { - 
responseError.forEach((err) => { - if (err.reason.toString().includes("CanceledError")) { - throw new CanceledError('canceled', "ERR_CANCELED"); - } - else { - const reqMethod = err.reason.config.method; - const reqURL = err.reason.config.url - showDataFetchError( - `Failed to ${reqMethod} URL ${reqURL}\n${err.reason.toString()}` - ); - } - }) - } -} - const getSummaryTableValue = ( value: number | string | undefined, colType: 'value' | undefined = undefined From 98b4e55543549d3f47b5ccd95a8b4755e402d0b2 Mon Sep 17 00:00:00 2001 From: Wei-Chiu Chuang Date: Sun, 19 Jan 2025 01:04:54 -0800 Subject: [PATCH 102/168] HDDS-11948. [Docs] DistCp integration (#7588) --- .../docs/content/integration/DistCp.md | 89 +++++++++++++++++++ 1 file changed, 89 insertions(+) create mode 100644 hadoop-hdds/docs/content/integration/DistCp.md diff --git a/hadoop-hdds/docs/content/integration/DistCp.md b/hadoop-hdds/docs/content/integration/DistCp.md new file mode 100644 index 00000000000..a57028da979 --- /dev/null +++ b/hadoop-hdds/docs/content/integration/DistCp.md @@ -0,0 +1,89 @@ +--- +title: Hadoop DistCp +weight: 4 +menu: + main: + parent: "Application Integrations" +--- + + +[Hadoop DistCp](https://hadoop.apache.org/docs/current/hadoop-distcp/DistCp.html) is a command line, MapReduce-based tool for bulk data copying. + +The `hadoop distcp` command can be used to copy data to and from Ozone and any Hadoop compatible file systems, such as HDFS or S3A. + +## Basic usage + +To copy files from a source Ozone cluster directory to a destination Ozone cluster directory, use the following command: + +```bash +hadoop distcp ofs://ozone1/vol1/bucket/dir1 ofs://ozone2/vol2/bucket2/dir2 +``` + +You must define the service IDs for both `ozone1` and `ozone2` clusters in the `ozone-site.xml` configuration file. For example: +```bash + + ozone.om.service.ids + ozone1,ozone2 + +``` + +Next, define their logical mappings. For more details, refer to [OM High Availability]({{< ref "OM-HA.md" >}}). + +## Copy from HDFS to Ozone + +DistCp performs a file checksum check to ensure file integrity. However, since the default checksum type of HDFS (`CRC32C`) differs from that of Ozone (`CRC32`), the file checksum check will cause the DistCp job to fail. + +To prevent job failures, specify checksum options in the DistCp command to force Ozone to use the same checksum type as HDFS. For example: + +```bash +hadoop distcp \ + -Ddfs.checksum.combine.mode=COMPOSITE_CRC \ + -Dozone.client.checksum.type=CRC32C \ + hdfs://ns1/tmp ofs://ozone1/vol1/bucket1/dst +``` + +> Note: The parameter `-Ddfs.checksum.combine.mode=COMPOSITE_CRC` is not required if the HDFS cluster is running Hadoop 3.1.1 or later. + +Alternatively, you can skip the file checksum check entirely: + +```bash +hadoop distcp \ + -skipcrccheck \ + hdfs://ns1/tmp ofs://ozone1/vol1/bucket1/dst +``` + +## Copy from Ozone to HDFS + +When copying files from Ozone to HDFS, similar issues can occur due to differences in checksum types. In this case, you must configure the checksum type for HDFS, as it is the destination system. + +Example: + +```bash +hadoop distcp \ + -Ddfs.checksum.combine.mode=COMPOSITE_CRC \ + -Ddfs.checksum.type=CRC32 \ + ofs://ozone1/vol1/bucket1/src hdfs://ns1/tmp/dst +``` + +By specifying the appropriate checksum configuration or skipping the validation, you can ensure that DistCp jobs complete successfully when transferring data between HDFS and Ozone. 
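+
+As a quick sanity check (a sketch only: `file1` is a placeholder and the paths simply reuse the examples above), the standard FsShell `-checksum` command can print the checksum of a file on either system after the copy; whether the two values match depends on the checksum settings described in the previous sections:
+
+```bash
+ozone fs -checksum ofs://ozone1/vol1/bucket1/src/file1
+hdfs dfs -Ddfs.checksum.combine.mode=COMPOSITE_CRC -checksum hdfs://ns1/tmp/dst/file1
+```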
+ +## Encrypted data + +When data resides in an HDFS encryption zone or Ozone encrypted buckets, the file checksum will not match. This is because the underlying block data differs due to the use of a new EDEK (Encryption Data Encryption Key) at the destination. In such cases, specify the `-skipcrccheck` parameter to avoid job failures. + +For more information about using Hadoop DistCp, consult the [DistCp Guide](https://hadoop.apache.org/docs/current/hadoop-distcp/DistCp.html). From 7239594abf430b38ec5ceea3280926dabd1ffe8d Mon Sep 17 00:00:00 2001 From: Tejaskriya <87555809+Tejaskriya@users.noreply.github.com> Date: Sun, 19 Jan 2025 15:51:45 +0530 Subject: [PATCH 103/168] HDDS-11775. Add tool to create RocksDB checkpoint (#7664) --- .../smoketest/debug/ozone-debug-ldb.robot | 6 ++ .../hadoop/ozone/debug/ldb/Checkpoint.java | 63 +++++++++++++++++++ .../hadoop/ozone/debug/ldb/RDBParser.java | 1 + 3 files changed, 70 insertions(+) create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/Checkpoint.java diff --git a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot index 4380a3cf94d..d5effac7f0b 100644 --- a/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot +++ b/hadoop-ozone/dist/src/main/smoketest/debug/ozone-debug-ldb.robot @@ -144,3 +144,9 @@ Test ozone debug ldb scan with filter option failure # test filter option for lesser/greater operator on non-numeric field ${output} = Execute And Ignore Error ozone debug ldb --db=/data/metadata/om.db scan --cf=keyTable --filter="keyName:lesser:k1" Should contain ${output} only on numeric values + +Test ozone debug ldb checkpoint command + ${output} = Execute ozone debug ldb --db=/data/metadata/om.db checkpoint --output=/data/metadata/checkpoint1.db + Should contain ${output} Created checkpoint at + ${output} = Execute ls /data/metadata/checkpoint1.db + Should contain ${output} .sst diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/Checkpoint.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/Checkpoint.java new file mode 100644 index 00000000000..d12cf479336 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/Checkpoint.java @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.ozone.debug.ldb; + +import org.apache.hadoop.hdds.cli.AbstractSubcommand; +import org.apache.hadoop.hdds.utils.db.managed.ManagedCheckpoint; +import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; +import org.apache.hadoop.ozone.debug.RocksDBUtils; +import org.rocksdb.ColumnFamilyDescriptor; +import org.rocksdb.ColumnFamilyHandle; +import picocli.CommandLine; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.Callable; + +/** + * Create a checkpoint for a rocksDB. + */ +@CommandLine.Command( + name = "checkpoint", + description = "Create checkpoint for specified db" +) +public class Checkpoint extends AbstractSubcommand implements Callable { + @CommandLine.Option(names = {"--output"}, + required = true, + description = "Path to output directory for the checkpoint.") + private String outputPath; + + @CommandLine.ParentCommand + private RDBParser parent; + + @Override + public Void call() throws Exception { + List cfDescList = + RocksDBUtils.getColumnFamilyDescriptors(parent.getDbPath()); + final List cfHandleList = new ArrayList<>(); + + // Create checkpoint + try (ManagedRocksDB db = ManagedRocksDB.openReadOnly( + parent.getDbPath(), cfDescList, cfHandleList)) { + ManagedCheckpoint cp = ManagedCheckpoint.create(db); + cp.get().createCheckpoint(outputPath); + out().println("Created checkpoint at " + outputPath); + } + return null; + } +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/RDBParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/RDBParser.java index f07e8f35fba..ba58b05ac54 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/RDBParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/RDBParser.java @@ -33,6 +33,7 @@ DropTable.class, ListTables.class, ValueSchema.class, + Checkpoint.class, }, description = "Parse rocksdb file content") @MetaInfServices(DebugSubcommand.class) From 9bf597a25109b89a6dc4220dbf67982325411730 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Sun, 19 Jan 2025 20:52:42 +0100 Subject: [PATCH 104/168] HDDS-12107. 
Enable sortpom in ozone-dist and ozone-interface modules (#7721) --- hadoop-hdds/common/pom.xml | 5 - hadoop-hdds/hadoop-dependency-test/pom.xml | 4 - hadoop-hdds/pom.xml | 23 --- hadoop-ozone/client/pom.xml | 3 - hadoop-ozone/common/pom.xml | 3 - hadoop-ozone/csi/pom.xml | 1 - hadoop-ozone/dist/pom.xml | 203 ++++++++++----------- hadoop-ozone/interface-client/pom.xml | 91 +++------ hadoop-ozone/interface-storage/pom.xml | 60 +++--- hadoop-ozone/ozone-manager/pom.xml | 17 +- 10 files changed, 152 insertions(+), 258 deletions(-) diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index c1a2749fde5..35fe32713b1 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -77,7 +77,6 @@ info.picocli picocli - io.dropwizard.metrics metrics-core @@ -118,7 +117,6 @@ jakarta.xml.bind jakarta.xml.bind-api - javax.annotation javax.annotation-api @@ -151,7 +149,6 @@ org.apache.ratis ratis-client - org.apache.ratis ratis-common @@ -178,7 +175,6 @@ org.apache.ratis ratis-proto - org.apache.ratis ratis-server-api @@ -187,7 +183,6 @@ org.apache.ratis ratis-thirdparty-misc - org.bouncycastle bcpkix-jdk18on diff --git a/hadoop-hdds/hadoop-dependency-test/pom.xml b/hadoop-hdds/hadoop-dependency-test/pom.xml index 48bdff714fb..6a2b8aa4e06 100644 --- a/hadoop-hdds/hadoop-dependency-test/pom.xml +++ b/hadoop-hdds/hadoop-dependency-test/pom.xml @@ -63,12 +63,10 @@ - org.assertj assertj-core - org.junit.jupiter junit-jupiter-api @@ -82,7 +80,6 @@ org.junit.jupiter junit-jupiter-params - org.mockito mockito-core @@ -95,7 +92,6 @@ org.mockito mockito-junit-jupiter - org.slf4j slf4j-reload4j diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml index 7e4fbd32db3..a2cd95066a7 100644 --- a/hadoop-hdds/pom.xml +++ b/hadoop-hdds/pom.xml @@ -53,115 +53,96 @@ - org.apache.ozone hdds-annotation-processing ${hdds.version} - org.apache.ozone hdds-client ${hdds.version} - org.apache.ozone hdds-common ${hdds.version} - org.apache.ozone hdds-config ${hdds.version} - org.apache.ozone hdds-container-service ${hdds.version} - org.apache.ozone hdds-docs ${hdds.version} - org.apache.ozone hdds-erasurecode ${hdds.version} - org.apache.ozone hdds-hadoop-dependency-client ${hdds.version} - org.apache.ozone hdds-hadoop-dependency-server ${hdds.version} - org.apache.ozone hdds-interface-admin ${hdds.version} - org.apache.ozone hdds-interface-client ${hdds.version} - org.apache.ozone hdds-interface-server ${hdds.version} - org.apache.ozone hdds-managed-rocksdb ${hdds.version} - org.apache.ozone hdds-rocks-native ${hdds.rocks.native.version} - org.apache.ozone hdds-server-framework ${hdds.version} - org.apache.ozone hdds-server-scm ${hdds.version} - org.apache.ozone hdds-tools ${hdds.version} - org.apache.ozone rocksdb-checkpoint-differ ${hdds.version} - org.apache.ozone hdds-common @@ -169,7 +150,6 @@ test-jar test - org.apache.ozone hdds-container-service @@ -177,14 +157,12 @@ test-jar test - org.apache.ozone hdds-hadoop-dependency-test ${hdds.version} test - org.apache.ozone hdds-server-scm @@ -192,7 +170,6 @@ test-jar test - org.apache.ozone hdds-test-utils diff --git a/hadoop-ozone/client/pom.xml b/hadoop-ozone/client/pom.xml index b935faae90d..d0cb463ad6f 100644 --- a/hadoop-ozone/client/pom.xml +++ b/hadoop-ozone/client/pom.xml @@ -26,7 +26,6 @@ Apache Ozone Client - com.fasterxml.jackson.core jackson-annotations @@ -39,7 +38,6 @@ com.google.guava guava - commons-collections commons-collections @@ -48,7 +46,6 @@ jakarta.annotation jakarta.annotation-api - org.apache.commons commons-lang3 diff --git 
a/hadoop-ozone/common/pom.xml b/hadoop-ozone/common/pom.xml index 0f8b0d9f0b1..1e040341d85 100644 --- a/hadoop-ozone/common/pom.xml +++ b/hadoop-ozone/common/pom.xml @@ -26,7 +26,6 @@ Apache Ozone Common - com.fasterxml.jackson.core jackson-annotations @@ -47,7 +46,6 @@ com.google.protobuf protobuf-java - io.grpc grpc-api @@ -90,7 +88,6 @@ org.apache.commons commons-compress - org.apache.commons commons-lang3 diff --git a/hadoop-ozone/csi/pom.xml b/hadoop-ozone/csi/pom.xml index 84b4dd62969..905ef0f4adb 100644 --- a/hadoop-ozone/csi/pom.xml +++ b/hadoop-ozone/csi/pom.xml @@ -65,7 +65,6 @@ - commons-io commons-io diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml index 0a9079747c9..055169e7c4a 100644 --- a/hadoop-ozone/dist/pom.xml +++ b/hadoop-ozone/dist/pom.xml @@ -12,9 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -22,20 +20,84 @@ 2.0.0-SNAPSHOT ozone-dist - Apache Ozone Distribution - jar 2.0.0-SNAPSHOT + jar + Apache Ozone Distribution - UTF-8 - true - apache/ozone - -rocky + 20241216-1-jdk21 ghcr.io/apache/ozone-testkrb5:20241129-1 - true - true + apache/ozone + -rocky + true + UTF-8 + + true + + + org.apache.ozone + hdds-container-service + + + org.apache.ozone + hdds-docs + + + org.apache.ozone + hdds-rocks-native + + + org.apache.ozone + hdds-server-scm + + + org.apache.ozone + hdds-tools + + + org.apache.ozone + ozone-common + + + org.apache.ozone + ozone-csi + + + org.apache.ozone + ozone-datanode + + + org.apache.ozone + ozone-httpfsgateway + + + org.apache.ozone + ozone-insight + + + org.apache.ozone + ozone-manager + + + org.apache.ozone + ozone-recon + + + org.apache.ozone + ozone-s3-secret-store + + + org.apache.ozone + ozone-s3gateway + + + org.apache.ozone + ozone-tools + + + @@ -51,17 +113,15 @@ dist - prepare-package exec + prepare-package ${shell-executable} ${project.build.directory} - - ${basedir}/dev-support/bin/dist-layout-stitching - + ${basedir}/dev-support/bin/dist-layout-stitching ${project.build.directory} ${hdds.version} @@ -75,10 +135,10 @@ copy-omitted-jars - prepare-package copy-dependencies + prepare-package target/ozone-${ozone.version}/share/ozone/lib runtime @@ -91,10 +151,10 @@ copy-compose-files - compile copy-resources + compile ${basedir}/target/compose @@ -107,10 +167,10 @@ copy-and-filter-dockerfile - compile copy-resources + compile ${project.build.directory} @@ -123,10 +183,10 @@ copy-k8s - compile copy-resources + compile ${basedir}/target/k8s @@ -147,75 +207,12 @@ depcheck - + - - - - org.apache.ozone - hdds-tools - - - org.apache.ozone - hdds-server-scm - - - org.apache.ozone - hdds-container-service - - - org.apache.ozone - ozone-recon - - - org.apache.ozone - ozone-s3gateway - - - org.apache.ozone - ozone-csi - - - org.apache.ozone - ozone-manager - - - org.apache.ozone - ozone-tools - - - org.apache.ozone - ozone-common - - - org.apache.ozone - ozone-datanode - - - org.apache.ozone - hdds-docs - - - org.apache.ozone - ozone-insight - - - org.apache.ozone - ozone-httpfsgateway - - - org.apache.ozone - hdds-rocks-native - - - org.apache.ozone - ozone-s3-secret-store - - build-with-ozonefs @@ -251,14 +248,12 @@ copy-jacoco-files - prepare-package copy + prepare-package - - target/ozone-${ozone.version}/share/coverage - + target/ozone-${ozone.version}/share/coverage org.jacoco @@ -283,8 +278,8 @@ org.apache.ozone hdds-test-utils - - + + @@ -307,10 +302,10 @@ src-dist - package single + package ../.. 
false @@ -339,26 +334,24 @@ io.fabric8 docker-maven-plugin - - - - build - - package - - ${docker.image} - - ${project.build.directory}/ozone-${project.version} - + ${project.build.directory}/ozone-${project.version} + + + + build + + package + + @@ -392,17 +385,15 @@ tar-ozone - package exec + package ${shell-executable} - ${project.build.directory} - + ${project.build.directory} - ${basedir}/dev-support/bin/dist-tar-stitching - + ${basedir}/dev-support/bin/dist-tar-stitching ${hdds.version} ${project.build.directory} diff --git a/hadoop-ozone/interface-client/pom.xml b/hadoop-ozone/interface-client/pom.xml index f7f53bda6a6..cc8a4e2fee2 100644 --- a/hadoop-ozone/interface-client/pom.xml +++ b/hadoop-ozone/interface-client/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -24,14 +21,15 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ozone-interface-client 2.0.0-SNAPSHOT - Apache Ozone Client interface - Apache Ozone Client Interface jar + Apache Ozone Client Interface + Apache Ozone Client interface - true - true - true + + true + + true @@ -43,16 +41,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> com.google.protobuf protobuf-java - - org.apache.ozone - hdds-interface-client - - - - org.apache.hadoop.thirdparty - hadoop-shaded-protobuf_3_25 - - io.grpc grpc-api @@ -85,6 +73,14 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> io.netty netty-handler-proxy + + org.apache.hadoop.thirdparty + hadoop-shaded-protobuf_3_25 + + + org.apache.ozone + hdds-interface-client + @@ -114,16 +110,12 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> test-compile-custom - - com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier} - + com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier} ${basedir}/src/main/proto/ target/generated-sources/protobuf/java false grpc-java - - io.grpc:protoc-gen-grpc-java:${io.grpc.version}:exe:${os.detected.classifier} - + io.grpc:protoc-gen-grpc-java:${io.grpc.version}:exe:${os.detected.classifier} @@ -134,9 +126,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ${basedir}/src/main/proto/ - - com.google.protobuf:protoc:${proto3.hadooprpc.protobuf.version}:exe:${os.detected.classifier} - + com.google.protobuf:protoc:${proto3.hadooprpc.protobuf.version}:exe:${os.detected.classifier} target/generated-sources/protobuf/java/proto3 false @@ -148,44 +138,23 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ${maven-antrun-plugin.version} + + run + generate-sources - - - - - - - - - - - - - - - + + + + + + + + + - - run - diff --git a/hadoop-ozone/interface-storage/pom.xml b/hadoop-ozone/interface-storage/pom.xml index 63ee02a0da1..3f97007520c 100644 --- a/hadoop-ozone/interface-storage/pom.xml +++ b/hadoop-ozone/interface-storage/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -24,53 +21,45 @@ ozone-interface-storage 2.0.0-SNAPSHOT - Apache Ozone Storage Interface - Apache Ozone Storage Interface jar - - true - + Apache Ozone Storage Interface + Apache Ozone Storage Interface - - org.apache.ozone - hdds-common + com.google.guava + guava - org.apache.ozone - hdds-interface-client + com.google.protobuf + protobuf-java org.apache.ozone - ozone-common + hdds-common org.apache.ozone - rocksdb-checkpoint-differ - - - - org.apache.ratis - ratis-common + hdds-interface-client - - com.google.guava - guava + org.apache.ozone + hdds-server-framework - com.google.protobuf - protobuf-java + org.apache.ozone + ozone-common - org.apache.ozone ozone-interface-client - org.apache.ozone - hdds-server-framework + rocksdb-checkpoint-differ + + + org.apache.ratis + ratis-common @@ -82,13 +71,13 @@ org.apache.ozone - hdds-server-scm - test-jar + hdds-hadoop-dependency-test test org.apache.ozone - hdds-hadoop-dependency-test + hdds-server-scm + test-jar test @@ -120,7 +109,8 @@ maven-enforcer-plugin - ban-annotations + ban-annotations + @@ -150,9 +140,7 @@ ${basedir}/src/main/proto/ - - com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier} - + com.google.protobuf:protoc:${proto2.hadooprpc.protobuf.version}:exe:${os.detected.classifier} diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml index 64e0704bf0a..31ae740c7e2 100644 --- a/hadoop-ozone/ozone-manager/pom.xml +++ b/hadoop-ozone/ozone-manager/pom.xml @@ -30,7 +30,6 @@ - com.fasterxml.jackson.core jackson-annotations @@ -51,7 +50,6 @@ com.sun.jersey jersey-client - commons-codec commons-codec @@ -106,7 +104,6 @@ javax.servlet javax.servlet-api - org.apache.commons commons-compress @@ -139,7 +136,6 @@ org.apache.ozone hdds-hadoop-dependency-server - org.apache.ozone @@ -157,17 +153,14 @@ org.apache.ozone hdds-rocks-native - org.apache.ozone hdds-server-framework - org.apache.ozone ozone-client - org.apache.ozone ozone-common @@ -176,7 +169,6 @@ org.apache.ozone ozone-interface-client - org.apache.ozone ozone-interface-storage @@ -190,7 +182,6 @@ ranger-intg ${ranger.version} - org.apache.ranger ranger-plugins-common @@ -285,25 +276,21 @@ org.apache.ratis ratis-thirdparty-misc - org.aspectj aspectjrt ${aspectj.version} - org.aspectj aspectjweaver ${aspectj.version} - org.bouncycastle bcprov-jdk18on - - + org.codehaus.jackson jackson-core-asl @@ -337,7 +324,6 @@ org.eclipse.jetty jetty-webapp - org.reflections reflections @@ -350,7 +336,6 @@ org.slf4j slf4j-api - org.apache.ozone hdds-docs From ce82d123cff53d43121f7648a1575b85e7021c90 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Mon, 20 Jan 2025 05:24:24 +0100 Subject: [PATCH 105/168] HDDS-12093. 
Exclude generated code for import restrictions (#7709) --- pom.xml | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/pom.xml b/pom.xml index 8cbaad0942e..8242711d6eb 100644 --- a/pom.xml +++ b/pom.xml @@ -1456,6 +1456,10 @@ org.rocksdb.RocksDB.* + + ${project.build.directory}/generated-sources/java + ${project.build.directory}/generated-sources/protobuf/java + org.apache.hadoop.hdds.utils.db.managed.* org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer @@ -1480,6 +1484,10 @@ org.apache.hadoop.thirdparty.com.google.common.** org.apache.hadoop.util.Preconditions + + ${project.build.directory}/generated-sources/java + ${project.build.directory}/generated-sources/protobuf/java + true @@ -1487,6 +1495,10 @@ org.junit.jupiter.api.Disabled + + ${project.build.directory}/generated-sources/java + ${project.build.directory}/generated-sources/protobuf/java + true @@ -1498,6 +1510,10 @@ org.apache.hadoop.classification.InterfaceAudience org.apache.hadoop.classification.InterfaceStability + + ${project.build.directory}/generated-sources/java + ${project.build.directory}/generated-sources/protobuf/java + org.apache.hadoop.fs.contract.* org.apache.hadoop.tools.contract.* @@ -1510,6 +1526,10 @@ org.apache.hadoop.hdfs.MiniDFSCluster org.apache.hadoop.hdfs.DFSConfigKeys + + ${project.build.directory}/generated-sources/java + ${project.build.directory}/generated-sources/protobuf/java + true @@ -1522,6 +1542,10 @@ org.junit.jupiter.** org.junit.platform.** + + ${project.build.directory}/generated-sources/java + ${project.build.directory}/generated-sources/protobuf/java + true @@ -1534,6 +1558,10 @@ org.jetbrains.annotations.NotNull org.jetbrains.annotations.Nullable + + ${project.build.directory}/generated-sources/java + ${project.build.directory}/generated-sources/protobuf/java + true @@ -1541,6 +1569,10 @@ org.apache.commons.lang.** + + ${project.build.directory}/generated-sources/java + ${project.build.directory}/generated-sources/protobuf/java + @@ -1562,6 +1594,10 @@ org.apache.hadoop.hdds.scm.metadata.Replicate org.kohsuke.MetaInfServices + + ${project.build.directory}/generated-sources/java + ${project.build.directory}/generated-sources/protobuf/java + From 0d75ed6dded3380717207c613287601d656a037d Mon Sep 17 00:00:00 2001 From: VarshaRavi <30603028+VarshaRaviCV@users.noreply.github.com> Date: Mon, 20 Jan 2025 14:56:07 +0530 Subject: [PATCH 106/168] HDDS-11300. Update Swagger documentation for Recon APIs (#7678) --- .../static/swagger-resources/recon-api.yaml | 1146 ++++++++++++----- 1 file changed, 815 insertions(+), 331 deletions(-) diff --git a/hadoop-hdds/docs/themes/ozonedoc/static/swagger-resources/recon-api.yaml b/hadoop-hdds/docs/themes/ozonedoc/static/swagger-resources/recon-api.yaml index c0272a3d76d..f5209d63c29 100644 --- a/hadoop-hdds/docs/themes/ozonedoc/static/swagger-resources/recon-api.yaml +++ b/hadoop-hdds/docs/themes/ozonedoc/static/swagger-resources/recon-api.yaml @@ -25,6 +25,10 @@ servers: tags: - name: Containers description: APIs to fetch information about the available containers. **Admin Only** + - name: Volumes + description: APIs to fetch information about the available volumes. **Admin Only** + - name: Buckets + description: APIs to fetch information about the available buckets. **Admin Only** - name: Keys description: APIs to fetch information about the available keys. 
**Admin Only** - name: Containers and Keys @@ -62,6 +66,19 @@ paths: application/json: schema: $ref: '#/components/schemas/ContainerMetadata' + /containers/deleted: + get: + tags: + - Containers + summary: Return all DELETED containers in SCM + operationId: getSCMDeletedContainers + responses: + 200: + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/DeletedContainers' /containers/missing: get: tags: @@ -109,7 +126,7 @@ paths: get: tags: - Containers - summary: Get UnhealthyContainerMetadata for all the unhealthy containers + summary: Get UnhealthyContainerMetadata for all the unhealthy containers operationId: getUnhealthyContainers parameters: - name: batchNum @@ -228,7 +245,67 @@ paths: content: application/json: schema: - $ref: '#/components/responses/DeletedMismatchedContainers' + $ref: '#/components/schemas/DeletedMismatchedContainers' + /volumes: + get: + tags: + - Volumes + summary: Returns the set of all volumes present + operationId: getVolumes + parameters: + - name: prevKey + in: query + description: Stores the previous key after which to fetch the data + required: false + schema: + type: string + - name: limit + in: query + description: Stores the limit for the number of results to fetch + required: false + schema: + type: integer + default: 1000 + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Volumes' + /buckets: + get: + tags: + - Buckets + summary: Returns the set of all buckets across all volumes + operationId: getBuckets + parameters: + - name: volume + in: query + description: Stores the name of the volumes whose buckets to fetch + required: false + schema: + type: string + - name: prevKey + in: query + description: Stores the previous key after which to fetch the data + required: false + schema: + type: string + - name: limit + in: query + description: Stores the limit for the number of results to fetch + required: false + schema: + type: integer + default: 1000 + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/Buckets' /keys/open: get: tags: @@ -249,6 +326,11 @@ paths: schema: type: integer default: 1000 + - name: startPrefix + in: query + description: Will return keys matching this prefix + schema: + type: integer - name: includeFso in: query description: Boolean value to determine whether to include FSO keys or not @@ -269,7 +351,21 @@ paths: content: application/json: schema: - $ref: '#/components/responses/OpenKeys' + $ref: '#/components/schemas/OpenKeys' + /keys/open/summary: + get: + tags: + - Keys + summary: Returns the summary of all open keys info + operationId: getOpenKeySummary + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/OpenKeysSummary' + /keys/deletePending: get: tags: @@ -290,13 +386,18 @@ paths: schema: type: integer default: 1000 + - name: startPrefix + in: query + description: Will return keys matching this prefix + schema: + type: string responses: '200': description: Successful operation content: application/json: schema: - $ref: '#/components/responses/DeletePendingKeys' + $ref: '#/components/schemas/DeletePendingKeys' /keys/deletePending/dirs: get: tags: @@ -323,7 +424,36 @@ paths: content: application/json: schema: - $ref: '#/components/responses/DeletePendingDirs' + $ref: '#/components/schemas/DeletePendingDirs' + /keys/deletePending/summary: + get: + 
tags: + - Keys + summary: Returns the summary of all keys pending deletion info + operationId: getDeletedKeySummary + responses: + '200': + description: Successful operation + content: + application/json: + schema: + $ref: '#/components/schemas/DeletePendingSummary' + /keys/deletePending/dirs/summary: + get: + tags: + - Keys + summary: Retrieves the summary of deleted directories. + operationId: getDeletedDirectorySummary + responses: + '200': + description: Successful operation + content: + application/json: + schema: + type: object + properties: + totalDeletedDirectories: + type: integer /containers/{id}/keys: get: tags: @@ -384,7 +514,7 @@ paths: content: application/json: schema: - $ref: '#/components/responses/DeletePendingBlocks' + $ref: '#/components/schemas/DeletePendingBlocks' /namespace/summary: get: tags: @@ -409,7 +539,7 @@ paths: content: application/json: schema: - $ref: '#/components/responses/NamespaceMetadataResponse' + $ref: '#/components/schemas/NamespaceMetadataResponse' /namespace/du: get: tags: @@ -451,7 +581,7 @@ paths: content: application/json: schema: - $ref: '#/components/responses/MetadataDiskUsage' + $ref: '#/components/schemas/MetadataDiskUsage' /namespace/quota: get: tags: @@ -476,7 +606,7 @@ paths: content: application/json: schema: - $ref: '#/components/responses/MetadataQuota' + $ref: '#/components/schemas/MetadataQuota' /namespace/dist: get: tags: @@ -503,7 +633,7 @@ paths: content: application/json: schema: - $ref: '#/components/responses/MetadataSpaceDist' + $ref: '#/components/schemas/MetadataSpaceDist' /clusterState: get: tags: @@ -516,7 +646,7 @@ paths: content: application/json: schema: - $ref: '#/components/responses/ClusterState' + $ref: '#/components/schemas/ClusterState' /datanodes: get: tags: @@ -529,21 +659,68 @@ paths: content: application/json: schema: - $ref: '#/components/responses/DatanodesSummary' - - /datanodes/remove: + $ref: '#/components/schemas/DatanodesSummary' + /datanodes/decommission/info: get: + tags: + - Datanodes + summary: Returns all the datanodes in the decommissioning state + operationId: getDecommissioningDatanodes + responses: + '200': + description: Successful Operation + content: + application/json: + schema: + $ref: '#/components/schemas/DatanodesDecommissionInfo' + /datanodes/decommission/info/datanode: + get: + tags: + - Datanodes + summary: Returns info of a specific datanode for which decommissioning is initiated + operationId: getDecommissionInfoForDatanode + parameters: + - name: uuid + in: query + description: The uuid of the datanode being decommissioned. + required: false + schema: + type: string + - name: ipAddress + in: query + description: The ipAddress of the datanode being decommissioned. + required: false + schema: + type: string + responses: + '200': + description: Successful Operation + content: + application/json: + schema: + $ref: '#/components/schemas/DatanodesDecommissionInfo' + /datanodes/remove: + put: tags: - Datanodes summary: Removes datanodes from Recon's memory and nodes table in Recon DB. 
operationId: removeDatanodes + requestBody: + description: List of datanodes to be removed + required: true + content: + application/json: + schema: + type: array + items: + type: string responses: '200': description: Successful Operation content: application/json: schema: - $ref: '#/components/responses/RemovedDatanodesResponse' + $ref: '#/components/schemas/RemovedDatanodesResponse' /pipelines: get: @@ -560,7 +737,7 @@ paths: content: application/json: schema: - $ref: '#/components/responses/PipelinesSummary' + $ref: '#/components/schemas/PipelinesSummary' /task/status: get: tags: @@ -573,7 +750,7 @@ paths: content: application/json: schema: - $ref: '#/components/responses/TasksStatus' + $ref: '#/components/schemas/TasksStatus' /utilization/fileCount: get: tags: @@ -610,7 +787,7 @@ paths: content: application/json: schema: - $ref: '#/components/responses/FileSizeUtilization' + $ref: '#/components/schemas/FileSizeUtilization' /utilization/containerCount: get: tags: @@ -633,7 +810,7 @@ paths: content: application/json: schema: - $ref: '#/components/responses/ContainerUtilization' + $ref: '#/components/schemas/ContainerUtilization' /metrics/query: get: tags: @@ -646,15 +823,108 @@ paths: description: The query in a Prometheus query format for which to fetch results example: ratis_leader_election_electionCount required: true + schema: + type: string + allowReserved: true responses: '200': description: Successful Operation content: application/json: schema: - $ref: '#/components/responses/MetricsQuery' + $ref: '#/components/schemas/MetricsQuery' components: schemas: + Volumes: + type: object + properties: + totalCount: + type: integer + volumes: + type: array + items: + type: object + properties: + metadata: + type: object + name: + type: string + quotaInBytes: + type: integer + quotaInNamespace: + type: integer + usedNamespace: + type: integer + creationTime: + type: integer + modificationTime: + type: integer + acls: + $ref: "#/components/schemas/ACL" + admin: + type: string + owner: + type: string + volume: + type: string + Buckets: + type: object + properties: + totalCount: + type: integer + buckets: + type: array + items: + type: object + properties: + versioningEnabled: + type: boolean + metadata: + type: object + name: + type: string + quotaInBytes: + type: integer + quotaInNamespace: + type: integer + usedNamespace: + type: integer + creationTime: + type: integer + modificationTime: + type: integer + acls: + $ref: "#/components/schemas/ACL" + volumeName: + type: string + storageType: + type: string + versioning: + type: boolean + usedBytes: + type: integer + encryptionInfo: + type: object + properties: + version: + type: string + suite: + type: string + keyName: + type: string + replicationConfigInfo: + type: object + nullable: true + sourceVolume: + type: string + nullable: true + sourceBucket: + type: string + nullable: true + bucketLayout: + type: string + owner: + type: string ContainerMetadata: type: object properties: @@ -664,6 +934,9 @@ components: totalCount: type: integer example: 3 + prevKey: + type: integer + example: 3019 containers: type: array items: @@ -675,23 +948,61 @@ components: NumberOfKeys: type: integer example: 834 + pipelines: + type: string + nullable: true xml: name: containerMetadata example: - ContainerID: 1 NumberOfKeys: 834 + pipelines: null - ContainerID: 2 NumberOfKeys: 833 + pipelines: null - ContainerID: 3 NumberOfKeys: 833 + pipelines: null xml: name: containerMetadataResponse + DeletedContainers: + type: array + items: + type: object + properties: + 
containerId: + type: integer + pipelineId: + type: object + properties: + id: + type: string + containerState: + type: string + stateEnterTime: + type: integer + lastUsed: + type: integer + replicationConfig: + type: object + properties: + replicationType: + type: string + replicationFactor: + type: string + replicationNodes: + type: integer + replicationFactor: + type: string KeyMetadata: type: object properties: totalCount: type: integer example: 7 + lastKey: + type: string + example: /vol1/buck1/file1 keys: type: array items: @@ -706,6 +1017,9 @@ components: Key: type: string example: key-0-43637 + CompletePath: + type: string + example: /vol1/buck1/dir1/dir2/file1 DataSize: type: integer example: 1000 @@ -719,7 +1033,7 @@ components: properties: 0: type: array - items: + items: type: object properties: containerID: @@ -743,15 +1057,24 @@ components: containerID: type: integer example: 1 + datanodeUuid: + type: string + example: 841be80f-0454-47df-b676 datanodeHost: type: string example: localhost-1 - firstReportTimestamp: + firstSeenTime: type: number example: 1605724047057 - lastReportTimestamp: + lastSeenTime: type: number example: 1605731201301 + lastBcsId: + type: integer + example: 123 + state: + type: string + example: OPEN MissingContainerMetadata: type: object properties: @@ -886,7 +1209,7 @@ components: replicationFactor: ONE requiredNodes: 1 replicationType: RATIS - healthy: true + healthy: true existsAt: OM - containerId: 11 numberOfKeys: 2 @@ -906,7 +1229,6 @@ components: replicationType: RATIS healthy: true existsAt: SCM - responses: DeletedMismatchedContainers: type: object properties: @@ -981,26 +1303,38 @@ components: requiredNodes: 1 replicationType: RATIS healthy: true + OpenKeysSummary: + type: object + properties: + totalUnreplicatedDataSize: + type: integer + totalReplicatedDataSize: + type: integer + totalOpenKeys: + type: integer OpenKeys: type: object + required: ['lastKey', 'replicatedDataSize', 'unreplicatedDataSize', 'status'] properties: lastKey: type: string example: /vol1/fso-bucket/dir1/dir2/file2 - replicatedTotal: + replicatedDataSize: type: integer example: 13824 - unreplicatedTotal: + unreplicatedDataSize: type: integer example: 4608 - entities: + status: + type: string + fso: type: array items: type: object properties: path: type: string - keyState: + key: type: string inStateSince: type: number @@ -1008,63 +1342,200 @@ components: type: integer replicatedSize: type: integer - unreplicatedSize: + replicationInfo: + type: object + properties: + replicationFactor: + type: string + example: THREE + requiredNodes: + type: integer + example: 3 + replicationType: + type: string + example: RATIS + creationTime: type: integer - replicationType: + modificationTime: + type: integer + isKey: + type: boolean + nonFSO: + type: array + items: + type: object + properties: + path: type: string + key: + type: string + inStateSince: + type: number + size: + type: integer + replicatedSize: + type: integer + replicationInfo: + type: object + properties: + replicationFactor: + type: string + example: THREE + requiredNodes: + type: integer + example: 3 + replicationType: + type: string + example: RATIS + creationTime: + type: integer + modificationTime: + type: integer + isKey: + type: boolean + OMKeyInfoList: + type: array + items: + type: object + properties: + metadata: + type: object + objectID: + type: number + updateID: + type: number + parentObjectID: + type: number + volumeName: + type: string + bucketName: + type: string + keyName: + type: string + dataSize: + 
type: number + keyLocationVersions: + type: array + items: + $ref: "#/components/schemas/VersionLocation" + creationTime: + type: number + modificationTime: + type: number + replicationConfig: + type: object + properties: replicationFactor: type: string - example: - - path: /vol1/bucket1/key1 - keyState: Open - inStateSince: 1667564193026 - size: 1024 - replicatedSize: 3072 - unreplicatedSize: 1024 - replicationType: RATIS - replicationFactor: THREE - - path: /vol1/bucket1/key2 - keyState: Open - inStateSince: 1667564193026 - size: 512 - replicatedSize: 1536 - unreplicatedSize: 512 - replicationType: RATIS - replicationFactor: THREE - - path: /vol1/fso-bucket/dir1/file1 - keyState: Open - inStateSince: 1667564193026 - size: 1024 - replicatedSize: 3072 - unreplicatedSize: 1024 - replicationType: RATIS - replicationFactor: THREE - - path: /vol1/fso-bucket/dir1/dir2/file2 - keyState: Open - inStateSince: 1667564193026 - size: 2048 - replicatedSize: 6144 - unreplicatedSize: 2048 - replicationType: RATIS - replicationFactor: THREE + requiredNodes: + type: integer + replicationType: + type: string + fileChecksum: + type: number + nullable: true + fileName: + type: string + ownerName: + type: string + acls: + $ref: "#/components/schemas/ACL" + tags: + type: object + expectedDataGeneration: + type: string + nullable: true + file: + type: boolean + path: + type: string + generation: + type: integer + replicatedSize: + type: number + fileEncryptionInfo: + type: string + nullable: true + objectInfo: + type: string + latestVersionLocations: + $ref: "#/components/schemas/VersionLocation" + hsync: + type: boolean + VersionLocation: + type: object + properties: + version: + type: integer + locationVersionMap: + type: object + properties: + 0: + $ref: "#/components/schemas/LocationList" + multipartKey: + type: boolean + blocksLatestVersionOnly: + $ref: "#/components/schemas/LocationList" + locationListCount: + type: integer + locationLists: + type: array + items: + $ref: "#/components/schemas/LocationList" + locationList: + $ref: "#/components/schemas/LocationList" + LocationList: + type: array + items: + type: object + properties: + blockID: + type: object + properties: + containerBlockID: + type: object + properties: + containerID: + type: integer + localID: + type: integer + blockCommitSequenceID: + type: integer + replicaIndex: + type: integer + nullable: true + containerID: + type: integer + localID: + type: integer + length: + type: integer + offset: + type: integer + token: + type: string + nullable: true + createVersion: + type: integer + pipeline: + type: string + nullable: true + partNumber: + type: integer + underConstruction: + type: boolean + blockCommitSequenceId: + type: integer + containerID: + type: integer + localID: + type: integer DeletePendingKeys: type: object properties: lastKey: type: string example: sampleVol/bucketOne/key_one - keysSummary: - type: object - properties: - totalUnreplicatedDataSize: - type: integer - example: 29291 - totalReplicatedDataSize: - type: integer - example: 87873 - totalDeletedKeys: - type: integer - example: 3 replicatedDataSize: type: number example: 300000000 @@ -1077,224 +1548,83 @@ components: type: object properties: omKeyInfoList: - type: array - items: - type: object - properties: - objectID: - type: number - updateID: - type: number - parentObjectID: - type: number - volumeName: - type: string - bucketName: - type: string - keyName: - type: string - dataSize: - type: number - creationTime: - type: number - modificationTime: - type: number - 
replicationConfig: - type: object - properties: - replicationFactor: - type: string - requiredNodes: - type: integer - replicationType: - type: string - fileChecksum: - type: number - nullable: true - fileName: - type: string - file: - type: boolean - path: - type: string - hsync: - type: boolean - replicatedSize: - type: number - fileEncryptionInfo: - type: string - nullable: true - objectInfo: - type: string - updateIDset: - type: boolean - example: - - omKeyInfoList: - - objectID: -9223372036844470271 - updateID: 40429 - parentObjectID: -9223372036844472575 - volumeName: sampleVol - bucketName: bucketOne - keyName: key_one - dataSize: 20000000 - creationTime: 1687189663661 - modificationTime: 1687189672598 - replicationConfig: - replicationFactor: THREE - requiredNodes: 3 - replicationType: RATIS - fileChecksum: null - fileName: List1File1 - file: false - path: 0/key_one - hsync: false - replicatedSize: 30000000 - fileEncryptionInfo: null - objectInfo: OMKeyInfo{volume='sampleVol', bucket='bucketOne', key='key_one', dataSize='10000000', creationTime='1687189663661', objectID='-9223372036844470271', parentID='-9223372036844472575', replication='RATIS/THREE', fileChecksum='null} - updateIDset: true - - objectID: -9223372036844470271 - updateID: 40429 - parentObjectID: -9223372036844472575 - volumeName: sampleVol - bucketName: bucketOne - keyName: key_one - dataSize: 20000000 - creationTime: 1687189663661 - modificationTime: 1687189672598 - replicationConfig: - replicationFactor: THREE - requiredNodes: 3 - replicationType: RATIS - fileChecksum: null - fileName: List1File1 - file: false - path: 0/key_one - hsync: false - replicatedSize: 30000000 - fileEncryptionInfo: null - objectInfo: OMKeyInfo{volume='sampleVol', bucket='bucketOne', key='key_one', dataSize='10000000', creationTime='1687189663661', objectID='-9223372036844470271', parentID='-9223372036844472575', replication='RATIS/THREE', fileChecksum='null} - updateIDset: true - - omKeyInfoList: - - objectID: -9223372036844470015 - updateID: 40407 - parentObjectID: -9223372036844472575 - volumeName: sampleVol - bucketName: bucketOne - keyName: key_two - dataSize: 10000000 - creationTime: 1687189663664 - modificationTime: 1687189671884 - replicationConfig: - replicationFactor: THREE - requiredNodes: 3 - replicationType: RATIS - fileChecksum: null - fileName: List2File2 - file: false - path: 0/key_two - hsync: false - replicatedSize: 30000000 - fileEncryptionInfo: null - objectInfo: OMKeyInfo{volume='sampleVol', bucket='bucketOne', key='key_two', dataSize='10000000', creationTime='1687189663664', objectID='-9223372036844470015', parentID='-9223372036844472575', replication='RATIS/THREE', fileChecksum='null} - updateIDset: true + $ref: "#/components/schemas/OMKeyInfoList" + totalSize: + type: object + properties: + 63: + type: integer + example: 189 + status: + type: string + example: OK + DeletePendingSummary: + type: object + properties: + totalUnreplicatedDataSize: + type: integer + totalReplicatedDataSize: + type: integer + totalDeletedKeys: + type: integer + ACL: + type: object + properties: + type: + type: string + name: + type: string + aclScope: + type: string + aclList: + type: array + items: + type: string DeletePendingDirs: type: object properties: lastKey: type: string example: vol1/bucket1/bucket1/dir1 - replicatedTotal: + replicatedDataSize: type: integer example: 13824 - unreplicatedTotal: + unreplicatedDataSize: type: integer example: 4608 - deletedKeyInfo: + deletedDirInfo: type: array items: type: object properties: - 
omKeyInfoList: - type: array - items: - type: object - properties: - metadata: - type: object - updateID: - type: number - parentObjectID: - type: number - volumeName: - type: string - bucketName: - type: string - keyName: - type: string - dataSize: - type: number - creationTime: - type: number - modificationTime: - type: number - replicationConfig: - type: object - properties: - replicationFactor: - type: string - requiredNodes: - type: integer - replicationType: - type: string - fileChecksum: - type: number - nullable: true - fileName: - type: string - acls: - type: array - path: - type: string - file: - type: boolean - latestVersionLocations: - type: string - nullable: true - replicatedSize: - type: number - fileEncryptionInfo: - type: string - nullable: true - objectInfo: - type: string - updateIDset: - type: boolean - example: - - omKeyInfoList: - - metadata: {} - objectID: -9223372036844470271 - updateID: 40429 - parentObjectID: -9223372036844472575 - volumeName: vol1 - bucketName: bucket1 - keyName: dir1 - dataSize: 20000000 - creationTime: 1687189663661 - modificationTime: 1687189672598 - replicationConfig: - replicationFactor: ONE - requiredNodes: 1 - replicationType: STANDALONE - fileChecksum: null - fileName: dir1 - acls: [] - path: 0/key_one - file: false - latestVersionLocations: null - replicatedSize: 30000000 - fileEncryptionInfo: null - objectInfo: OMKeyInfo{volume='vol1', bucket='bucket1', key='key_one', dataSize='10000000', creationTime='1687189663661', objectID='-9223372036844470271', parentID='-9223372036844472575', replication='STANDALONE/ONE', fileChecksum='null} - updateIDset: false + path: + type: string + key: + type: string + inStateSince: + type: number + size: + type: integer + replicatedSize: + type: integer + replicationInfo: + type: object + properties: + replicationFactor: + type: string + example: THREE + requiredNodes: + type: integer + example: 3 + replicationType: + type: string + example: RATIS + creationTime: + type: integer + modificationTime: + type: integer + isKey: + type: boolean status: type: string example: OK @@ -1311,6 +1641,8 @@ components: example: 100 localIDList: type: array + items: + type: integer example: - 1 - 2 @@ -1335,7 +1667,7 @@ components: type: number example: -1 numBucket: - type: -1 + type: integer example: 100 numDir: type: number @@ -1366,6 +1698,8 @@ components: items: type: object properties: + key: + type: boolean path: type: string size: @@ -1375,19 +1709,23 @@ components: isKey: type: boolean example: - - path: /vol1/bucket1/dir1-1 + - key: false + path: /vol1/bucket1/dir1-1 size: 30000 sizeWithReplica: 90000 isKey: false - - path: /vol1/bucket1/dir1-2 + - key: false + path: /vol1/bucket1/dir1-2 size: 30000 sizeWithReplica: 90000 isKey": false - - path: /vol1/bucket1/dir1-3 + - key: false + path: /vol1/bucket1/dir1-3 size: 30000 sizeWithReplica: 90000 isKey": false - - path: /vol1/bucket1/key1-1 + - key: true + path: /vol1/bucket1/key1-1 size: 30000 sizeWithReplica: 90000 isKey": true @@ -1414,6 +1752,8 @@ components: example: OK dist: type: array + items: + type: integer example: - 0 - 0 @@ -1424,9 +1764,38 @@ components: - 0 - 100 - 40 + StorageReport: + type: object + properties: + capacity: + type: number + example: 270429917184 + used: + type: number + example: 358805504 + remaining: + type: number + example: 270071111680 + committed: + type: number + example: 27007111 ClusterState: type: object properties: + deletedDirs: + type: integer + missingContainers: + type: integer + openContainers: + type: integer + 
deletedContainers: + type: integer + keysPendingDeletion: + type: integer + scmServiceId: + type: string + omServiceId: + type: string pipelines: type: integer example: 5 @@ -1437,20 +1806,7 @@ components: type: integer example: 4 storageReport: - type: object - properties: - capacity: - type: number - example: 1081719668736 - used: - type: number - example: 1309212672 - remaining: - type: number - example: 1080410456064 - committed: - type: number - example: 1080410456 + $ref: "#/components/schemas/StorageReport" containers: type: integer example: 26 @@ -1474,6 +1830,20 @@ components: items: type: object properties: + buildDate: + type: string + layoutVersion: + type: integer + networkLocation: + type: string + opState: + type: string + revision: + type: string + setupTime: + type: integer + version: + type: string uuid: type: string example: f8f8cb45-3ab2-4123 @@ -1487,20 +1857,7 @@ components: type: number example: 1605738400544 storageReport: - type: object - properties: - capacity: - type: number - example: 270429917184 - used: - type: number - example: 358805504 - remaining: - type: number - example: 270071111680 - committed: - type: number - example: 27007111 + $ref: "#/components/schemas/StorageReport" pipelines: type: array items: @@ -1531,6 +1888,133 @@ components: example: 1 RemovedDatanodesResponse: type: object + properties: + datanodesResponseMap: + type: object + properties: + removedDatanodes: + type: object + properties: + totalCount: + type: integer + datanodes: + type: array + items: + type: object + properties: + uuid: + type: string + hostname: + type: string + state: + type: string + pipelines: + type: string + nullable: true + DatanodesDecommissionInfo: + type: object + properties: + DatanodesDecommissionInfo: + type: array + items: + type: object + properties: + containers: + type: object + metrics: + type: object + properties: + decommissionStartTime: + type: string + numOfUnclosedContainers: + type: integer + numOfUnclosedPipelines: + type: integer + numOfUnderReplicatedContainers: + type: integer + nullable: true + datanodeDetails: + $ref: "#/components/schemas/DatanodeDetails" + ByteString: + type: object + properties: + string: + type: string + bytes: + type: object + properties: + validUtf8: + type: boolean + empty: + type: boolean + DatanodeDetails: + type: object + properties: + level: + type: integer + parent: + type: string + nullable: true + cost: + type: integer + uuid: + type: string + uuidString: + type: string + ipAddress: + type: string + hostName: + type: string + ports: + type: array + items: + type: object + properties: + name: + type: string + value: + type: integer + certSerialId: + type: integer + version: + type: string + nullable: true + setupTime: + type: string + revision: + type: string + nullable: true + buildDate: + type: string + nullable: true + persistedOpState: + type: string + persistedOpStateExpiryEpochSec: + type: integer + initialVersion: + type: integer + currentVersion: + type: integer + decommissioned: + type: boolean + maintenance: + type: boolean + ipAddressAsByteString: + $ref: '#/components/schemas/ByteString' + hostNameAsByteString: + $ref: '#/components/schemas/ByteString' + networkName: + type: string + networkLocation: + type: string + networkFullPath: + type: string + numOfLeaves: + type: integer + networkNameAsByteString: + $ref: '#/components/schemas/ByteString' + networkLocationAsByteString: + $ref: '#/components/schemas/ByteString' PipelinesSummary: type: object properties: @@ -1553,10 +2037,8 @@ components: 
example: localhost-1 datanodes: type: array - example: - - localhost-1 - - localhost-2 - - localhost-3 + items: + $ref: '#/components/schemas/DatanodeDetails' lastLeaderElection: type: integer example: 0 @@ -1580,7 +2062,7 @@ components: items: type: object properties: - taskName: + taskName: type: string lastUpdatedTimestamp: type: number @@ -1607,21 +2089,21 @@ components: count: type: integer example: - - volume: vol-2-04168, - bucket: bucket-0-11685, - fileSize: 1024, + - volume: vol-2-04168 + bucket: bucket-0-11685 + fileSize: 1024 count: 1 - - volume: vol-2-04168, - bucket: bucket-1-41795, - fileSize: 1024, + - volume: vol-2-04168 + bucket: bucket-1-41795 + fileSize: 1024 count: 1 - - volume: vol-2-04168, - bucket: bucket-2-93377, - fileSize: 1024, + - volume: vol-2-04168 + bucket: bucket-2-93377 + fileSize: 1024 count: 1 - - volume: vol-2-04168, - bucket: bucket-3-50336, - fileSize: 1024, + - volume: vol-2-04168 + bucket: bucket-3-50336 + fileSize: 1024 count: 2 ContainerUtilization: type: array @@ -1673,7 +2155,9 @@ components: type: string example: ozone value: - type: array + oneOf: + - type: string + - type: number example: - 1599159384.455 - "5" From 7d50d77223215617c9357d3140501fe7326017dc Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Mon, 20 Jan 2025 11:37:25 +0100 Subject: [PATCH 107/168] HDDS-12086. Allow --db option at leaf subcommand in debug tools (#7710) --- .../test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java | 4 ++-- .../java/org/apache/hadoop/ozone/debug/ldb/RDBParser.java | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java index aac55367adc..b3ab92d87aa 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/debug/TestLDBCli.java @@ -293,8 +293,8 @@ void testLDBScan( // Prepare scan args List completeScanArgs = new ArrayList<>(); completeScanArgs.addAll(Arrays.asList( - "--db", dbStore.getDbLocation().getAbsolutePath(), "scan", + "--db", dbStore.getDbLocation().getAbsolutePath(), "--column-family", tableName)); completeScanArgs.addAll(scanArgs); @@ -353,9 +353,9 @@ void testScanWithRecordsPerFile() throws IOException { // Prepare scan args int maxRecordsPerFile = 2; List completeScanArgs1 = new ArrayList<>(Arrays.asList( - "--db", dbStore.getDbLocation().getAbsolutePath(), "scan", "--column-family", KEY_TABLE, "--out", scanDir1 + File.separator + "keytable", + "--db", dbStore.getDbLocation().getAbsolutePath(), "--max-records-per-file", String.valueOf(maxRecordsPerFile))); File tmpDir1 = new File(scanDir1); tmpDir1.deleteOnExit(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/RDBParser.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/RDBParser.java index ba58b05ac54..39cb67d1c01 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/RDBParser.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ldb/RDBParser.java @@ -41,6 +41,7 @@ public class RDBParser implements DebugSubcommand { @CommandLine.Option(names = {"--db"}, required = true, + scope = CommandLine.ScopeType.INHERIT, description = "Database File Path") private String dbPath; From 7b4df04beb59373f0dda509adeb59a33feb488fd Mon Sep 
17 00:00:00 2001 From: Wei-Chiu Chuang Date: Tue, 21 Jan 2025 00:21:41 -0800 Subject: [PATCH 108/168] HDDS-12030. Update SCM-HA.zh.md (#7669) --- hadoop-hdds/docs/content/feature/SCM-HA.md | 24 +++++------------ hadoop-hdds/docs/content/feature/SCM-HA.zh.md | 27 ++++--------------- 2 files changed, 11 insertions(+), 40 deletions(-) diff --git a/hadoop-hdds/docs/content/feature/SCM-HA.md b/hadoop-hdds/docs/content/feature/SCM-HA.md index 333c908275d..2b6ee72b7cf 100644 --- a/hadoop-hdds/docs/content/feature/SCM-HA.md +++ b/hadoop-hdds/docs/content/feature/SCM-HA.md @@ -33,14 +33,6 @@ This document explains the HA setup of Storage Container Manager (SCM), please c ## Configuration -HA mode of Storage Container Manager can be enabled with the following settings in `ozone-site.xml`: - -```XML - - ozone.scm.ratis.enable - true - -``` One Ozone configuration (`ozone-site.xml`) can support multiple SCM HA node set, multiple Ozone clusters. To select between the available SCM nodes a logical name is required for each of the clusters which can be resolved to the IP addresses (and domain names) of the Storage Container Managers. This logical name is called `serviceId` and can be configured in the `ozone-site.xml` @@ -185,9 +177,7 @@ signed certificate for sub-CA from root CA. primordial SCM is not defined. Bring up other SCM's using **--bootstrap**. ### Current SCM HA Security limitation: -1. When primordial SCM is down, new SCM’s cannot be bootstrapped and join the -quorum. -2. Secure cluster upgrade to ratis-enable secure cluster is not supported. +* Unsecure HA cluster upgrade to secure HA cluster is not supported. ## Implementation details @@ -196,7 +186,7 @@ SCM HA uses Apache Ratis to replicate state between the members of the SCM HA qu This replication process is a simpler version of OM HA replication process as it doesn't use any double buffer (as the overall db thourghput of SCM requests are lower) -Datanodes are sending all the reports (Container reports, Pipeline reports...) to *all* the Datanodes parallel. Only the leader node can assign/create new containers, and only the leader node sends command back to the Datanodes. +Datanodes are sending all the reports (Container reports, Pipeline reports...) to *all* SCM nodes in parallel. Only the leader node can assign/create new containers, and only the leader node sends commands back to the Datanodes. ## Verify SCM HA setup @@ -232,10 +222,8 @@ bin/ozone debug ldb --db=/tmp/metadata/scm.db ls bin/ozone debug ldb --db=/tmp/metadata/scm.db scan --column-family=containers ``` -## Migrating from existing SCM - -SCM HA can be turned on on any Ozone cluster. First enable Ratis (`ozone.scm.ratis.enable`) and configure only one node for the Ratis ring (`ozone.scm.nodes.serviceId` should have one element). - -Start the cluster and test if it works well. +## Migrating from Non-HA to HA SCM -If everything is fine, you can extend the cluster configuration with multiple nodes, restart SCM node, and initialize the additional nodes with `scm --bootstrap` command. +Add additional SCM nodes and extend the cluster configuration to reflect the newly added nodes. +Bootstrap the newly added SCM nodes with `scm --bootstrap` command and start the SCM service. +Note: Make sure that the `ozone.scm.primordial.node.id` property is pointed to the existing SCM before you run the `bootstrap` command on the newly added SCM nodes. 
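+
+A possible command sequence on each newly added SCM host (a sketch, assuming the standard `ozone` launcher scripts and that the extended cluster configuration, including `ozone.scm.primordial.node.id`, is already deployed):
+
+```bash
+# Bootstrap the new SCM against the existing (primordial) SCM
+ozone scm --bootstrap
+# Then start the SCM service on this host
+ozone --daemon start scm
+```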
diff --git a/hadoop-hdds/docs/content/feature/SCM-HA.zh.md b/hadoop-hdds/docs/content/feature/SCM-HA.zh.md index a5382735b7a..66d2b885fbe 100644 --- a/hadoop-hdds/docs/content/feature/SCM-HA.zh.md +++ b/hadoop-hdds/docs/content/feature/SCM-HA.zh.md @@ -33,20 +33,6 @@ Ozone Manager 和 Storage Container Manager 都支持 HA。在这种模式下, ## 配置 -> ⚠️ **注意** ⚠️ -> -> SCM HA 目前仅支持新初始化的集群。 -> SCM HA 必须在 Ozone 服务首次启动前开启。 -> 当某个 SCM 以非 HA 的模式启动后,不支持将其改为 HA 模式。 - -Storage Container Manager 的 HA 模式可以在 `ozone-site.xml` 中进行以下设置: - -```XML - - ozone.scm.ratis.enable - true - -``` 一个 Ozone 配置(`ozone-site.xml`)可以支持多个SCM HA节点集,多个 Ozone 集群。要在可用的 SCM 节点之间进行选择,每个集群都需要一个逻辑名称,可以将其解析为 Storage Container Manage 的 IP 地址(和域名)。 这个逻辑名称称为 `serviceId`,可以在 `ozone-site.xml` 中配置。 @@ -171,8 +157,7 @@ bin/ozone scm --bootstrap ### 目前 SCM HA 安全的限制 -1. 当原始 SCM 失效时, 新的 SCM 不能被引导并添加到 HA 节点中。 -2. 尚未支持从非 HA 安全集群升级到 HA 安全集群。 +* 尚未支持从非 HA 安全集群升级到 HA 安全集群。 ## 实现细节 @@ -216,10 +201,8 @@ bin/ozone debug ldb --db=/tmp/metadata/scm.db ls bin/ozone debug ldb --db=/tmp/metadata/scm.db scan --column-family=containers ``` -## 从现有的SCM迁移 - -可以在任何 Ozone 集群上打开 SCM HA。 首先启用 Ratis(`ozone.scm.ratis.enable`)并为 Ratis ring 配置一个节点(`ozone.scm.nodes.serviceId` 应该有一个元素)。 - -启动集群并测试它是否正常工作。 +## 从非HA SCM迁移到SCM HA -如果一切正常,您可以用多个节点扩展集群配置,重新启动 SCM 节点,并使用 `scm --bootstrap` 命令初始化其他节点。 +添加额外的 SCM 节点,并扩展集群配置以包含新添加的节点。 +使用 `scm --bootstrap` 命令为新添加的 SCM 节点引导启动,然后启动 SCM 服务。 +注意:在新添加的 SCM 节点上运行 bootstrap 命令之前,请确保 `ozone.scm.primordial.node.id` 属性指向现有的 SCM。 From fefaf9beeaa2354e7f4baf9f4fed535df849d45f Mon Sep 17 00:00:00 2001 From: Galsza <109229906+Galsza@users.noreply.github.com> Date: Tue, 21 Jan 2025 09:23:17 +0100 Subject: [PATCH 109/168] HDDS-11070. Separate KeyCodec from reading and storing keys to disk (#6871) --- .../hadoop/hdds/security/SecurityConfig.java | 33 +- .../hdds/security/SecurityConstants.java | 49 ++ .../hdds/security/x509/keys/KeyCodec.java | 128 ++++++ .../hdds/security/x509/keys/KeyStorage.java | 249 ++++++++++ .../x509/keys/TestHDDSKeyGenerator.java | 0 .../hdds/security/x509/keys/TestKeyCodec.java | 78 ++++ .../security/x509/keys/TestKeyStorage.java | 344 ++++++++++++++ .../hdds/security/x509/keys/package-info.java | 0 .../ozone/TestHddsSecureDatanodeInit.java | 22 +- .../hadoop/hdds/security/OzoneSecretKey.java | 23 +- .../authority/DefaultCAServer.java | 110 ++--- .../client/DefaultCertificateClient.java | 50 ++- .../hdds/security/x509/keys/KeyCodec.java | 425 ------------------ .../hdds/security/x509/keys/SecurityUtil.java | 90 ---- .../hdds/security/x509/keys/package-info.java | 23 - .../authority/TestDefaultCAServer.java | 57 ++- .../client/TestDefaultCertificateClient.java | 21 +- .../client/TestDnCertificateClientInit.java | 10 +- .../hdds/security/x509/keys/TestKeyCodec.java | 235 ---------- .../scm/security/RootCARotationManager.java | 9 +- .../hadoop/ozone/TestDelegationToken.java | 6 +- .../hadoop/ozone/TestSecureOzoneCluster.java | 18 +- .../ozone/om/TestSecureOzoneManager.java | 6 +- .../security/TestOmCertificateClientInit.java | 10 +- 24 files changed, 1011 insertions(+), 985 deletions(-) create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/SecurityConstants.java create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/KeyCodec.java create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/KeyStorage.java rename hadoop-hdds/{framework => common}/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java (100%) 
create mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestKeyCodec.java create mode 100644 hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestKeyStorage.java rename hadoop-hdds/{framework => common}/src/test/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java (100%) delete mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/KeyCodec.java delete mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java delete mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java delete mode 100644 hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestKeyCodec.java diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/SecurityConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/SecurityConfig.java index b0bd68d0313..d463d2a2642 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/SecurityConfig.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/SecurityConfig.java @@ -19,8 +19,10 @@ package org.apache.hadoop.hdds.security; +import java.io.IOException; import java.nio.file.Path; import java.nio.file.Paths; +import java.security.NoSuchAlgorithmException; import java.security.Provider; import java.security.Security; import java.time.Duration; @@ -32,6 +34,8 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.hdds.HddsConfigKeys; +import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; +import org.apache.hadoop.ozone.OzoneConsts; import org.apache.ratis.thirdparty.io.netty.handler.ssl.SslProvider; import org.bouncycastle.jce.provider.BouncyCastleProvider; import org.slf4j.Logger; @@ -125,8 +129,8 @@ public class SecurityConfig { private final Duration renewalGracePeriod; private final boolean isSecurityEnabled; private final boolean grpcTlsUseTestCert; - private final String externalRootCaPublicKeyPath; - private final String externalRootCaPrivateKeyPath; + private final Path externalRootCaPublicKeyPath; + private final Path externalRootCaPrivateKeyPath; private final String externalRootCaCert; private final Duration caCheckInterval; private final String caRotationTimeOfDay; @@ -253,12 +257,12 @@ public SecurityConfig(ConfigurationSource configuration) { this.externalRootCaCert = configuration.get( HDDS_X509_ROOTCA_CERTIFICATE_FILE, HDDS_X509_ROOTCA_CERTIFICATE_FILE_DEFAULT); - this.externalRootCaPublicKeyPath = configuration.get( + this.externalRootCaPublicKeyPath = Paths.get(configuration.get( HDDS_X509_ROOTCA_PUBLIC_KEY_FILE, - HDDS_X509_ROOTCA_PUBLIC_KEY_FILE_DEFAULT); - this.externalRootCaPrivateKeyPath = configuration.get( + HDDS_X509_ROOTCA_PUBLIC_KEY_FILE_DEFAULT)); + this.externalRootCaPrivateKeyPath = Paths.get(configuration.get( HDDS_X509_ROOTCA_PRIVATE_KEY_FILE, - HDDS_X509_ROOTCA_PRIVATE_KEY_FILE_DEFAULT); + HDDS_X509_ROOTCA_PRIVATE_KEY_FILE_DEFAULT)); this.grpcSSLProvider = SslProvider.valueOf( configuration.get(HDDS_GRPC_TLS_PROVIDER, @@ -487,6 +491,14 @@ public String getKeyAlgo() { return keyAlgo; } + public KeyCodec keyCodec() throws IOException { + try { + return new KeyCodec(keyAlgo); + } catch (NoSuchAlgorithmException e) { + throw new IOException(e); + } + } + /** * Returns the X.509 Signature Algorithm used. This can be changed by setting * "hdds.x509.signature.algorithm" to the new name. 
The default algorithm is @@ -548,11 +560,16 @@ public SslProvider getGrpcSslProvider() { return grpcSSLProvider; } - public String getExternalRootCaPrivateKeyPath() { + public boolean useExternalCACertificate(String component) { + return component.equals(OzoneConsts.SCM_ROOT_CA_COMPONENT_NAME) && + !externalRootCaCert.isEmpty() && externalRootCaPrivateKeyPath.getNameCount() != 0; + } + + public Path getExternalRootCaPrivateKeyPath() { return externalRootCaPrivateKeyPath; } - public String getExternalRootCaPublicKeyPath() { + public Path getExternalRootCaPublicKeyPath() { return externalRootCaPublicKeyPath; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/SecurityConstants.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/SecurityConstants.java new file mode 100644 index 00000000000..077b1cc830f --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/SecurityConstants.java @@ -0,0 +1,49 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.hadoop.hdds.security; + +/** + * Class to define constants that are used in relation to different PKIX, PKCS, and CMS Structures as defined by + * RFC-7468. 
+ */ +public final class SecurityConstants { + private SecurityConstants() { } + + private static final String PEM_PRE_ENCAPSULATION_BOUNDARY_FORMAT = "-----BEGIN %s-----"; + + public static final String PEM_POST_ENCAPSULATION_BOUNDARY_FORMAT = "-----END %s-----"; + + public static final String PEM_ENCAPSULATION_BOUNDARY_LABEL_PUBLIC_KEY = "PUBLIC KEY"; + + public static final String PEM_ENCAPSULATION_BOUNDARY_LABEL_PRIVATE_KEY = "PRIVATE KEY"; + + public static final String PEM_PRE_ENCAPSULATION_BOUNDARY_PUBLIC_KEY = + String.format(PEM_PRE_ENCAPSULATION_BOUNDARY_FORMAT, PEM_ENCAPSULATION_BOUNDARY_LABEL_PUBLIC_KEY); + + public static final String PEM_POST_ENCAPSULATION_BOUNDARY_PUBLIC_KEY = + String.format(PEM_POST_ENCAPSULATION_BOUNDARY_FORMAT, PEM_ENCAPSULATION_BOUNDARY_LABEL_PUBLIC_KEY); + + public static final String PEM_PRE_ENCAPSULATION_BOUNDARY_PRIVATE_KEY = + String.format(PEM_PRE_ENCAPSULATION_BOUNDARY_FORMAT, PEM_ENCAPSULATION_BOUNDARY_LABEL_PRIVATE_KEY); + + public static final String PEM_POST_ENCAPSULATION_BOUNDARY_PRIVATE_KEY = + String.format(PEM_POST_ENCAPSULATION_BOUNDARY_FORMAT, PEM_ENCAPSULATION_BOUNDARY_LABEL_PRIVATE_KEY); + +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/KeyCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/KeyCodec.java new file mode 100644 index 00000000000..39b128a15eb --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/KeyCodec.java @@ -0,0 +1,128 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package org.apache.hadoop.hdds.security.x509.keys; + +import org.apache.ratis.util.function.CheckedFunction; +import org.bouncycastle.util.io.pem.PemObject; +import org.bouncycastle.util.io.pem.PemReader; +import org.bouncycastle.util.io.pem.PemWriter; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.OutputStreamWriter; +import java.security.Key; +import java.security.KeyFactory; +import java.security.NoSuchAlgorithmException; +import java.security.PrivateKey; +import java.security.PublicKey; +import java.security.spec.InvalidKeySpecException; +import java.security.spec.PKCS8EncodedKeySpec; +import java.security.spec.X509EncodedKeySpec; + +// We used UTF-8 before, but a PEM file do contain only readable characters that are in the US_ASCII character set, +// and UTF-8 is interoperable with US_ASCII in this case. +// Based on general considerations of RFC-7468 , we stick to the US_ASCII charset for encoding and decoding. 
+// See: (https://datatracker.ietf.org/doc/html/rfc7468#section-2) +import static java.nio.charset.StandardCharsets.US_ASCII; +import static org.apache.hadoop.hdds.security.SecurityConstants.PEM_ENCAPSULATION_BOUNDARY_LABEL_PRIVATE_KEY; +import static org.apache.hadoop.hdds.security.SecurityConstants.PEM_ENCAPSULATION_BOUNDARY_LABEL_PUBLIC_KEY; + +/** + * KeyCodec for encoding and decoding private and public keys. + */ +public class KeyCodec { + private static final int BUFFER_LEN = 8192; + + private final KeyFactory keyFactory; + + /** + * Creates a KeyCodec based on the security configuration. + * + * @param keyAlgorithm the key algorithm to use. + * @throws NoSuchAlgorithmException if the key algorithm specified in the configuration is not available. + * + * @see + * Java Security Standard Algorithm Names + */ + public KeyCodec(String keyAlgorithm) throws NoSuchAlgorithmException { + keyFactory = KeyFactory.getInstance(keyAlgorithm); + } + + /** + * Encodes the given public key to PEM format. + * @param key the key to encode. + * @return the PEM encoded key. + * @throws IOException if the encoding fails. + */ + public byte[] encodePublicKey(PublicKey key) throws IOException { + return encodeKey(PEM_ENCAPSULATION_BOUNDARY_LABEL_PUBLIC_KEY, key); + } + + /** + * Encodes the given private key to PEM format. + * @param key the key to encode. + * @return the PEM encoded key. + * @throws IOException if the encoding fails. + */ + public byte[] encodePrivateKey(PrivateKey key) throws IOException { + return encodeKey(PEM_ENCAPSULATION_BOUNDARY_LABEL_PRIVATE_KEY, key); + } + + /** + * Decodes a {@link PrivateKey} from PEM encoded format. + * @param encodedKey the PEM encoded key as byte[]. + * @return a {@link PrivateKey} instance representing the key in the PEM data. + * @throws IOException if the decoding fails. + */ + public PrivateKey decodePrivateKey(byte[] encodedKey) throws IOException { + return decodeKey(encodedKey, keyFactory::generatePrivate); + } + + /** + * Decodes a {@link PublicKey} from PEM encoded format. + * @param encodedKey the PEM encoded key as byte[]. + * @return a {@link PublicKey} instance representing the key in the PEM data. + * @throws IOException if the decoding fails. 
+ */ + public PublicKey decodePublicKey(byte[] encodedKey) throws IOException { + return decodeKey(encodedKey, ks -> keyFactory.generatePublic(new X509EncodedKeySpec(ks.getEncoded()))); + } + + private byte[] encodeKey(String keyType, Key key) throws IOException { + ByteArrayOutputStream bytes = new ByteArrayOutputStream(BUFFER_LEN); + try (OutputStreamWriter w = new OutputStreamWriter(bytes, US_ASCII); PemWriter pemWriter = new PemWriter(w)) { + pemWriter.writeObject(new PemObject(keyType, key.getEncoded())); + pemWriter.flush(); + return bytes.toByteArray(); + } + } + + private T decodeKey(byte[] encodedKey, CheckedFunction generator) + throws IOException { + try (PemReader pemReader = new PemReader(new InputStreamReader(new ByteArrayInputStream(encodedKey), US_ASCII))) { + PemObject keyObject = pemReader.readPemObject(); + PKCS8EncodedKeySpec pkcs8EncodedKeySpec = new PKCS8EncodedKeySpec(keyObject.getContent()); + return generator.apply(pkcs8EncodedKeySpec); + } catch (InvalidKeySpecException e) { + throw new IOException(e); + } + } +} diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/KeyStorage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/KeyStorage.java new file mode 100644 index 00000000000..a68bb1cbd41 --- /dev/null +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/keys/KeyStorage.java @@ -0,0 +1,249 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ +package org.apache.hadoop.hdds.security.x509.keys; + +import org.apache.hadoop.hdds.security.SecurityConfig; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.io.UnsupportedEncodingException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.attribute.PosixFilePermission; +import java.security.KeyPair; +import java.security.PrivateKey; +import java.security.PublicKey; +import java.util.Set; + +import static java.nio.file.Files.readAllBytes; +import static java.nio.file.attribute.PosixFilePermissions.asFileAttribute; +import static java.nio.file.attribute.PosixFilePermissions.fromString; + +/** + * The KeyStorage is responsible to persist and read an RSA keypair for an Ozone component in PEM format.
+ *
+ * Ozone component in this sense means not just DN, SCM or OM as it originally did; certificate rotation overloaded the + * component into a component-stage pair to control the location during the different stages of the rotation. + *
+ * In the general case, the storage used is defined by the {@link SecurityConfig} of the system. It resolves the + * configuration, and provides the private/public key's basedir based on the name of the component using this object. + * If the path specified is not on a POSIX compliant file system, the operations will fail, as this class ensures that + * only the owner can read/write the directory, and only the owner can read/write the key files with the help of POSIX + * file permissions. + *
+ * There are some special cases: + *
+ * One is when the Root CA certificate and its keys are supplied externally to Ozone, and the + * component using this KeyStorage is {@link org.apache.hadoop.ozone.OzoneConsts#SCM_ROOT_CA_COMPONENT_NAME} in which + * case the {@link SecurityConfig#useExternalCACertificate(String)} returns true. In this environment, this object + * loads the keys from the specified paths in the configuration. + * Note that the configuration may not contain a public key path in this setup, in which case the CAServer code reads + * the public key from the certificate, as the storage would throw a {@link java.io.FileNotFoundException} on an attempt + * to read the public key from an empty path. + *
+ *
+ * Another is during rotation, where, to ensure that keys and certificates are switched atomically, the path of the newly + * generated keys is defined either by changing the component name or by suffixing the whole path that ends with the + * keys directory. (See {@link SecurityConfig#getKeyLocation(String)} and its usage in RootCARotationManager and in the + * DefaultCertificateClient.) + * For the case where the component name is changed, it is straightforward to just use the changed component name; for the + * case where the keys folder is suffixed, this class provides a constructor to define the suffix. + *
+ * + * @see SecurityConfig#getKeyLocation(String) to understand how the location is resolved for a component + * @see SecurityConfig#useExternalCACertificate(String) to understand the decision on using an external key pair + * @see SecurityConfig#getKeyAlgo() + */ +// Also see: +// DefaultCertificateClient on rotation tasks +// RootCARotationManager on the root CA certificate rotation tasks + +public class KeyStorage { + private static final Logger LOG = LoggerFactory.getLogger(KeyStorage.class); + + public static final Set DIR_PERMISSIONS = fromString("rwx------"); + public static final Set FILE_PERMISSIONS = fromString("rw-------"); + + private final Path privateKeyPath; + private final Path publicKeyPath; + private final KeyCodec keyCodec; + private final boolean externalKeysUsed; + + /** + * Creates a KeyStorage object based on the configuration for the defined component, by assuming an empty key path + * suffix. + * + * @param config the SecurityConfiguration of the application + * @param component the component for which the KeyStorage is to be created + * @throws IOException in case the defined paths are unusable, or if the key algorithm in the configuration is + * unsupported. + * @see KeyStorage#KeyStorage(SecurityConfig, String, String) for more details. + */ + public KeyStorage(SecurityConfig config, String component) throws IOException { + this(config, component, config.getKeyLocation(component)); + } + + /** + * Creates a KeyStorage object based on the configuration, the defined component, suffixing the base path with the + * defined suffix.
+ *
+ * The initialization retrieves the base path from the {@link SecurityConfig#getKeyLocation(String)} method, + * within which {@link SecurityConfig#getPrivateKeyFileName()} and {@link SecurityConfig#getPublicKeyFileName()} + * define the names of the files that hold the private and public keys, respectively.
+ * The base path is suffixed with the keyDirSuffix before resolving the paths of the key files in the folder. + * If the base path does not exist, it is created with "rwx------" POSIX permissions; if it exists, the code attempts + * to set its POSIX permissions to "rwx------". Key files are created with "rw-------" POSIX permission bits set. + *
+ * In case {@link SecurityConfig#useExternalCACertificate(String)} returns true, the public and private keys are read + * from {@link SecurityConfig#getExternalRootCaPublicKeyPath()} and + * {@link SecurityConfig#getExternalRootCaPrivateKeyPath()} respectively. + * If {@link SecurityConfig#useExternalCACertificate(String)} is true at the time of constructing this object, the + * store methods of the instance are not usable and will throw an {@link UnsupportedOperationException}. + *
+ * @param config the {@link SecurityConfig} of the system + * @param component the component name to be used + * @param keyDirSuffix the suffix to apply to the keys base path + * @throws IOException in case when base directory can not be created with the desired permissions, or if the key + * algorithm is not available for the {@link java.security.KeyFactory} + */ + public KeyStorage(SecurityConfig config, String component, String keyDirSuffix) throws IOException { + this(config, component, Paths.get(config.getKeyLocation(component).toString() + keyDirSuffix)); + } + + private KeyStorage(SecurityConfig config, String component, Path keyPath) throws IOException { + if (config.useExternalCACertificate(component)) { + privateKeyPath = config.getExternalRootCaPrivateKeyPath(); + if (!Files.isReadable(privateKeyPath)) { + throw new UnsupportedEncodingException("External private key path is not readable: " + privateKeyPath); + } + publicKeyPath = config.getExternalRootCaPublicKeyPath(); + if (!Files.isReadable(publicKeyPath)) { + throw new UnsupportedEncodingException("External public key path is not readable: " + publicKeyPath); + } + externalKeysUsed = true; + } else { + createOrSanitizeDirectory(keyPath); + privateKeyPath = keyPath.resolve(config.getPrivateKeyFileName()); + publicKeyPath = keyPath.resolve(config.getPublicKeyFileName()); + externalKeysUsed = false; + } + + keyCodec = config.keyCodec(); + } + + /** + * Returns the private key stored in the private key file. + * + * @return PrivateKey the key read from the private key file. + * @throws IOException in case the file is unreadable, or decoding the contents from PEM format fails. + */ + public PrivateKey readPrivateKey() throws IOException { + LOG.info("Reading private key from {}.", privateKeyPath); + try { + return keyCodec.decodePrivateKey(readAllBytes(privateKeyPath)); + } catch (IOException e) { + LOG.error("Failed to read the private key.", e); + throw e; + } + } + + /** + * Returns a public key from an encoded file. + * + * @return PublicKey + * @throws IOException - on Error. + */ + public PublicKey readPublicKey() throws IOException { + LOG.info("Reading public key from {}.", publicKeyPath); + try { + return keyCodec.decodePublicKey(readAllBytes(publicKeyPath)); + } catch (IOException e) { + LOG.error("Failed to read the public key.", e); + throw e; + } + } + + public KeyPair readKeyPair() throws IOException { + return new KeyPair(readPublicKey(), readPrivateKey()); + } + + /** + * Stores a given private key using the default config options. + * + * @param key - Key to write to file. + * @throws IOException - On I/O failure. + */ + public void storePrivateKey(PrivateKey key) throws IOException { + LOG.info("Storing private key to {}.", privateKeyPath); + try { + storeKey(privateKeyPath, keyCodec.encodePrivateKey(key)); + } catch (IOException e) { + LOG.error("Failed to persist the private key.", e); + throw e; + } + } + + /** + * Stores a given public key using the default config options. + * + * @param key - Key to write to file. + * @throws IOException - On I/O failure. + */ + public void storePublicKey(PublicKey key) throws IOException { + LOG.info("Storing public key to {}.", publicKeyPath); + try { + storeKey(publicKeyPath, keyCodec.encodePublicKey(key)); + } catch (IOException e) { + LOG.error("Failed to persist the public key.", e); + throw e; + } + } + + /** + * Helper function that actually writes data to the files. + * + * @param keyPair - Key pair to write to file. + * @throws IOException - On I/O failure. 
+ */ + public void storeKeyPair(KeyPair keyPair) throws IOException { + storePublicKey(keyPair.getPublic()); + storePrivateKey(keyPair.getPrivate()); + } + + private void storeKey(Path keyPath, byte[] encodedKey) throws IOException { + if (externalKeysUsed) { + throw new UnsupportedOperationException("Attempt to override external keys."); + } + Files.createFile(keyPath, asFileAttribute(FILE_PERMISSIONS)); + Files.write(keyPath, encodedKey); + } + + private void createOrSanitizeDirectory(Path dir) throws IOException { + if (Files.exists(dir)) { + // Sanity reset of permissions. + Files.setPosixFilePermissions(dir, DIR_PERMISSIONS); + } else { + Files.createDirectories(dir, asFileAttribute(DIR_PERMISSIONS)); + } + } +} diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java similarity index 100% rename from hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java rename to hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestHDDSKeyGenerator.java diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestKeyCodec.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestKeyCodec.java new file mode 100644 index 00000000000..02cb5a840aa --- /dev/null +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestKeyCodec.java @@ -0,0 +1,78 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.hadoop.hdds.security.x509.keys; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import org.apache.hadoop.hdds.security.SecurityConstants; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.nio.charset.StandardCharsets; +import java.security.KeyPair; +import java.security.KeyPairGenerator; +import java.security.NoSuchAlgorithmException; + +/** + * Test class for HDDS pem writer. 
+ */ +public class TestKeyCodec { + + @Test + public void unkownEncodingThrows() { + assertThrows(NoSuchAlgorithmException.class, () -> new KeyCodec("Unknown")); + assertThrows(NoSuchAlgorithmException.class, () -> new KeyCodec("")); + assertThrows(NullPointerException.class, () -> new KeyCodec(null)); + } + + @ParameterizedTest + @ValueSource(strings = {"RSA", "DSA", "RSASSA-PSS", "EC", "XDH", "X25519", "X448", "DiffieHellman"}) + public void testEncodeDecodePublicKey(String algorithm) throws Exception { + KeyPairGenerator generator = KeyPairGenerator.getInstance(algorithm); + KeyPair keys = generator.generateKeyPair(); + + KeyCodec codec = new KeyCodec(algorithm); + byte[] encodedKey = codec.encodePublicKey(keys.getPublic()); + assertTrue(encodedKey.length > 0); + String pemStr = new String(encodedKey, StandardCharsets.UTF_8); + assertThat(pemStr.trim()).startsWith(SecurityConstants.PEM_PRE_ENCAPSULATION_BOUNDARY_PUBLIC_KEY); + assertThat(pemStr.trim()).endsWith(SecurityConstants.PEM_POST_ENCAPSULATION_BOUNDARY_PUBLIC_KEY); + assertEquals(keys.getPublic(), codec.decodePublicKey(encodedKey)); + } + + @ParameterizedTest + @ValueSource(strings = {"RSA", "DSA", "RSASSA-PSS", "EC", "XDH", "X25519", "X448", "DiffieHellman"}) + public void testEncodeDecodePrivateKey(String algorithm) throws Exception { + KeyPairGenerator generator = KeyPairGenerator.getInstance(algorithm); + KeyPair keys = generator.generateKeyPair(); + + KeyCodec codec = new KeyCodec(algorithm); + byte[] encodedKey = codec.encodePrivateKey(keys.getPrivate()); + assertTrue(encodedKey.length > 0); + String pemStr = new String(encodedKey, StandardCharsets.UTF_8); + assertThat(pemStr.trim()).startsWith(SecurityConstants.PEM_PRE_ENCAPSULATION_BOUNDARY_PRIVATE_KEY); + assertThat(pemStr.trim()).endsWith(SecurityConstants.PEM_POST_ENCAPSULATION_BOUNDARY_PRIVATE_KEY); + assertEquals(keys.getPrivate(), codec.decodePrivateKey(encodedKey)); + } +} diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestKeyStorage.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestKeyStorage.java new file mode 100644 index 00000000000..be055df648c --- /dev/null +++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestKeyStorage.java @@ -0,0 +1,344 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package org.apache.hadoop.hdds.security.x509.keys; + +import org.apache.hadoop.hdds.security.SecurityConfig; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.DisplayName; +import org.junit.jupiter.api.Nested; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.io.TempDir; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.mockito.junit.jupiter.MockitoSettings; +import org.mockito.quality.Strictness; + +import java.io.IOException; +import java.nio.file.FileAlreadyExistsException; +import java.nio.file.FileSystem; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.nio.file.spi.FileSystemProvider; +import java.security.KeyPair; +import java.security.KeyPairGenerator; +import java.security.PrivateKey; +import java.security.PublicKey; +import java.util.Iterator; + +import static java.nio.file.Files.getPosixFilePermissions; +import static org.apache.hadoop.hdds.security.x509.keys.KeyStorage.DIR_PERMISSIONS; +import static org.apache.hadoop.hdds.security.x509.keys.KeyStorage.FILE_PERMISSIONS; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +/** + * JUnit tests to run against the {@link KeyStorage} implementation. 
+ */ +@DisplayName("Testing KeyStorage implementation") +@ExtendWith(MockitoExtension.class) +public class TestKeyStorage { + + @TempDir + private Path baseDir; + + @Mock + private SecurityConfig config; + + private static KeyPair keys; + private static final String KEY_ALGO = "RSA"; + public static final String COMPONENT = "component"; + + @BeforeAll + public static void setupAKeyPair() throws Exception { + KeyPairGenerator generator = KeyPairGenerator.getInstance(KEY_ALGO); + keys = generator.generateKeyPair(); + } + + @BeforeEach + public void setBaseConfigValues() throws Exception { + when(config.getKeyLocation(anyString())).thenReturn(baseDir); + when(config.keyCodec()).thenReturn(new KeyCodec(KEY_ALGO)); + } + + @Nested + @DisplayName("with internal CA configured: ") + class InternalCA { + + private static final String PRIVATE_KEY_FILENAME = "private-key.pem"; + private static final String PUBLIC_KEY_FILENAME = "public-key.pem"; + public static final String ERROR_MSG = "Fail."; + + @BeforeEach + public void setConfigMockForInternalCA() { + when(config.useExternalCACertificate(anyString())).thenReturn(false); + when(config.getPrivateKeyFileName()).thenReturn(PRIVATE_KEY_FILENAME); + when(config.getPublicKeyFileName()).thenReturn(PUBLIC_KEY_FILENAME); + } + + @Test + @DisplayName("store and read a key pair.") + public void testStoreAndReadKeyPair() throws Exception { + KeyStorage storage = new KeyStorage(config, COMPONENT); + storeAndAssertDataWith(storage, baseDir); + } + + @Test + @DisplayName("store and read a key pair into a suffixed storage.") + public void testStoreAndReadKeyPairWithSuffixedStorage() throws Exception { + String pathSuffix = "keys"; + KeyStorage storage = new KeyStorage(config, COMPONENT, pathSuffix); + Path expectedLocation = Paths.get(baseDir + pathSuffix); + storeAndAssertDataWith(storage, expectedLocation); + } + + @Test + @DisplayName("attempt to store a key, fails during encoding and throws an IOException.") + public void testStoreKeyFailToEncode() throws Exception { + KeyCodec codec = mock(KeyCodec.class); + when(codec.encodePrivateKey(any(PrivateKey.class))).thenThrow(new IOException(PRIVATE_KEY_FILENAME)); + when(codec.encodePublicKey(any(PublicKey.class))).thenThrow(new IOException(PUBLIC_KEY_FILENAME)); + + when(config.keyCodec()).thenReturn(codec); + KeyStorage storage = new KeyStorage(config, COMPONENT); + + IOException e = assertThrows(IOException.class, () -> storage.storePrivateKey(keys.getPrivate())); + assertThat(e.getMessage()).isEqualTo(PRIVATE_KEY_FILENAME); + e = assertThrows(IOException.class, () -> storage.storePublicKey(keys.getPublic())); + assertThat(e.getMessage()).isEqualTo(PUBLIC_KEY_FILENAME); + } + + @Test + @DisplayName("attempt to store a key, fails during I/O operations and throws an IOException.") + public void testStoreKeyFailToWrite() throws Exception { + FileSystemProvider fsp = spy(FileSystemProvider.class); + // this is needed for the file create to throw an exception + when(fsp.newByteChannel(any(), any(), any())).thenThrow(new IOException(ERROR_MSG)); + // this is needed to avoid that the storage implementation sees the path as existing. + doThrow(IOException.class).when(fsp).checkAccess(any(Path.class)); + // this is to avoid creating any directories for this test. 
+ doNothing().when(fsp).createDirectory(any(), any()); + + FileSystem fs = mock(FileSystem.class); + when(fs.provider()).thenReturn(fsp); + + Path p = mock(Path.class); + when(p.getFileSystem()).thenReturn(fs); + when(p.resolve(anyString())).thenReturn(p); + + when(config.getKeyLocation(anyString())).thenReturn(p); + + KeyStorage storage = new KeyStorage(config, COMPONENT); + + IOException e = assertThrows(IOException.class, () -> storage.storePrivateKey(keys.getPrivate())); + assertThat(e.getMessage()).isEqualTo(ERROR_MSG); + e = assertThrows(IOException.class, () -> storage.storePublicKey(keys.getPublic())); + assertThat(e.getMessage()).isEqualTo(ERROR_MSG); + } + + @Test + @DisplayName("attempt to read a key, fails during decoding and throws an IOException.") + public void testReadKeyFailToDecode() throws Exception { + KeyCodec codec = spy(new KeyCodec(KEY_ALGO)); + doThrow(new IOException(PRIVATE_KEY_FILENAME)).when(codec).decodePrivateKey(any(byte[].class)); + doThrow(new IOException(PUBLIC_KEY_FILENAME)).when(codec).decodePublicKey(any(byte[].class)); + + when(config.keyCodec()).thenReturn(codec); + KeyStorage storage = new KeyStorage(config, COMPONENT); + storage.storeKeyPair(keys); + + IOException e = assertThrows(IOException.class, storage::readPrivateKey); + assertThat(e.getMessage()).isEqualTo(PRIVATE_KEY_FILENAME); + e = assertThrows(IOException.class, storage::readPublicKey); + assertThat(e.getMessage()).isEqualTo(PUBLIC_KEY_FILENAME); + } + + @Test + @DisplayName("attempt to read a key, fails during I/O operations and throws an IOException.") + public void testReadKeyFailToWrite() throws Exception { + FileSystemProvider fsp = spy(FileSystemProvider.class); + // this is needed for the file create to throw an exception + when(fsp.newByteChannel(any(), any(), any())).thenThrow(new IOException(ERROR_MSG)); + // this is needed to avoid that the storage implementation sees the path as existing. + doThrow(IOException.class).when(fsp).checkAccess(any(Path.class)); + // this is to avoid creating any directories for this test. + doNothing().when(fsp).createDirectory(any(), any()); + + FileSystem fs = mock(FileSystem.class); + when(fs.provider()).thenReturn(fsp); + + Path p = mock(Path.class); + when(p.getFileSystem()).thenReturn(fs); + when(p.resolve(anyString())).thenReturn(p); + + when(config.getKeyLocation(anyString())).thenReturn(p); + + KeyStorage storage = new KeyStorage(config, COMPONENT); + + IOException e = assertThrows(IOException.class, storage::readPrivateKey); + assertThat(e.getMessage()).isEqualTo(ERROR_MSG); + e = assertThrows(IOException.class, storage::readPublicKey); + assertThat(e.getMessage()).isEqualTo(ERROR_MSG); + } + + @Test + @DisplayName("an attempt to overwrite an internal key throws FileAlreadyExists exception.") + public void testInternalKeysAreNotOverWritable() throws Exception { + KeyStorage storage = new KeyStorage(config, COMPONENT); + storage.storePublicKey(keys.getPublic()); + storage.storePrivateKey(keys.getPrivate()); + assertThrows(FileAlreadyExistsException.class, () -> storage.storePublicKey(keys.getPublic())); + assertThrows(FileAlreadyExistsException.class, () -> storage.storePrivateKey(keys.getPrivate())); + } + + @Test + @DisplayName("storage initialization fails because permissions can not be set.") + @MockitoSettings(strictness = Strictness.LENIENT) + public void testInitFailsOnPermissions() { + // the mock will not return posix file attributes, so setting posix permissions fails. 
+ FileSystemProvider fsp = spy(FileSystemProvider.class); + + FileSystem fs = mock(FileSystem.class); + when(fs.provider()).thenReturn(fsp); + + Path p = mock(Path.class); + when(p.getFileSystem()).thenReturn(fs); + when(p.resolve(anyString())).thenReturn(p); + + when(config.getKeyLocation(anyString())).thenReturn(p); + + assertThrows(UnsupportedOperationException.class, () -> new KeyStorage(config, COMPONENT)); + } + + @Test + @DisplayName("storage initialization fails because directory creation fails.") + @MockitoSettings(strictness = Strictness.LENIENT) + public void testInitFailsOnDirCreation() throws Exception { + // the mock will not return posix file attributes, so setting them fails. + FileSystemProvider fsp = spy(FileSystemProvider.class); + // first exception is to make path non-existent, second do nothing to get to the createDirectory call. + doThrow(IOException.class).doNothing().when(fsp).checkAccess(any(Path.class)); + doThrow(new IOException(ERROR_MSG)).when(fsp).createDirectory(any(), any()); + + FileSystem fs = mock(FileSystem.class); + when(fs.provider()).thenReturn(fsp); + + // this is needed to get to the createDirectory call that we set to throw an exception + Path pathMock = mock(Path.class); + when(pathMock.getFileSystem()).thenReturn(fs); + when(pathMock.resolve(any(Path.class))).thenReturn(pathMock); + when(pathMock.toAbsolutePath()).thenReturn(pathMock); + when(pathMock.getParent()).thenReturn(pathMock); + when(pathMock.relativize(any(Path.class))).thenReturn(pathMock); + + Iterator pathIterMock = mock(Iterator.class); + when(pathIterMock.hasNext()).thenReturn(true, false); + when(pathIterMock.next()).thenReturn(pathMock); + when(pathMock.iterator()).thenReturn(pathIterMock); + + when(config.getKeyLocation(anyString())).thenReturn(pathMock); + + IOException e = assertThrows(IOException.class, () -> new KeyStorage(config, COMPONENT)); + assertThat(e.getMessage()).isEqualTo(ERROR_MSG); + } + + private void storeAndAssertDataWith(KeyStorage storage, Path expectedLocation) throws Exception { + storage.storeKeyPair(keys); + + // Check if the files were written + verify(config, times(1)).getKeyLocation(COMPONENT); + + Path privKeyPath = expectedLocation.resolve(PRIVATE_KEY_FILENAME); + Path publicKeyPath = expectedLocation.resolve(PUBLIC_KEY_FILENAME); + assertThat(privKeyPath).exists(); + assertThat(publicKeyPath).exists(); + assertThat(getPosixFilePermissions(privKeyPath)).containsExactlyInAnyOrderElementsOf(FILE_PERMISSIONS); + assertThat(getPosixFilePermissions(publicKeyPath)).containsExactlyInAnyOrderElementsOf(FILE_PERMISSIONS); + assertThat(getPosixFilePermissions(expectedLocation)).containsExactlyInAnyOrderElementsOf(DIR_PERMISSIONS); + + // Check if we can read the same keys from the files written + KeyCodec codec = new KeyCodec(KEY_ALGO); + + PrivateKey privKey = codec.decodePrivateKey(Files.readAllBytes(privKeyPath)); + assertThat(privKey).isEqualTo(keys.getPrivate()); + + PublicKey pubKey = codec.decodePublicKey(Files.readAllBytes(publicKeyPath)); + assertThat(pubKey).isEqualTo(keys.getPublic()); + + // KeyPair does not implement equals, check keys read against the original keys. 
+ KeyPair kp = storage.readKeyPair(); + assertThat(kp.getPrivate()).isEqualTo(keys.getPrivate()); + assertThat(kp.getPublic()).isEqualTo(keys.getPublic()); + } + } + + @Nested + @DisplayName("with external CA configured: ") + class ExternalCA { + private Path externalPublicKeyPath; + private Path externalPrivateKeyPath; + + @BeforeEach + public void setConfigMockForExternalCA() { + when(config.useExternalCACertificate(anyString())).thenReturn(true); + when(config.getExternalRootCaPublicKeyPath()).thenReturn(externalPublicKeyPath); + when(config.getExternalRootCaPrivateKeyPath()).thenReturn(externalPrivateKeyPath); + } + + @BeforeEach + public void addKeysToBaseDir() throws IOException { + externalPublicKeyPath = baseDir.resolve("external-public-key.pem"); + externalPrivateKeyPath = baseDir.resolve("external-private-key.pem"); + Files.createFile(externalPublicKeyPath); + Files.write(externalPublicKeyPath, config.keyCodec().encodePublicKey(keys.getPublic())); + Files.createFile(externalPrivateKeyPath); + Files.write(externalPrivateKeyPath, config.keyCodec().encodePrivateKey(keys.getPrivate())); + } + + @Test + @DisplayName("external CA keys are read.") + public void testExternalKeysRead() throws Exception { + KeyStorage storage = new KeyStorage(config, COMPONENT); + + assertThat(storage.readPublicKey()).isEqualTo(keys.getPublic()); + assertThat(storage.readPrivateKey()).isEqualTo(keys.getPrivate()); + } + + @Test + @DisplayName("an attempt to overwrite an external key throws UnsupportedOperationException.") + public void testExternalKeysAreNotOverWritable() throws Exception { + KeyStorage storage = new KeyStorage(config, COMPONENT); + + assertThrows(UnsupportedOperationException.class, () -> storage.storePublicKey(keys.getPublic())); + assertThrows(UnsupportedOperationException.class, () -> storage.storePrivateKey(keys.getPrivate())); + } + } +} diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java similarity index 100% rename from hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java rename to hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java index 253551115dd..941cf506486 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java @@ -41,7 +41,7 @@ import org.apache.hadoop.hdds.security.x509.certificate.client.DNCertificateClient; import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; import org.apache.hadoop.hdds.security.x509.certificate.utils.SelfSignedCertificate; -import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; +import org.apache.hadoop.hdds.security.x509.keys.KeyStorage; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.apache.ozone.test.GenericTestUtils; import org.apache.hadoop.util.ServicePlugin; @@ -84,7 +84,7 @@ public class TestHddsSecureDatanodeInit { private static PublicKey publicKey; private static GenericTestUtils.LogCapturer dnLogs; private static SecurityConfig securityConfig; - private static KeyCodec keyCodec; + private 
static KeyStorage keyStorage; private static CertificateCodec certCodec; private static X509Certificate cert; private static final String DN_COMPONENT = DNCertificateClient.COMPONENT_NAME; @@ -130,7 +130,7 @@ SCMSecurityProtocolClientSideTranslatorPB createScmSecurityClient() dnLogs = GenericTestUtils.LogCapturer.captureLogs( ((DNCertificateClient)service.getCertificateClient()).getLogger()); certCodec = new CertificateCodec(securityConfig, DN_COMPONENT); - keyCodec = new KeyCodec(securityConfig, DN_COMPONENT); + keyStorage = new KeyStorage(securityConfig, DN_COMPONENT); dnLogs.clearOutput(); privateKey = service.getCertificateClient().getPrivateKey(); publicKey = service.getCertificateClient().getPublicKey(); @@ -197,7 +197,7 @@ public void testSecureDnStartupCase1() throws Exception { @Test public void testSecureDnStartupCase2() throws Exception { // Case 2: When private key and certificate is missing. - keyCodec.writePublicKey(publicKey); + keyStorage.storePublicKey(publicKey); RuntimeException rteException = assertThrows( RuntimeException.class, () -> service.initializeCertificateClient(client)); @@ -213,7 +213,7 @@ public void testSecureDnStartupCase2() throws Exception { @Test public void testSecureDnStartupCase3() throws Exception { // Case 3: When only public key and certificate is present. - keyCodec.writePublicKey(publicKey); + keyStorage.storePublicKey(publicKey); certCodec.writeCertificate(cert); RuntimeException rteException = assertThrows( RuntimeException.class, @@ -230,7 +230,7 @@ public void testSecureDnStartupCase3() throws Exception { @Test public void testSecureDnStartupCase4() throws Exception { // Case 4: When public key as well as certificate is missing. - keyCodec.writePrivateKey(privateKey); + keyStorage.storePrivateKey(privateKey); // provide a new valid SCMGetCertResponseProto X509Certificate newCert = generateX509Cert(null, null, Duration.ofSeconds(CERT_LIFETIME)); String pemCert = CertificateCodec.getPEMEncodedString(newCert); @@ -261,7 +261,7 @@ public void testSecureDnStartupCase4() throws Exception { public void testSecureDnStartupCase5() throws Exception { // Case 5: If private key and certificate is present. certCodec.writeCertificate(cert); - keyCodec.writePrivateKey(privateKey); + keyStorage.storePrivateKey(privateKey); service.initializeCertificateClient(client); assertNotNull(client.getPrivateKey()); assertNotNull(client.getPublicKey()); @@ -273,8 +273,8 @@ public void testSecureDnStartupCase5() throws Exception { @Test public void testSecureDnStartupCase6() throws Exception { // Case 6: If key pair already exist than response should be GETCERT. - keyCodec.writePublicKey(publicKey); - keyCodec.writePrivateKey(privateKey); + keyStorage.storePublicKey(publicKey); + keyStorage.storePrivateKey(privateKey); assertThrows(Exception.class, () -> service.initializeCertificateClient(client)); assertNotNull(client.getPrivateKey()); @@ -287,8 +287,8 @@ public void testSecureDnStartupCase6() throws Exception { @Test public void testSecureDnStartupCase7() throws Exception { // Case 7 When keypair and certificate is present. 
- keyCodec.writePublicKey(publicKey); - keyCodec.writePrivateKey(privateKey); + keyStorage.storePublicKey(publicKey); + keyStorage.storePrivateKey(privateKey); certCodec.writeCertificate(cert); service.initializeCertificateClient(client); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/OzoneSecretKey.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/OzoneSecretKey.java index 07cd635dab4..f2e905dc456 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/OzoneSecretKey.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/OzoneSecretKey.java @@ -17,6 +17,7 @@ package org.apache.hadoop.hdds.security; import com.google.common.base.Preconditions; + import java.security.KeyPair; import java.security.PrivateKey; import java.security.PublicKey; @@ -24,8 +25,6 @@ import org.apache.commons.lang3.builder.HashCodeBuilder; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.annotation.InterfaceStability; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.x509.keys.SecurityUtil; /** * Wrapper class for Ozone/Hdds secret keys. Used in delegation tokens and block @@ -39,7 +38,6 @@ public class OzoneSecretKey { private long expiryDate; private PrivateKey privateKey; private PublicKey publicKey; - private SecurityConfig securityConfig; private String certSerialId; public OzoneSecretKey(int keyId, long expiryDate, KeyPair keyPair, @@ -52,21 +50,6 @@ public OzoneSecretKey(int keyId, long expiryDate, KeyPair keyPair, this.certSerialId = certificateSerialId; } - /* - * Create new instance using default signature algorithm and provider. - * */ - public OzoneSecretKey(int keyId, long expiryDate, byte[] pvtKey, - byte[] publicKey) { - Preconditions.checkNotNull(pvtKey); - Preconditions.checkNotNull(publicKey); - - this.securityConfig = new SecurityConfig(new OzoneConfiguration()); - this.keyId = keyId; - this.expiryDate = expiryDate; - this.privateKey = SecurityUtil.getPrivateKey(pvtKey, securityConfig); - this.publicKey = SecurityUtil.getPublicKey(publicKey, securityConfig); - } - public int getKeyId() { return keyId; } @@ -95,10 +78,6 @@ public byte[] getEncodedPubliceKey() { return publicKey.getEncoded(); } - public void setExpiryDate(long expiryDate) { - this.expiryDate = expiryDate; - } - @Override public int hashCode() { HashCodeBuilder hashCodeBuilder = new HashCodeBuilder(537, 963); diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java index 118aa826013..813e119c579 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/authority/DefaultCAServer.java @@ -30,7 +30,7 @@ import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; import org.apache.hadoop.hdds.security.x509.certificate.utils.SelfSignedCertificate; import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; +import org.apache.hadoop.hdds.security.x509.keys.KeyStorage; import org.bouncycastle.operator.OperatorCreationException; import org.bouncycastle.pkcs.PKCS10CertificationRequest; import org.slf4j.Logger; @@ -45,11 
+45,9 @@ import java.security.NoSuchAlgorithmException; import java.security.NoSuchProviderException; import java.security.PrivateKey; -import java.security.PublicKey; import java.security.cert.CertPath; import java.security.cert.CertificateException; import java.security.cert.X509Certificate; -import java.security.spec.InvalidKeySpecException; import java.time.LocalDateTime; import java.time.ZoneId; import java.util.Date; @@ -115,7 +113,6 @@ public class DefaultCAServer implements CertificateServer { private final String clusterID; private final String scmID; private String componentName; - private Path caKeysPath; private Path caRootX509Path; private SecurityConfig config; /** @@ -157,7 +154,6 @@ public DefaultCAServer(String subject, String clusterID, String scmID, @Override public void init(SecurityConfig securityConfig, CAType type) throws IOException { - caKeysPath = securityConfig.getKeyLocation(componentName); caRootX509Path = securityConfig.getCertificateLocation(componentName); this.config = securityConfig; this.approver = new DefaultApprover(profile, this.config); @@ -202,13 +198,9 @@ public X509Certificate getCertificate(String certSerialId) throws IOException { return store.getCertificateByID(new BigInteger(certSerialId)); } - private KeyPair getCAKeys() throws IOException { - KeyCodec keyCodec = new KeyCodec(config, componentName); - try { - return new KeyPair(keyCodec.readPublicKey(), keyCodec.readPrivateKey()); - } catch (InvalidKeySpecException | NoSuchAlgorithmException e) { - throw new IOException(e); - } + private PrivateKey getPrivateKey() throws IOException { + KeyStorage keyStorage = new KeyStorage(config, componentName); + return keyStorage.readPrivateKey(); } @Override @@ -268,7 +260,7 @@ private X509Certificate signAndStoreCertificate( try { Preconditions.checkState(!Strings.isNullOrEmpty(certSerialId)); xcert = approver.sign(config, - getCAKeys().getPrivate(), + getPrivateKey(), getCACertificate(), Date.from(beginDate.atZone(ZoneId.systemDefault()).toInstant()), Date.from(endDate.atZone(ZoneId.systemDefault()).toInstant()), @@ -359,12 +351,20 @@ private VerificationStatus verifySelfSignedCA(SecurityConfig securityConfig) { * @return True if the key files exist. */ private boolean checkIfKeysExist() { - if (!Files.exists(caKeysPath)) { + KeyStorage storage = null; + try { + storage = new KeyStorage(config, componentName); + storage.readKeyPair(); + } catch (IOException e) { + if (storage != null && config.useExternalCACertificate(componentName)) { + try { + storage.readPrivateKey(); + return true; + } catch (IOException ignored) { } + } return false; } - - return Files.exists(Paths.get(caKeysPath.toString(), - this.config.getPrivateKeyFileName())); + return true; } /** @@ -406,14 +406,18 @@ Consumer processVerificationStatus( }; break; case MISSING_CERTIFICATE: - consumer = (arg) -> { - LOG.error("We found the keys, but the root certificate for this " + - "CertificateServer is missing. Please restart SCM after locating " + - "the " + - "Certificates."); - LOG.error("Exiting due to unrecoverable CertificateServer error."); - throw new IllegalStateException("Missing Root Certs, cannot continue."); - }; + if (config.useExternalCACertificate(componentName) && type == CAType.ROOT) { + consumer = this::initRootCa; + } else { + consumer = (arg) -> { + LOG.error("We found the keys, but the root certificate for this " + + "CertificateServer is missing. 
Please restart SCM after locating " + + "the " + + "Certificates."); + LOG.error("Exiting due to unrecoverable CertificateServer error."); + throw new IllegalStateException("Missing Root Certs, cannot continue."); + }; + } break; case INITIALIZE: if (type == CAType.ROOT) { @@ -438,7 +442,7 @@ Consumer processVerificationStatus( } private void initRootCa(SecurityConfig securityConfig) { - if (isExternalCaSpecified(securityConfig)) { + if (securityConfig.useExternalCACertificate(componentName)) { initWithExternalRootCa(securityConfig); } else { try { @@ -455,11 +459,6 @@ private void initRootCa(SecurityConfig securityConfig) { } } - private boolean isExternalCaSpecified(SecurityConfig conf) { - return !conf.getExternalRootCaCert().isEmpty() && - !conf.getExternalRootCaPrivateKeyPath().isEmpty(); - } - /** * Generates a KeyPair for the Certificate. * @@ -473,9 +472,8 @@ private KeyPair generateKeys(SecurityConfig securityConfig) throws NoSuchProviderException, NoSuchAlgorithmException, IOException { HDDSKeyGenerator keyGenerator = new HDDSKeyGenerator(securityConfig); KeyPair keys = keyGenerator.generateKey(); - KeyCodec keyPEMWriter = new KeyCodec(securityConfig, - componentName); - keyPEMWriter.writeKey(keys); + KeyStorage keyStorage = new KeyStorage(securityConfig, componentName); + keyStorage.storeKeyPair(keys); return keys; } @@ -513,15 +511,10 @@ private void generateRootCertificate( } private void initWithExternalRootCa(SecurityConfig conf) { - String externalRootCaLocation = conf.getExternalRootCaCert(); - Path extCertPath = Paths.get(externalRootCaLocation); - Path extPrivateKeyPath = Paths.get(conf.getExternalRootCaPrivateKeyPath()); - String externalPublicKeyLocation = conf.getExternalRootCaPublicKeyPath(); + Path extCertPath = Paths.get(conf.getExternalRootCaCert()); - KeyCodec keyCodec = new KeyCodec(config, componentName); - CertificateCodec certificateCodec = - new CertificateCodec(config, componentName); try { + CertificateCodec certificateCodec = new CertificateCodec(config, componentName); Path extCertParent = extCertPath.getParent(); Path extCertName = extCertPath.getFileName(); if (extCertParent == null || extCertName == null) { @@ -529,44 +522,13 @@ private void initWithExternalRootCa(SecurityConfig conf) { extCertPath); } X509Certificate certificate = certificateCodec.getTargetCert(extCertParent, extCertName.toString()); - Path extPrivateKeyParent = extPrivateKeyPath.getParent(); - Path extPrivateKeyFileName = extPrivateKeyPath.getFileName(); - if (extPrivateKeyParent == null || extPrivateKeyFileName == null) { - throw new IOException("External private key path is not correct: " + - extPrivateKeyPath); - } - PrivateKey privateKey = keyCodec.readPrivateKey(extPrivateKeyParent, - extPrivateKeyFileName.toString()); - PublicKey publicKey; - publicKey = readPublicKeyWithExternalData( - externalPublicKeyLocation, keyCodec, certificate); - keyCodec.writeKey(new KeyPair(publicKey, privateKey)); + certificateCodec.writeCertificate(certificate); - } catch (IOException | CertificateException | NoSuchAlgorithmException | - InvalidKeySpecException e) { + } catch (IOException | CertificateException e) { LOG.error("External root CA certificate initialization failed", e); } } - private PublicKey readPublicKeyWithExternalData( - String externalPublicKeyLocation, KeyCodec keyCodec, X509Certificate certificate - ) throws CertificateException, NoSuchAlgorithmException, InvalidKeySpecException, IOException { - PublicKey publicKey; - if (externalPublicKeyLocation.isEmpty()) { - publicKey = 
certificate.getPublicKey(); - } else { - Path publicKeyPath = Paths.get(externalPublicKeyLocation); - Path publicKeyPathFileName = publicKeyPath.getFileName(); - Path publicKeyParent = publicKeyPath.getParent(); - if (publicKeyPathFileName == null || publicKeyParent == null) { - throw new IOException("Public key path incorrect: " + publicKeyParent); - } - publicKey = keyCodec.readPublicKey( - publicKeyParent, publicKeyPathFileName.toString()); - } - return publicKey; - } - /** * This represents the verification status of the CA. Based on this enum * appropriate action is taken in the Init. diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java index 42292b9663f..b63218b8495 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/client/DefaultCertificateClient.java @@ -83,7 +83,7 @@ import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateSignRequest; import org.apache.hadoop.hdds.security.x509.exception.CertificateException; import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; +import org.apache.hadoop.hdds.security.x509.keys.KeyStorage; import org.apache.hadoop.ozone.OzoneSecurityUtil; import com.google.common.base.Preconditions; @@ -115,7 +115,7 @@ public abstract class DefaultCertificateClient implements CertificateClient { private final Logger logger; private final SecurityConfig securityConfig; - private final KeyCodec keyCodec; + private KeyStorage keyStorage; private PrivateKey privateKey; private PublicKey publicKey; private CertPath certPath; @@ -152,7 +152,6 @@ protected DefaultCertificateClient( Objects.requireNonNull(securityConfig); this.securityConfig = securityConfig; this.scmSecurityClient = scmSecurityClient; - keyCodec = new KeyCodec(securityConfig, component); this.logger = log; this.certificateMap = new ConcurrentHashMap<>(); this.component = component; @@ -165,6 +164,13 @@ protected DefaultCertificateClient( updateCertSerialId(certSerialId); } + + private KeyStorage keyStorage() throws IOException { + if (keyStorage == null) { + keyStorage = new KeyStorage(securityConfig, component); + } + return keyStorage; + } /** * Load all certificates from configured location. 
@@ -322,9 +328,8 @@ public synchronized PrivateKey getPrivateKey() { if (OzoneSecurityUtil.checkIfFileExist(keyPath, securityConfig.getPrivateKeyFileName())) { try { - privateKey = keyCodec.readPrivateKey(); - } catch (InvalidKeySpecException | NoSuchAlgorithmException - | IOException e) { + privateKey = keyStorage().readPrivateKey(); + } catch (IOException e) { getLogger().error("Error while getting private key.", e); } } @@ -346,9 +351,8 @@ public synchronized PublicKey getPublicKey() { if (OzoneSecurityUtil.checkIfFileExist(keyPath, securityConfig.getPublicKeyFileName())) { try { - publicKey = keyCodec.readPublicKey(); - } catch (InvalidKeySpecException | NoSuchAlgorithmException - | IOException e) { + publicKey = keyStorage().readPublicKey(); + } catch (IOException e) { getLogger().error("Error while getting public key.", e); } } @@ -735,7 +739,7 @@ private X509Certificate firstCertificateFrom(CertPath certificatePath) { * Default handling of each {@link InitCase}. */ protected InitResponse handleCase(InitCase init) - throws CertificateException { + throws IOException { switch (init) { case NONE: getLogger().info("Creating keypair for client as keypair and " + @@ -859,7 +863,7 @@ protected boolean recoverPublicKey() throws CertificateException { try { if (validateKeyPair(pubKey)) { - keyCodec.writePublicKey(pubKey); + keyStorage().storePublicKey(pubKey); publicKey = pubKey; } else { getLogger().error("Can't recover public key " + @@ -889,7 +893,7 @@ protected boolean recoverPublicKeyFromPrivateKey() PublicKey pubKey = KeyFactory.getInstance(securityConfig.getKeyAlgo()) .generatePublic(rsaPublicKeySpec); if (validateKeyPair(pubKey)) { - keyCodec.writePublicKey(pubKey); + keyStorage().storePublicKey(pubKey); publicKey = pubKey; getLogger().info("Public key is recovered from the private key."); return true; @@ -922,8 +926,8 @@ protected boolean validateKeyPair(PublicKey pubKey) /** * Bootstrap the client by creating keypair and storing it in configured * location. - * */ - protected void bootstrapClientKeys() throws CertificateException { + */ + protected void bootstrapClientKeys() throws IOException { Path keyPath = securityConfig.getKeyLocation(component); if (Files.notExists(keyPath)) { try { @@ -933,20 +937,20 @@ protected void bootstrapClientKeys() throws CertificateException { "for certificate storage.", BOOTSTRAP_ERROR); } } - KeyPair keyPair = createKeyPair(keyCodec); + KeyPair keyPair = createKeyPair(keyStorage()); privateKey = keyPair.getPrivate(); publicKey = keyPair.getPublic(); } - protected KeyPair createKeyPair(KeyCodec codec) throws CertificateException { + protected KeyPair createKeyPair(KeyStorage storage) throws CertificateException { HDDSKeyGenerator keyGenerator = new HDDSKeyGenerator(securityConfig); - KeyPair keyPair = null; + KeyPair keyPair; try { + KeyStorage keyStorageToUse = storage == null ? 
keyStorage() : storage; keyPair = keyGenerator.generateKey(); - codec.writePublicKey(keyPair.getPublic()); - codec.writePrivateKey(keyPair.getPrivate()); + keyStorageToUse.storeKeyPair(keyPair); } catch (NoSuchProviderException | NoSuchAlgorithmException - | IOException e) { + | IOException e) { getLogger().error("Error while bootstrapping certificate client.", e); throw new CertificateException("Error while bootstrapping certificate.", BOOTSTRAP_ERROR); @@ -1117,11 +1121,11 @@ public String renewAndStoreKeyAndCertificate(boolean force) } // Generate key - KeyCodec newKeyCodec = new KeyCodec(securityConfig, newKeyDir.toPath()); KeyPair newKeyPair; try { - newKeyPair = createKeyPair(newKeyCodec); - } catch (CertificateException e) { + KeyStorage newKeyStorage = new KeyStorage(securityConfig, component, HDDS_NEW_KEY_CERT_DIR_NAME_SUFFIX); + newKeyPair = createKeyPair(newKeyStorage); + } catch (IOException e) { throw new CertificateException("Error while creating new key pair.", e, RENEW_ERROR); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/KeyCodec.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/KeyCodec.java deleted file mode 100644 index 1a3ef2d7f0d..00000000000 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/KeyCodec.java +++ /dev/null @@ -1,425 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - */ -package org.apache.hadoop.hdds.security.x509.keys; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import org.apache.commons.io.FileUtils; -import org.apache.commons.io.output.FileWriterWithEncoding; -import org.apache.hadoop.hdds.security.SecurityConfig; -import org.bouncycastle.util.io.pem.PemObject; -import org.bouncycastle.util.io.pem.PemReader; -import org.bouncycastle.util.io.pem.PemWriter; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.File; -import java.io.IOException; -import java.io.StringReader; -import java.nio.charset.Charset; -import java.nio.charset.StandardCharsets; -import java.nio.file.FileSystems; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.nio.file.attribute.PosixFilePermission; -import java.security.KeyFactory; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.PrivateKey; -import java.security.PublicKey; -import java.security.spec.InvalidKeySpecException; -import java.security.spec.PKCS8EncodedKeySpec; -import java.security.spec.X509EncodedKeySpec; -import java.util.Set; -import java.util.function.BooleanSupplier; -import java.util.stream.Collectors; -import java.util.stream.Stream; - -import static java.nio.file.attribute.PosixFilePermission.OWNER_EXECUTE; -import static java.nio.file.attribute.PosixFilePermission.OWNER_READ; -import static java.nio.file.attribute.PosixFilePermission.OWNER_WRITE; - -/** - * We store all Key material in good old PEM files. This helps in avoiding - * dealing will persistent Java KeyStore issues. Also when debugging, general - * tools like OpenSSL can be used to read and decode these files. - */ -public class KeyCodec { - public static final String PRIVATE_KEY = "PRIVATE KEY"; - public static final String PUBLIC_KEY = "PUBLIC KEY"; - public static final Charset DEFAULT_CHARSET = StandardCharsets.UTF_8; - private static final Logger LOG = - LoggerFactory.getLogger(KeyCodec.class); - private final Path location; - private final SecurityConfig securityConfig; - private final Set dirPermissionSet = - Stream.of(OWNER_READ, OWNER_WRITE, OWNER_EXECUTE) - .collect(Collectors.toSet()); - private final Set filePermissionSet = - Stream.of(OWNER_READ, OWNER_WRITE) - .collect(Collectors.toSet()); - private BooleanSupplier isPosixFileSystem; - - /** - * Creates a KeyCodec with component name. - * - * @param config - Security Config. - * @param component - Component String. - */ - public KeyCodec(SecurityConfig config, String component) { - this.securityConfig = config; - isPosixFileSystem = KeyCodec::isPosix; - this.location = securityConfig.getKeyLocation(component); - } - - /** - * Creates a KeyCodec with component name. - * - * @param config - Security Config. - * @param keyDir - path to save the key materials. - */ - public KeyCodec(SecurityConfig config, Path keyDir) { - this.securityConfig = config; - isPosixFileSystem = KeyCodec::isPosix; - this.location = keyDir; - if (!location.toFile().exists()) { - if (!location.toFile().mkdirs()) { - throw new RuntimeException("Failed to create directory " + location); - } - } - } - - /** - * Checks if File System supports posix style security permissions. - * - * @return True if it supports posix. - */ - private static boolean isPosix() { - return FileSystems.getDefault().supportedFileAttributeViews() - .contains("posix"); - } - - /** - * Returns the keys directory permission set. 
- * - * @return Set - */ - @VisibleForTesting - public Set getDirPermissionSet() { - return dirPermissionSet; - } - - /** - * Returns the file permission set. - */ - public Set getFilePermissionSet() { - return filePermissionSet; - } - - /** - * Returns the Security config used for this object. - * - * @return SecurityConfig - */ - public SecurityConfig getSecurityConfig() { - return securityConfig; - } - - /** - * This function is used only for testing. - * - * @param isPosixFileSystem - Sets a boolean function for mimicking files - * systems that are not posix. - */ - @VisibleForTesting - public void setIsPosixFileSystem(BooleanSupplier isPosixFileSystem) { - this.isPosixFileSystem = isPosixFileSystem; - } - - /** - * Writes a given key using the default config options. - * - * @param keyPair - Key Pair to write to file. - * @throws IOException - On I/O failure. - */ - public void writeKey(KeyPair keyPair) throws IOException { - writeKey(location, keyPair, securityConfig.getPrivateKeyFileName(), - securityConfig.getPublicKeyFileName(), false); - } - - /** - * Writes a given private key using the default config options. - * - * @param key - Key to write to file. - * @throws IOException - On I/O failure. - */ - public void writePrivateKey(PrivateKey key) throws IOException { - File privateKeyFile = - Paths.get(location.toString(), - securityConfig.getPrivateKeyFileName()).toFile(); - - if (Files.exists(privateKeyFile.toPath())) { - throw new IOException("Private key already exist."); - } - - try (PemWriter privateKeyWriter = new PemWriter(new - FileWriterWithEncoding(privateKeyFile, DEFAULT_CHARSET))) { - privateKeyWriter.writeObject( - new PemObject(PRIVATE_KEY, key.getEncoded())); - } - Files.setPosixFilePermissions(privateKeyFile.toPath(), filePermissionSet); - } - - /** - * Writes a given public key using the default config options. - * - * @param key - Key to write to file. - * @throws IOException - On I/O failure. - */ - public void writePublicKey(PublicKey key) throws IOException { - File publicKeyFile = Paths.get(location.toString(), - securityConfig.getPublicKeyFileName()).toFile(); - - if (Files.exists(publicKeyFile.toPath())) { - throw new IOException("Public key already exist."); - } - - try (PemWriter keyWriter = new PemWriter(new - FileWriterWithEncoding(publicKeyFile, DEFAULT_CHARSET))) { - keyWriter.writeObject( - new PemObject(PUBLIC_KEY, key.getEncoded())); - } - Files.setPosixFilePermissions(publicKeyFile.toPath(), filePermissionSet); - } - - /** - * Writes a given key using default config options. - * - * @param keyPair - Key pair to write - * @param overwrite - Overwrites the keys if they already exist. - * @throws IOException - On I/O failure. - */ - public void writeKey(KeyPair keyPair, boolean overwrite) throws IOException { - writeKey(location, keyPair, securityConfig.getPrivateKeyFileName(), - securityConfig.getPublicKeyFileName(), overwrite); - } - - /** - * Writes a given key using default config options. - * - * @param basePath - The location to write to, override the config values. - * @param keyPair - Key pair to write - * @param overwrite - Overwrites the keys if they already exist. - * @throws IOException - On I/O failure. - */ - public void writeKey(Path basePath, KeyPair keyPair, boolean overwrite) - throws IOException { - writeKey(basePath, keyPair, securityConfig.getPrivateKeyFileName(), - securityConfig.getPublicKeyFileName(), overwrite); - } - - /** - * Reads a Private Key from the PEM Encoded Store. 
- * - * @param basePath - Base Path, Directory where the Key is stored. - * @param keyFileName - File Name of the private key - * @return PrivateKey Object. - * @throws IOException - on Error. - */ - private PKCS8EncodedKeySpec readKey(Path basePath, String keyFileName) - throws IOException { - File fileName = Paths.get(basePath.toString(), keyFileName).toFile(); - String keyData = FileUtils.readFileToString(fileName, DEFAULT_CHARSET); - final byte[] pemContent; - try (PemReader pemReader = new PemReader(new StringReader(keyData))) { - PemObject keyObject = pemReader.readPemObject(); - pemContent = keyObject.getContent(); - } - return new PKCS8EncodedKeySpec(pemContent); - } - - /** - * Returns a Private Key from a PEM encoded file. - * - * @param basePath - base path - * @param privateKeyFileName - private key file name. - * @return PrivateKey - * @throws InvalidKeySpecException - on Error. - * @throws NoSuchAlgorithmException - on Error. - * @throws IOException - on Error. - */ - public PrivateKey readPrivateKey(Path basePath, String privateKeyFileName) - throws InvalidKeySpecException, NoSuchAlgorithmException, IOException { - PKCS8EncodedKeySpec encodedKeySpec = readKey(basePath, privateKeyFileName); - final KeyFactory keyFactory = - KeyFactory.getInstance(securityConfig.getKeyAlgo()); - return - keyFactory.generatePrivate(encodedKeySpec); - } - - /** - * Read the Public Key using defaults. - * @return PublicKey. - * @throws InvalidKeySpecException - On Error. - * @throws NoSuchAlgorithmException - On Error. - * @throws IOException - On Error. - */ - public PublicKey readPublicKey() throws InvalidKeySpecException, - NoSuchAlgorithmException, IOException { - return readPublicKey(this.location.toAbsolutePath(), - securityConfig.getPublicKeyFileName()); - } - - /** - * Returns a public key from a PEM encoded file. - * - * @param basePath - base path. - * @param publicKeyFileName - public key file name. - * @return PublicKey - * @throws NoSuchAlgorithmException - on Error. - * @throws InvalidKeySpecException - on Error. - * @throws IOException - on Error. - */ - public PublicKey readPublicKey(Path basePath, String publicKeyFileName) - throws NoSuchAlgorithmException, InvalidKeySpecException, IOException { - PKCS8EncodedKeySpec encodedKeySpec = readKey(basePath, publicKeyFileName); - final KeyFactory keyFactory = - KeyFactory.getInstance(securityConfig.getKeyAlgo()); - return - keyFactory.generatePublic( - new X509EncodedKeySpec(encodedKeySpec.getEncoded())); - - } - - - /** - * Returns the private key using defaults. - * @return PrivateKey. - * @throws InvalidKeySpecException - On Error. - * @throws NoSuchAlgorithmException - On Error. - * @throws IOException - On Error. - */ - public PrivateKey readPrivateKey() throws InvalidKeySpecException, - NoSuchAlgorithmException, IOException { - return readPrivateKey(this.location.toAbsolutePath(), - securityConfig.getPrivateKeyFileName()); - } - - - /** - * Helper function that actually writes data to the files. - * - * @param basePath - base path to write key - * @param keyPair - Key pair to write to file. - * @param privateKeyFileName - private key file name. - * @param publicKeyFileName - public key file name. - * @param force - forces overwriting the keys. - * @throws IOException - On I/O failure. 
- */ - private synchronized void writeKey(Path basePath, KeyPair keyPair, - String privateKeyFileName, String publicKeyFileName, boolean force) - throws IOException { - checkPreconditions(basePath); - - File privateKeyFile = - Paths.get(basePath.toString(), privateKeyFileName).toFile(); - File publicKeyFile = - Paths.get(basePath.toString(), publicKeyFileName).toFile(); - checkKeyFile(privateKeyFile, force, publicKeyFile); - - try (PemWriter privateKeyWriter = new PemWriter(new - FileWriterWithEncoding(privateKeyFile, DEFAULT_CHARSET))) { - privateKeyWriter.writeObject( - new PemObject(PRIVATE_KEY, keyPair.getPrivate().getEncoded())); - } - - try (PemWriter publicKeyWriter = new PemWriter(new - FileWriterWithEncoding(publicKeyFile, DEFAULT_CHARSET))) { - publicKeyWriter.writeObject( - new PemObject(PUBLIC_KEY, keyPair.getPublic().getEncoded())); - } - Files.setPosixFilePermissions(privateKeyFile.toPath(), filePermissionSet); - Files.setPosixFilePermissions(publicKeyFile.toPath(), filePermissionSet); - } - - /** - * Checks if private and public key file already exists. Throws IOException if - * file exists and force flag is set to false, else will delete the existing - * file. - * - * @param privateKeyFile - Private key file. - * @param force - forces overwriting the keys. - * @param publicKeyFile - public key file. - * @throws IOException - On I/O failure. - */ - private void checkKeyFile(File privateKeyFile, boolean force, - File publicKeyFile) throws IOException { - if (privateKeyFile.exists() && force) { - if (!privateKeyFile.delete()) { - throw new IOException("Unable to delete private key file."); - } - } - - if (publicKeyFile.exists() && force) { - if (!publicKeyFile.delete()) { - throw new IOException("Unable to delete public key file."); - } - } - - if (privateKeyFile.exists()) { - throw new IOException("Private Key file already exists."); - } - - if (publicKeyFile.exists()) { - throw new IOException("Public Key file already exists."); - } - } - - /** - * Checks if base path exists and sets file permissions. - * - * @param basePath - base path to write key - * @throws IOException - On I/O failure. - */ - private void checkPreconditions(Path basePath) throws IOException { - Preconditions.checkNotNull(basePath, "Base path cannot be null"); - if (!isPosixFileSystem.getAsBoolean()) { - LOG.error("Keys cannot be stored securely without POSIX file system " - + "support for now."); - throw new IOException("Unsupported File System for pem file."); - } - - if (Files.exists(basePath)) { - // Not the end of the world if we reset the permissions on an existing - // directory. - Files.setPosixFilePermissions(basePath, dirPermissionSet); - } else { - boolean success = basePath.toFile().mkdirs(); - if (!success) { - LOG.error("Unable to create the directory for the " - + "location. Location: {}", basePath); - throw new IOException("Unable to create the directory for the " - + "location. Location:" + basePath); - } - Files.setPosixFilePermissions(basePath, dirPermissionSet); - } - } - -} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java deleted file mode 100644 index 41545fb7e9a..00000000000 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/SecurityUtil.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ -package org.apache.hadoop.hdds.security.x509.keys; - -import java.security.KeyFactory; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.security.PrivateKey; -import java.security.PublicKey; -import java.security.spec.InvalidKeySpecException; -import java.security.spec.PKCS8EncodedKeySpec; -import java.security.spec.X509EncodedKeySpec; - -import org.apache.hadoop.hdds.security.SecurityConfig; - -/** - * Utility functions for Security modules for Ozone. - */ -public final class SecurityUtil { - - private SecurityUtil() { - } - - /** - * Returns private key created from encoded key. - * - * @return private key if successful else returns null. - */ - public static PrivateKey getPrivateKey(byte[] encodedKey, - SecurityConfig secureConfig) { - PrivateKey pvtKey = null; - if (encodedKey == null || encodedKey.length == 0) { - return null; - } - - try { - KeyFactory kf = null; - - kf = KeyFactory.getInstance(secureConfig.getKeyAlgo(), - secureConfig.getProvider()); - pvtKey = kf.generatePrivate(new PKCS8EncodedKeySpec(encodedKey)); - - } catch (NoSuchAlgorithmException | InvalidKeySpecException | - NoSuchProviderException e) { - return null; - } - return pvtKey; - } - - /** - * Returns public key created from encoded key. - * - * @return public key if successful else returns null. - */ - public static PublicKey getPublicKey(byte[] encodedKey, - SecurityConfig secureConfig) { - PublicKey key = null; - if (encodedKey == null || encodedKey.length == 0) { - return null; - } - - try { - KeyFactory kf = null; - kf = KeyFactory.getInstance(secureConfig.getKeyAlgo(), - secureConfig.getProvider()); - key = kf.generatePublic(new X509EncodedKeySpec(encodedKey)); - - } catch (NoSuchAlgorithmException | InvalidKeySpecException | - NoSuchProviderException e) { - return null; - } - return key; - } -} diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java deleted file mode 100644 index 4fffbf7da72..00000000000 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/package-info.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -/** - * Utils for private and public keys. - */ -package org.apache.hadoop.hdds.security.x509.keys; diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultCAServer.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultCAServer.java index e029006a6af..a81c2054f4d 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultCAServer.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/authority/TestDefaultCAServer.java @@ -33,7 +33,7 @@ import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateSignRequest; import org.apache.hadoop.hdds.security.x509.certificate.utils.SelfSignedCertificate; import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; +import org.apache.hadoop.hdds.security.x509.keys.KeyStorage; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.bouncycastle.pkcs.PKCS10CertificationRequest; @@ -60,6 +60,9 @@ import java.util.concurrent.Future; import java.util.function.Consumer; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_KEY_DIR_NAME_DEFAULT; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PRIVATE_KEY_FILE_NAME_DEFAULT; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PUBLIC_KEY_FILE_NAME_DEFAULT; import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType.OM; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType.SCM; @@ -80,8 +83,11 @@ public class TestDefaultCAServer { private SecurityConfig securityConfig; private MockCAStore caStore; + @TempDir + private Path tempDir; + @BeforeEach - public void init(@TempDir Path tempDir) throws IOException { + public void init() throws IOException { conf = new OzoneConfiguration(); conf.set(OZONE_METADATA_DIRS, tempDir.toString()); securityConfig = new SecurityConfig(conf); @@ -105,12 +111,13 @@ public void testInit() throws Exception { } @Test - public void testMissingCertificate() { + public void testMissingCertificate() throws Exception { CertificateServer testCA = new DefaultCAServer("testCA", RandomStringUtils.randomAlphabetic(4), RandomStringUtils.randomAlphabetic(4), caStore, new DefaultProfile(), Paths.get(SCM_CA_CERT_STORAGE_DIR, SCM_CA_PATH).toString()); + testCA.init(securityConfig, CAType.ROOT); Consumer caInitializer = ((DefaultCAServer) testCA).processVerificationStatus( DefaultCAServer.VerificationStatus.MISSING_CERTIFICATE, @@ -281,19 +288,18 @@ public void testIntermediaryCAWithEmpty() { } @Test - public void testExternalRootCA(@TempDir Path tempDir) throws Exception { + public void testExternalRootCA() throws Exception { //Given an external certificate String externalCaCertFileName = "CaCert.pem"; - setExternalPathsInConfig(tempDir, externalCaCertFileName); + + setExternalPathsInConfig(externalCaCertFileName); try (SCMCertificateClient 
scmCertificateClient = new SCMCertificateClient(securityConfig, null, null)) { - KeyPair keyPair = KeyStoreTestUtil.generateKeyPair("RSA"); - KeyCodec keyPEMWriter = new KeyCodec(securityConfig, - scmCertificateClient.getComponentName()); + KeyStorage keyStorage = new KeyStorage(securityConfig, ""); + keyStorage.storeKeyPair(keyPair); - keyPEMWriter.writeKey(tempDir, keyPair, true); X509Certificate externalCert = generateExternalCert(keyPair); CertificateCodec certificateCodec = new CertificateCodec(securityConfig, @@ -314,28 +320,21 @@ public void testExternalRootCA(@TempDir Path tempDir) throws Exception { } } - private void setExternalPathsInConfig(Path tempDir, - String externalCaCertFileName) { - String externalCaCertPart = Paths.get(tempDir.toString(), - externalCaCertFileName).toString(); - String privateKeyPath = Paths.get(tempDir.toString(), - HddsConfigKeys.HDDS_PRIVATE_KEY_FILE_NAME_DEFAULT).toString(); - String publicKeyPath = Paths.get(tempDir.toString(), - HddsConfigKeys.HDDS_PUBLIC_KEY_FILE_NAME_DEFAULT).toString(); - - conf.set(HddsConfigKeys.HDDS_X509_ROOTCA_CERTIFICATE_FILE, - externalCaCertPart); - conf.set(HddsConfigKeys.HDDS_X509_ROOTCA_PRIVATE_KEY_FILE, - privateKeyPath); - conf.set(HddsConfigKeys.HDDS_X509_ROOTCA_PUBLIC_KEY_FILE, - publicKeyPath); + private void setExternalPathsInConfig(String externalCaCertFileName) { + String externalCaCertPath = tempDir.resolve(externalCaCertFileName).toString(); + String privKey = tempDir.resolve(HDDS_KEY_DIR_NAME_DEFAULT).resolve(HDDS_PRIVATE_KEY_FILE_NAME_DEFAULT).toString(); + String pubKey = tempDir.resolve(HDDS_KEY_DIR_NAME_DEFAULT).resolve(HDDS_PUBLIC_KEY_FILE_NAME_DEFAULT).toString(); + + conf.set(HddsConfigKeys.HDDS_X509_ROOTCA_CERTIFICATE_FILE, externalCaCertPath); + conf.set(HddsConfigKeys.HDDS_X509_ROOTCA_PRIVATE_KEY_FILE, privKey); + conf.set(HddsConfigKeys.HDDS_X509_ROOTCA_PUBLIC_KEY_FILE, pubKey); securityConfig = new SecurityConfig(conf); } @Test - public void testInitWithCertChain(@TempDir Path tempDir) throws Exception { + public void testInitWithCertChain() throws Exception { String externalCaCertFileName = "CaCert.pem"; - setExternalPathsInConfig(tempDir, externalCaCertFileName); + setExternalPathsInConfig(externalCaCertFileName); CertificateApprover approver = new DefaultApprover(new DefaultCAProfile(), securityConfig); try (SCMCertificateClient scmCertificateClient = @@ -343,10 +342,8 @@ public void testInitWithCertChain(@TempDir Path tempDir) throws Exception { String scmId = RandomStringUtils.randomAlphabetic(4); String clusterId = RandomStringUtils.randomAlphabetic(4); KeyPair keyPair = new HDDSKeyGenerator(securityConfig).generateKey(); - KeyCodec keyPEMWriter = new KeyCodec(securityConfig, - scmCertificateClient.getComponentName()); - - keyPEMWriter.writeKey(tempDir, keyPair, true); + KeyStorage keyStorage = new KeyStorage(securityConfig, ""); + keyStorage.storeKeyPair(keyPair); LocalDate beginDate = LocalDate.now().atStartOfDay().toLocalDate(); LocalDate endDate = LocalDate.from(LocalDate.now().atStartOfDay().plusDays(10)); diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java index f6827352f00..7ffca70f345 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java +++ 
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDefaultCertificateClient.java @@ -27,7 +27,7 @@ import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateSignRequest; import org.apache.hadoop.hdds.security.x509.exception.CertificateException; -import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; +import org.apache.hadoop.hdds.security.x509.keys.KeyStorage; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -61,6 +61,7 @@ import static java.nio.charset.StandardCharsets.UTF_8; import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY; import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_NEW_KEY_CERT_DIR_NAME_SUFFIX; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES; import static org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient.InitResponse.FAILURE; import static org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec.getPEMEncodedString; @@ -91,7 +92,7 @@ public class TestDefaultCertificateClient { private SecurityConfig dnSecurityConfig; private SCMSecurityProtocolClientSideTranslatorPB scmSecurityClient; private static final String DN_COMPONENT = DNCertificateClient.COMPONENT_NAME; - private KeyCodec dnKeyCodec; + private KeyStorage dnKeyStorage; @BeforeEach public void setUp() throws Exception { @@ -103,7 +104,7 @@ public void setUp() throws Exception { dnSecurityConfig = new SecurityConfig(config); keyGenerator = new HDDSKeyGenerator(dnSecurityConfig); - dnKeyCodec = new KeyCodec(dnSecurityConfig, DN_COMPONENT); + dnKeyStorage = new KeyStorage(dnSecurityConfig, DN_COMPONENT); Files.createDirectories(dnSecurityConfig.getKeyLocation(DN_COMPONENT)); x509Certificate = generateX509Cert(null); @@ -153,8 +154,7 @@ public void testKeyOperations() throws Exception { private KeyPair generateKeyPairFiles() throws Exception { cleanupOldKeyPair(); KeyPair keyPair = keyGenerator.generateKey(); - dnKeyCodec.writePrivateKey(keyPair.getPrivate()); - dnKeyCodec.writePublicKey(keyPair.getPublic()); + dnKeyStorage.storeKeyPair(keyPair); return keyPair; } @@ -388,9 +388,8 @@ public void testInitCertAndKeypairValidationFailures() throws Exception { FileUtils.deleteQuietly(Paths.get( dnSecurityConfig.getKeyLocation(DN_COMPONENT).toString(), dnSecurityConfig.getPublicKeyFileName()).toFile()); - dnKeyCodec.writePrivateKey(keyPair.getPrivate()); - dnKeyCodec.writePublicKey(keyPair1.getPublic()); - + dnKeyStorage.storePrivateKey(keyPair.getPrivate()); + dnKeyStorage.storePublicKey(keyPair1.getPublic()); // Check for DN. assertEquals(FAILURE, dnCertClient.init()); assertThat(dnClientLog.getOutput()).contains("Keypair validation failed"); @@ -418,7 +417,7 @@ public void testInitCertAndKeypairValidationFailures() throws Exception { dnSecurityConfig.getKeyLocation(DN_COMPONENT).toString(), dnSecurityConfig.getPublicKeyFileName()).toFile()); getCertClient(); - dnKeyCodec.writePublicKey(keyPair.getPublic()); + dnKeyStorage.storePublicKey(keyPair.getPublic()); // Check for DN. 
assertEquals(FAILURE, dnCertClient.init()); @@ -513,8 +512,8 @@ public void testRenewAndStoreKeyAndCertificate() throws Exception { Files.createDirectories(newKeyDir.toPath()); Files.createDirectories(newCertDir.toPath()); KeyPair keyPair = KeyStoreTestUtil.generateKeyPair("RSA"); - KeyCodec newKeyCodec = new KeyCodec(dnSecurityConfig, newKeyDir.toPath()); - newKeyCodec.writeKey(keyPair); + KeyStorage newKeyStorage = new KeyStorage(dnSecurityConfig, DN_COMPONENT, HDDS_NEW_KEY_CERT_DIR_NAME_SUFFIX); + newKeyStorage.storeKeyPair(keyPair); X509Certificate cert = KeyStoreTestUtil.generateCertificate( "CN=OzoneMaster", keyPair, 30, "SHA256withRSA"); diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDnCertificateClientInit.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDnCertificateClientInit.java index 9a39695b3a4..be1d207881b 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDnCertificateClientInit.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/client/TestDnCertificateClientInit.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; +import org.apache.hadoop.hdds.security.x509.keys.KeyStorage; import org.apache.hadoop.ozone.OzoneSecurityUtil; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.junit.jupiter.api.AfterEach; @@ -64,7 +64,7 @@ public class TestDnCertificateClientInit { @TempDir private Path metaDirPath; private SecurityConfig securityConfig; - private KeyCodec dnKeyCodec; + private KeyStorage dnKeyStorage; private X509Certificate x509Certificate; private static final String DN_COMPONENT = DNCertificateClient.COMPONENT_NAME; @@ -94,7 +94,7 @@ public void setUp() throws Exception { dnCertificateClient = new DNCertificateClient( securityConfig, null, dn, certSerialId, null, null); - dnKeyCodec = new KeyCodec(securityConfig, DN_COMPONENT); + dnKeyStorage = new KeyStorage(securityConfig, DN_COMPONENT); Files.createDirectories(securityConfig.getKeyLocation(DN_COMPONENT)); } @@ -111,7 +111,7 @@ public void tearDown() throws IOException { public void testInitDatanode(boolean pvtKeyPresent, boolean pubKeyPresent, boolean certPresent, InitResponse expectedResult) throws Exception { if (pvtKeyPresent) { - dnKeyCodec.writePrivateKey(keyPair.getPrivate()); + dnKeyStorage.storePrivateKey(keyPair.getPrivate()); } else { FileUtils.deleteQuietly(Paths.get( securityConfig.getKeyLocation(DN_COMPONENT).toString(), @@ -120,7 +120,7 @@ public void testInitDatanode(boolean pvtKeyPresent, boolean pubKeyPresent, if (pubKeyPresent) { if (dnCertificateClient.getPublicKey() == null) { - dnKeyCodec.writePublicKey(keyPair.getPublic()); + dnKeyStorage.storePublicKey(keyPair.getPublic()); } } else { FileUtils.deleteQuietly( diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestKeyCodec.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestKeyCodec.java deleted file mode 100644 index f52b52ef69a..00000000000 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/security/x509/keys/TestKeyCodec.java +++ /dev/null @@ -1,235 +0,0 @@ -/* - * Licensed to the Apache Software 
Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - */ - -package org.apache.hadoop.hdds.security.x509.keys; - -import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_METADATA_DIR_NAME; -import static org.assertj.core.api.Assertions.assertThat; -import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; - -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.nio.file.attribute.PosixFilePermission; -import java.security.KeyFactory; -import java.security.KeyPair; -import java.security.NoSuchAlgorithmException; -import java.security.NoSuchProviderException; -import java.security.PrivateKey; -import java.security.PublicKey; -import java.security.spec.InvalidKeySpecException; -import java.security.spec.PKCS8EncodedKeySpec; -import java.security.spec.X509EncodedKeySpec; -import java.util.Set; -import org.apache.commons.codec.binary.Base64; -import org.apache.commons.io.FileUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.security.SecurityConfig; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.io.TempDir; - -/** - * Test class for HDDS pem writer. - */ -public class TestKeyCodec { - - private SecurityConfig securityConfig; - private String component; - private HDDSKeyGenerator keyGenerator; - private String prefix; - - @BeforeEach - public void init(@TempDir Path tempDir) throws IOException { - OzoneConfiguration configuration = new OzoneConfiguration(); - prefix = tempDir.toString(); - configuration.set(HDDS_METADATA_DIR_NAME, prefix); - securityConfig = new SecurityConfig(configuration); - keyGenerator = new HDDSKeyGenerator(securityConfig); - component = "test_component"; - } - - /** - * Assert basic things like we are able to create a file, and the names are - * in expected format etc. - * - * @throws NoSuchProviderException - On Error, due to missing Java - * dependencies. - * @throws NoSuchAlgorithmException - On Error, due to missing Java - * dependencies. - * @throws IOException - On I/O failure. - */ - @Test - public void testWriteKey() - throws NoSuchProviderException, NoSuchAlgorithmException, - IOException, InvalidKeySpecException { - KeyPair keys = keyGenerator.generateKey(); - KeyCodec pemWriter = new KeyCodec(securityConfig, component); - pemWriter.writeKey(keys); - - // Assert that locations have been created. 
- Path keyLocation = pemWriter.getSecurityConfig().getKeyLocation(component); - assertTrue(keyLocation.toFile().exists()); - - // Assert that locations are created in the locations that we specified - // using the Config. - assertTrue(keyLocation.toString().startsWith(prefix)); - Path privateKeyPath = Paths.get(keyLocation.toString(), - pemWriter.getSecurityConfig().getPrivateKeyFileName()); - assertTrue(privateKeyPath.toFile().exists()); - Path publicKeyPath = Paths.get(keyLocation.toString(), - pemWriter.getSecurityConfig().getPublicKeyFileName()); - assertTrue(publicKeyPath.toFile().exists()); - - // Read the private key and test if the expected String in the PEM file - // format exists. - byte[] privateKey = Files.readAllBytes(privateKeyPath); - String privateKeydata = new String(privateKey, StandardCharsets.UTF_8); - assertThat(privateKeydata).contains("PRIVATE KEY"); - - // Read the public key and test if the expected String in the PEM file - // format exists. - byte[] publicKey = Files.readAllBytes(publicKeyPath); - String publicKeydata = new String(publicKey, StandardCharsets.UTF_8); - assertThat(publicKeydata).contains("PUBLIC KEY"); - - // Let us decode the PEM file and parse it back into binary. - KeyFactory kf = KeyFactory.getInstance( - pemWriter.getSecurityConfig().getKeyAlgo()); - - // Replace the PEM Human readable guards. - privateKeydata = - privateKeydata.replace("-----BEGIN PRIVATE KEY-----\n", ""); - privateKeydata = - privateKeydata.replace("-----END PRIVATE KEY-----", ""); - - // Decode the bas64 to binary format and then use an ASN.1 parser to - // parse the binary format. - - byte[] keyBytes = Base64.decodeBase64(privateKeydata); - PKCS8EncodedKeySpec spec = new PKCS8EncodedKeySpec(keyBytes); - PrivateKey privateKeyDecoded = kf.generatePrivate(spec); - assertNotNull(privateKeyDecoded, - "Private Key should not be null"); - - // Let us decode the public key and veriy that we can parse it back into - // binary. - publicKeydata = - publicKeydata.replace("-----BEGIN PUBLIC KEY-----\n", ""); - publicKeydata = - publicKeydata.replace("-----END PUBLIC KEY-----", ""); - - keyBytes = Base64.decodeBase64(publicKeydata); - X509EncodedKeySpec pubKeyspec = new X509EncodedKeySpec(keyBytes); - PublicKey publicKeyDecoded = kf.generatePublic(pubKeyspec); - assertNotNull(publicKeyDecoded, "Public Key should not be null"); - - // Now let us assert the permissions on the Directories and files are as - // expected. - Set expectedSet = pemWriter.getFilePermissionSet(); - Set currentSet = - Files.getPosixFilePermissions(privateKeyPath); - assertEquals(expectedSet.size(), currentSet.size()); - currentSet.removeAll(expectedSet); - assertEquals(0, currentSet.size()); - - currentSet = - Files.getPosixFilePermissions(publicKeyPath); - currentSet.removeAll(expectedSet); - assertEquals(0, currentSet.size()); - - expectedSet = pemWriter.getDirPermissionSet(); - currentSet = - Files.getPosixFilePermissions(keyLocation); - assertEquals(expectedSet.size(), currentSet.size()); - currentSet.removeAll(expectedSet); - assertEquals(0, currentSet.size()); - } - - /** - * Assert key rewrite fails without force option. - * - * @throws IOException - on I/O failure. - */ - @Test - public void testReWriteKey() - throws Exception { - KeyPair kp = keyGenerator.generateKey(); - KeyCodec pemWriter = new KeyCodec(securityConfig, component); - SecurityConfig secConfig = pemWriter.getSecurityConfig(); - pemWriter.writeKey(kp); - - // Assert that rewriting of keys throws exception with valid messages. 
- IOException ioException = assertThrows(IOException.class, - () -> pemWriter.writeKey(kp)); - assertThat(ioException.getMessage()) - .contains("Private Key file already exists."); - FileUtils.deleteQuietly(Paths.get( - secConfig.getKeyLocation(component).toString() + "/" + secConfig - .getPrivateKeyFileName()).toFile()); - ioException = assertThrows(IOException.class, - () -> pemWriter.writeKey(kp)); - assertThat(ioException.getMessage()) - .contains("Public Key file already exists."); - FileUtils.deleteQuietly(Paths.get( - secConfig.getKeyLocation(component).toString() + "/" + secConfig - .getPublicKeyFileName()).toFile()); - - // Should succeed now as both public and private key are deleted. - pemWriter.writeKey(kp); - // Should succeed with overwrite flag as true. - pemWriter.writeKey(kp, true); - - } - - /** - * Assert key rewrite fails in non Posix file system. - */ - @Test - public void testWriteKeyInNonPosixFS() - throws Exception { - KeyPair kp = keyGenerator.generateKey(); - KeyCodec pemWriter = new KeyCodec(securityConfig, component); - pemWriter.setIsPosixFileSystem(() -> false); - - // Assert key rewrite fails in non Posix file system. - IOException ioException = assertThrows(IOException.class, - () -> pemWriter.writeKey(kp)); - assertThat(ioException.getMessage()) - .contains("Unsupported File System for pem file."); - } - - @Test - public void testReadWritePublicKeyWithoutArgs() - throws NoSuchProviderException, NoSuchAlgorithmException, IOException, - InvalidKeySpecException { - - KeyPair kp = keyGenerator.generateKey(); - KeyCodec keycodec = new KeyCodec(securityConfig, component); - keycodec.writeKey(kp); - - PublicKey pubKey = keycodec.readPublicKey(); - assertNotNull(pubKey); - } -} diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationManager.java index fc7249462c4..92e5bae719d 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/security/RootCARotationManager.java @@ -40,7 +40,7 @@ import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateSignRequest; import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; +import org.apache.hadoop.hdds.security.x509.keys.KeyStorage; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -48,7 +48,6 @@ import java.io.IOException; import java.math.BigInteger; import java.nio.file.Files; -import java.nio.file.Path; import java.nio.file.Paths; import java.nio.file.StandardCopyOption; import java.security.KeyPair; @@ -562,15 +561,13 @@ public void run() { } // Generate key - Path keyDir = securityConfig.getKeyLocation(progressComponent); - KeyCodec keyCodec = new KeyCodec(securityConfig, keyDir); + KeyStorage keyStorage = new KeyStorage(securityConfig, progressComponent); KeyPair newKeyPair = null; try { HDDSKeyGenerator keyGenerator = new HDDSKeyGenerator(securityConfig); newKeyPair = keyGenerator.generateKey(); - keyCodec.writePublicKey(newKeyPair.getPublic()); - keyCodec.writePrivateKey(newKeyPair.getPrivate()); + keyStorage.storeKeyPair(newKeyPair); LOG.info("SubCARotationPrepareTask[rootCertId = {}] - " + "scm key generated.", rootCACertId); } catch (Exception e) { diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java index e0c2a292397..e8e9c241a25 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestDelegationToken.java @@ -39,7 +39,7 @@ import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClientTestImpl; import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; +import org.apache.hadoop.hdds.security.x509.keys.KeyStorage; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; @@ -414,8 +414,8 @@ private void generateKeyPair() throws Exception { SecurityConfig securityConfig = new SecurityConfig(conf); HDDSKeyGenerator keyGenerator = new HDDSKeyGenerator(securityConfig); KeyPair keyPair = keyGenerator.generateKey(); - KeyCodec pemWriter = new KeyCodec(securityConfig, COMPONENT); - pemWriter.writeKey(keyPair, true); + KeyStorage keyStorage = new KeyStorage(securityConfig, COMPONENT); + keyStorage.storeKeyPair(keyPair); } private void setupOm(OzoneConfiguration config) throws Exception { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java index d71a4854c9e..b0da3fce839 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestSecureOzoneCluster.java @@ -75,7 +75,7 @@ import org.apache.hadoop.hdds.security.x509.certificate.utils.SelfSignedCertificate; import org.apache.hadoop.hdds.security.x509.exception.CertificateException; import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; +import org.apache.hadoop.hdds.security.x509.keys.KeyStorage; import org.apache.hadoop.hdds.utils.HAUtils; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.Client; @@ -625,8 +625,8 @@ private void generateKeyPair() throws Exception { SecurityConfig securityConfig = new SecurityConfig(conf); HDDSKeyGenerator keyGenerator = new HDDSKeyGenerator(securityConfig); keyPair = keyGenerator.generateKey(); - KeyCodec pemWriter = new KeyCodec(securityConfig, COMPONENT); - pemWriter.writeKey(keyPair, true); + KeyStorage keyStorage = new KeyStorage(securityConfig, COMPONENT); + keyStorage.storeKeyPair(keyPair); } /** @@ -942,11 +942,9 @@ void testCertificateRotation() throws Exception { // save first cert final int certificateLifetime = 20; // seconds - KeyCodec keyCodec = - new KeyCodec(securityConfig, securityConfig.getKeyLocation("om")); + KeyStorage keyStorage = new KeyStorage(securityConfig, "om"); X509Certificate cert = generateSelfSignedX509Cert(securityConfig, - new KeyPair(keyCodec.readPublicKey(), keyCodec.readPrivateKey()), - null, Duration.ofSeconds(certificateLifetime)); + keyStorage.readKeyPair(), null, Duration.ofSeconds(certificateLifetime)); String certId = cert.getSerialNumber().toString(); omStorage.setOmCertSerialId(certId); omStorage.forceInitialize(); @@ -1025,11 +1023,9 @@ void testCertificateRotationRecoverableFailure() 
throws Exception { // save first cert final int certificateLifetime = 20; // seconds - KeyCodec keyCodec = - new KeyCodec(securityConfig, securityConfig.getKeyLocation("om")); + KeyStorage keyStorage = new KeyStorage(securityConfig, "om"); X509Certificate certHolder = generateSelfSignedX509Cert(securityConfig, - new KeyPair(keyCodec.readPublicKey(), keyCodec.readPrivateKey()), - null, Duration.ofSeconds(certificateLifetime)); + keyStorage.readKeyPair(), null, Duration.ofSeconds(certificateLifetime)); String certId = certHolder.getSerialNumber().toString(); certCodec.writeCertificate(certHolder); omStorage.setOmCertSerialId(certId); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java index 0238b3e3ecf..9b503d8f924 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestSecureOzoneManager.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient; import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; -import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; +import org.apache.hadoop.hdds.security.x509.keys.KeyStorage; import org.apache.hadoop.ozone.security.OMCertificateClient; import org.apache.hadoop.security.ssl.KeyStoreTestUtil; import org.junit.jupiter.api.BeforeEach; @@ -135,7 +135,7 @@ void testSecureOmInitFailures() throws Exception { client = new OMCertificateClient( securityConfig, null, omStorage, omInfo, "", null, null, null); - KeyCodec keyCodec = new KeyCodec(securityConfig, COMPONENT); + KeyStorage keyStorage = new KeyStorage(securityConfig, COMPONENT); FileUtils.deleteQuietly(Paths.get(securityConfig.getKeyLocation(COMPONENT) .toString(), securityConfig.getPrivateKeyFileName()).toFile()); assertEquals(CertificateClient.InitResponse.FAILURE, client.init()); @@ -169,7 +169,7 @@ void testSecureOmInitFailures() throws Exception { securityConfig, null, omStorage, omInfo, "", scmId, null, null); FileUtils.deleteQuietly(Paths.get(securityConfig.getKeyLocation(COMPONENT) .toString(), securityConfig.getPublicKeyFileName()).toFile()); - keyCodec.writePrivateKey(privateKey); + keyStorage.storePrivateKey(privateKey); assertEquals(CertificateClient.InitResponse.SUCCESS, client.init()); assertNotNull(client.getPrivateKey()); assertNotNull(client.getPublicKey()); diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOmCertificateClientInit.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOmCertificateClientInit.java index a604ebbd724..f40b555b19e 100644 --- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOmCertificateClientInit.java +++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/TestOmCertificateClientInit.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec; import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator; -import org.apache.hadoop.hdds.security.x509.keys.KeyCodec; +import org.apache.hadoop.hdds.security.x509.keys.KeyStorage; import org.apache.hadoop.ozone.OzoneSecurityUtil; import 
org.apache.hadoop.ozone.om.OMStorage; import org.apache.hadoop.ozone.om.OzoneManager; @@ -66,7 +66,7 @@ public class TestOmCertificateClientInit { private OMCertificateClient omCertificateClient; private HDDSKeyGenerator keyGenerator; private SecurityConfig securityConfig; - private KeyCodec omKeyCodec; + private KeyStorage omKeyStorage; private X509Certificate x509Certificate; private static final String OM_COMPONENT = OMCertificateClient.COMPONENT_NAME; @@ -101,7 +101,7 @@ public void setUp(@TempDir Path metaDirPath) throws Exception { omCertificateClient = new OMCertificateClient( securityConfig, null, storage, omInfo, "", null, null, null); - omKeyCodec = new KeyCodec(securityConfig, OM_COMPONENT); + omKeyStorage = new KeyStorage(securityConfig, OM_COMPONENT); Files.createDirectories(securityConfig.getKeyLocation(OM_COMPONENT)); } @@ -117,7 +117,7 @@ public void tearDown() throws IOException { public void testInitOzoneManager(boolean pvtKeyPresent, boolean pubKeyPresent, boolean certPresent, InitResponse expectedResult) throws Exception { if (pvtKeyPresent) { - omKeyCodec.writePrivateKey(keyPair.getPrivate()); + omKeyStorage.storePrivateKey(keyPair.getPrivate()); } else { FileUtils.deleteQuietly(Paths.get( securityConfig.getKeyLocation(OM_COMPONENT).toString(), @@ -126,7 +126,7 @@ public void testInitOzoneManager(boolean pvtKeyPresent, boolean pubKeyPresent, if (pubKeyPresent) { if (omCertificateClient.getPublicKey() == null) { - omKeyCodec.writePublicKey(keyPair.getPublic()); + omKeyStorage.storePublicKey(keyPair.getPublic()); } } else { FileUtils.deleteQuietly(Paths.get( From 52930c50af4f5368ed7191d081dd7ecbba17eaf5 Mon Sep 17 00:00:00 2001 From: Chia-Chuan Yu Date: Tue, 21 Jan 2025 23:43:05 +0800 Subject: [PATCH 110/168] HDDS-12005. Refactor TestBlockDataStreamOutput (#7716) --- .../client/rpc/TestBlockDataStreamOutput.java | 423 ++++++++++-------- 1 file changed, 227 insertions(+), 196 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java index c1345207d99..0238f4f4987 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java @@ -19,15 +19,19 @@ import org.apache.hadoop.hdds.DatanodeVersion; import org.apache.hadoop.hdds.client.ReplicationType; +import org.apache.hadoop.hdds.conf.ConfigurationSource; +import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.DatanodeDetails; +import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientMetrics; import org.apache.hadoop.hdds.scm.storage.BlockDataStreamOutput; import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput; -import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.ClientConfigForTesting; import org.apache.hadoop.ozone.HddsDatanodeService; import org.apache.hadoop.ozone.MiniOzoneCluster; @@ -44,19 +48,26 @@ 
import org.apache.ozone.test.tag.Flaky; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; +import java.io.IOException; import java.nio.ByteBuffer; +import java.time.Duration; import java.util.List; import java.util.UUID; import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.stream.Stream; import static java.nio.charset.StandardCharsets.UTF_8; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type.PutBlock; -import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type.WriteChunk; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_CHUNK_READ_NETTY_CHUNKED_NIO_FILE_KEY; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertInstanceOf; @@ -64,131 +75,157 @@ /** * Tests BlockDataStreamOutput class. */ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) @Timeout(300) public class TestBlockDataStreamOutput { - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf = new OzoneConfiguration(); - private static OzoneClient client; - private static ObjectStore objectStore; - private static int chunkSize; - private static int flushSize; - private static int maxFlushSize; - private static int blockSize; - private static String volumeName; - private static String bucketName; - private static String keyString; + private MiniOzoneCluster cluster; + private static final int CHUNK_SIZE = 100; + private static final int FLUSH_SIZE = 2 * CHUNK_SIZE; + private static final int MAX_FLUSH_SIZE = 2 * FLUSH_SIZE; + private static final int BLOCK_SIZE = 2 * MAX_FLUSH_SIZE; + private static final String VOLUME_NAME = "testblockoutputstream"; + private static final String BUCKET_NAME = VOLUME_NAME; + private static String keyString = UUID.randomUUID().toString();; private static final DatanodeVersion DN_OLD_VERSION = DatanodeVersion.SEPARATE_RATIS_PORTS_AVAILABLE; - @BeforeAll - public static void init() throws Exception { - chunkSize = 100; - flushSize = 2 * chunkSize; - maxFlushSize = 2 * flushSize; - blockSize = 2 * maxFlushSize; - + static MiniOzoneCluster createCluster() throws IOException, + InterruptedException, TimeoutException { + OzoneConfiguration conf = new OzoneConfiguration(); OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class); + clientConfig.setChecksumType(ContainerProtos.ChecksumType.NONE); + clientConfig.setStreamBufferFlushDelay(false); + clientConfig.setEnablePutblockPiggybacking(true); conf.setFromObject(clientConfig); - conf.setBoolean(OZONE_CHUNK_READ_NETTY_CHUNKED_NIO_FILE_KEY, true); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); + conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, TimeUnit.SECONDS); conf.setQuietMode(false); - 
conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4, - StorageUnit.MB); + conf.setStorageSize(OZONE_SCM_BLOCK_SIZE, 4, StorageUnit.MB); + conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 3); + + conf.setBoolean(OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); + conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); + + DatanodeRatisServerConfig ratisServerConfig = + conf.getObject(DatanodeRatisServerConfig.class); + ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); + ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3)); + conf.setFromObject(ratisServerConfig); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); + raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(5)); + conf.setFromObject(raftClientConfig); + + RatisClientConfig ratisClientConfig = + conf.getObject(RatisClientConfig.class); + ratisClientConfig.setWriteRequestTimeout(Duration.ofSeconds(30)); + ratisClientConfig.setWatchRequestTimeout(Duration.ofSeconds(30)); + conf.setFromObject(ratisClientConfig); ClientConfigForTesting.newBuilder(StorageUnit.BYTES) - .setBlockSize(blockSize) - .setChunkSize(chunkSize) - .setStreamBufferFlushSize(flushSize) - .setStreamBufferMaxSize(maxFlushSize) - .setDataStreamBufferFlushSize(maxFlushSize) - .setDataStreamMinPacketSize(chunkSize) - .setDataStreamWindowSize(5 * chunkSize) + .setBlockSize(BLOCK_SIZE) + .setChunkSize(CHUNK_SIZE) + .setStreamBufferFlushSize(FLUSH_SIZE) + .setStreamBufferMaxSize(MAX_FLUSH_SIZE) + .setDataStreamBufferFlushSize(MAX_FLUSH_SIZE) + .setDataStreamMinPacketSize(CHUNK_SIZE) + .setDataStreamWindowSize(5 * CHUNK_SIZE) .applyTo(conf); - cluster = MiniOzoneCluster.newBuilder(conf) + MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf) .setNumDatanodes(5) .setDatanodeFactory(UniformDatanodesFactory.newBuilder() .setCurrentVersion(DN_OLD_VERSION) .build()) .build(); + cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.THREE, + 180000); cluster.waitForClusterToBeReady(); - //the easiest way to create an open container is creating a key - client = OzoneClientFactory.getRpcClient(conf); - objectStore = client.getObjectStore(); - keyString = UUID.randomUUID().toString(); - volumeName = "testblockdatastreamoutput"; - bucketName = volumeName; - objectStore.createVolume(volumeName); - objectStore.getVolume(volumeName).createBucket(bucketName); + + try (OzoneClient client = cluster.newClient()) { + ObjectStore objectStore = client.getObjectStore(); + objectStore.createVolume(VOLUME_NAME); + objectStore.getVolume(VOLUME_NAME).createBucket(BUCKET_NAME); + } + + return cluster; } - static String getKeyName() { - return UUID.randomUUID().toString(); + private static Stream clientParameters() { + return Stream.of( + Arguments.of(true), + Arguments.of(false) + ); } - @AfterAll - public static void shutdown() { - IOUtils.closeQuietly(client); - if (cluster != null) { - cluster.shutdown(); - } + private static Stream dataLengthParameters() { + return Stream.of( + Arguments.of(CHUNK_SIZE / 2), + Arguments.of(CHUNK_SIZE), + Arguments.of(CHUNK_SIZE + 50), + Arguments.of(BLOCK_SIZE + 50) + ); } - @Test - public void testHalfChunkWrite() throws Exception { - testWrite(chunkSize / 2); - testWriteWithFailure(chunkSize / 2); + static OzoneClientConfig newClientConfig(ConfigurationSource source, + boolean flushDelay) { + OzoneClientConfig clientConfig = source.getObject(OzoneClientConfig.class); + 
clientConfig.setChecksumType(ContainerProtos.ChecksumType.NONE); + clientConfig.setStreamBufferFlushDelay(flushDelay); + return clientConfig; } - @Test - public void testSingleChunkWrite() throws Exception { - testWrite(chunkSize); - testWriteWithFailure(chunkSize); + static OzoneClient newClient(OzoneConfiguration conf, + OzoneClientConfig config) throws IOException { + OzoneConfiguration copy = new OzoneConfiguration(conf); + copy.setFromObject(config); + return OzoneClientFactory.getRpcClient(copy); } - @Test - public void testMultiChunkWrite() throws Exception { - testWrite(chunkSize + 50); - testWriteWithFailure(chunkSize + 50); + @BeforeAll + public void init() throws Exception { + cluster = createCluster(); } - @Test - @Flaky("HDDS-12027") - public void testMultiBlockWrite() throws Exception { - testWrite(blockSize + 50); - testWriteWithFailure(blockSize + 50); + static String getKeyName() { + return UUID.randomUUID().toString(); } - static void testWrite(int dataLength) throws Exception { - XceiverClientMetrics metrics = - XceiverClientManager.getXceiverClientMetrics(); - long pendingWriteChunkCount = metrics.getPendingContainerOpCountMetrics(WriteChunk); - long pendingPutBlockCount = metrics.getPendingContainerOpCountMetrics(PutBlock); + @AfterAll + public void shutdown() { + if (cluster != null) { + cluster.shutdown(); + } + } + + @ParameterizedTest + @MethodSource("dataLengthParameters") + @Flaky("HDDS-12027") + public void testStreamWrite(int dataLength) throws Exception { + OzoneClientConfig config = newClientConfig(cluster.getConf(), false); + try (OzoneClient client = newClient(cluster.getConf(), config)) { + testWrite(client, dataLength); + testWriteWithFailure(client, dataLength); + } + } + static void testWrite(OzoneClient client, int dataLength) throws Exception { String keyName = getKeyName(); OzoneDataStreamOutput key = createKey( - keyName, ReplicationType.RATIS, dataLength); + client, keyName, dataLength); final byte[] data = ContainerTestHelper.generateData(dataLength, false); key.write(ByteBuffer.wrap(data)); // now close the stream, It will update the key length. 
key.close(); - validateData(keyName, data); - - assertEquals(pendingPutBlockCount, - metrics.getPendingContainerOpCountMetrics(PutBlock)); - assertEquals(pendingWriteChunkCount, - metrics.getPendingContainerOpCountMetrics(WriteChunk)); + validateData(client, keyName, data); } - private void testWriteWithFailure(int dataLength) throws Exception { - XceiverClientMetrics metrics = - XceiverClientManager.getXceiverClientMetrics(); - long pendingWriteChunkCount = metrics.getPendingContainerOpCountMetrics(WriteChunk); - long pendingPutBlockCount = metrics.getPendingContainerOpCountMetrics(PutBlock); - + private void testWriteWithFailure(OzoneClient client, int dataLength) throws Exception { String keyName = getKeyName(); OzoneDataStreamOutput key = createKey( - keyName, ReplicationType.RATIS, dataLength); + client, keyName, dataLength); byte[] data = ContainerTestHelper.getFixedLengthString(keyString, dataLength) .getBytes(UTF_8); @@ -203,129 +240,123 @@ private void testWriteWithFailure(int dataLength) throws Exception { key.write(b); key.close(); String dataString = new String(data, UTF_8); - validateData(keyName, dataString.concat(dataString).getBytes(UTF_8)); - - assertEquals(pendingPutBlockCount, - metrics.getPendingContainerOpCountMetrics(PutBlock)); - assertEquals(pendingWriteChunkCount, - metrics.getPendingContainerOpCountMetrics(WriteChunk)); - } - - @Test - public void testPutBlockAtBoundary() throws Exception { - int dataLength = maxFlushSize + 100; - XceiverClientMetrics metrics = - XceiverClientManager.getXceiverClientMetrics(); - long writeChunkCount = metrics.getContainerOpCountMetrics(WriteChunk); - long putBlockCount = metrics.getContainerOpCountMetrics(PutBlock); - long pendingWriteChunkCount = metrics.getPendingContainerOpCountMetrics(WriteChunk); - long pendingPutBlockCount = metrics.getPendingContainerOpCountMetrics(PutBlock); - long totalOpCount = metrics.getTotalOpCount(); - - String keyName = getKeyName(); - OzoneDataStreamOutput key = createKey( - keyName, ReplicationType.RATIS, 0); - byte[] data = - ContainerTestHelper.getFixedLengthString(keyString, dataLength) - .getBytes(UTF_8); - key.write(ByteBuffer.wrap(data)); - assertThat(metrics.getPendingContainerOpCountMetrics(PutBlock)) - .isLessThanOrEqualTo(pendingPutBlockCount + 1); - assertThat(metrics.getPendingContainerOpCountMetrics(WriteChunk)) - .isLessThanOrEqualTo(pendingWriteChunkCount + 5); - key.close(); - // Since data length is 500 , first putBlock will be at 400(flush boundary) - // and the other at 500 - assertEquals(putBlockCount + 2, - metrics.getContainerOpCountMetrics(PutBlock)); - // Each chunk is 100 so there will be 500 / 100 = 5 chunks. 
- assertEquals(writeChunkCount + 5, - metrics.getContainerOpCountMetrics(WriteChunk)); - assertEquals(totalOpCount + 7, - metrics.getTotalOpCount()); - assertEquals(pendingPutBlockCount, - metrics.getPendingContainerOpCountMetrics(PutBlock)); - assertEquals(pendingWriteChunkCount, - metrics.getPendingContainerOpCountMetrics(WriteChunk)); - - validateData(keyName, data); + validateData(client, keyName, dataString.concat(dataString).getBytes(UTF_8)); } - - static OzoneDataStreamOutput createKey(String keyName, ReplicationType type, + static OzoneDataStreamOutput createKey(OzoneClient client, String keyName, long size) throws Exception { - return TestHelper.createStreamKey( - keyName, type, size, objectStore, volumeName, bucketName); + return TestHelper.createStreamKey(keyName, ReplicationType.RATIS, size, + client.getObjectStore(), VOLUME_NAME, BUCKET_NAME); } - static void validateData(String keyName, byte[] data) throws Exception { + + static void validateData(OzoneClient client, String keyName, byte[] data) throws Exception { TestHelper.validateData( - keyName, data, objectStore, volumeName, bucketName); + keyName, data, client.getObjectStore(), VOLUME_NAME, BUCKET_NAME); } - - @Test - public void testMinPacketSize() throws Exception { - String keyName = getKeyName(); - XceiverClientMetrics metrics = - XceiverClientManager.getXceiverClientMetrics(); - OzoneDataStreamOutput key = createKey(keyName, ReplicationType.RATIS, 0); - long writeChunkCount = metrics.getContainerOpCountMetrics(WriteChunk); - long pendingWriteChunkCount = metrics.getPendingContainerOpCountMetrics(WriteChunk); - byte[] data = - ContainerTestHelper.getFixedLengthString(keyString, chunkSize / 2) - .getBytes(UTF_8); - key.write(ByteBuffer.wrap(data)); - // minPacketSize= 100, so first write of 50 wont trigger a writeChunk - assertEquals(writeChunkCount, - metrics.getContainerOpCountMetrics(WriteChunk)); - key.write(ByteBuffer.wrap(data)); - assertEquals(writeChunkCount + 1, - metrics.getContainerOpCountMetrics(WriteChunk)); - // now close the stream, It will update the key length. 
- key.close(); - assertEquals(pendingWriteChunkCount, - metrics.getPendingContainerOpCountMetrics(WriteChunk)); - String dataString = new String(data, UTF_8); - validateData(keyName, dataString.concat(dataString).getBytes(UTF_8)); + @ParameterizedTest + @MethodSource("clientParameters") + public void testPutBlockAtBoundary(boolean flushDelay) throws Exception { + OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay); + try (OzoneClient client = newClient(cluster.getConf(), config)) { + int dataLength = 500; + XceiverClientMetrics metrics = + XceiverClientManager.getXceiverClientMetrics(); + long putBlockCount = metrics.getContainerOpCountMetrics( + ContainerProtos.Type.PutBlock); + long pendingPutBlockCount = metrics.getPendingContainerOpCountMetrics( + ContainerProtos.Type.PutBlock); + String keyName = getKeyName(); + OzoneDataStreamOutput key = createKey( + client, keyName, 0); + byte[] data = + ContainerTestHelper.getFixedLengthString(keyString, dataLength) + .getBytes(UTF_8); + key.write(ByteBuffer.wrap(data)); + assertThat(metrics.getPendingContainerOpCountMetrics(ContainerProtos.Type.PutBlock)) + .isLessThanOrEqualTo(pendingPutBlockCount + 1); + key.close(); + // Since data length is 500 , first putBlock will be at 400(flush boundary) + // and the other at 500 + assertEquals( + metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock), + putBlockCount + 2); + validateData(client, keyName, data); + } } - @Test - public void testTotalAckDataLength() throws Exception { - int dataLength = 400; - String keyName = getKeyName(); - OzoneDataStreamOutput key = createKey( - keyName, ReplicationType.RATIS, 0); - byte[] data = - ContainerTestHelper.getFixedLengthString(keyString, dataLength) - .getBytes(UTF_8); - KeyDataStreamOutput keyDataStreamOutput = - (KeyDataStreamOutput) key.getByteBufStreamOutput(); - BlockDataStreamOutputEntry stream = - keyDataStreamOutput.getStreamEntries().get(0); - key.write(ByteBuffer.wrap(data)); - key.close(); - assertEquals(dataLength, stream.getTotalAckDataLength()); + @ParameterizedTest + @MethodSource("clientParameters") + public void testMinPacketSize(boolean flushDelay) throws Exception { + OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay); + try (OzoneClient client = newClient(cluster.getConf(), config)) { + String keyName = getKeyName(); + XceiverClientMetrics metrics = + XceiverClientManager.getXceiverClientMetrics(); + OzoneDataStreamOutput key = createKey(client, keyName, 0); + long writeChunkCount = + metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk); + byte[] data = + ContainerTestHelper.getFixedLengthString(keyString, CHUNK_SIZE / 2) + .getBytes(UTF_8); + key.write(ByteBuffer.wrap(data)); + // minPacketSize= 100, so first write of 50 won't trigger a writeChunk + assertEquals(writeChunkCount, + metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); + key.write(ByteBuffer.wrap(data)); + assertEquals(writeChunkCount + 1, + metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk)); + // now close the stream, It will update the key length. 
+ key.close(); + String dataString = new String(data, UTF_8); + validateData(client, keyName, dataString.concat(dataString).getBytes(UTF_8)); + } } - @Test - public void testDatanodeVersion() throws Exception { - // Verify all DNs internally have versions set correctly - List dns = cluster.getHddsDatanodes(); - for (HddsDatanodeService dn : dns) { - DatanodeDetails details = dn.getDatanodeDetails(); - assertEquals(DN_OLD_VERSION.toProtoValue(), details.getCurrentVersion()); + @ParameterizedTest + @MethodSource("clientParameters") + public void testTotalAckDataLength(boolean flushDelay) throws Exception { + OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay); + try (OzoneClient client = newClient(cluster.getConf(), config)) { + int dataLength = 400; + String keyName = getKeyName(); + OzoneDataStreamOutput key = createKey( + client, keyName, 0); + byte[] data = + ContainerTestHelper.getFixedLengthString(keyString, dataLength) + .getBytes(UTF_8); + KeyDataStreamOutput keyDataStreamOutput = + (KeyDataStreamOutput) key.getByteBufStreamOutput(); + BlockDataStreamOutputEntry stream = + keyDataStreamOutput.getStreamEntries().get(0); + key.write(ByteBuffer.wrap(data)); + key.close(); + assertEquals(dataLength, stream.getTotalAckDataLength()); } + } - String keyName = getKeyName(); - OzoneDataStreamOutput key = createKey(keyName, ReplicationType.RATIS, 0); - KeyDataStreamOutput keyDataStreamOutput = (KeyDataStreamOutput) key.getByteBufStreamOutput(); - BlockDataStreamOutputEntry stream = keyDataStreamOutput.getStreamEntries().get(0); - - // Now check 3 DNs in a random pipeline returns the correct DN versions - List streamDnDetails = stream.getPipeline().getNodes(); - for (DatanodeDetails details : streamDnDetails) { - assertEquals(DN_OLD_VERSION.toProtoValue(), details.getCurrentVersion()); + @ParameterizedTest + @MethodSource("clientParameters") + public void testDatanodeVersion(boolean flushDelay) throws Exception { + OzoneClientConfig config = newClientConfig(cluster.getConf(), flushDelay); + try (OzoneClient client = newClient(cluster.getConf(), config)) { + // Verify all DNs internally have versions set correctly + List dns = cluster.getHddsDatanodes(); + for (HddsDatanodeService dn : dns) { + DatanodeDetails details = dn.getDatanodeDetails(); + assertEquals(DN_OLD_VERSION.toProtoValue(), details.getCurrentVersion()); + } + + String keyName = getKeyName(); + OzoneDataStreamOutput key = createKey(client, keyName, 0); + KeyDataStreamOutput keyDataStreamOutput = (KeyDataStreamOutput) key.getByteBufStreamOutput(); + BlockDataStreamOutputEntry stream = keyDataStreamOutput.getStreamEntries().get(0); + + // Now check 3 DNs in a random pipeline returns the correct DN versions + List streamDnDetails = stream.getPipeline().getNodes(); + for (DatanodeDetails details : streamDnDetails) { + assertEquals(DN_OLD_VERSION.toProtoValue(), details.getCurrentVersion()); + } } } - } From e2718dda4a56e516853dfec32fe56758ec1cad3f Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Tue, 21 Jan 2025 17:57:06 +0100 Subject: [PATCH 111/168] HDDS-12100. 
Move CI dependency installer functions out of _lib.sh (#7715) --- dev-support/ci/selective_ci_checks.sh | 6 +- hadoop-ozone/dev-support/checks/_lib.sh | 105 ------------------ hadoop-ozone/dev-support/checks/acceptance.sh | 19 ++++ hadoop-ozone/dev-support/checks/bats.sh | 6 +- hadoop-ozone/dev-support/checks/docs.sh | 2 +- hadoop-ozone/dev-support/checks/findbugs.sh | 3 +- .../dev-support/checks/install/bats.sh | 29 +++++ .../dev-support/checks/install/flekszible.sh | 33 ++++++ .../dev-support/checks/install/hugo.sh | 44 ++++++++ .../dev-support/checks/install/k3s.sh | 28 +++++ .../dev-support/checks/install/spotbugs.sh | 26 +++++ hadoop-ozone/dev-support/checks/kubernetes.sh | 26 ++++- 12 files changed, 209 insertions(+), 118 deletions(-) create mode 100644 hadoop-ozone/dev-support/checks/install/bats.sh create mode 100644 hadoop-ozone/dev-support/checks/install/flekszible.sh create mode 100644 hadoop-ozone/dev-support/checks/install/hugo.sh create mode 100644 hadoop-ozone/dev-support/checks/install/k3s.sh create mode 100644 hadoop-ozone/dev-support/checks/install/spotbugs.sh diff --git a/dev-support/ci/selective_ci_checks.sh b/dev-support/ci/selective_ci_checks.sh index 869d36fc6cc..ffe6886b33d 100755 --- a/dev-support/ci/selective_ci_checks.sh +++ b/dev-support/ci/selective_ci_checks.sh @@ -247,6 +247,7 @@ function get_count_doc_files() { local pattern_array=( "^hadoop-hdds/docs" "^hadoop-ozone/dev-support/checks/docs.sh" + "^hadoop-ozone/dev-support/checks/install/hugo.sh" ) filter_changed_files true COUNT_DOC_CHANGED_FILES=${match_count} @@ -278,6 +279,8 @@ function get_count_kubernetes_files() { start_end::group_start "Count kubernetes files" local pattern_array=( "^hadoop-ozone/dev-support/checks/kubernetes.sh" + "^hadoop-ozone/dev-support/checks/install/flekszible.sh" + "^hadoop-ozone/dev-support/checks/install/k3s.sh" "^hadoop-ozone/dist" ) local ignore_array=( @@ -356,7 +359,7 @@ function check_needs_bats() { local pattern_array=( "\.bash$" "\.bats$" - "\.sh$" # includes hadoop-ozone/dev-support/checks/bats.sh + "\.sh$" # includes hadoop-ozone/dev-support/checks/bats.sh and hadoop-ozone/dev-support/checks/install/bats.sh ) filter_changed_files @@ -416,6 +419,7 @@ function check_needs_findbugs() { start_end::group_start "Check if findbugs is needed" local pattern_array=( "^hadoop-ozone/dev-support/checks/findbugs.sh" + "^hadoop-ozone/dev-support/checks/install/spotbugs.sh" "findbugsExcludeFile.xml" "pom.xml" "src/..../java" diff --git a/hadoop-ozone/dev-support/checks/_lib.sh b/hadoop-ozone/dev-support/checks/_lib.sh index 632aecb8296..ec59c16b632 100644 --- a/hadoop-ozone/dev-support/checks/_lib.sh +++ b/hadoop-ozone/dev-support/checks/_lib.sh @@ -63,108 +63,3 @@ _install_tool() { fi fi } - -install_bats() { - _install_tool bats bats-core-1.2.1/bin -} - -_install_bats() { - curl -LSs https://github.com/bats-core/bats-core/archive/v1.2.1.tar.gz | tar -xz -f - -} - -install_k3s() { - _install_tool k3s -} - -_install_k3s() { - curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION="v1.21.2+k3s1" sh - - sudo chmod a+r $KUBECONFIG -} - -install_flekszible() { - _install_tool flekszible bin -} - -_install_flekszible() { - mkdir bin - - local os=$(uname -s) - local arch=$(uname -m) - - curl -LSs https://github.com/elek/flekszible/releases/download/v2.3.0/flekszible_2.3.0_${os}_${arch}.tar.gz | tar -xz -f - -C bin - - chmod +x bin/flekszible -} - -install_hugo() { - _install_tool hugo bin -} - -_install_hugo() { - : ${HUGO_VERSION:=0.83.1} - - local os=$(uname -s) - local arch=$(uname -m) - 
- mkdir bin - - case "${os}" in - Darwin) - os=macOS - ;; - esac - - case "${arch}" in - x86_64) - arch=64bit - ;; - esac - - curl -LSs "https://github.com/gohugoio/hugo/releases/download/v${HUGO_VERSION}/hugo_${HUGO_VERSION}_${os}-${arch}.tar.gz" | tar -xz -f - -C bin hugo - chmod +x bin/hugo -} - -install_virtualenv() { - _install_tool virtualenv -} - -_install_virtualenv() { - sudo pip3 install virtualenv -} - -install_robot() { - _install_tool robot venv/bin -} - -_install_robot() { - virtualenv venv - source venv/bin/activate - pip install robotframework -} - -install_spotbugs() { - _install_tool spotbugs spotbugs-3.1.12/bin -} - -_install_spotbugs() { - curl -LSs https://repo.maven.apache.org/maven2/com/github/spotbugs/spotbugs/3.1.12/spotbugs-3.1.12.tgz | tar -xz -f - -} - -download_hadoop_aws() { - local dir="$1" - - if [[ -z ${dir} ]]; then - echo "Required argument: target directory for Hadoop AWS sources" >&2 - return 1 - fi - - if [[ ! -e "${dir}" ]] || [[ ! -d "${dir}"/src/test/resources ]]; then - mkdir -p "${dir}" - if [[ ! -f "${dir}.tar.gz" ]]; then - local url="https://www.apache.org/dyn/closer.lua?action=download&filename=hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}-src.tar.gz" - echo "Downloading Hadoop from ${url}" - curl -LSs --fail -o "${dir}.tar.gz" "$url" || return 1 - fi - tar -x -z -C "${dir}" --strip-components=3 -f "${dir}.tar.gz" --wildcards 'hadoop-*-src/hadoop-tools/hadoop-aws' || return 1 - fi -} diff --git a/hadoop-ozone/dev-support/checks/acceptance.sh b/hadoop-ozone/dev-support/checks/acceptance.sh index ea9fa819ec3..1e877ade9f1 100755 --- a/hadoop-ozone/dev-support/checks/acceptance.sh +++ b/hadoop-ozone/dev-support/checks/acceptance.sh @@ -50,6 +50,25 @@ if [[ "${OZONE_ACCEPTANCE_SUITE}" == "s3a" ]]; then export HADOOP_AWS_DIR=${OZONE_ROOT}/target/hadoop-src fi + download_hadoop_aws() { + local dir="$1" + + if [[ -z ${dir} ]]; then + echo "Required argument: target directory for Hadoop AWS sources" >&2 + return 1 + fi + + if [[ ! -e "${dir}" ]] || [[ ! -d "${dir}"/src/test/resources ]]; then + mkdir -p "${dir}" + if [[ ! -f "${dir}.tar.gz" ]]; then + local url="https://www.apache.org/dyn/closer.lua?action=download&filename=hadoop/common/hadoop-${HADOOP_VERSION}/hadoop-${HADOOP_VERSION}-src.tar.gz" + echo "Downloading Hadoop from ${url}" + curl -LSs --fail -o "${dir}.tar.gz" "$url" || return 1 + fi + tar -x -z -C "${dir}" --strip-components=3 -f "${dir}.tar.gz" --wildcards 'hadoop-*-src/hadoop-tools/hadoop-aws' || return 1 + fi + } + if ! download_hadoop_aws "${HADOOP_AWS_DIR}"; then echo "Failed to download Hadoop ${HADOOP_VERSION}" > "${REPORT_FILE}" exit 1 diff --git a/hadoop-ozone/dev-support/checks/bats.sh b/hadoop-ozone/dev-support/checks/bats.sh index f94ba583ee2..3dec6052a92 100755 --- a/hadoop-ozone/dev-support/checks/bats.sh +++ b/hadoop-ozone/dev-support/checks/bats.sh @@ -22,11 +22,7 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" cd "${DIR}/../../.." 
|| exit 1 source "${DIR}/_lib.sh" - -install_bats - -git clone https://github.com/bats-core/bats-assert dev-support/ci/bats-assert -git clone https://github.com/bats-core/bats-support dev-support/ci/bats-support +source "${DIR}/install/bats.sh" REPORT_DIR=${OUTPUT_DIR:-"${DIR}/../../../target/bats"} mkdir -p "${REPORT_DIR}" diff --git a/hadoop-ozone/dev-support/checks/docs.sh b/hadoop-ozone/dev-support/checks/docs.sh index 7ebf64ef190..045811c16c3 100755 --- a/hadoop-ozone/dev-support/checks/docs.sh +++ b/hadoop-ozone/dev-support/checks/docs.sh @@ -22,7 +22,7 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" cd "${DIR}/../../.." || exit 1 source "${DIR}/_lib.sh" -install_hugo +source "${DIR}/install/hugo.sh" REPORT_DIR=${OUTPUT_DIR:-"${DIR}/../../../target/docs"} mkdir -p "${REPORT_DIR}" diff --git a/hadoop-ozone/dev-support/checks/findbugs.sh b/hadoop-ozone/dev-support/checks/findbugs.sh index 7d1565a0195..0bd7a5717e2 100755 --- a/hadoop-ozone/dev-support/checks/findbugs.sh +++ b/hadoop-ozone/dev-support/checks/findbugs.sh @@ -24,8 +24,7 @@ cd "$DIR/../../.." || exit 1 : ${OZONE_WITH_COVERAGE:="false"} source "${DIR}/_lib.sh" - -install_spotbugs +source "${DIR}/install/spotbugs.sh" REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/findbugs"} mkdir -p "$REPORT_DIR" diff --git a/hadoop-ozone/dev-support/checks/install/bats.sh b/hadoop-ozone/dev-support/checks/install/bats.sh new file mode 100644 index 00000000000..b8d3aa7a27a --- /dev/null +++ b/hadoop-ozone/dev-support/checks/install/bats.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script installs bats. +# Requires _install_tool from _lib.sh. Use `source` for both scripts, because it modifies $PATH. + +: ${BATS_VERSION:="1.2.1"} + +_install_bats() { + curl -LSs "https://github.com/bats-core/bats-core/archive/v${BATS_VERSION}.tar.gz" | tar -xz -f - +} + +_install_tool bats "bats-core-${BATS_VERSION}/bin" + +git clone https://github.com/bats-core/bats-assert dev-support/ci/bats-assert +git clone https://github.com/bats-core/bats-support dev-support/ci/bats-support diff --git a/hadoop-ozone/dev-support/checks/install/flekszible.sh b/hadoop-ozone/dev-support/checks/install/flekszible.sh new file mode 100644 index 00000000000..ea887b84900 --- /dev/null +++ b/hadoop-ozone/dev-support/checks/install/flekszible.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script installs Flekszible. +# Requires _install_tool from _lib.sh. Use `source` for both scripts, because it modifies $PATH. + +: ${FLEKSZIBLE_VERSION:="2.3.0"} + +_install_flekszible() { + mkdir bin + + local os=$(uname -s) + local arch=$(uname -m) + + curl -LSs "https://github.com/elek/flekszible/releases/download/v${FLEKSZIBLE_VERSION}/flekszible_${FLEKSZIBLE_VERSION}_${os}_${arch}.tar.gz" | tar -xz -f - -C bin + + chmod +x bin/flekszible +} + +_install_tool flekszible bin diff --git a/hadoop-ozone/dev-support/checks/install/hugo.sh b/hadoop-ozone/dev-support/checks/install/hugo.sh new file mode 100644 index 00000000000..279d668438d --- /dev/null +++ b/hadoop-ozone/dev-support/checks/install/hugo.sh @@ -0,0 +1,44 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script installs Hugo. +# Requires _install_tool from _lib.sh. Use `source` for both scripts, because it modifies $PATH. + +: ${HUGO_VERSION:=0.83.1} + +_install_hugo() { + local os=$(uname -s) + local arch=$(uname -m) + + mkdir bin + + case "${os}" in + Darwin) + os=macOS + ;; + esac + + case "${arch}" in + x86_64) + arch=64bit + ;; + esac + + curl -LSs "https://github.com/gohugoio/hugo/releases/download/v${HUGO_VERSION}/hugo_${HUGO_VERSION}_${os}-${arch}.tar.gz" | tar -xz -f - -C bin hugo + chmod +x bin/hugo +} + +_install_tool hugo bin diff --git a/hadoop-ozone/dev-support/checks/install/k3s.sh b/hadoop-ozone/dev-support/checks/install/k3s.sh new file mode 100644 index 00000000000..2cd59874267 --- /dev/null +++ b/hadoop-ozone/dev-support/checks/install/k3s.sh @@ -0,0 +1,28 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# This script installs K3S. +# Requires _install_tool from _lib.sh. Use `source` for both scripts, because it modifies $PATH. + +: ${K3S_VERSION:="v1.21.2+k3s1"} +: ${KUBECONFIG:=/etc/rancher/k3s/k3s.yaml} + +_install_k3s() { + curl -sfL https://get.k3s.io | INSTALL_K3S_VERSION="${K3S_VERSION}" sh - + sudo chmod a+r $KUBECONFIG +} + +_install_tool k3s diff --git a/hadoop-ozone/dev-support/checks/install/spotbugs.sh b/hadoop-ozone/dev-support/checks/install/spotbugs.sh new file mode 100644 index 00000000000..0d0cce3a394 --- /dev/null +++ b/hadoop-ozone/dev-support/checks/install/spotbugs.sh @@ -0,0 +1,26 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This script installs SpotBugs. +# Requires _install_tool from _lib.sh. Use `source` for both scripts, because it modifies $PATH. + +: ${SPOTBUGS_VERSION:=3.1.12} + +_install_spotbugs() { + curl -LSs "https://repo.maven.apache.org/maven2/com/github/spotbugs/spotbugs/${SPOTBUGS_VERSION}/spotbugs-${SPOTBUGS_VERSION}.tgz" | tar -xz -f - +} + +_install_tool spotbugs "spotbugs-${SPOTBUGS_VERSION}/bin" diff --git a/hadoop-ozone/dev-support/checks/kubernetes.sh b/hadoop-ozone/dev-support/checks/kubernetes.sh index e9ecfdf5f2a..4699ed5520e 100755 --- a/hadoop-ozone/dev-support/checks/kubernetes.sh +++ b/hadoop-ozone/dev-support/checks/kubernetes.sh @@ -19,19 +19,37 @@ set -u -o pipefail DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" cd "$DIR/../../.." || exit 1 -: ${KUBECONFIG:=/etc/rancher/k3s/k3s.yaml} - export KUBECONFIG source "${DIR}/_lib.sh" +source "${DIR}/install/flekszible.sh" + +# TODO these functions will be removed in HDDS-12099 +install_virtualenv() { + _install_tool virtualenv +} + +_install_virtualenv() { + sudo pip3 install virtualenv +} + +install_robot() { + _install_tool robot venv/bin +} + +_install_robot() { + virtualenv venv + source venv/bin/activate + pip install robotframework +} -install_flekszible install_virtualenv install_robot + if [[ "$(uname -s)" = "Darwin" ]]; then echo "Skip installing k3s, not supported on Mac. Make sure a working Kubernetes cluster is available." >&2 else - install_k3s + source "${DIR}/install/k3s.sh" fi REPORT_DIR=${OUTPUT_DIR:-"$DIR/../../../target/kubernetes"} From a0be99a3ba3258eff4b409f76cd5ccb6ca7cda91 Mon Sep 17 00:00:00 2001 From: Peter Lee Date: Wed, 22 Jan 2025 02:53:12 +0800 Subject: [PATCH 112/168] HDDS-12095. 
Include AWS request ID in S3G audit logs (#7725) --- .../hadoop/ozone/TestMultipartObjectGet.java | 2 ++ .../ozone/s3/endpoint/EndpointBase.java | 12 ++++++++++ .../ozone/s3/endpoint/RootEndpoint.java | 5 ++--- .../ozone/s3/TestS3GatewayAuditLog.java | 22 +++++++++++++------ .../s3/endpoint/TestAbortMultipartUpload.java | 2 ++ .../ozone/s3/endpoint/TestBucketAcl.java | 2 ++ .../ozone/s3/endpoint/TestBucketDelete.java | 2 ++ .../ozone/s3/endpoint/TestBucketHead.java | 2 ++ .../ozone/s3/endpoint/TestBucketList.java | 21 +++++++++++++----- .../ozone/s3/endpoint/TestBucketPut.java | 2 ++ .../endpoint/TestInitiateMultipartUpload.java | 3 +++ .../endpoint/TestMultipartUploadComplete.java | 3 +++ .../endpoint/TestMultipartUploadWithCopy.java | 2 ++ .../ozone/s3/endpoint/TestObjectDelete.java | 2 ++ .../ozone/s3/endpoint/TestObjectGet.java | 2 ++ .../ozone/s3/endpoint/TestObjectHead.java | 2 ++ .../s3/endpoint/TestObjectMultiDelete.java | 3 +++ .../ozone/s3/endpoint/TestObjectPut.java | 2 ++ .../s3/endpoint/TestObjectTaggingDelete.java | 4 +++- .../s3/endpoint/TestObjectTaggingGet.java | 2 ++ .../s3/endpoint/TestObjectTaggingPut.java | 3 +++ .../ozone/s3/endpoint/TestPartUpload.java | 4 ++++ .../s3/endpoint/TestPartUploadWithStream.java | 2 ++ .../s3/endpoint/TestPermissionCheck.java | 21 ++++++++++++++---- .../ozone/s3/endpoint/TestRootList.java | 2 ++ .../s3/endpoint/TestUploadWithStream.java | 2 ++ .../s3/metrics/TestS3GatewayMetrics.java | 6 +++++ 27 files changed, 117 insertions(+), 20 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java index 2a150683001..0c9af3b2ad4 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java @@ -23,6 +23,7 @@ import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.s3.RequestIdentifier; import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest; import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadResponse; import org.apache.hadoop.ozone.s3.endpoint.MultipartUploadInitiateResponse; @@ -98,6 +99,7 @@ public static void init() throws Exception { REST.setClient(client); REST.setOzoneConfiguration(conf); REST.setContext(context); + REST.setRequestIdentifier(new RequestIdentifier()); S3GatewayMetrics.create(conf); } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java index fbb0614c4f4..60525462707 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/EndpointBase.java @@ -80,6 +80,8 @@ import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_REGEX_PATTERN; import static org.apache.hadoop.ozone.s3.util.S3Consts.TAG_VALUE_LENGTH_LIMIT; +import org.apache.hadoop.ozone.s3.RequestIdentifier; + /** * Basic helpers for all the REST endpoints. 
*/ @@ -91,6 +93,8 @@ public abstract class EndpointBase implements Auditor { private OzoneClient client; @Inject private SignatureInfo signatureInfo; + @Inject + private RequestIdentifier requestIdentifier; private S3Auth s3Auth; @Context @@ -443,6 +447,9 @@ protected static Map validateAndGetTagging( private AuditMessage.Builder auditMessageBaseBuilder(AuditAction op, Map auditMap) { + auditMap.put("x-amz-request-id", requestIdentifier.getRequestId()); + auditMap.put("x-amz-id-2", requestIdentifier.getAmzId()); + AuditMessage.Builder builder = new AuditMessage.Builder() .forOperation(op) .withParams(auditMap); @@ -488,6 +495,11 @@ public void setClient(OzoneClient ozoneClient) { this.client = ozoneClient; } + @VisibleForTesting + public void setRequestIdentifier(RequestIdentifier requestIdentifier) { + this.requestIdentifier = requestIdentifier; + } + public OzoneClient getClient() { return client; } diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/RootEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/RootEndpoint.java index 09360b6395b..1e49c3d5f67 100644 --- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/RootEndpoint.java +++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/RootEndpoint.java @@ -21,7 +21,6 @@ import javax.ws.rs.Path; import javax.ws.rs.core.Response; import java.io.IOException; -import java.util.Collections; import java.util.Iterator; import org.apache.hadoop.ozone.audit.S3GAction; @@ -78,14 +77,14 @@ public Response get() auditSuccess = false; AUDIT.logReadFailure( buildAuditMessageForFailure(S3GAction.LIST_S3_BUCKETS, - Collections.emptyMap(), ex) + getAuditParameters(), ex) ); throw ex; } finally { if (auditSuccess) { AUDIT.logReadSuccess( buildAuditMessageForSuccess(S3GAction.LIST_S3_BUCKETS, - Collections.emptyMap()) + getAuditParameters()) ); } } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestS3GatewayAuditLog.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestS3GatewayAuditLog.java index 0b73e045027..0f2334960e8 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestS3GatewayAuditLog.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/TestS3GatewayAuditLog.java @@ -69,14 +69,15 @@ public class TestS3GatewayAuditLog { private ObjectEndpoint keyEndpoint; private OzoneBucket bucket; private Map parametersMap = new HashMap<>(); + private RequestIdentifier requestIdentifier; @BeforeEach public void setup() throws Exception { - parametersMap.clear(); clientStub = new OzoneClientStub(); clientStub.getObjectStore().createS3Bucket(bucketName); bucket = clientStub.getObjectStore().getS3Bucket(bucketName); + requestIdentifier = new RequestIdentifier(); bucketEndpoint = new BucketEndpoint() { @Override @@ -85,9 +86,10 @@ protected Map getAuditParameters() { } }; bucketEndpoint.setClient(clientStub); - + bucketEndpoint.setRequestIdentifier(requestIdentifier); rootEndpoint = new RootEndpoint(); rootEndpoint.setClient(clientStub); + rootEndpoint.setRequestIdentifier(requestIdentifier); keyEndpoint = new ObjectEndpoint() { @Override @@ -97,7 +99,7 @@ protected Map getAuditParameters() { }; keyEndpoint.setClient(clientStub); keyEndpoint.setOzoneConfiguration(new OzoneConfiguration()); - + keyEndpoint.setRequestIdentifier(requestIdentifier); } @AfterAll @@ -116,8 +118,11 @@ public void testHeadBucket() throws Exception { parametersMap.put("bucket", 
"[bucket]"); bucketEndpoint.head(bucketName); + String expected = "INFO | S3GAudit | ? | user=null | ip=null | " + - "op=HEAD_BUCKET {bucket=[bucket]} | ret=SUCCESS"; + "op=HEAD_BUCKET {bucket=[bucket], x-amz-request-id=" + + requestIdentifier.getRequestId() + ", x-amz-id-2=" + + requestIdentifier.getAmzId() + "} | ret=SUCCESS"; verifyLog(expected); } @@ -126,7 +131,9 @@ public void testListBucket() throws Exception { rootEndpoint.get().getEntity(); String expected = "INFO | S3GAudit | ? | user=null | ip=null | " + - "op=LIST_S3_BUCKETS {} | ret=SUCCESS"; + "op=LIST_S3_BUCKETS {x-amz-request-id=" + + requestIdentifier.getRequestId() + ", x-amz-id-2=" + + requestIdentifier.getAmzId() + "} | ret=SUCCESS"; verifyLog(expected); } @@ -145,9 +152,10 @@ public void testHeadObject() throws Exception { keyEndpoint.head(bucketName, "key1"); String expected = "INFO | S3GAudit | ? | user=null | ip=null | " + - "op=HEAD_KEY {bucket=[bucket], path=[key1]} | ret=SUCCESS"; + "op=HEAD_KEY {bucket=[bucket], path=[key1], x-amz-request-id=" + + requestIdentifier.getRequestId() + ", x-amz-id-2=" + + requestIdentifier.getAmzId() + "} | ret=SUCCESS"; verifyLog(expected); - } private void verifyLog(String expectedString) throws IOException { diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java index 1356b50ad35..c4781509b55 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java @@ -23,6 +23,7 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.s3.RequestIdentifier; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; @@ -58,6 +59,7 @@ public void testAbortMultipartUpload() throws Exception { rest.setHeaders(headers); rest.setClient(client); rest.setOzoneConfiguration(new OzoneConfiguration()); + rest.setRequestIdentifier(new RequestIdentifier()); Response response = rest.initializeMultipartUpload(bucket, key); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java index 16c8a793d00..5f1f4d31fc7 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketAcl.java @@ -24,6 +24,7 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.client.OzoneVolume; +import org.apache.hadoop.ozone.s3.RequestIdentifier; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; @@ -73,6 +74,7 @@ public void setup() throws IOException { bucketEndpoint = new BucketEndpoint(); bucketEndpoint.setClient(client); + bucketEndpoint.setRequestIdentifier(new RequestIdentifier()); } @AfterEach diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketDelete.java index 
cd5639b78c0..04aca4e2741 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketDelete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketDelete.java @@ -27,6 +27,7 @@ import org.apache.hadoop.ozone.client.ObjectStoreStub; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.s3.RequestIdentifier; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; @@ -59,6 +60,7 @@ public void setup() throws Exception { // Create HeadBucket and setClient to OzoneClientStub bucketEndpoint = new BucketEndpoint(); bucketEndpoint.setClient(clientStub); + bucketEndpoint.setRequestIdentifier(new RequestIdentifier()); } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketHead.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketHead.java index 2445a54060e..f85f42d2935 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketHead.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketHead.java @@ -25,6 +25,7 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.s3.RequestIdentifier; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import static java.net.HttpURLConnection.HTTP_NOT_FOUND; @@ -51,6 +52,7 @@ public void setup() throws Exception { // Create HeadBucket and setClient to OzoneClientStub bucketEndpoint = new BucketEndpoint(); bucketEndpoint.setClient(clientStub); + bucketEndpoint.setRequestIdentifier(new RequestIdentifier()); } @Test diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java index 638ac73ebdc..69bc5350806 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketList.java @@ -24,6 +24,7 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.s3.RequestIdentifier; import org.apache.hadoop.ozone.s3.commontypes.EncodingTypeObject; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; @@ -54,6 +55,7 @@ public void listRoot() throws OS3Exception, IOException { OzoneClient client = createClientWithKeys("file1", "dir1/file2"); getBucket.setClient(client); + getBucket.setRequestIdentifier(new RequestIdentifier()); ListObjectResponse getBucketResponse = (ListObjectResponse) getBucket.get("b1", "/", null, null, 100, "", @@ -78,6 +80,7 @@ public void listDir() throws OS3Exception, IOException { OzoneClient client = createClientWithKeys("dir1/file2", "dir1/dir2/file2"); getBucket.setClient(client); + getBucket.setRequestIdentifier(new RequestIdentifier()); ListObjectResponse getBucketResponse = (ListObjectResponse) getBucket.get("b1", "/", null, null, 100, @@ -101,7 +104,7 @@ public void listSubDir() throws OS3Exception, IOException { "dir1bha/file2"); getBucket.setClient(ozoneClient); - + 
getBucket.setRequestIdentifier(new RequestIdentifier()); ListObjectResponse getBucketResponse = (ListObjectResponse) getBucket .get("b1", "/", null, null, 100, "dir1/", null, @@ -138,6 +141,7 @@ public void listObjectOwner() throws OS3Exception, IOException { bucket.createKey("key2", 0).close(); getBucket.setClient(client); + getBucket.setRequestIdentifier(new RequestIdentifier()); ListObjectResponse getBucketResponse = (ListObjectResponse) getBucket.get("b1", "/", null, null, 100, "key", null, null, null, null, null).getEntity(); @@ -160,6 +164,7 @@ public void listWithPrefixAndDelimiter() throws OS3Exception, IOException { "dir1bha/file2", "file2"); getBucket.setClient(ozoneClient); + getBucket.setRequestIdentifier(new RequestIdentifier()); ListObjectResponse getBucketResponse = (ListObjectResponse) getBucket.get("b1", "/", null, null, 100, @@ -179,6 +184,7 @@ public void listWithPrefixAndDelimiter1() throws OS3Exception, IOException { "dir1bha/file2", "file2"); getBucket.setClient(ozoneClient); + getBucket.setRequestIdentifier(new RequestIdentifier()); ListObjectResponse getBucketResponse = (ListObjectResponse) getBucket.get("b1", "/", null, null, 100, @@ -200,7 +206,7 @@ public void listWithPrefixAndDelimiter2() throws OS3Exception, IOException { "dir1bha/file2", "file2"); getBucket.setClient(ozoneClient); - + getBucket.setRequestIdentifier(new RequestIdentifier()); ListObjectResponse getBucketResponse = (ListObjectResponse) getBucket.get("b1", "/", null, null, 100, "dir1bh", null, "dir1/dir2/file2", null, null, null).getEntity(); @@ -219,7 +225,7 @@ public void listWithPrefixAndEmptyStrDelimiter() "dir1/dir2/file2"); getBucket.setClient(ozoneClient); - + getBucket.setRequestIdentifier(new RequestIdentifier()); // Should behave the same if delimiter is null ListObjectResponse getBucketResponse = (ListObjectResponse) getBucket.get("b1", "", null, null, 100, "dir1/", @@ -248,7 +254,7 @@ public void listWithContinuationToken() throws OS3Exception, IOException { "dir1bha/file2", "file2"); getBucket.setClient(ozoneClient); - + getBucket.setRequestIdentifier(new RequestIdentifier()); int maxKeys = 2; // As we have 5 keys, with max keys 2 we should call list 3 times. @@ -299,6 +305,7 @@ public void listWithContinuationTokenDirBreak() "test/file8"); getBucket.setClient(ozoneClient); + getBucket.setRequestIdentifier(new RequestIdentifier()); int maxKeys = 2; @@ -342,6 +349,7 @@ public void listWithContinuationToken1() throws OS3Exception, IOException { "dir1bha/file1", "dir0/file1", "dir2/file1"); getBucket.setClient(ozoneClient); + getBucket.setRequestIdentifier(new RequestIdentifier()); int maxKeys = 2; // As we have 5 keys, with max keys 2 we should call list 3 times. 
@@ -383,6 +391,7 @@ public void listWithContinuationTokenFail() throws IOException { "dir1bha/file2", "dir1", "dir2", "dir3"); getBucket.setClient(ozoneClient); + getBucket.setRequestIdentifier(new RequestIdentifier()); OS3Exception e = assertThrows(OS3Exception.class, () -> getBucket.get("b1", "/", null, null, 2, "dir", "random", null, null, null, null) @@ -401,6 +410,7 @@ public void testStartAfter() throws IOException, OS3Exception { "dir1bha/file1", "dir0/file1", "dir2/file1"); getBucket.setClient(ozoneClient); + getBucket.setRequestIdentifier(new RequestIdentifier()); ListObjectResponse getBucketResponse = (ListObjectResponse) getBucket.get("b1", null, null, null, 1000, @@ -460,6 +470,7 @@ public void testEncodingType() throws IOException, OS3Exception { OzoneClient ozoneClient = createClientWithKeys("data=1970", "data==1970"); getBucket.setClient(ozoneClient); + getBucket.setRequestIdentifier(new RequestIdentifier()); String delimiter = "="; String prefix = "data="; @@ -508,7 +519,7 @@ public void testEncodingTypeException() throws IOException { OzoneClient client = new OzoneClientStub(); client.getObjectStore().createS3Bucket("b1"); getBucket.setClient(client); - + getBucket.setRequestIdentifier(new RequestIdentifier()); OS3Exception e = assertThrows(OS3Exception.class, () -> getBucket.get( "b1", null, "unSupportType", null, 1000, null, null, null, null, null, null).getEntity()); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketPut.java index 17c83aa44b0..2a575a2e72f 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestBucketPut.java @@ -26,6 +26,7 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.s3.RequestIdentifier; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import static java.net.HttpURLConnection.HTTP_CONFLICT; @@ -58,6 +59,7 @@ public void setup() throws Exception { // Create HeadBucket and setClient to OzoneClientStub bucketEndpoint = new BucketEndpoint(); bucketEndpoint.setClient(clientStub); + bucketEndpoint.setRequestIdentifier(new RequestIdentifier()); } @Test diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestInitiateMultipartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestInitiateMultipartUpload.java index 500010f63db..0954b7b2f54 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestInitiateMultipartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestInitiateMultipartUpload.java @@ -25,6 +25,8 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.s3.RequestIdentifier; + import jakarta.annotation.Nonnull; import org.junit.jupiter.api.Test; @@ -99,6 +101,7 @@ private ObjectEndpoint getObjectEndpoint(OzoneClient client, rest.setHeaders(headers); rest.setClient(client); rest.setOzoneConfiguration(new OzoneConfiguration()); + rest.setRequestIdentifier(new RequestIdentifier()); return rest; } } diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java index 4c5e2b53d90..d9da4ed866e 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java @@ -42,6 +42,8 @@ import java.util.List; import java.util.Map; import java.util.UUID; + +import org.apache.hadoop.ozone.s3.RequestIdentifier; import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part; import static java.nio.charset.StandardCharsets.UTF_8; @@ -74,6 +76,7 @@ public static void setUp() throws Exception { REST.setHeaders(HEADERS); REST.setClient(CLIENT); REST.setOzoneConfiguration(new OzoneConfiguration()); + REST.setRequestIdentifier(new RequestIdentifier()); } private String initiateMultipartUpload(String key) throws IOException, diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java index 6894fc4abea..1f08b2c65c7 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadWithCopy.java @@ -42,6 +42,7 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts; +import org.apache.hadoop.ozone.s3.RequestIdentifier; import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; @@ -127,6 +128,7 @@ public static void setUp() throws Exception { REST.setHeaders(headers); REST.setClient(CLIENT); REST.setOzoneConfiguration(new OzoneConfiguration()); + REST.setRequestIdentifier(new RequestIdentifier()); } @Test diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java index 340ed1984ec..af4b6f92f91 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java @@ -26,6 +26,7 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.s3.RequestIdentifier; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.junit.jupiter.api.Test; @@ -48,6 +49,7 @@ public void delete() throws IOException, OS3Exception { ObjectEndpoint rest = new ObjectEndpoint(); rest.setClient(client); + rest.setRequestIdentifier(new RequestIdentifier()); rest.setOzoneConfiguration(new OzoneConfiguration()); //WHEN diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java index 048faabcef0..af1564110b0 100644 --- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java @@ -34,6 +34,7 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.client.io.OzoneInputStream; +import org.apache.hadoop.ozone.s3.RequestIdentifier; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.commons.io.IOUtils; @@ -92,6 +93,7 @@ public void init() throws OS3Exception, IOException { rest.setOzoneConfiguration(new OzoneConfiguration()); headers = mock(HttpHeaders.class); rest.setHeaders(headers); + rest.setRequestIdentifier(new RequestIdentifier()); ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); rest.put(BUCKET_NAME, KEY_NAME, CONTENT.length(), diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java index 88a5f00e440..73b556b6813 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectHead.java @@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; +import org.apache.hadoop.ozone.s3.RequestIdentifier; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import static java.net.HttpURLConnection.HTTP_NOT_FOUND; @@ -66,6 +67,7 @@ public void setup() throws IOException { // Create HeadBucket and setClient to OzoneClientStub keyEndpoint = new ObjectEndpoint(); keyEndpoint.setClient(clientStub); + keyEndpoint.setRequestIdentifier(new RequestIdentifier()); keyEndpoint.setOzoneConfiguration(new OzoneConfiguration()); } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectMultiDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectMultiDelete.java index 413b3afb274..9152da41efd 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectMultiDelete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectMultiDelete.java @@ -29,6 +29,7 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.client.OzoneKey; +import org.apache.hadoop.ozone.s3.RequestIdentifier; import org.apache.hadoop.ozone.s3.endpoint.MultiDeleteRequest.DeleteObject; import org.apache.hadoop.ozone.s3.exception.OS3Exception; @@ -51,6 +52,7 @@ public void delete() throws IOException, OS3Exception, JAXBException { BucketEndpoint rest = new BucketEndpoint(); rest.setClient(client); + rest.setRequestIdentifier(new RequestIdentifier()); MultiDeleteRequest mdr = new MultiDeleteRequest(); mdr.getObjects().add(new DeleteObject("key1")); @@ -82,6 +84,7 @@ public void deleteQuiet() throws IOException, OS3Exception, JAXBException { BucketEndpoint rest = new BucketEndpoint(); rest.setClient(client); + rest.setRequestIdentifier(new RequestIdentifier()); MultiDeleteRequest mdr = new MultiDeleteRequest(); mdr.setQuiet(true); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java index a36d756ddaa..94ce257598e 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectPut.java @@ -50,6 +50,7 @@ import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.client.io.OzoneInputStream; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.hadoop.ozone.s3.RequestIdentifier; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.exception.S3ErrorTable; import org.apache.http.HttpStatus; @@ -134,6 +135,7 @@ void setup() throws IOException { objectEndpoint = spy(new ObjectEndpoint()); objectEndpoint.setClient(clientStub); objectEndpoint.setOzoneConfiguration(config); + objectEndpoint.setRequestIdentifier(new RequestIdentifier()); headers = mock(HttpHeaders.class); objectEndpoint.setHeaders(headers); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java index 91f8869dc91..eec965db8e6 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingDelete.java @@ -25,6 +25,7 @@ import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import org.apache.hadoop.ozone.s3.RequestIdentifier; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -77,6 +78,7 @@ public void init() throws OS3Exception, IOException { rest = new ObjectEndpoint(); rest.setClient(client); rest.setOzoneConfiguration(config); + rest.setRequestIdentifier(new RequestIdentifier()); headers = Mockito.mock(HttpHeaders.class); rest.setHeaders(headers); body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); @@ -137,7 +139,7 @@ public void testDeleteObjectTaggingNotImplemented() throws Exception { ObjectEndpoint endpoint = new ObjectEndpoint(); endpoint.setClient(mockClient); - + endpoint.setRequestIdentifier(new RequestIdentifier()); doThrow(new OMException("DeleteObjectTagging is not currently supported for FSO directory", ResultCodes.NOT_SUPPORTED_OPERATION)).when(mockBucket).deleteObjectTagging("dir/"); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java index f379ae71f59..75563fd3a95 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingGet.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.s3.RequestIdentifier; import org.apache.hadoop.ozone.s3.endpoint.S3Tagging.Tag; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.junit.jupiter.api.BeforeEach; @@ -64,6 +65,7 @@ public void init() throws OS3Exception, 
IOException { rest = new ObjectEndpoint(); rest.setClient(client); rest.setOzoneConfiguration(config); + rest.setRequestIdentifier(new RequestIdentifier()); HttpHeaders headers = Mockito.mock(HttpHeaders.class); rest.setHeaders(headers); ByteArrayInputStream body = new ByteArrayInputStream(CONTENT.getBytes(UTF_8)); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java index 478ab8ba79f..8c7c73c92ee 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectTaggingPut.java @@ -26,6 +26,7 @@ import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes; +import org.apache.hadoop.ozone.s3.RequestIdentifier; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -78,6 +79,7 @@ void setup() throws IOException, OS3Exception { objectEndpoint = new ObjectEndpoint(); objectEndpoint.setClient(clientStub); objectEndpoint.setOzoneConfiguration(config); + objectEndpoint.setRequestIdentifier(new RequestIdentifier()); HttpHeaders headers = mock(HttpHeaders.class); ByteArrayInputStream body = @@ -172,6 +174,7 @@ public void testPutObjectTaggingNotImplemented() throws Exception { twoTagsMap.put("tag1", "val1"); twoTagsMap.put("tag2", "val2"); endpoint.setClient(mockClient); + endpoint.setRequestIdentifier(new RequestIdentifier()); doThrow(new OMException("PutObjectTagging is not currently supported for FSO directory", ResultCodes.NOT_SUPPORTED_OPERATION)).when(mockBucket).putObjectTagging("dir/", twoTagsMap); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java index dbafa8c11cb..67d414f22cb 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java @@ -26,6 +26,7 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts; +import org.apache.hadoop.ozone.s3.RequestIdentifier; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -81,6 +82,7 @@ public static void setUp() throws Exception { REST.setHeaders(headers); REST.setClient(client); REST.setOzoneConfiguration(new OzoneConfiguration()); + REST.setRequestIdentifier(new RequestIdentifier()); } @@ -159,6 +161,7 @@ public void testPartUploadStreamContentLength() objectEndpoint.setHeaders(headers); objectEndpoint.setClient(client); objectEndpoint.setOzoneConfiguration(new OzoneConfiguration()); + objectEndpoint.setRequestIdentifier(new RequestIdentifier()); String keyName = UUID.randomUUID().toString(); String chunkedContent = "0a;chunk-signature=signature\r\n" @@ -221,6 +224,7 @@ public void testPartUploadMessageDigestResetDuringException() throws IOException objectEndpoint.setHeaders(headers); objectEndpoint.setClient(clientStub); objectEndpoint.setOzoneConfiguration(new 
OzoneConfiguration()); + objectEndpoint.setRequestIdentifier(new RequestIdentifier()); Response response = objectEndpoint.initializeMultipartUpload(OzoneConsts.S3_BUCKET, OzoneConsts.KEY); diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java index dc844f6463f..30b4823ff86 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java @@ -25,6 +25,7 @@ import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.s3.RequestIdentifier; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.junit.jupiter.api.BeforeAll; @@ -66,6 +67,7 @@ public static void setUp() throws Exception { REST.setHeaders(headers); REST.setClient(client); + REST.setRequestIdentifier(new RequestIdentifier()); OzoneConfiguration conf = new OzoneConfiguration(); conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java index d256a346295..886ce311acc 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java @@ -28,6 +28,7 @@ import org.apache.hadoop.ozone.client.protocol.ClientProtocol; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.ErrorInfo; +import org.apache.hadoop.ozone.s3.RequestIdentifier; import org.apache.hadoop.ozone.s3.exception.OS3Exception; import org.apache.hadoop.ozone.s3.metrics.S3GatewayMetrics; import org.junit.jupiter.api.BeforeEach; @@ -73,6 +74,7 @@ public class TestPermissionCheck { private OzoneVolume volume; private OMException exception; private HttpHeaders headers; + private RequestIdentifier requestIdentifier; @BeforeEach public void setup() { @@ -92,6 +94,7 @@ public void setup() { clientProtocol = mock(ClientProtocol.class); S3GatewayMetrics.create(conf); when(client.getProxy()).thenReturn(clientProtocol); + requestIdentifier = new RequestIdentifier(); } /** @@ -102,6 +105,7 @@ public void testListS3Buckets() throws IOException { doThrow(exception).when(objectStore).getS3Volume(); RootEndpoint rootEndpoint = new RootEndpoint(); rootEndpoint.setClient(client); + rootEndpoint.setRequestIdentifier(requestIdentifier); OS3Exception e = assertThrows(OS3Exception.class, () -> rootEndpoint.get()); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); } @@ -114,6 +118,7 @@ public void testGetBucket() throws IOException { doThrow(exception).when(objectStore).getS3Bucket(anyString()); BucketEndpoint bucketEndpoint = new BucketEndpoint(); bucketEndpoint.setClient(client); + bucketEndpoint.setRequestIdentifier(requestIdentifier); OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.head("bucketName")); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); @@ -125,6 +130,7 @@ public void testCreateBucket() throws IOException { doThrow(exception).when(objectStore).createS3Bucket(anyString()); BucketEndpoint 
bucketEndpoint = new BucketEndpoint(); bucketEndpoint.setClient(client); + bucketEndpoint.setRequestIdentifier(requestIdentifier); OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.put("bucketName", null, null, null)); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); @@ -135,7 +141,7 @@ public void testDeleteBucket() throws IOException { doThrow(exception).when(objectStore).deleteS3Bucket(anyString()); BucketEndpoint bucketEndpoint = new BucketEndpoint(); bucketEndpoint.setClient(client); - + bucketEndpoint.setRequestIdentifier(requestIdentifier); OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.delete("bucketName")); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); @@ -146,7 +152,7 @@ public void testListMultiUpload() throws IOException { doThrow(exception).when(bucket).listMultipartUploads(anyString()); BucketEndpoint bucketEndpoint = new BucketEndpoint(); bucketEndpoint.setClient(client); - + bucketEndpoint.setRequestIdentifier(requestIdentifier); OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.listMultipartUploads("bucketName", "prefix")); assertEquals(HTTP_FORBIDDEN, e.getHttpCode()); @@ -160,7 +166,7 @@ public void testListKey() throws IOException { anyBoolean()); BucketEndpoint bucketEndpoint = new BucketEndpoint(); bucketEndpoint.setClient(client); - + bucketEndpoint.setRequestIdentifier(requestIdentifier); OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.get( "bucketName", null, null, null, 1000, null, null, null, null, null, null)); @@ -177,6 +183,7 @@ public void testDeleteKeys() throws IOException, OS3Exception { BucketEndpoint bucketEndpoint = new BucketEndpoint(); bucketEndpoint.setClient(client); + bucketEndpoint.setRequestIdentifier(requestIdentifier); MultiDeleteRequest request = new MultiDeleteRequest(); List objectList = new ArrayList<>(); objectList.add(new MultiDeleteRequest.DeleteObject("deleteKeyName")); @@ -204,6 +211,7 @@ public void testGetAcl() throws Exception { .thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root"); BucketEndpoint bucketEndpoint = new BucketEndpoint(); bucketEndpoint.setClient(client); + bucketEndpoint.setRequestIdentifier(requestIdentifier); OS3Exception e = assertThrows(OS3Exception.class, () -> bucketEndpoint.get( "bucketName", null, null, null, 1000, null, null, null, null, "acl", null), "Expected OS3Exception with FORBIDDEN http code."); @@ -225,6 +233,7 @@ public void testSetAcl() throws Exception { .thenReturn(S3Acl.ACLIdentityType.USER.getHeaderType() + "=root"); BucketEndpoint bucketEndpoint = new BucketEndpoint(); bucketEndpoint.setClient(client); + bucketEndpoint.setRequestIdentifier(requestIdentifier); try { bucketEndpoint.put("bucketName", "acl", headers, null); } catch (Exception e) { @@ -245,6 +254,7 @@ public void testGetKey() throws IOException { objectEndpoint.setClient(client); objectEndpoint.setHeaders(headers); objectEndpoint.setOzoneConfiguration(conf); + objectEndpoint.setRequestIdentifier(requestIdentifier); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.get( "bucketName", "keyPath", 0, null, 1000, "marker", null)); @@ -261,6 +271,7 @@ public void testPutKey() throws IOException { objectEndpoint.setClient(client); objectEndpoint.setHeaders(headers); objectEndpoint.setOzoneConfiguration(conf); + objectEndpoint.setRequestIdentifier(requestIdentifier); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.put( "bucketName", "keyPath", 1024, 0, null, null, null, @@ -277,6 +288,7 @@ public 
void testDeleteKey() throws IOException { objectEndpoint.setClient(client); objectEndpoint.setHeaders(headers); objectEndpoint.setOzoneConfiguration(conf); + objectEndpoint.setRequestIdentifier(requestIdentifier); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.delete("bucketName", "keyPath", null, null)); @@ -291,6 +303,7 @@ public void testMultiUploadKey() throws IOException { objectEndpoint.setClient(client); objectEndpoint.setHeaders(headers); objectEndpoint.setOzoneConfiguration(conf); + objectEndpoint.setRequestIdentifier(requestIdentifier); OS3Exception e = assertThrows(OS3Exception.class, () -> objectEndpoint.initializeMultipartUpload("bucketName", "keyPath")); @@ -309,7 +322,7 @@ public void testObjectTagging() throws Exception { ObjectEndpoint objectEndpoint = new ObjectEndpoint(); objectEndpoint.setClient(client); - + objectEndpoint.setRequestIdentifier(requestIdentifier); String xml = "" + " " + diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java index 312ada4f2b9..dd7282a88cb 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestRootList.java @@ -22,6 +22,7 @@ import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.s3.RequestIdentifier; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -45,6 +46,7 @@ public void setup() throws Exception { // Create HeadBucket and setClient to OzoneClientStub rootEndpoint = new RootEndpoint(); rootEndpoint.setClient(clientStub); + rootEndpoint.setRequestIdentifier(new RequestIdentifier()); } diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java index 1c0e115a24c..27f35f835d5 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestUploadWithStream.java @@ -29,6 +29,7 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; +import org.apache.hadoop.ozone.s3.RequestIdentifier; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; @@ -79,6 +80,7 @@ public static void setUp() throws Exception { REST.setHeaders(HEADERS); REST.setClient(client); + REST.setRequestIdentifier(new RequestIdentifier()); OzoneConfiguration conf = new OzoneConfiguration(); conf.setBoolean(OzoneConfigKeys.HDDS_CONTAINER_RATIS_DATASTREAM_ENABLED, diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java index 1f6cee2c4a9..e3287e539f7 100644 --- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java +++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java @@ -24,6 +24,7 @@ import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.OzoneClientStub; 
+import org.apache.hadoop.ozone.s3.RequestIdentifier; import org.apache.hadoop.ozone.s3.endpoint.BucketEndpoint; import org.apache.hadoop.ozone.s3.endpoint.ObjectEndpoint; import org.apache.hadoop.ozone.s3.endpoint.RootEndpoint; @@ -87,15 +88,20 @@ public void setup() throws Exception { clientStub.getObjectStore().createS3Bucket(bucketName); bucket = clientStub.getObjectStore().getS3Bucket(bucketName); + RequestIdentifier requestIdentifier = new RequestIdentifier(); + bucketEndpoint = new BucketEndpoint(); bucketEndpoint.setClient(clientStub); + bucketEndpoint.setRequestIdentifier(requestIdentifier); rootEndpoint = new RootEndpoint(); rootEndpoint.setClient(clientStub); + rootEndpoint.setRequestIdentifier(requestIdentifier); keyEndpoint = new ObjectEndpoint(); keyEndpoint.setClient(clientStub); keyEndpoint.setOzoneConfiguration(new OzoneConfiguration()); + keyEndpoint.setRequestIdentifier(requestIdentifier); headers = mock(HttpHeaders.class); when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn( From 4df73d568eacffa97d4c1d8c39b43ab0c838df39 Mon Sep 17 00:00:00 2001 From: Nandakumar Vadivelu Date: Wed, 22 Jan 2025 14:03:17 +0530 Subject: [PATCH 113/168] HDDS-12106. Enable sortpom in integration-test and fault-injection-test. (#7734) --- .../mini-chaos-tests/pom.xml | 33 ++- .../network-tests/pom.xml | 24 +- hadoop-ozone/fault-injection-test/pom.xml | 15 +- hadoop-ozone/integration-test/pom.xml | 234 +++++++++--------- 4 files changed, 140 insertions(+), 166 deletions(-) diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml b/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml index 90961941a46..622565a6431 100644 --- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml +++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/pom.xml @@ -12,21 +12,19 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 - ozone-fault-injection-test org.apache.ozone + ozone-fault-injection-test 2.0.0-SNAPSHOT + + mini-chaos-tests 2.0.0-SNAPSHOT - Apache Ozone Mini Ozone Chaos Tests Apache Ozone Mini Ozone Chaos Tests + Apache Ozone Mini Ozone Chaos Tests - mini-chaos-tests org.apache.ozone @@ -35,30 +33,30 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> - org.junit.platform - junit-platform-launcher + org.apache.ozone + hdds-hadoop-dependency-test test org.apache.ozone - hdds-test-utils + hdds-server-scm + test-jar test org.apache.ozone - ozone-integration-test + hdds-test-utils test - test-jar org.apache.ozone - hdds-server-scm - test + ozone-integration-test test-jar + test - org.apache.ozone - hdds-hadoop-dependency-test + org.junit.platform + junit-platform-launcher test @@ -76,8 +74,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> com.github.spotbugs spotbugs-maven-plugin - ${basedir}/dev-support/findbugsExcludeFile.xml - + ${basedir}/dev-support/findbugsExcludeFile.xml diff --git a/hadoop-ozone/fault-injection-test/network-tests/pom.xml b/hadoop-ozone/fault-injection-test/network-tests/pom.xml index 35874911730..7cebddbf093 100644 --- a/hadoop-ozone/fault-injection-test/network-tests/pom.xml +++ b/hadoop-ozone/fault-injection-test/network-tests/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -23,12 +20,13 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> 2.0.0-SNAPSHOT ozone-network-tests - Apache Ozone Network Tests - Apache Ozone Network Tests jar + Apache Ozone Network Tests + Apache Ozone Network Tests - true + + true @@ -45,10 +43,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> copy-resources - process-resources copy-resources + process-resources ${project.build.directory} @@ -81,10 +79,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> exec-maven-plugin - integration-test exec + integration-test python @@ -94,12 +92,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> ${basedir}/src/test/blockade/ - - ${ozone.home} - - - ${project.build.directory} - + ${ozone.home} + ${project.build.directory} diff --git a/hadoop-ozone/fault-injection-test/pom.xml b/hadoop-ozone/fault-injection-test/pom.xml index 1306013726d..ca816165dba 100644 --- a/hadoop-ozone/fault-injection-test/pom.xml +++ b/hadoop-ozone/fault-injection-test/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. --> - + 4.0.0 org.apache.ozone @@ -24,17 +21,13 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> ozone-fault-injection-test 2.0.0-SNAPSHOT - Apache Ozone Fault Injection Tests - Apache Ozone Fault Injection Tests pom - - - true - + Apache Ozone Fault Injection Tests + Apache Ozone Fault Injection Tests - network-tests mini-chaos-tests + network-tests diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml index f4e6b73cc63..a7366ff7e5d 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -12,10 +12,7 @@ See the License for the specific language governing permissions and limitations under the License. See accompanying LICENSE file. 
--> - + 4.0.0 org.apache.ozone @@ -24,67 +21,38 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> ozone-integration-test 2.0.0-SNAPSHOT - Apache Ozone Integration Tests - Apache Ozone Integration Tests jar - - - true - + Apache Ozone Integration Tests + Apache Ozone Integration Tests - org.apache.ozone - ozone-common + hdds-server-scm org.apache.ozone - hdds-test-utils - test + hdds-tools org.apache.ozone - hdds-server-scm + ozone-client org.apache.ozone - hdds-server-framework - ${hdds.version} - test-jar - test + ozone-common org.apache.ozone - ozone-manager - - - org.apache.hadoop - hadoop-minikdc - test - - - log4j - log4j - - - ch.qos.reload4j - reload4j - - - org.slf4j - * - - + ozone-csi - org.apache.ozone - ozone-s3gateway + ozone-filesystem org.apache.ozone - ozone-csi + ozone-manager org.apache.ozone @@ -92,119 +60,114 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.ozone - ozone-client - - - org.apache.ozone - ozone-filesystem + ozone-s3gateway org.apache.ozone ozone-tools - org.apache.ozone - hdds-tools + org.apache.ratis + ratis-server - org.apache.commons - commons-lang3 - test + org.assertj + assertj-core + ${assertj.version} - org.apache.ozone - ozone-manager - test - test-jar + org.hamcrest + hamcrest - org.apache.ozone - hdds-common - test-jar - test + org.slf4j + jul-to-slf4j - org.junit.platform - junit-platform-launcher + org.apache.commons + commons-lang3 test org.apache.hadoop - hadoop-kms + hadoop-distcp test - - log4j - log4j - ch.qos.reload4j reload4j - org.slf4j - * + log4j + log4j - com.sun.jersey - jersey-servlet + org.slf4j + * org.apache.hadoop - hadoop-kms + hadoop-distcp test-jar test - - log4j - log4j - ch.qos.reload4j reload4j - org.slf4j - * + log4j + log4j - com.sun.jersey - jersey-servlet + org.slf4j + * - - org.apache.ozone - hdds-server-scm - test - test-jar - - - org.apache.ozone - hdds-container-service - test - test-jar - - - org.apache.ozone - hdds-hadoop-dependency-test - test - org.apache.hadoop - hadoop-distcp + hadoop-kms test + + ch.qos.reload4j + reload4j + + + com.sun.jersey + jersey-servlet + log4j log4j + + org.slf4j + * + + + + + org.apache.hadoop + hadoop-kms + test-jar + test + ch.qos.reload4j reload4j + + com.sun.jersey + jersey-servlet + + + log4j + log4j + org.slf4j * @@ -216,38 +179,37 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> hadoop-mapreduce-client-jobclient test - - log4j - log4j - ch.qos.reload4j reload4j - - org.slf4j - * - com.sun.jersey jersey-servlet + + log4j + log4j + + + org.slf4j + * + org.apache.hadoop - hadoop-distcp + hadoop-minikdc test - test-jar - - log4j - log4j - ch.qos.reload4j reload4j + + log4j + log4j + org.slf4j * @@ -255,22 +217,50 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd"> - org.apache.ratis - ratis-server + org.apache.ozone + hdds-common + test-jar + test - - org.hamcrest - hamcrest + org.apache.ozone + hdds-container-service + test-jar + test - org.slf4j - jul-to-slf4j + org.apache.ozone + hdds-hadoop-dependency-test + test - org.assertj - assertj-core - ${assertj.version} + org.apache.ozone + hdds-server-framework + ${hdds.version} + test-jar + test + + + org.apache.ozone + hdds-server-scm + test-jar + test + + + org.apache.ozone + hdds-test-utils + test + + + org.apache.ozone + ozone-manager + test-jar + test + + + org.junit.platform + junit-platform-launcher + test From bb96826af245a0b5ddaebcd6749425d1fd96cb11 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Wed, 22 Jan 2025 15:32:09 +0100 Subject: [PATCH 114/168] 
HDDS-12124. Disable resource filtering for VI swap files (#7736) --- hadoop-ozone/recon/pom.xml | 4 ---- pom.xml | 7 +++++++ 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/hadoop-ozone/recon/pom.xml b/hadoop-ozone/recon/pom.xml index 1fad8fab076..87e75e934b4 100644 --- a/hadoop-ozone/recon/pom.xml +++ b/hadoop-ozone/recon/pom.xml @@ -509,10 +509,6 @@ true - - woff - woff2 - diff --git a/pom.xml b/pom.xml index 8242711d6eb..29471b34dd4 100644 --- a/pom.xml +++ b/pom.xml @@ -1370,6 +1370,13 @@ org.apache.maven.plugins maven-resources-plugin ${maven-resources-plugin.version} + + + swp + woff + woff2 + + org.codehaus.mojo From 642b1c74b91679b8a40a99646f782628dc163b71 Mon Sep 17 00:00:00 2001 From: Rishabh Patel <1607531+ptlrs@users.noreply.github.com> Date: Wed, 22 Jan 2025 07:07:50 -0800 Subject: [PATCH 115/168] HDDS-12057. ReadReplicas should handle key names containing / (#7735) --- .../java/org/apache/hadoop/ozone/debug/ReadReplicas.java | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java index c88245a571b..d029d80a03a 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/ReadReplicas.java @@ -109,8 +109,11 @@ protected void execute(OzoneClient client, OzoneAddress address) String volumeName = address.getVolumeName(); String bucketName = address.getBucketName(); String keyName = address.getKeyName(); + // Multilevel keys will have a '/' in their names. This interferes with + // directory and file creation process. Flatten the keys to fix this. + String sanitizedKeyName = address.getKeyName().replace("/", "_"); - File dir = createDirectory(volumeName, bucketName, keyName); + File dir = createDirectory(volumeName, bucketName, sanitizedKeyName); OzoneKeyDetails keyInfoDetails = checksumClient.getKeyDetails(volumeName, bucketName, keyName); @@ -128,13 +131,13 @@ protected void execute(OzoneClient client, OzoneAddress address) result.put(JSON_PROPERTY_FILE_SIZE, keyInfoDetails.getDataSize()); ArrayNode blocks = JsonUtils.createArrayNode(); - downloadReplicasAndCreateManifest(keyName, replicas, + downloadReplicasAndCreateManifest(sanitizedKeyName, replicas, replicasWithoutChecksum, dir, blocks); result.set(JSON_PROPERTY_FILE_BLOCKS, blocks); String prettyJson = JsonUtils.toJsonStringWithDefaultPrettyPrinter(result); - String manifestFileName = keyName + "_manifest"; + String manifestFileName = sanitizedKeyName + "_manifest"; System.out.println("Writing manifest file : " + manifestFileName); File manifestFile = new File(dir, manifestFileName); From 518338f476b2b67fa6f63f232337dc306b396dc7 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Thu, 23 Jan 2025 10:34:50 +0100 Subject: [PATCH 116/168] HDDS-11946. 
Require all ozone repair commands to support a --dry-run option (#7682) --- .../ozone/repair/om/TestFSORepairTool.java | 4 +- .../ozone/shell/TestOzoneRepairShell.java | 12 +++-- .../hadoop/ozone/repair/ReadOnlyCommand.java | 23 ++++++++++ .../hadoop/ozone/repair/RepairTool.java | 18 +++++++- .../hadoop/ozone/repair/om/FSORepairTool.java | 44 ++++--------------- .../ozone/repair/om/SnapshotChainRepair.java | 18 +++----- .../repair/om/TransactionInfoRepair.java | 15 ++++--- .../ozone/repair/om/quota/QuotaStatus.java | 9 ++-- .../ozone/repair/om/quota/QuotaTrigger.java | 6 ++- .../scm/cert/RecoverSCMCertificate.java | 20 +++++---- .../hadoop/ozone/repair/TestOzoneRepair.java | 28 ++++++++++++ 11 files changed, 125 insertions(+), 72 deletions(-) create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ReadOnlyCommand.java diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java index 2969931808a..a9b258e2f61 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/repair/om/TestFSORepairTool.java @@ -346,8 +346,8 @@ private int dryRun(String... args) { private int execute(boolean dryRun, String... args) { List argList = new ArrayList<>(Arrays.asList("om", "fso-tree", "--db", dbPath)); - if (!dryRun) { - argList.add("--repair"); + if (dryRun) { + argList.add("--dry-run"); } argList.addAll(Arrays.asList(args)); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java index 4cc8ecaa030..7446bd0afa2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java @@ -51,6 +51,7 @@ public class TestOzoneRepairShell { private GenericTestUtils.PrintStreamCapturer err; private static MiniOzoneCluster cluster = null; private static OzoneConfiguration conf = null; + private static String om; private static final String TRANSACTION_INFO_TABLE_TERM_INDEX_PATTERN = "([0-9]+#[0-9]+)"; @@ -59,6 +60,7 @@ public static void init() throws Exception { conf = new OzoneConfiguration(); cluster = MiniOzoneCluster.newBuilder(conf).build(); cluster.waitForClusterToBeReady(); + om = conf.get(OZONE_OM_ADDRESS_KEY); } @BeforeEach @@ -136,17 +138,21 @@ private String[] parseScanOutput(String output) { public void testQuotaRepair() throws Exception { CommandLine cmd = new OzoneRepair().getCmd(); - int exitCode = cmd.execute("om", "quota", "status", "--service-host", conf.get(OZONE_OM_ADDRESS_KEY)); + int exitCode = cmd.execute("om", "quota", "status", "--service-host", om); assertEquals(0, exitCode, err); + cmd.execute("om", "quota", "start", "--dry-run", "--service-host", om); + cmd.execute("om", "quota", "status", "--service-host", om); + assertThat(out.get()).doesNotContain("lastRun"); + exitCode = withTextFromSystemIn("y") - .execute(() -> cmd.execute("om", "quota", "start", "--service-host", conf.get(OZONE_OM_ADDRESS_KEY))); + .execute(() -> cmd.execute("om", "quota", "start", "--service-host", om)); assertEquals(0, exitCode, err); GenericTestUtils.waitFor(() -> { out.reset(); // verify quota 
trigger is completed having non-zero lastRunFinishedTime - cmd.execute("om", "quota", "status", "--service-host", conf.get(OZONE_OM_ADDRESS_KEY)); + cmd.execute("om", "quota", "status", "--service-host", om); try { return out.get().contains("\"lastRunFinishedTime\":\"\""); } catch (Exception ex) { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ReadOnlyCommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ReadOnlyCommand.java new file mode 100644 index 00000000000..629efb4c0a6 --- /dev/null +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/ReadOnlyCommand.java @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.ozone.repair; + +/** Marker interface for repair subcommands that do not modify state. */ +public interface ReadOnlyCommand { + // marker +} diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java index d873d07645d..1ae033e2e71 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java @@ -25,6 +25,7 @@ import java.util.concurrent.Callable; /** Parent class for all actionable repair commands. */ +@CommandLine.Command public abstract class RepairTool extends AbstractSubcommand implements Callable { private static final String WARNING_SYS_USER_MESSAGE = @@ -35,12 +36,20 @@ public abstract class RepairTool extends AbstractSubcommand implements Callable< description = "Use this flag if you want to bypass the check in false-positive cases.") private boolean force; + @CommandLine.Option(names = {"--dry-run"}, + defaultValue = "false", + fallbackValue = "true", + description = "Simulate repair, but do not make any changes") + private boolean dryRun; + /** Hook method for subclasses for performing actual repair task. */ protected abstract void execute() throws Exception; @Override public final Void call() throws Exception { - confirmUser(); + if (!dryRun) { + confirmUser(); + } execute(); return null; } @@ -65,6 +74,10 @@ protected boolean checkIfServiceIsRunning(String serviceName) { return false; } + protected boolean isDryRun() { + return dryRun; + } + protected void info(String msg, Object... 
args) { out().println(formatMessage(msg, args)); } @@ -77,6 +90,9 @@ private String formatMessage(String msg, Object[] args) { if (args != null && args.length > 0) { msg = String.format(msg, args); } + if (isDryRun()) { + msg = "[dry run] " + msg; + } return msg; } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java index a4068415db6..eb5a5dd9a2f 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java @@ -85,11 +85,6 @@ public class FSORepairTool extends RepairTool { description = "Path to OM RocksDB") private String omDBPath; - @CommandLine.Option(names = {"-r", "--repair"}, - defaultValue = "false", - description = "Run in repair mode to move unreferenced files and directories to deleted tables.") - private boolean repair; - @CommandLine.Option(names = {"-v", "--volume"}, description = "Filter by volume name. Add '/' before the volume name.") private String volumeFilter; @@ -99,7 +94,7 @@ public class FSORepairTool extends RepairTool { private String bucketFilter; @CommandLine.Option(names = {"--verbose"}, - description = "Verbose output. Show all intermediate steps and deleted keys info.") + description = "Verbose output. Show all intermediate steps.") private boolean verbose; @Override @@ -107,11 +102,6 @@ public void execute() throws Exception { if (checkIfServiceIsRunning("OM")) { return; } - if (repair) { - info("FSO Repair Tool is running in repair mode"); - } else { - info("FSO Repair Tool is running in debug mode"); - } try { Impl repairTool = new Impl(); repairTool.run(); @@ -274,18 +264,12 @@ private boolean checkIfSnapshotExistsForBucket(String volumeName, String bucketN } private void processBucket(OmVolumeArgs volume, OmBucketInfo bucketInfo) throws IOException { - info("Processing bucket: " + volume.getVolume() + "/" + bucketInfo.getBucketName()); if (checkIfSnapshotExistsForBucket(volume.getVolume(), bucketInfo.getBucketName())) { - if (!repair) { - info( - "Snapshot detected in bucket '" + volume.getVolume() + "/" + bucketInfo.getBucketName() + "'. 
"); - } else { - info( - "Skipping repair for bucket '" + volume.getVolume() + "/" + bucketInfo.getBucketName() + "' " + - "due to snapshot presence."); - return; - } + info("Skipping repair for bucket '" + volume.getVolume() + "/" + bucketInfo.getBucketName() + "' " + + "due to snapshot presence."); + return; } + info("Processing bucket: " + volume.getVolume() + "/" + bucketInfo.getBucketName()); markReachableObjectsInBucket(volume, bucketInfo); handleUnreachableAndUnreferencedObjects(volume, bucketInfo); } @@ -359,15 +343,10 @@ private void handleUnreachableAndUnreferencedObjects(OmVolumeArgs volume, OmBuck if (!isReachable(dirKey)) { if (!isDirectoryInDeletedDirTable(dirKey)) { - info("Found unreferenced directory: " + dirKey); unreferencedStats.addDir(); - if (!repair) { - if (verbose) { - info("Marking unreferenced directory " + dirKey + " for deletion."); - } - } else { - info("Deleting unreferenced directory " + dirKey); + info("Deleting unreferenced directory " + dirKey); + if (!isDryRun()) { OmDirectoryInfo dirInfo = dirEntry.getValue(); markDirectoryForDeletion(volume.getVolume(), bucket.getBucketName(), dirKey, dirInfo); } @@ -393,15 +372,10 @@ private void handleUnreachableAndUnreferencedObjects(OmVolumeArgs volume, OmBuck OmKeyInfo fileInfo = fileEntry.getValue(); if (!isReachable(fileKey)) { if (!isFileKeyInDeletedTable(fileKey)) { - info("Found unreferenced file: " + fileKey); unreferencedStats.addFile(fileInfo.getDataSize()); - if (!repair) { - if (verbose) { - info("Marking unreferenced file " + fileKey + " for deletion." + fileKey); - } - } else { - info("Deleting unreferenced file " + fileKey); + info("Deleting unreferenced file " + fileKey); + if (!isDryRun()) { markFileForDeletion(fileKey, fileInfo); } } else { diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotChainRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotChainRepair.java index 37cf0c5ddbb..bafd2f89375 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotChainRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotChainRepair.java @@ -76,11 +76,6 @@ public class SnapshotChainRepair extends RepairTool { description = "Path previous snapshotId to set for the given snapshot") private UUID pathPreviousSnapshotId; - @CommandLine.Option(names = {"--dry-run"}, - required = true, - description = "To dry-run the command.", defaultValue = "true") - private boolean dryRun; - @Override public void execute() throws Exception { if (checkIfServiceIsRunning("OM")) { @@ -139,12 +134,13 @@ public void execute() throws Exception { snapshotInfo.setGlobalPreviousSnapshotId(globalPreviousSnapshotId); snapshotInfo.setPathPreviousSnapshotId(pathPreviousSnapshotId); - if (dryRun) { - info("SnapshotInfo would be updated to : %s", snapshotInfo); - } else { - byte[] snapshotInfoBytes = SnapshotInfo.getCodec().toPersistedFormat(snapshotInfo); - db.get() - .put(snapshotInfoCfh, StringCodec.get().toPersistedFormat(snapshotInfoTableKey), snapshotInfoBytes); + info("Updating SnapshotInfo to %s", snapshotInfo); + + byte[] snapshotInfoBytes = SnapshotInfo.getCodec().toPersistedFormat(snapshotInfo); + byte[] persistedFormat = StringCodec.get().toPersistedFormat(snapshotInfoTableKey); + + if (!isDryRun()) { + db.get().put(snapshotInfoCfh, persistedFormat, snapshotInfoBytes); info("Snapshot Info is updated to : %s", RocksDBUtils.getValue(db, snapshotInfoCfh, snapshotInfoTableKey, 
SnapshotInfo.getCodec())); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/TransactionInfoRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/TransactionInfoRepair.java index 59ea67138ba..ca664407207 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/TransactionInfoRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/TransactionInfoRepair.java @@ -89,12 +89,17 @@ public void execute() throws Exception { TransactionInfo transactionInfo = TransactionInfo.valueOf(highestTransactionTerm, highestTransactionIndex); byte[] transactionInfoBytes = TransactionInfo.getCodec().toPersistedFormat(transactionInfo); - db.get() - .put(transactionInfoCfh, StringCodec.get().toPersistedFormat(TRANSACTION_INFO_KEY), transactionInfoBytes); + byte[] key = StringCodec.get().toPersistedFormat(TRANSACTION_INFO_KEY); - info("The highest transaction info has been updated to: %s", - RocksDBUtils.getValue(db, transactionInfoCfh, TRANSACTION_INFO_KEY, - TransactionInfo.getCodec()).getTermIndex()); + info("Updating transaction info to %s", transactionInfo.getTermIndex()); + + if (!isDryRun()) { + db.get().put(transactionInfoCfh, key, transactionInfoBytes); + + info("The highest transaction info has been updated to: %s", + RocksDBUtils.getValue(db, transactionInfoCfh, TRANSACTION_INFO_KEY, + TransactionInfo.getCodec()).getTermIndex()); + } } catch (RocksDBException exception) { error("Failed to update the RocksDB for the given path: %s", dbPath); error( diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/quota/QuotaStatus.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/quota/QuotaStatus.java index 879dc06f189..80eb7ee24a9 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/quota/QuotaStatus.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/quota/QuotaStatus.java @@ -24,6 +24,7 @@ import java.util.concurrent.Callable; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol; +import org.apache.hadoop.ozone.repair.ReadOnlyCommand; import picocli.CommandLine; /** @@ -35,7 +36,7 @@ mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class ) -public class QuotaStatus implements Callable { +public class QuotaStatus implements Callable, ReadOnlyCommand { @CommandLine.Option( names = {"--service-id", "--om-service-id"}, @@ -57,9 +58,9 @@ public class QuotaStatus implements Callable { @Override public Void call() throws Exception { - OzoneManagerProtocol ozoneManagerClient = - parent.createOmClient(omServiceId, omHost, false); - System.out.println(ozoneManagerClient.getQuotaRepairStatus()); + try (OzoneManagerProtocol omClient = parent.createOmClient(omServiceId, omHost, false)) { + System.out.println(omClient.getQuotaRepairStatus()); + } return null; } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/quota/QuotaTrigger.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/quota/QuotaTrigger.java index b490f758eaf..90528a79ddb 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/quota/QuotaTrigger.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/quota/QuotaTrigger.java @@ -77,8 +77,10 @@ public void execute() throws Exception { bucketList.isEmpty() ? 
"all buckets" : ("buckets " + buckets)); - omClient.startQuotaRepair(bucketList); - info(omClient.getQuotaRepairStatus()); + if (!isDryRun()) { + omClient.startQuotaRepair(bucketList); + info(omClient.getQuotaRepairStatus()); + } } } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/cert/RecoverSCMCertificate.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/cert/RecoverSCMCertificate.java index 29b92574b81..2fac9d53294 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/cert/RecoverSCMCertificate.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/cert/RecoverSCMCertificate.java @@ -194,29 +194,31 @@ private void storeCerts(X509Certificate scmCertificate, CertificateCodec certCodec = new CertificateCodec(securityConfig, SCMCertificateClient.COMPONENT_NAME); - info("Writing certs to path : %s", certCodec.getLocation()); - CertPath certPath = addRootCertInPath(scmCertificate, rootCertificate); CertPath rootCertPath = getRootCertPath(rootCertificate); String encodedCert = CertificateCodec.getPEMEncodedString(certPath); String certName = String.format(CERT_FILE_NAME_FORMAT, CAType.NONE.getFileNamePrefix() + scmCertificate.getSerialNumber()); - certCodec.writeCertificate(certName, encodedCert); + writeCertificate(certCodec, certName, encodedCert); String rootCertName = String.format(CERT_FILE_NAME_FORMAT, CAType.SUBORDINATE.getFileNamePrefix() + rootCertificate.getSerialNumber()); String encodedRootCert = CertificateCodec.getPEMEncodedString(rootCertPath); - certCodec.writeCertificate(rootCertName, encodedRootCert); + writeCertificate(certCodec, rootCertName, encodedRootCert); - certCodec.writeCertificate(certCodec.getLocation().toAbsolutePath(), - securityConfig.getCertificateFileName(), encodedCert); + writeCertificate(certCodec, securityConfig.getCertificateFileName(), encodedCert); if (isRootCA) { CertificateCodec rootCertCodec = new CertificateCodec(securityConfig, OzoneConsts.SCM_ROOT_CA_COMPONENT_NAME); - info("Writing root certs to path : %s", rootCertCodec.getLocation()); - rootCertCodec.writeCertificate(rootCertCodec.getLocation().toAbsolutePath(), - securityConfig.getCertificateFileName(), encodedRootCert); + writeCertificate(rootCertCodec, securityConfig.getCertificateFileName(), encodedRootCert); + } + } + + private void writeCertificate(CertificateCodec codec, String name, String encodedCert) throws IOException { + info("Writing cert %s to %s", name, codec.getLocation()); + if (!isDryRun()) { + codec.writeCertificate(name, encodedCert); } } diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestOzoneRepair.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestOzoneRepair.java index dc3f2d73845..86bb5db4562 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestOzoneRepair.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestOzoneRepair.java @@ -28,6 +28,7 @@ import java.io.ByteArrayOutputStream; import java.io.PrintStream; import java.util.List; +import java.util.Map; import static java.nio.charset.StandardCharsets.UTF_8; import static java.util.Arrays.asList; @@ -68,6 +69,33 @@ public void reset() { System.setProperty("user.name", OLD_USER); } + /** All leaf subcommands should support {@code --dry-run}, + * except if marked as {@link ReadOnlyCommand}. 
*/ + @Test + void subcommandsSupportDryRun() { + assertSubcommandOptionRecursively(new OzoneRepair().getCmd()); + } + + private static void assertSubcommandOptionRecursively(CommandLine cmd) { + Map subcommands = cmd.getSubcommands(); + if (subcommands.isEmpty()) { + // leaf command + CommandLine.Model.CommandSpec spec = cmd.getCommandSpec(); + Object userObject = spec.userObject(); + if (!(userObject instanceof ReadOnlyCommand)) { + assertThat(spec.optionsMap().keySet()) + .as(() -> "'" + spec.qualifiedName() + "' defined by " + userObject.getClass() + + " should support --dry-run or implement " + ReadOnlyCommand.class) + .contains("--dry-run"); + } + } else { + // parent command + for (CommandLine sub : subcommands.values()) { + assertSubcommandOptionRecursively(sub); + } + } + } + @Test void testOzoneRepairWhenUserIsRemindedSystemUserAndDeclinesToProceed() throws Exception { OzoneRepair ozoneRepair = new OzoneRepair(); From c2d810e15df4ee116648ced3f4b1966a5d00fb20 Mon Sep 17 00:00:00 2001 From: venkatsambath Date: Thu, 23 Jan 2025 11:06:54 -0500 Subject: [PATCH 117/168] HDDS-12108. Grafana Dashboard for OM CommitIndex (#7719) Co-authored-by: Ritesh H Shukla --- .../Ozone - OMComittedIndexMetrics.json | 166 ++++++++++++++++++ 1 file changed, 166 insertions(+) create mode 100644 hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - OMComittedIndexMetrics.json diff --git a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - OMComittedIndexMetrics.json b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - OMComittedIndexMetrics.json new file mode 100644 index 00000000000..9ff886cbeec --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Ozone - OMComittedIndexMetrics.json @@ -0,0 +1,166 @@ +{ + "__inputs": [ + { + "name": "DS_PROMETHEUS", + "label": "prometheus", + "description": "", + "type": "datasource", + "pluginId": "prometheus", + "pluginName": "Prometheus" + } + ], + "__elements": {}, + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "10.4.2" + }, + { + "type": "datasource", + "id": "prometheus", + "name": "Prometheus", + "version": "1.0.0" + }, + { + "type": "panel", + "id": "timeseries", + "name": "Time series", + "version": "" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": null, + "links": [], + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, 
+ { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 1, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "${DS_PROMETHEUS}" + }, + "editorMode": "code", + "exemplar": false, + "expr": "ratis_server_peerCommitIndex{instance=~\".*:9874\"}", + "instant": false, + "legendFormat": "Hostname = {{instance}}, Source OM ID = {{exported_instance}}, Follower OM ID = {{follower}}", + "range": true, + "refId": "A" + } + ], + "title": "Om Commit Index", + "type": "timeseries" + } + ], + "schemaVersion": 39, + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": {}, + "timezone": "browser", + "title": "OM Commit Index", + "version": 2, + "weekStart": "" +} From 47525b297aadfd4810fc818790fe6dab0b5206fe Mon Sep 17 00:00:00 2001 From: Sarveksha Yeshavantha Raju <79865743+sarvekshayr@users.noreply.github.com> Date: Thu, 23 Jan 2025 23:23:52 +0530 Subject: [PATCH 118/168] HDDS-12050. Implement TransactionInfoRepair command for SCM (#7689) --- .../{om => }/TransactionInfoRepair.java | 48 +++++++++++++----- .../hadoop/ozone/repair/om/OMRepair.java | 1 + .../hadoop/ozone/repair/scm/SCMRepair.java | 2 + .../{om => }/TestTransactionInfoRepair.java | 49 ++++++++++++------- 4 files changed, 69 insertions(+), 31 deletions(-) rename hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/{om => }/TransactionInfoRepair.java (72%) rename hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/{om => }/TestTransactionInfoRepair.java (75%) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/TransactionInfoRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/TransactionInfoRepair.java similarity index 72% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/TransactionInfoRepair.java rename to hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/TransactionInfoRepair.java index ca664407207..4fca8e40a08 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/TransactionInfoRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/TransactionInfoRepair.java @@ -19,15 +19,17 @@ * permissions and * limitations under the License. 
*/ -package org.apache.hadoop.ozone.repair.om; +package org.apache.hadoop.ozone.repair; import org.apache.hadoop.hdds.cli.HddsVersionProvider; +import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.TransactionInfo; +import org.apache.hadoop.hdds.utils.db.DBColumnFamilyDefinition; import org.apache.hadoop.hdds.utils.db.StringCodec; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.ozone.debug.RocksDBUtils; -import org.apache.hadoop.ozone.repair.RepairTool; +import org.apache.hadoop.ozone.om.codec.OMDBDefinition; import org.rocksdb.ColumnFamilyDescriptor; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.RocksDBException; @@ -38,14 +40,13 @@ import java.util.List; import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.TRANSACTION_INFO_TABLE; /** - * Tool to update the highest term-index in transactionInfoTable. + * Tool to update the highest term-index in transaction info table. */ @CommandLine.Command( name = "update-transaction", - description = "CLI to update the highest index in transactionInfoTable. Currently it is only supported for OM.", + description = "CLI to update the highest index in transaction info table.", mixinStandardHelpOptions = true, versionProvider = HddsVersionProvider.class ) @@ -58,17 +59,18 @@ public class TransactionInfoRepair extends RepairTool { @CommandLine.Option(names = {"--term"}, required = true, - description = "Highest term of transactionInfoTable. The input should be non-zero long integer.") + description = "Highest term to set. The input should be non-zero long integer.") private long highestTransactionTerm; @CommandLine.Option(names = {"--index"}, required = true, - description = "Highest index of transactionInfoTable. The input should be non-zero long integer.") + description = "Highest index to set. 
The input should be non-zero long integer.") private long highestTransactionIndex; @Override public void execute() throws Exception { - if (checkIfServiceIsRunning("OM")) { + final Component component = getComponent(); + if (checkIfServiceIsRunning(component.name())) { return; } List cfHandleList = new ArrayList<>(); @@ -76,9 +78,10 @@ public void execute() throws Exception { dbPath); try (ManagedRocksDB db = ManagedRocksDB.open(dbPath, cfDescList, cfHandleList)) { - ColumnFamilyHandle transactionInfoCfh = RocksDBUtils.getColumnFamilyHandle(TRANSACTION_INFO_TABLE, cfHandleList); + String columnFamilyName = component.columnFamilyDefinition.getName(); + ColumnFamilyHandle transactionInfoCfh = RocksDBUtils.getColumnFamilyHandle(columnFamilyName, cfHandleList); if (transactionInfoCfh == null) { - throw new IllegalArgumentException(TRANSACTION_INFO_TABLE + + throw new IllegalArgumentException(columnFamilyName + " is not in a column family in DB for the given path."); } TransactionInfo originalTransactionInfo = @@ -102,11 +105,32 @@ public void execute() throws Exception { } } catch (RocksDBException exception) { error("Failed to update the RocksDB for the given path: %s", dbPath); - error( - "Make sure that Ozone entity (OM) is not running for the give database path and current host."); throw new IOException("Failed to update RocksDB.", exception); } finally { IOUtils.closeQuietly(cfHandleList); } } + + private Component getComponent() { + final String parent = spec().parent().name(); + switch (parent) { + case "om": + return Component.OM; + case "scm": + return Component.SCM; + default: + throw new IllegalStateException("Unknown component: " + parent); + } + } + + private enum Component { + OM(OMDBDefinition.TRANSACTION_INFO_TABLE), + SCM(SCMDBDefinition.TRANSACTIONINFO); + + private final DBColumnFamilyDefinition columnFamilyDefinition; + + Component(DBColumnFamilyDefinition columnFamilyDefinition) { + this.columnFamilyDefinition = columnFamilyDefinition; + } + } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java index c8e9f6e9e4b..d738ec129b5 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/OMRepair.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.repair.om; import org.apache.hadoop.hdds.cli.RepairSubcommand; +import org.apache.hadoop.ozone.repair.TransactionInfoRepair; import org.apache.hadoop.ozone.repair.om.quota.QuotaRepair; import org.kohsuke.MetaInfServices; import picocli.CommandLine; diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/SCMRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/SCMRepair.java index d7e61a8ed22..8ef59540a8e 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/SCMRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/SCMRepair.java @@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.repair.scm; import org.apache.hadoop.hdds.cli.RepairSubcommand; +import org.apache.hadoop.ozone.repair.TransactionInfoRepair; import org.apache.hadoop.ozone.repair.scm.cert.CertRepair; import org.kohsuke.MetaInfServices; import picocli.CommandLine; @@ -30,6 +31,7 @@ description = "Operational tool to repair SCM.", subcommands = { CertRepair.class, + TransactionInfoRepair.class } ) 
@MetaInfServices(RepairSubcommand.class) diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/om/TestTransactionInfoRepair.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestTransactionInfoRepair.java similarity index 75% rename from hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/om/TestTransactionInfoRepair.java rename to hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestTransactionInfoRepair.java index 3ad1c4f8404..f1ad9b57ed1 100644 --- a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/om/TestTransactionInfoRepair.java +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/TestTransactionInfoRepair.java @@ -15,18 +15,20 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ozone.repair.om; +package org.apache.hadoop.ozone.repair; +import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.TransactionInfo; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; import org.apache.hadoop.ozone.debug.RocksDBUtils; -import org.apache.hadoop.ozone.repair.OzoneRepair; +import org.apache.hadoop.ozone.om.codec.OMDBDefinition; import org.apache.ozone.test.GenericTestUtils; import org.apache.ratis.server.protocol.TermIndex; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import org.mockito.MockedStatic; import org.rocksdb.ColumnFamilyHandle; import org.rocksdb.RocksDB; @@ -35,7 +37,6 @@ import static org.apache.ozone.test.IntLambda.withTextFromSystemIn; import static org.apache.hadoop.ozone.OzoneConsts.TRANSACTION_INFO_KEY; -import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.TRANSACTION_INFO_TABLE; import static org.assertj.core.api.Assertions.assertThat; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.any; @@ -51,7 +52,6 @@ */ public class TestTransactionInfoRepair { - private static final String DB_PATH = "testDBPath"; private static final long TEST_TERM = 1; private static final long TEST_INDEX = 1; @@ -69,10 +69,11 @@ void cleanup() { IOUtils.closeQuietly(out, err); } - @Test - public void testUpdateTransactionInfoTableSuccessful() { + @ParameterizedTest + @ValueSource(strings = {"om", "scm"}) + public void testUpdateTransactionInfoTableSuccessful(String component) { ManagedRocksDB mdb = mockRockDB(); - testCommand(mdb, mock(ColumnFamilyHandle.class)); + testCommand(component, mdb, mock(ColumnFamilyHandle.class)); assertThat(out.getOutput()) .contains( @@ -81,16 +82,18 @@ public void testUpdateTransactionInfoTableSuccessful() { ); } - @Test - public void testCommandWhenTableNotInDBForGivenPath() { + @ParameterizedTest + @ValueSource(strings = {"om", "scm"}) + public void testCommandWhenTableNotInDBForGivenPath(String component) { ManagedRocksDB mdb = mockRockDB(); - testCommand(mdb, null); + testCommand(component, mdb, null); assertThat(err.getOutput()) - .contains(TRANSACTION_INFO_TABLE + " is not in a column family in DB for the given path"); + .contains(getColumnFamilyName(component) + " is not in a column family in DB for the given path"); } - @Test - public void testCommandWhenFailToUpdateRocksDBForGivenPath() throws Exception { + @ParameterizedTest + @ValueSource(strings = {"om", "scm"}) + 
public void testCommandWhenFailToUpdateRocksDBForGivenPath(String component) throws Exception { ManagedRocksDB mdb = mockRockDB(); RocksDB rdb = mdb.get(); @@ -98,18 +101,18 @@ public void testCommandWhenFailToUpdateRocksDBForGivenPath() throws Exception { doThrow(RocksDBException.class).when(rdb) .put(eq(mock), any(byte[].class), any(byte[].class)); - testCommand(mdb, mock); + testCommand(component, mdb, mock); assertThat(err.getOutput()) .contains("Failed to update RocksDB."); } - - private void testCommand(ManagedRocksDB mdb, ColumnFamilyHandle columnFamilyHandle) { + private void testCommand(String component, ManagedRocksDB mdb, ColumnFamilyHandle columnFamilyHandle) { + final String expectedColumnFamilyName = getColumnFamilyName(component); try (MockedStatic mocked = mockStatic(ManagedRocksDB.class); MockedStatic mockUtil = mockStatic(RocksDBUtils.class)) { mocked.when(() -> ManagedRocksDB.open(anyString(), anyList(), anyList())).thenReturn(mdb); - mockUtil.when(() -> RocksDBUtils.getColumnFamilyHandle(anyString(), anyList())) + mockUtil.when(() -> RocksDBUtils.getColumnFamilyHandle(eq(expectedColumnFamilyName), anyList())) .thenReturn(columnFamilyHandle); mockUtil.when(() -> RocksDBUtils.getValue(eq(mdb), eq(columnFamilyHandle), eq(TRANSACTION_INFO_KEY), @@ -128,7 +131,7 @@ private void testCommand(ManagedRocksDB mdb, ColumnFamilyHandle columnFamilyHand CommandLine cli = new OzoneRepair().getCmd(); withTextFromSystemIn("y") .execute(() -> cli.execute( - "om", + component, "update-transaction", "--db", DB_PATH, "--term", String.valueOf(TEST_TERM), @@ -137,6 +140,14 @@ private void testCommand(ManagedRocksDB mdb, ColumnFamilyHandle columnFamilyHand } } + private String getColumnFamilyName(String component) { + switch (component) { + case "om": return OMDBDefinition.TRANSACTION_INFO_TABLE.getName(); + case "scm": return SCMDBDefinition.TRANSACTIONINFO.getName(); + default: return ""; + } + } + private ManagedRocksDB mockRockDB() { ManagedRocksDB db = mock(ManagedRocksDB.class); RocksDB rocksDB = mock(RocksDB.class); From c0a6ffd6dda7432f57e38bcf3f5ababdd7999e26 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 24 Jan 2025 05:44:02 +0100 Subject: [PATCH 119/168] HDDS-12130. 
Improve assertion compatibility with old Hadoop (#7738) --- .../AbstractContractGetFileStatusTest.java | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java index 12cfba2312a..6efe2c9d719 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/contract/AbstractContractGetFileStatusTest.java @@ -394,7 +394,7 @@ private void validateListingForFile(Path f, Assertions.assertThat(statusList) .describedAs(msg) .hasSize(1); - Assertions.assertThat(statusList.get(0).getPath()) + Assertions.assertThatObject(statusList.get(0).getPath()) .describedAs("path returned should match with the input path") .isEqualTo(f); Assertions.assertThat(statusList.get(0).isFile()) @@ -471,7 +471,7 @@ private void verifyStatusArrayMatchesFile(Path f, FileStatus[] status) { * @param fileStatus status to validate */ private void assertIsNamedFile(Path f, FileStatus fileStatus) { - Assertions.assertThat(fileStatus.getPath()) + Assertions.assertThatObject(fileStatus.getPath()) .withFailMessage("Wrong pathname in " + fileStatus) .isEqualTo(f); Assertions.assertThat(fileStatus.isFile()) @@ -569,15 +569,15 @@ public void testListStatusFiltering() throws Throwable { MatchesNameFilter file1Filter = new MatchesNameFilter("file-1.txt"); result = verifyListStatus(1, parent, file1Filter); - Assertions.assertThat(result[0].getPath()) + Assertions.assertThatObject(result[0].getPath()) .isEqualTo(file1); verifyListStatus(0, file1, NO_PATHS); result = verifyListStatus(1, file1, ALL_PATHS); - Assertions.assertThat(result[0].getPath()) + Assertions.assertThatObject(result[0].getPath()) .isEqualTo(file1); result = verifyListStatus(1, file1, file1Filter); - Assertions.assertThat(result[0].getPath()) + Assertions.assertThatObject(result[0].getPath()) .isEqualTo(file1); // empty subdirectory @@ -608,15 +608,15 @@ public void testListLocatedStatusFiltering() throws Throwable { MatchesNameFilter file1Filter = new MatchesNameFilter("file-1.txt"); result = verifyListLocatedStatus(xfs, 1, parent, file1Filter); - Assertions.assertThat(result.get(0).getPath()) + Assertions.assertThatObject(result.get(0).getPath()) .isEqualTo(file1); verifyListLocatedStatus(xfs, 0, file1, NO_PATHS); verifyListLocatedStatus(xfs, 1, file1, ALL_PATHS); - Assertions.assertThat(result.get(0).getPath()) + Assertions.assertThatObject(result.get(0).getPath()) .isEqualTo(file1); verifyListLocatedStatus(xfs, 1, file1, file1Filter); - Assertions.assertThat(result.get(0).getPath()) + Assertions.assertThatObject(result.get(0).getPath()) .isEqualTo(file1); verifyListLocatedStatusNextCalls(xfs, 1, file1, file1Filter); From db059c6a3c2cfefce0c453d1012e5739454101c1 Mon Sep 17 00:00:00 2001 From: Kohei Sugihara Date: Fri, 24 Jan 2025 14:59:03 +0900 Subject: [PATCH 120/168] HDDS-12131. 
NPE in OM when overwriting empty file using multipart upload (#7739) --- .../src/main/smoketest/s3/MultipartUpload.robot | 6 ++++++ .../S3MultipartUploadCompleteResponse.java | 2 ++ .../S3MultipartUploadCompleteResponseWithFSO.java | 13 ++++++++----- 3 files changed, 16 insertions(+), 5 deletions(-) diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot index e630fe6cdae..c12f8e33581 100644 --- a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot +++ b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot @@ -61,6 +61,12 @@ Test Multipart Upload With Adjusted Length Perform Multipart Upload ${BUCKET} multipart/adjusted_length_${PREFIX} /tmp/part1 /tmp/part2 Verify Multipart Upload ${BUCKET} multipart/adjusted_length_${PREFIX} /tmp/part1 /tmp/part2 +Overwrite Empty File + Execute touch ${TEMP_DIR}/empty + Execute AWSS3Cli cp ${TEMP_DIR}/empty s3://${BUCKET}/empty_file_${PREFIX} + Perform Multipart Upload ${BUCKET} empty_file_${PREFIX} /tmp/part1 /tmp/part2 + Verify Multipart Upload ${BUCKET} empty_file_${PREFIX} /tmp/part1 /tmp/part2 + Test Multipart Upload ${uploadID} = Initiate MPU ${BUCKET} ${PREFIX}/multipartKey ${nextUploadID} = Initiate MPU ${BUCKET} ${PREFIX}/multipartKey diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java index a1f7b796cd8..72fcd79608f 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponse.java @@ -33,6 +33,7 @@ import org.apache.hadoop.hdds.utils.db.BatchOperation; import jakarta.annotation.Nonnull; +import jakarta.annotation.Nullable; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.BUCKET_TABLE; import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.DELETED_TABLE; @@ -130,6 +131,7 @@ protected OmKeyInfo getOmKeyInfo() { return omKeyInfo; } + @Nullable protected OmBucketInfo getOmBucketInfo() { return omBucketInfo; } diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java index 4d1a6ce09bc..d2d95928ea1 100644 --- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java +++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/s3/multipart/S3MultipartUploadCompleteResponseWithFSO.java @@ -102,11 +102,14 @@ public void addToDBBatch(OMMetadataManager omMetadataManager, } // namespace quota changes for parent directory - String bucketKey = omMetadataManager.getBucketKey( - getOmBucketInfo().getVolumeName(), - getOmBucketInfo().getBucketName()); - omMetadataManager.getBucketTable().putWithBatch(batchOperation, - bucketKey, getOmBucketInfo()); + OmBucketInfo omBucketInfo = getOmBucketInfo(); + if (omBucketInfo != null) { + String bucketKey = omMetadataManager.getBucketKey( + omBucketInfo.getVolumeName(), + omBucketInfo.getBucketName()); + 
omMetadataManager.getBucketTable().putWithBatch(batchOperation, + bucketKey, omBucketInfo); + } if (OMFileRequest.getOmKeyInfoFromFileTable(true, omMetadataManager, getMultiPartKey(), getOmKeyInfo().getKeyName()) From 82c1ddaa810e93e95fac033e3f8aa7d25f1cf8ad Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 24 Jan 2025 11:04:11 +0100 Subject: [PATCH 121/168] HDDS-12089. Move execute_debug_tests out of testlib.sh (#7744) --- .../src/main/compose/common/replicas-test.sh | 48 +++++++++++++++++++ .../dist/src/main/compose/ozone/test.sh | 2 +- hadoop-ozone/dist/src/main/compose/testlib.sh | 35 -------------- 3 files changed, 49 insertions(+), 36 deletions(-) create mode 100755 hadoop-ozone/dist/src/main/compose/common/replicas-test.sh diff --git a/hadoop-ozone/dist/src/main/compose/common/replicas-test.sh b/hadoop-ozone/dist/src/main/compose/common/replicas-test.sh new file mode 100755 index 00000000000..3111177af75 --- /dev/null +++ b/hadoop-ozone/dist/src/main/compose/common/replicas-test.sh @@ -0,0 +1,48 @@ +#!/usr/bin/env bash +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +prefix=${RANDOM} + +volume="cli-debug-volume${prefix}" +bucket="cli-debug-bucket" +key="testfile" + +execute_robot_test ${SCM} -v "PREFIX:${prefix}" debug/ozone-debug-tests.robot + +# get block locations for key +chunkinfo="${key}-blocks-${prefix}" +docker-compose exec -T ${SCM} bash -c "ozone debug replicas chunk-info ${volume}/${bucket}/${key}" > "$chunkinfo" +host="$(jq -r '.KeyLocations[0][0]["Datanode-HostName"]' ${chunkinfo})" +container="${host%%.*}" + +# corrupt the first block of key on one of the datanodes +datafile="$(jq -r '.KeyLocations[0][0].Locations.files[0]' ${chunkinfo})" +docker exec "${container}" sed -i -e '1s/^/a/' "${datafile}" + +execute_robot_test ${SCM} -v "PREFIX:${prefix}" -v "CORRUPT_DATANODE:${host}" debug/ozone-debug-corrupt-block.robot + +docker stop "${container}" + +wait_for_datanode "${container}" STALE 60 +execute_robot_test ${SCM} -v "PREFIX:${prefix}" -v "STALE_DATANODE:${host}" debug/ozone-debug-stale-datanode.robot + +wait_for_datanode "${container}" DEAD 60 +execute_robot_test ${SCM} -v "PREFIX:${prefix}" debug/ozone-debug-dead-datanode.robot + +docker start "${container}" + +wait_for_datanode "${container}" HEALTHY 60 diff --git a/hadoop-ozone/dist/src/main/compose/ozone/test.sh b/hadoop-ozone/dist/src/main/compose/ozone/test.sh index a580fd83309..85606553e2c 100755 --- a/hadoop-ozone/dist/src/main/compose/ozone/test.sh +++ b/hadoop-ozone/dist/src/main/compose/ozone/test.sh @@ -49,7 +49,7 @@ execute_robot_test scm cli execute_robot_test scm admincli execute_robot_test scm -v USERNAME:httpfs httpfs -execute_debug_tests +source "$COMPOSE_DIR/../common/replicas-test.sh" execute_robot_test scm -v SCHEME:o3fs -v BUCKET_TYPE:bucket -N ozonefs-o3fs-bucket ozonefs/ozonefs.robot diff --git a/hadoop-ozone/dist/src/main/compose/testlib.sh b/hadoop-ozone/dist/src/main/compose/testlib.sh index 1ab7533942f..6be1cd632a0 100755 --- a/hadoop-ozone/dist/src/main/compose/testlib.sh +++ b/hadoop-ozone/dist/src/main/compose/testlib.sh @@ -568,41 +568,6 @@ prepare_for_runner_image() { export OZONE_TEST_IMAGE="$(get_runner_image_spec "$@")" } -## @description Executing the Ozone Debug CLI related robot tests -execute_debug_tests() { - local prefix=${RANDOM} - - local volume="cli-debug-volume${prefix}" - local bucket="cli-debug-bucket" - local key="testfile" - - execute_robot_test ${SCM} -v "PREFIX:${prefix}" debug/ozone-debug-tests.robot - - # get block locations for key - local chunkinfo="${key}-blocks-${prefix}" - docker-compose exec -T ${SCM} bash -c "ozone debug replicas chunk-info ${volume}/${bucket}/${key}" > "$chunkinfo" - local host="$(jq -r '.KeyLocations[0][0]["Datanode-HostName"]' ${chunkinfo})" - local container="${host%%.*}" - - # corrupt the first block of key on one of the datanodes - local datafile="$(jq -r '.KeyLocations[0][0].Locations.files[0]' ${chunkinfo})" - docker exec "${container}" sed -i -e '1s/^/a/' "${datafile}" - - execute_robot_test ${SCM} -v "PREFIX:${prefix}" -v "CORRUPT_DATANODE:${host}" debug/ozone-debug-corrupt-block.robot - - docker stop "${container}" - - wait_for_datanode "${container}" STALE 60 - execute_robot_test ${SCM} -v "PREFIX:${prefix}" -v "STALE_DATANODE:${host}" debug/ozone-debug-stale-datanode.robot - - wait_for_datanode "${container}" DEAD 60 - execute_robot_test ${SCM} -v "PREFIX:${prefix}" debug/ozone-debug-dead-datanode.robot - - docker start "${container}" - - wait_for_datanode "${container}" HEALTHY 60 -} - ## @description Wait for datanode state ## @param Datanode name, eg datanode_1 datanode_2 ## @param State to 
check for From b6cc4af5983fec8afdfe2c5a0c6febabbcd20196 Mon Sep 17 00:00:00 2001 From: Stephen O'Donnell Date: Fri, 24 Jan 2025 11:10:30 +0000 Subject: [PATCH 122/168] HDDS-12114. Prevent delete commands running after a long lock wait and send ICR earlier (#7726) --- .../statemachine/DatanodeConfiguration.java | 14 ++++ .../container/keyvalue/KeyValueHandler.java | 27 +++++++- .../keyvalue/TestKeyValueHandler.java | 66 +++++++++++++++++++ 3 files changed, 106 insertions(+), 1 deletion(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java index 22dff7505ce..11ef3e9c187 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeConfiguration.java @@ -573,6 +573,20 @@ public void setWaitOnAllFollowers(boolean val) { private boolean bCheckEmptyContainerDir = OZONE_DATANODE_CHECK_EMPTY_CONTAINER_DIR_ON_DELETE_DEFAULT; + @Config(key = "delete.container.timeout", + type = ConfigType.TIME, + defaultValue = "60s", + tags = { DATANODE }, + description = "If a delete container request spends more than this time waiting on the container lock or " + + "performing pre checks, the command will be skipped and SCM will resend it automatically. This avoids " + + "commands running for a very long time without SCM being informed of the progress." + ) + private long deleteContainerTimeoutMs = Duration.ofSeconds(60).toMillis(); + + public long getDeleteContainerTimeoutMs() { + return deleteContainerTimeoutMs; + } + @PostConstruct public void validate() { if (containerDeleteThreads < 1) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java index 0ef8d5e68a0..267a2ecb661 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java @@ -27,6 +27,7 @@ import java.nio.file.DirectoryStream; import java.nio.file.Files; import java.nio.file.Path; +import java.time.Clock; import java.util.ArrayList; import java.util.HashMap; import java.util.LinkedList; @@ -146,11 +147,13 @@ public class KeyValueHandler extends Handler { private final ChunkManager chunkManager; private final VolumeChoosingPolicy volumeChoosingPolicy; private final long maxContainerSize; + private final long maxDeleteLockWaitMs; private final Function byteBufferToByteString; private final boolean validateChunkChecksumData; // A striped lock that is held during container creation. 
private final Striped containerCreationLocks; private static FaultInjector injector; + private final Clock clock; public KeyValueHandler(ConfigurationSource config, String datanodeId, @@ -158,7 +161,18 @@ public KeyValueHandler(ConfigurationSource config, VolumeSet volSet, ContainerMetrics metrics, IncrementalReportSender icrSender) { + this(config, datanodeId, contSet, volSet, metrics, icrSender, Clock.systemUTC()); + } + + public KeyValueHandler(ConfigurationSource config, + String datanodeId, + ContainerSet contSet, + VolumeSet volSet, + ContainerMetrics metrics, + IncrementalReportSender icrSender, + Clock clock) { super(config, datanodeId, contSet, volSet, metrics, icrSender); + this.clock = clock; blockManager = new BlockManagerImpl(config); validateChunkChecksumData = conf.getObject( DatanodeConfiguration.class).isChunkDataValidationCheck(); @@ -173,6 +187,9 @@ public KeyValueHandler(ConfigurationSource config, maxContainerSize = (long) config.getStorageSize( ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE, ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES); + + DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class); + maxDeleteLockWaitMs = dnConf.getDeleteContainerTimeoutMs(); // this striped handler lock is used for synchronizing createContainer // Requests. final int threadCountPerDisk = conf.getInt( @@ -1436,6 +1453,7 @@ private boolean logBlocksFoundOnDisk(Container container) throws IOException { private void deleteInternal(Container container, boolean force) throws StorageContainerException { + long startTime = clock.millis(); container.writeLock(); try { if (container.getContainerData().getVolume().isFailed()) { @@ -1490,6 +1508,13 @@ private void deleteInternal(Container container, boolean force) // 4. container moved to tmp folder // 5. container content deleted from tmp folder try { + long waitTime = clock.millis() - startTime; + if (waitTime > maxDeleteLockWaitMs) { + LOG.warn("An attempt to delete container {} took {} ms acquiring locks and pre-checks. 
" + + "The delete has been skipped and should be retried automatically by SCM.", + container.getContainerData().getContainerID(), waitTime); + return; + } container.markContainerForDelete(); long containerId = container.getContainerData().getContainerID(); containerSet.removeContainer(containerId); @@ -1521,8 +1546,8 @@ private void deleteInternal(Container container, boolean force) container.writeUnlock(); } // Avoid holding write locks for disk operations - container.delete(); sendICR(container); + container.delete(); } private void triggerVolumeScanAndThrowException(Container container, diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java index d02910358de..83a6cddf4d0 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java @@ -22,6 +22,7 @@ import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; +import java.time.Clock; import java.util.List; import java.util.Collections; import java.util.HashMap; @@ -46,6 +47,7 @@ import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.interfaces.Handler; +import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.common.statemachine.StateContext; import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.HddsVolume; @@ -456,6 +458,70 @@ public void testDeleteContainer() throws IOException { } } + + @Test + public void testDeleteContainerTimeout() throws IOException { + final String testDir = tempDir.toString(); + final long containerID = 1L; + final String clusterId = UUID.randomUUID().toString(); + final String datanodeId = UUID.randomUUID().toString(); + final ConfigurationSource conf = new OzoneConfiguration(); + final ContainerSet containerSet = new ContainerSet(1000); + final MutableVolumeSet volumeSet = mock(MutableVolumeSet.class); + final Clock clock = mock(Clock.class); + long startTime = System.currentTimeMillis(); + + DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class); + when(clock.millis()) + .thenReturn(startTime) + .thenReturn(startTime + dnConf.getDeleteContainerTimeoutMs() + 1); + + HddsVolume hddsVolume = new HddsVolume.Builder(testDir).conf(conf) + .clusterID(clusterId).datanodeUuid(datanodeId) + .volumeSet(volumeSet) + .build(); + hddsVolume.format(clusterId); + hddsVolume.createWorkingDir(clusterId, null); + hddsVolume.createTmpDirs(clusterId); + + when(volumeSet.getVolumesList()) + .thenReturn(Collections.singletonList(hddsVolume)); + + List hddsVolumeList = StorageVolumeUtil + .getHddsVolumesList(volumeSet.getVolumesList()); + + assertEquals(1, hddsVolumeList.size()); + + final ContainerMetrics metrics = ContainerMetrics.create(conf); + + final AtomicInteger icrReceived = new AtomicInteger(0); + + final KeyValueHandler kvHandler = new KeyValueHandler(conf, + datanodeId, containerSet, volumeSet, metrics, + c -> icrReceived.incrementAndGet(), clock); + kvHandler.setClusterID(clusterId); + + final ContainerCommandRequestProto createContainer = + 
createContainerRequest(datanodeId, containerID); + kvHandler.handleCreateContainer(createContainer, null); + assertEquals(1, icrReceived.get()); + assertNotNull(containerSet.getContainer(containerID)); + + // The delete should not have gone through due to the mocked clock. The implementation calls the clock twice: + // Once at the start of the method prior to taking the lock, when the clock will return the start time of the test. + // On the second call to the clock, where the implementation checks if the timeout has expired, the clock will + // return start_time + timeout + 1. This will cause the delete to timeout and the container will not be deleted. + kvHandler.deleteContainer(containerSet.getContainer(containerID), true); + assertEquals(1, icrReceived.get()); + assertNotNull(containerSet.getContainer(containerID)); + + // Delete the container normally, and it should go through. At this stage all calls to the clock mock will return + // the same value, indicating no delay to the delete operation will succeed. + kvHandler.deleteContainer(containerSet.getContainer(containerID), true); + assertEquals(2, icrReceived.get()); + assertNull(containerSet.getContainer(containerID)); + } + private static ContainerCommandRequestProto createContainerRequest( String datanodeId, long containerID) { return ContainerCommandRequestProto.newBuilder() From 1bd721b2fdf1811246cf460d1cd21f533b206219 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 24 Jan 2025 22:01:17 +0100 Subject: [PATCH 123/168] HDDS-12081. TestKeyInputStream repeats tests with default container layout (#7704) --- .../keyvalue/ContainerLayoutTestInfo.java | 2 +- .../client/rpc/read/TestChunkInputStream.java | 18 ++-- .../client/rpc/read/TestInputStreamBase.java | 48 +++++++++-- .../client/rpc/read/TestKeyInputStream.java | 86 +++++++------------ .../commandhandler/TestFinalizeBlock.java | 28 ++---- 5 files changed, 88 insertions(+), 94 deletions(-) diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerLayoutTestInfo.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerLayoutTestInfo.java index ab6e2c857c5..8349005b877 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerLayoutTestInfo.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/ContainerLayoutTestInfo.java @@ -122,7 +122,7 @@ private static void assertFileCount(File dir, long count) { } /** - * Composite annotation for tests parameterized with {@link ContainerLayoutTestInfo}. + * Composite annotation for tests parameterized with {@link ContainerLayoutVersion}. 
*/ @Target(ElementType.METHOD) @Retention(RetentionPolicy.RUNTIME) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestChunkInputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestChunkInputStream.java index c5301ba4194..7a6873dfd07 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestChunkInputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestChunkInputStream.java @@ -22,12 +22,13 @@ import org.apache.hadoop.hdds.scm.storage.BlockInputStream; import org.apache.hadoop.hdds.scm.storage.ChunkInputStream; -import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.io.KeyInputStream; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.keyvalue.ContainerLayoutTestInfo; import org.apache.hadoop.ozone.om.TestBucket; +import org.junit.jupiter.api.TestInstance; + import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; @@ -36,6 +37,7 @@ /** * Tests {@link ChunkInputStream}. */ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) class TestChunkInputStream extends TestInputStreamBase { /** @@ -44,16 +46,14 @@ class TestChunkInputStream extends TestInputStreamBase { */ @ContainerLayoutTestInfo.ContainerTest void testAll(ContainerLayoutVersion layout) throws Exception { - try (MiniOzoneCluster cluster = newCluster(layout)) { - cluster.waitForClusterToBeReady(); + try (OzoneClient client = getCluster().newClient()) { + updateConfig(layout); - try (OzoneClient client = cluster.newClient()) { - TestBucket bucket = TestBucket.newBuilder(client).build(); + TestBucket bucket = TestBucket.newBuilder(client).build(); - testChunkReadBuffers(bucket); - testBufferRelease(bucket); - testCloseReleasesBuffers(bucket); - } + testChunkReadBuffers(bucket); + testBufferRelease(bucket); + testCloseReleasesBuffers(bucket); } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java index 256148dfb8d..4835d2c3ba1 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestInputStreamBase.java @@ -25,15 +25,22 @@ import org.apache.hadoop.hdds.scm.OzoneClientConfig; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager.ReplicationManagerConfiguration; +import org.apache.hadoop.hdds.scm.server.StorageContainerManager; +import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.ClientConfigForTesting; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConfigKeys; +import org.apache.hadoop.ozone.container.TestHelper; import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.TestInstance; +import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY; import static 
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; -// TODO remove this class, set config as default in integration tests +@TestInstance(TestInstance.Lifecycle.PER_CLASS) abstract class TestInputStreamBase { static final int CHUNK_SIZE = 1024 * 1024; // 1MB @@ -42,8 +49,7 @@ abstract class TestInputStreamBase { static final int BLOCK_SIZE = 2 * MAX_FLUSH_SIZE; // 8MB static final int BYTES_PER_CHECKSUM = 256 * 1024; // 256KB - protected static MiniOzoneCluster newCluster( - ContainerLayoutVersion containerLayout) throws Exception { + protected static MiniOzoneCluster newCluster() throws Exception { OzoneConfiguration conf = new OzoneConfiguration(); OzoneClientConfig config = conf.getObject(OzoneClientConfig.class); @@ -57,8 +63,6 @@ protected static MiniOzoneCluster newCluster( conf.setQuietMode(false); conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 64, StorageUnit.MB); - conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY, - containerLayout.toString()); ReplicationManagerConfiguration repConf = conf.getObject(ReplicationManagerConfiguration.class); @@ -81,4 +85,38 @@ static String getNewKeyName() { return UUID.randomUUID().toString(); } + protected void updateConfig(ContainerLayoutVersion layout) { + cluster.getHddsDatanodes().forEach(dn -> dn.getConf().setEnum(OZONE_SCM_CONTAINER_LAYOUT_KEY, layout)); + closeContainers(); + } + + private MiniOzoneCluster cluster; + + protected MiniOzoneCluster getCluster() { + return cluster; + } + + @BeforeAll + void setup() throws Exception { + cluster = newCluster(); + cluster.waitForClusterToBeReady(); + } + + @AfterAll + void cleanup() { + IOUtils.closeQuietly(cluster); + } + + private void closeContainers() { + StorageContainerManager scm = cluster.getStorageContainerManager(); + scm.getContainerManager().getContainers().forEach(container -> { + if (container.isOpen()) { + try { + TestHelper.waitForContainerClose(getCluster(), container.getContainerID()); + } catch (Exception e) { + throw new RuntimeException(e); + } + } + }); + } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java index 3ab8ae31188..d9c310d8f51 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/read/TestKeyInputStream.java @@ -34,7 +34,6 @@ import org.apache.hadoop.hdds.scm.storage.BlockExtendedInputStream; import org.apache.hadoop.hdds.scm.storage.BlockInputStream; import org.apache.hadoop.hdds.scm.storage.ChunkInputStream; -import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.client.io.KeyInputStream; import org.apache.hadoop.ozone.common.utils.BufferUtils; @@ -46,15 +45,16 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.junit.jupiter.api.MethodOrderer; +import org.junit.jupiter.api.Order; +import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.api.TestMethodOrder; import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.Arguments; -import 
org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; import static org.apache.hadoop.hdds.client.ECReplicationConfig.EcCodec.RS; import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE; import static org.apache.hadoop.ozone.container.TestHelper.countReplicas; -import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_BLOCK; -import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_CHUNK; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotEquals; @@ -63,6 +63,8 @@ /** * Tests {@link KeyInputStream}. */ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +@TestMethodOrder(MethodOrderer.OrderAnnotation.class) class TestKeyInputStream extends TestInputStreamBase { /** @@ -123,20 +125,18 @@ private void validate(TestBucket bucket, KeyInputStream keyInputStream, */ @ContainerLayoutTestInfo.ContainerTest void testNonReplicationReads(ContainerLayoutVersion layout) throws Exception { - try (MiniOzoneCluster cluster = newCluster(layout)) { - cluster.waitForClusterToBeReady(); - - try (OzoneClient client = cluster.newClient()) { - TestBucket bucket = TestBucket.newBuilder(client).build(); - - testInputStreams(bucket); - testSeekRandomly(bucket); - testSeek(bucket); - testReadChunkWithByteArray(bucket); - testReadChunkWithByteBuffer(bucket); - testSkip(bucket); - testECSeek(bucket); - } + try (OzoneClient client = getCluster().newClient()) { + updateConfig(layout); + + TestBucket bucket = TestBucket.newBuilder(client).build(); + + testInputStreams(bucket); + testSeekRandomly(bucket); + testSeek(bucket); + testReadChunkWithByteArray(bucket); + testReadChunkWithByteBuffer(bucket); + testSkip(bucket); + testECSeek(bucket); } } @@ -379,32 +379,18 @@ private void testSkip(TestBucket bucket) throws Exception { } } - private static List readAfterReplicationArgs() { - return Arrays.asList( - Arguments.arguments(FILE_PER_BLOCK, false), - Arguments.arguments(FILE_PER_BLOCK, true), - Arguments.arguments(FILE_PER_CHUNK, false), - Arguments.arguments(FILE_PER_CHUNK, true) - ); - } - @ParameterizedTest - @MethodSource("readAfterReplicationArgs") - void readAfterReplication(ContainerLayoutVersion layout, - boolean doUnbuffer) throws Exception { - try (MiniOzoneCluster cluster = newCluster(layout)) { - cluster.waitForClusterToBeReady(); - - try (OzoneClient client = cluster.newClient()) { - TestBucket bucket = TestBucket.newBuilder(client).build(); + @ValueSource(booleans = {false, true}) + @Order(Integer.MAX_VALUE) // shuts down datanodes + void readAfterReplication(boolean doUnbuffer) throws Exception { + try (OzoneClient client = getCluster().newClient()) { + TestBucket bucket = TestBucket.newBuilder(client).build(); - testReadAfterReplication(cluster, bucket, doUnbuffer); - } + testReadAfterReplication(bucket, doUnbuffer); } } - private void testReadAfterReplication(MiniOzoneCluster cluster, - TestBucket bucket, boolean doUnbuffer) throws Exception { + private void testReadAfterReplication(TestBucket bucket, boolean doUnbuffer) throws Exception { int dataLength = 2 * CHUNK_SIZE; String keyName = getNewKeyName(); byte[] data = bucket.writeRandomBytes(keyName, dataLength); @@ -415,7 +401,7 @@ private void testReadAfterReplication(MiniOzoneCluster cluster, .setKeyName(keyName) .setReplicationConfig(RatisReplicationConfig.getInstance(THREE)) 
.build(); - OmKeyInfo keyInfo = cluster.getOzoneManager() + OmKeyInfo keyInfo = getCluster().getOzoneManager() .getKeyInfo(keyArgs, false) .getKeyInfo(); @@ -425,24 +411,12 @@ private void testReadAfterReplication(MiniOzoneCluster cluster, assertEquals(1, locationInfoList.size()); OmKeyLocationInfo loc = locationInfoList.get(0); long containerID = loc.getContainerID(); - assertEquals(3, countReplicas(containerID, cluster)); + assertEquals(3, countReplicas(containerID, getCluster())); - TestHelper.waitForContainerClose(cluster, containerID); + TestHelper.waitForContainerClose(getCluster(), containerID); List pipelineNodes = loc.getPipeline().getNodes(); - // read chunk data - try (KeyInputStream keyInputStream = bucket.getKeyInputStream(keyName)) { - int b = keyInputStream.read(); - assertNotEquals(-1, b); - if (doUnbuffer) { - keyInputStream.unbuffer(); - } - cluster.shutdownHddsDatanode(pipelineNodes.get(0)); - // check that we can still read it - assertReadFully(data, keyInputStream, dataLength - 1, 1); - } - // read chunk data with ByteBuffer try (KeyInputStream keyInputStream = bucket.getKeyInputStream(keyName)) { int b = keyInputStream.read(); @@ -450,7 +424,7 @@ private void testReadAfterReplication(MiniOzoneCluster cluster, if (doUnbuffer) { keyInputStream.unbuffer(); } - cluster.shutdownHddsDatanode(pipelineNodes.get(0)); + getCluster().shutdownHddsDatanode(pipelineNodes.get(0)); // check that we can still read it assertReadFullyUsingByteBuffer(data, keyInputStream, dataLength - 1, 1); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestFinalizeBlock.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestFinalizeBlock.java index ca3733588aa..8c3f0fd0255 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestFinalizeBlock.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestFinalizeBlock.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hdds.conf.StorageUnit; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.ScmConfig; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.XceiverClientManager; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.container.ContainerID; @@ -41,7 +40,6 @@ import org.apache.hadoop.ozone.client.OzoneClientFactory; import org.apache.hadoop.ozone.client.io.OzoneOutputStream; import org.apache.hadoop.ozone.container.ContainerTestHelper; -import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion; import org.apache.hadoop.ozone.container.common.interfaces.Container; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData; @@ -51,8 +49,7 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Timeout; import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.Arguments; -import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.params.provider.ValueSource; import java.io.IOException; import java.time.Duration; @@ -61,7 +58,6 @@ import java.util.UUID; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; -import java.util.stream.Stream; 
import static java.nio.charset.StandardCharsets.UTF_8; import static java.util.concurrent.TimeUnit.SECONDS; @@ -74,12 +70,9 @@ import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL; import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; -import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_BLOCK; -import static org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion.FILE_PER_CHUNK; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; -import static org.junit.jupiter.params.provider.Arguments.arguments; /** * Tests FinalizeBlock. @@ -94,16 +87,7 @@ public class TestFinalizeBlock { private static String volumeName = UUID.randomUUID().toString(); private static String bucketName = UUID.randomUUID().toString(); - public static Stream dnLayoutParams() { - return Stream.of( - arguments(false, FILE_PER_CHUNK), - arguments(true, FILE_PER_CHUNK), - arguments(false, FILE_PER_BLOCK), - arguments(true, FILE_PER_BLOCK) - ); - } - - private void setup(boolean enableSchemaV3, ContainerLayoutVersion version) throws Exception { + private void setup(boolean enableSchemaV3) throws Exception { conf = new OzoneConfiguration(); conf.set(OZONE_SCM_CONTAINER_SIZE, "1GB"); conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, @@ -116,7 +100,6 @@ private void setup(boolean enableSchemaV3, ContainerLayoutVersion version) throw conf.setTimeDuration(HDDS_NODE_REPORT_INTERVAL, 1, SECONDS); conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS); conf.setBoolean(CONTAINER_SCHEMA_V3_ENABLED, enableSchemaV3); - conf.setEnum(ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY, version); DatanodeConfiguration datanodeConfiguration = conf.getObject( DatanodeConfiguration.class); @@ -150,10 +133,9 @@ public void shutdown() { } @ParameterizedTest - @MethodSource("dnLayoutParams") - public void testFinalizeBlock(boolean enableSchemaV3, ContainerLayoutVersion version) - throws Exception { - setup(enableSchemaV3, version); + @ValueSource(booleans = {false, true}) + public void testFinalizeBlock(boolean enableSchemaV3) throws Exception { + setup(enableSchemaV3); String keyName = UUID.randomUUID().toString(); // create key createKey(keyName); From 22367c6c9dbb8868fac79298660de5b7ecb5fdba Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 25 Jan 2025 10:17:06 +0100 Subject: [PATCH 124/168] HDDS-12138. Bump assertj-core to 3.27.3 (#7751) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index 29471b34dd4..87497e0a39a 100644 --- a/pom.xml +++ b/pom.xml @@ -34,7 +34,7 @@ 0.16.1 1.14 1.9.7 - 3.27.2 + 3.27.3 1.12.661 0.8.0.RELEASE 1.80 From 98cb75cd9282546ce38014bdb7c7be209b7e973b Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Sat, 25 Jan 2025 11:34:56 +0100 Subject: [PATCH 125/168] HDDS-12098. 
Bump Hugo to 0.141.0 (#7731) --- .../dev-support/checks/install/hugo.sh | 21 ++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/hadoop-ozone/dev-support/checks/install/hugo.sh b/hadoop-ozone/dev-support/checks/install/hugo.sh index 279d668438d..293fd6b1981 100644 --- a/hadoop-ozone/dev-support/checks/install/hugo.sh +++ b/hadoop-ozone/dev-support/checks/install/hugo.sh @@ -17,7 +17,7 @@ # This script installs Hugo. # Requires _install_tool from _lib.sh. Use `source` for both scripts, because it modifies $PATH. -: ${HUGO_VERSION:=0.83.1} +: ${HUGO_VERSION:=0.141.0} _install_hugo() { local os=$(uname -s) @@ -25,15 +25,22 @@ _install_hugo() { mkdir bin - case "${os}" in - Darwin) - os=macOS + case "${arch}" in + x86_64) + arch=amd64 + ;; + aarch64) + arch=arm64 ;; esac - case "${arch}" in - x86_64) - arch=64bit + case "${os}" in + Darwin) + os=darwin + arch=universal + ;; + Linux) + os=linux ;; esac From 95b4fdcf7243de24cfbd0c7d34f4f0332dab7c5d Mon Sep 17 00:00:00 2001 From: Peter Lee Date: Sat, 25 Jan 2025 19:50:35 +0800 Subject: [PATCH 126/168] HDDS-12122. Add unit test for SnapshotChainRepair (#7741) --- .../repair/om/TestSnapshotChainRepair.java | 376 ++++++++++++++++++ 1 file changed, 376 insertions(+) create mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/om/TestSnapshotChainRepair.java diff --git a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/om/TestSnapshotChainRepair.java b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/om/TestSnapshotChainRepair.java new file mode 100644 index 00000000000..ba96742c238 --- /dev/null +++ b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/repair/om/TestSnapshotChainRepair.java @@ -0,0 +1,376 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */
+
+package org.apache.hadoop.ozone.repair.om;
+
+import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.hdds.utils.db.StringCodec;
+import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB;
+import org.apache.hadoop.ozone.debug.RocksDBUtils;
+import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
+import org.apache.hadoop.ozone.repair.OzoneRepair;
+import org.apache.ozone.test.GenericTestUtils;
+import org.assertj.core.api.Assertions;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.ValueSource;
+import org.mockito.MockedStatic;
+import org.rocksdb.ColumnFamilyHandle;
+import org.rocksdb.RocksDB;
+import org.rocksdb.RocksIterator;
+import org.rocksdb.ColumnFamilyDescriptor;
+
+import picocli.CommandLine;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.UUID;
+
+import static org.apache.ozone.test.IntLambda.withTextFromSystemIn;
+import static org.apache.hadoop.ozone.OzoneConsts.SNAPSHOT_INFO_TABLE;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.mockito.ArgumentMatchers.anyList;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.mockStatic;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+/**
+ * Tests SnapshotChainRepair.
+ */
+public class TestSnapshotChainRepair {
+
+  private ManagedRocksDB managedRocksDB;
+  private RocksDB rocksDB;
+  private ColumnFamilyHandle columnFamilyHandle;
+
+  private static final String DB_PATH = "testDBPath";
+
+  private MockedStatic<ManagedRocksDB> mockedDB;
+  private MockedStatic<RocksDBUtils> mockedUtils;
+
+  private GenericTestUtils.PrintStreamCapturer out;
+  private GenericTestUtils.PrintStreamCapturer err;
+
+  @BeforeEach
+  public void setup() throws Exception {
+    out = GenericTestUtils.captureOut();
+    err = GenericTestUtils.captureErr();
+
+    // Initialize static mocks
+    mockedDB = mockStatic(ManagedRocksDB.class);
+    mockedUtils = mockStatic(RocksDBUtils.class);
+  }
+
+  @AfterEach
+  public void tearDown() {
+    IOUtils.closeQuietly(out, err);
+
+    if (mockedDB != null) {
+      mockedDB.close();
+    }
+    if (mockedUtils != null) {
+      mockedUtils.close();
+    }
+  }
+
+  private void setupMockDB(SnapshotInfo snapshotInfo,
+      List<SnapshotInfo> iteratorSnapshots) throws Exception {
+
+    managedRocksDB = mock(ManagedRocksDB.class);
+    rocksDB = mock(RocksDB.class);
+    columnFamilyHandle = mock(ColumnFamilyHandle.class);
+
+    when(managedRocksDB.get()).thenReturn(rocksDB);
+
+    // Mock column family descriptors
+    List<ColumnFamilyDescriptor> cfDescList = new ArrayList<>();
+    cfDescList.add(new ColumnFamilyDescriptor(new byte[] {1}));
+
+    mockedUtils.when(() -> RocksDBUtils.getColumnFamilyDescriptors(eq(DB_PATH)))
+        .thenReturn(cfDescList);
+
+    // Mock DB open
+    mockedDB.when(() -> ManagedRocksDB.open(eq(DB_PATH), eq(cfDescList), eq(new ArrayList<>())))
+        .thenReturn(managedRocksDB);
+
+    // Mock column family handle
+    mockedUtils.when(() -> RocksDBUtils.getColumnFamilyHandle(
+        eq(SNAPSHOT_INFO_TABLE), anyList()))
+        .thenReturn(columnFamilyHandle);
+
+    // Mock snapshot retrieval
+    mockedUtils.when(() -> RocksDBUtils.getValue(
+        eq(managedRocksDB),
+        eq(columnFamilyHandle),
+        anyString(),
+        eq(SnapshotInfo.getCodec())))
+        .thenReturn(snapshotInfo);
+ + // Mock iterator + RocksIterator rocksIterator = mock(RocksIterator.class); + when(rocksDB.newIterator(columnFamilyHandle)).thenReturn(rocksIterator); + + // Setup iterator behavior based on provided snapshots + if (iteratorSnapshots.isEmpty()) { + when(rocksIterator.isValid()).thenReturn(false); + } else { + Boolean[] remainingValidResponses = new Boolean[iteratorSnapshots.size()]; + for (int i = 0; i < iteratorSnapshots.size() - 1; i++) { + remainingValidResponses[i] = true; + } + remainingValidResponses[iteratorSnapshots.size() - 1] = false; + + when(rocksIterator.isValid()) + .thenReturn(true, remainingValidResponses); + + ArrayList valueResponses = new ArrayList<>(); + for (SnapshotInfo snap : iteratorSnapshots) { + try { + valueResponses.add(SnapshotInfo.getCodec().toPersistedFormat(snap)); + } catch (IOException e) { + Assertions.fail("Failed to serialize snapshot info"); + } + } + byte[] firstValue = valueResponses.get(0); + byte[][] remainingValueResponses = valueResponses.subList(1, valueResponses.size()).toArray(new byte[0][]); + when(rocksIterator.value()) + .thenReturn(firstValue, remainingValueResponses); + } + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testSuccessfulRepair(boolean dryRun) throws Exception { + String volumeName = "vol1"; + String bucketName = "bucket1"; + String snapshotName = "snap1"; + String globalPrevSnapshotName = "global-prev-snap1"; + String pathPrevSnapshotName = "path-prev-snap1"; + + UUID snapshotId = UUID.randomUUID(); + UUID globalPrevSnapshotId = UUID.randomUUID(); + UUID pathPrevSnapshotId = UUID.randomUUID(); + + SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(volumeName, bucketName, snapshotName, snapshotId, 0); + SnapshotInfo globalPrevSnapshot = SnapshotInfo.newInstance(volumeName, bucketName, globalPrevSnapshotName, + globalPrevSnapshotId, 0); + SnapshotInfo pathPrevSnapshot = SnapshotInfo.newInstance(volumeName, bucketName, pathPrevSnapshotName, + pathPrevSnapshotId, 0); + + List iteratorSnapshots = Arrays.asList( + snapshotInfo, globalPrevSnapshot, pathPrevSnapshot); + + List argsList = new ArrayList<>(Arrays.asList( + "om", "snapshot", "chain", + volumeName + "/" + bucketName, + snapshotName, + "--db", DB_PATH, + "--global-previous", globalPrevSnapshotId.toString(), + "--path-previous", pathPrevSnapshotId.toString())); + + if (dryRun) { + argsList.add("--dry-run"); + } + + setupMockDB(snapshotInfo, iteratorSnapshots); + + CommandLine cli = new OzoneRepair().getCmd(); + withTextFromSystemIn("y") + .execute(() -> cli.execute(argsList.toArray(new String[0]))); + + String output = out.getOutput(); + assertTrue(output.contains("Updating SnapshotInfo to")); + + if (dryRun) { + // Verify DB update was NOT called in dry run mode + verify(rocksDB, never()).put( + eq(columnFamilyHandle), + eq(StringCodec.get().toPersistedFormat(snapshotInfo.getTableKey())), + eq(SnapshotInfo.getCodec().toPersistedFormat(snapshotInfo))); + } else { + // Verify DB update was called with correct parameters + verify(rocksDB).put( + eq(columnFamilyHandle), + eq(StringCodec.get().toPersistedFormat(snapshotInfo.getTableKey())), + eq(SnapshotInfo.getCodec().toPersistedFormat(snapshotInfo))); + assertTrue(output.contains("Snapshot Info is updated")); + } + } + + @Test + public void testGlobalPreviousMatchesSnapshotId() throws Exception { + String volumeName = "vol1"; + String bucketName = "bucket1"; + String snapshotName = "snap1"; + + UUID snapshotId = UUID.randomUUID(); + // Use same ID for global previous to trigger error + 
UUID globalPrevSnapshotId = snapshotId; + UUID pathPrevSnapshotId = UUID.randomUUID(); + + SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(volumeName, bucketName, + snapshotName, snapshotId, 0); + SnapshotInfo pathPrevSnapshot = SnapshotInfo.newInstance(volumeName, bucketName, + "path-prev", pathPrevSnapshotId, 0); + + List iteratorSnapshots = Arrays.asList( + snapshotInfo, pathPrevSnapshot); + + String[] args = new String[] { + "om", "snapshot", "chain", + volumeName + "/" + bucketName, + snapshotName, + "--db", DB_PATH, + "--global-previous", globalPrevSnapshotId.toString(), + "--path-previous", pathPrevSnapshotId.toString(), + }; + + setupMockDB(snapshotInfo, iteratorSnapshots); + + CommandLine cli = new OzoneRepair().getCmd(); + withTextFromSystemIn("y") + .execute(() -> cli.execute(args)); + + String errorOutput = err.getOutput(); + assertTrue(errorOutput.contains("globalPreviousSnapshotId: '" + globalPrevSnapshotId + + "' is equal to given snapshot's ID")); + } + + @Test + public void testPathPreviousMatchesSnapshotId() throws Exception { + String volumeName = "vol1"; + String bucketName = "bucket1"; + String snapshotName = "snap1"; + + UUID snapshotId = UUID.randomUUID(); + UUID globalPrevSnapshotId = UUID.randomUUID(); + // Use same ID for path previous to trigger error + UUID pathPrevSnapshotId = snapshotId; + + SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(volumeName, bucketName, + snapshotName, snapshotId, 0); + SnapshotInfo globalPrevSnapshot = SnapshotInfo.newInstance(volumeName, bucketName, + "global-prev", globalPrevSnapshotId, 0); + + List iteratorSnapshots = Arrays.asList( + snapshotInfo, globalPrevSnapshot); + + String[] args = new String[] { + "om", "snapshot", "chain", + volumeName + "/" + bucketName, + snapshotName, + "--db", DB_PATH, + "--global-previous", globalPrevSnapshotId.toString(), + "--path-previous", pathPrevSnapshotId.toString(), + }; + + setupMockDB(snapshotInfo, iteratorSnapshots); + + CommandLine cli = new OzoneRepair().getCmd(); + withTextFromSystemIn("y") + .execute(() -> cli.execute(args)); + + String errorOutput = err.getOutput(); + assertTrue(errorOutput.contains("pathPreviousSnapshotId: '" + pathPrevSnapshotId + + "' is equal to given snapshot's ID")); + } + + @Test + public void testGlobalPreviousDoesNotExist() throws Exception { + String volumeName = "vol1"; + String bucketName = "bucket1"; + String snapshotName = "snap1"; + + UUID snapshotId = UUID.randomUUID(); + UUID globalPrevSnapshotId = UUID.randomUUID(); + UUID pathPrevSnapshotId = UUID.randomUUID(); + + SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(volumeName, bucketName, + snapshotName, snapshotId, 0); + SnapshotInfo pathPrevSnapshot = SnapshotInfo.newInstance(volumeName, bucketName, + "path-prev", pathPrevSnapshotId, 0); + + List iteratorSnapshots = Arrays.asList( + snapshotInfo, pathPrevSnapshot); + + String[] args = new String[] { + "om", "snapshot", "chain", + volumeName + "/" + bucketName, + snapshotName, + "--db", DB_PATH, + "--global-previous", globalPrevSnapshotId.toString(), + "--path-previous", pathPrevSnapshotId.toString(), + }; + + setupMockDB(snapshotInfo, iteratorSnapshots); + + CommandLine cli = new OzoneRepair().getCmd(); + withTextFromSystemIn("y") + .execute(() -> cli.execute(args)); + + String errorOutput = err.getOutput(); + assertTrue(errorOutput.contains("globalPreviousSnapshotId: '" + globalPrevSnapshotId + + "' does not exist in snapshotInfoTable")); + } + + @Test + public void testPathPreviousDoesNotExist() throws Exception { + String volumeName 
= "vol1"; + String bucketName = "bucket1"; + String snapshotName = "snap1"; + + UUID snapshotId = UUID.randomUUID(); + UUID globalPrevSnapshotId = UUID.randomUUID(); + UUID pathPrevSnapshotId = UUID.randomUUID(); + + SnapshotInfo snapshotInfo = SnapshotInfo.newInstance(volumeName, bucketName, + snapshotName, snapshotId, 0); + SnapshotInfo globalPrevSnapshot = SnapshotInfo.newInstance(volumeName, bucketName, + "global-prev", globalPrevSnapshotId, 0); + + List iteratorSnapshots = Arrays.asList( + snapshotInfo, globalPrevSnapshot); + + String[] args = new String[] { + "om", "snapshot", "chain", + volumeName + "/" + bucketName, + snapshotName, + "--db", DB_PATH, + "--global-previous", globalPrevSnapshotId.toString(), + "--path-previous", pathPrevSnapshotId.toString(), + }; + + setupMockDB(snapshotInfo, iteratorSnapshots); + + CommandLine cli = new OzoneRepair().getCmd(); + withTextFromSystemIn("y") + .execute(() -> cli.execute(args)); + + String errorOutput = err.getOutput(); + assertTrue(errorOutput.contains("pathPreviousSnapshotId: '" + pathPrevSnapshotId + + "' does not exist in snapshotInfoTable")); + } +} From 8133be836a4e1d6903c0b4b302ea2df896f063dc Mon Sep 17 00:00:00 2001 From: Nandakumar Vadivelu Date: Sat, 25 Jan 2025 20:22:59 +0530 Subject: [PATCH 127/168] HDDS-11798. Move SafeModeRule names to respective rules (#7742) --- .../scm/safemode/ContainerSafeModeRule.java | 8 +++-- .../scm/safemode/DataNodeSafeModeRule.java | 6 ++-- .../safemode/HealthyPipelineSafeModeRule.java | 7 ++-- .../OneReplicaPipelineSafeModeRule.java | 6 ++-- .../scm/safemode/SafeModeRuleFactory.java | 35 +++++++------------ 5 files changed, 30 insertions(+), 32 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java index b66b6e9f0f6..c8583c5b61b 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java @@ -58,6 +58,9 @@ public class ContainerSafeModeRule extends SafeModeExitRule { public static final Logger LOG = LoggerFactory.getLogger(ContainerSafeModeRule.class); + + private static final String NAME = "ContainerSafeModeRule"; + private final ContainerManager containerManager; // Required cutoff % for containers with at least 1 reported replica. 
private final double safeModeCutoff; @@ -71,12 +74,11 @@ public class ContainerSafeModeRule extends private double ratisMaxContainer; private double ecMaxContainer; - public ContainerSafeModeRule(final String ruleName, - final EventQueue eventQueue, + public ContainerSafeModeRule(final EventQueue eventQueue, final ConfigurationSource conf, final ContainerManager containerManager, final SCMSafeModeManager manager) { - super(manager, ruleName, eventQueue); + super(manager, NAME, eventQueue); this.safeModeCutoff = getSafeModeCutoff(conf); this.containerManager = containerManager; this.ratisContainers = new HashSet<>(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java index b03fedb647e..04a92513031 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java @@ -35,16 +35,18 @@ public class DataNodeSafeModeRule extends SafeModeExitRule { + private static final String NAME = "DataNodeSafeModeRule"; + // Min DataNodes required to exit safe mode. private int requiredDns; private int registeredDns = 0; // Set to track registered DataNodes. private HashSet registeredDnSet; - public DataNodeSafeModeRule(String ruleName, EventQueue eventQueue, + public DataNodeSafeModeRule(EventQueue eventQueue, ConfigurationSource conf, SCMSafeModeManager manager) { - super(manager, ruleName, eventQueue); + super(manager, NAME, eventQueue); requiredDns = conf.getInt( HddsConfigKeys.HDDS_SCM_SAFEMODE_MIN_DATANODE, HddsConfigKeys.HDDS_SCM_SAFEMODE_MIN_DATANODE_DEFAULT); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java index e9f25f3a94f..855963ccb30 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java @@ -50,6 +50,9 @@ public class HealthyPipelineSafeModeRule extends SafeModeExitRule { public static final Logger LOG = LoggerFactory.getLogger(HealthyPipelineSafeModeRule.class); + + private static final String NAME = "HealthyPipelineSafeModeRule"; + private int healthyPipelineThresholdCount; private int currentHealthyPipelineCount = 0; private final double healthyPipelinesPercent; @@ -59,10 +62,10 @@ public class HealthyPipelineSafeModeRule extends SafeModeExitRule { private final SCMContext scmContext; private final Set unProcessedPipelineSet = new HashSet<>(); - HealthyPipelineSafeModeRule(String ruleName, EventQueue eventQueue, + HealthyPipelineSafeModeRule(EventQueue eventQueue, PipelineManager pipelineManager, SCMSafeModeManager manager, ConfigurationSource configuration, SCMContext scmContext) { - super(manager, ruleName, eventQueue); + super(manager, NAME, eventQueue); this.pipelineManager = pipelineManager; this.scmContext = scmContext; healthyPipelinesPercent = diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java index 08fe4d59d64..d27c19c19f5 100644 --- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java @@ -50,6 +50,7 @@ public class OneReplicaPipelineSafeModeRule extends private static final Logger LOG = LoggerFactory.getLogger(OneReplicaPipelineSafeModeRule.class); + private static final String NAME = "AtleastOneDatanodeReportedRule"; private int thresholdCount; private final Set reportedPipelineIDSet = new HashSet<>(); @@ -59,10 +60,9 @@ public class OneReplicaPipelineSafeModeRule extends private final double pipelinePercent; - public OneReplicaPipelineSafeModeRule(String ruleName, EventQueue eventQueue, - PipelineManager pipelineManager, + public OneReplicaPipelineSafeModeRule(EventQueue eventQueue, PipelineManager pipelineManager, SCMSafeModeManager safeModeManager, ConfigurationSource configuration) { - super(safeModeManager, ruleName, eventQueue); + super(safeModeManager, NAME, eventQueue); pipelinePercent = configuration.getDouble( diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeRuleFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeRuleFactory.java index 8e75f51b962..8693f114eca 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeRuleFactory.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeRuleFactory.java @@ -25,8 +25,6 @@ import org.apache.hadoop.hdds.scm.ha.SCMContext; import org.apache.hadoop.hdds.scm.pipeline.PipelineManager; import org.apache.hadoop.hdds.server.events.EventQueue; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import java.util.ArrayList; import java.util.List; @@ -37,16 +35,6 @@ public final class SafeModeRuleFactory { - private static final Logger LOG = LoggerFactory.getLogger(SafeModeRuleFactory.class); - - // TODO: Move the rule names to respective rules. (HDDS-11798) - private static final String CONT_EXIT_RULE = "ContainerSafeModeRule"; - private static final String DN_EXIT_RULE = "DataNodeSafeModeRule"; - private static final String HEALTHY_PIPELINE_EXIT_RULE = - "HealthyPipelineSafeModeRule"; - private static final String ATLEAST_ONE_DATANODE_REPORTED_PIPELINE_EXIT_RULE = - "AtleastOneDatanodeReportedRule"; - private final ConfigurationSource config; private final SCMContext scmContext; private final EventQueue eventQueue; @@ -80,11 +68,15 @@ private SafeModeRuleFactory(final ConfigurationSource config, private void loadRules() { // TODO: Use annotation to load the rules. (HDDS-11730) - safeModeRules.add(new ContainerSafeModeRule(CONT_EXIT_RULE, eventQueue, config, - containerManager, safeModeManager)); - SafeModeExitRule dnRule = new DataNodeSafeModeRule(DN_EXIT_RULE, eventQueue, config, safeModeManager); - safeModeRules.add(dnRule); - preCheckRules.add(dnRule); + SafeModeExitRule containerRule = new ContainerSafeModeRule(eventQueue, + config, containerManager, safeModeManager); + SafeModeExitRule datanodeRule = new DataNodeSafeModeRule(eventQueue, + config, safeModeManager); + + safeModeRules.add(containerRule); + safeModeRules.add(datanodeRule); + + preCheckRules.add(datanodeRule); // TODO: Move isRuleEnabled check to the Rule implementation. 
(HDDS-11799) if (config.getBoolean( @@ -92,11 +84,10 @@ private void loadRules() { HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_AVAILABILITY_CHECK_DEFAULT) && pipelineManager != null) { - safeModeRules.add(new HealthyPipelineSafeModeRule(HEALTHY_PIPELINE_EXIT_RULE, - eventQueue, pipelineManager, safeModeManager, config, scmContext)); - safeModeRules.add(new OneReplicaPipelineSafeModeRule( - ATLEAST_ONE_DATANODE_REPORTED_PIPELINE_EXIT_RULE, eventQueue, - pipelineManager, safeModeManager, config)); + safeModeRules.add(new HealthyPipelineSafeModeRule(eventQueue, pipelineManager, + safeModeManager, config, scmContext)); + safeModeRules.add(new OneReplicaPipelineSafeModeRule(eventQueue, pipelineManager, + safeModeManager, config)); } } From efd8adcf59dc101735b9d210caae7102a4bf80a7 Mon Sep 17 00:00:00 2001 From: Stephen O'Donnell Date: Sat, 25 Jan 2025 18:43:25 +0000 Subject: [PATCH 128/168] HDDS-12115. RM selects replicas to delete non-deterministically if nodes are overloaded (#7728) --- .../AbstractOverReplicationHandler.java | 5 +-- .../RatisOverReplicationHandler.java | 12 +++-- .../TestRatisOverReplicationHandler.java | 44 +++++++++++++++++-- 3 files changed, 52 insertions(+), 9 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/AbstractOverReplicationHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/AbstractOverReplicationHandler.java index dfea743ded0..50a08ed546a 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/AbstractOverReplicationHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/AbstractOverReplicationHandler.java @@ -48,11 +48,10 @@ protected AbstractOverReplicationHandler(PlacementPolicy placementPolicy) { * @param replica the replica to be removed */ public boolean isPlacementStatusActuallyEqualAfterRemove( + ContainerPlacementStatus currentCPS, final Set replicas, final ContainerReplica replica, final int replicationFactor) { - ContainerPlacementStatus currentCPS = - getPlacementStatus(replicas, replicationFactor); replicas.remove(replica); ContainerPlacementStatus newCPS = getPlacementStatus(replicas, replicationFactor); @@ -78,7 +77,7 @@ protected Set selectReplicasToRemove( * @param replicationFactor Expected Replication Factor of the containe * @return ContainerPlacementStatus indicating if the policy is met or not */ - private ContainerPlacementStatus getPlacementStatus( + protected ContainerPlacementStatus getPlacementStatus( Set replicas, int replicationFactor) { List replicaDns = replicas.stream() .map(ContainerReplica::getDatanodeDetails) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisOverReplicationHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisOverReplicationHandler.java index a0892b28d22..446a8dd5998 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisOverReplicationHandler.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/RatisOverReplicationHandler.java @@ -20,6 +20,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.hdds.scm.ContainerPlacementStatus; import org.apache.hadoop.hdds.scm.PlacementPolicy; import 
org.apache.hadoop.hdds.scm.container.ContainerInfo; import org.apache.hadoop.hdds.scm.container.ContainerReplica; @@ -288,18 +289,18 @@ private int createCommands( */ Set replicaSet = new HashSet<>(replicas); // iterate through replicas in deterministic order + ContainerPlacementStatus originalPlacementStatus = getPlacementStatus(replicaSet, + containerInfo.getReplicationFactor().getNumber()); for (ContainerReplica replica : replicas) { if (excess == 0) { break; } - - if (super.isPlacementStatusActuallyEqualAfterRemove(replicaSet, replica, + if (super.isPlacementStatusActuallyEqualAfterRemove(originalPlacementStatus, replicaSet, replica, containerInfo.getReplicationFactor().getNumber())) { try { replicationManager.sendThrottledDeleteCommand(containerInfo, replica.getReplicaIndex(), replica.getDatanodeDetails(), true); commandsSent++; - excess--; } catch (CommandTargetOverloadedException e) { LOG.debug("Unable to send delete command for container {} to {} as " + "it has too many pending delete commands", @@ -308,6 +309,11 @@ private int createCommands( firstOverloadedException = e; } } + // Even if the command fails to send, we still mark the replica as if the command was sent to ensure a + // deterministic selection order. Then we adjust the replicaSet so it appears as if this replica was deleted + // to allow subsequent placement checks to be accurate. + excess--; + replicaSet.remove(replica); } } // If we encountered an overloaded exception, and then did not send as many diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisOverReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisOverReplicationHandler.java index cfb3952d133..c0c3eff3d15 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisOverReplicationHandler.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisOverReplicationHandler.java @@ -270,6 +270,40 @@ public void testOverReplicatedContainerBecomesMisReplicatedOnRemoving() getOverReplicatedHealthResult(), 0); } + /** + * In this test, the container is already mis-replicated, being on 2 racks rather than 3. + * Removing a replica does not make it "more" mis-replicated, so the handler should remove + * one replica. + * @throws IOException + */ + @Test + public void testOverReplicatedContainerAlreadyMisReplicated() + throws IOException { + Set replicas = createReplicas(container.containerID(), + ContainerReplicaProto.State.CLOSED, 0, 0, 0, 0); + + // Ensure a mis-replicated status is always returned. + when(policy.validateContainerPlacement(anyList(), anyInt())) + .thenReturn(new ContainerPlacementStatusDefault(2, 3, 3)); + + testProcessing(replicas, Collections.emptyList(), getOverReplicatedHealthResult(), 1); + } + + @Test + public void testOverReplicatedContainerBecomesOnSecondRemoval() + throws IOException { + Set replicas = createReplicas(container.containerID(), + ContainerReplicaProto.State.CLOSED, 0, 0, 0, 0, 0); + + // Ensure a mis-replicated status is returned when 3 or fewer replicas are + // checked. 
+ when(policy.validateContainerPlacement(argThat(list -> list.size() <= 3), anyInt())) + .thenReturn(new ContainerPlacementStatusDefault(1, 2, 3)); + + testProcessing(replicas, Collections.emptyList(), + getOverReplicatedHealthResult(), 1); + } + @Test public void testOverReplicatedAllUnhealthySameBCSID() throws IOException { @@ -478,9 +512,13 @@ public void testDeleteThrottling() throws IOException { RatisOverReplicationHandler handler = new RatisOverReplicationHandler(policy, replicationManager); - handler.processAndSendCommands(closedReplicas, Collections.emptyList(), - getOverReplicatedHealthResult(), 2); - assertEquals(2, commandsSent.size()); + // Only 1 command should be sent, as the first call to sendThrottledDelete + // throws an overloaded exception. Rather than skip to the next one, the skipped + // one should get retried later. + assertThrows(CommandTargetOverloadedException.class, + () -> handler.processAndSendCommands(closedReplicas, Collections.emptyList(), + getOverReplicatedHealthResult(), 2)); + assertEquals(1, commandsSent.size()); } /** From e8aea7c07d042cee1232a971c20aaeba57a957bb Mon Sep 17 00:00:00 2001 From: Nandakumar Vadivelu Date: Sun, 26 Jan 2025 14:22:53 +0530 Subject: [PATCH 129/168] HDDS-11892. Remove config from SCM for disabling Ratis. (#7711) --- .../hdds/conf/DefaultConfigManager.java | 2 ++ .../apache/hadoop/hdds/scm/ScmConfigKeys.java | 8 ----- .../apache/hadoop/hdds/scm/ha/SCMHAUtils.java | 12 ++++--- .../upgrade/VersionedDatanodeFeatures.java | 6 ++-- .../hadoop/hdds/scm/ha/SCMHANodeDetails.java | 33 ++----------------- .../scm/server/StorageContainerManager.java | 7 ++-- ...ScmHAUnfinalizedStateValidationAction.java | 6 ++-- .../dev-support/intellij/ozone-site-ha.xml | 4 --- .../dev-support/intellij/ozone-site.xml | 4 --- .../main/compose/ozone-balancer/docker-config | 1 - .../src/main/compose/ozone-ha/docker-config | 1 - .../main/compose/ozonesecure-ha/docker-config | 1 - .../compose/upgrade/compose/ha/docker-config | 1 - .../definitions/ozone/definitions/om-ha.yaml | 1 - .../definitions/ozone/definitions/scm-ha.yaml | 1 - .../examples/ozone-ha/config-configmap.yaml | 1 - .../hadoop/ozone/MiniOzoneClusterImpl.java | 1 - .../ozone/TestOzoneConfigurationFields.java | 1 - 18 files changed, 18 insertions(+), 73 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/DefaultConfigManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/DefaultConfigManager.java index 5b883e8b2c3..b7577377a41 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/DefaultConfigManager.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/DefaultConfigManager.java @@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.conf; import com.google.common.annotations.VisibleForTesting; +import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; import java.util.HashMap; import java.util.Map; @@ -53,6 +54,7 @@ public static void forceUpdateConfigValue(String config, T value) { @VisibleForTesting public static void clearDefaultConfigs() { + SCMHAUtils.setRatisEnabled(true); CONFIG_DEFAULT_MAP.clear(); } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index 85c82af942f..654038b57ae 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -480,14 +480,6 @@ public final 
class ScmConfigKeys { public static final String HDDS_TRACING_ENABLED = "hdds.tracing.enabled"; public static final boolean HDDS_TRACING_ENABLED_DEFAULT = false; - // SCM Ratis related - public static final String OZONE_SCM_HA_ENABLE_KEY - = "ozone.scm.ratis.enable"; - /** Default Value would be Overriden based on the current state of Ratis. - {@link org.apache.hadoop.hdds.conf.DefaultConfigManager} - */ - public static final boolean OZONE_SCM_HA_ENABLE_DEFAULT - = true; public static final String OZONE_SCM_RATIS_PORT_KEY = "ozone.scm.ratis.port"; public static final int OZONE_SCM_RATIS_PORT_DEFAULT diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java index b71adb7099a..74db62768de 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java @@ -25,7 +25,6 @@ import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.conf.ConfigurationException; import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.hdds.conf.DefaultConfigManager; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.ratis.ServerNotLeaderException; import org.apache.hadoop.hdds.scm.ScmConfigKeys; @@ -87,11 +86,14 @@ private SCMHAUtils() { // not used } - // Check if SCM HA is enabled. + // This will be removed in follow-up Jira. Ref. HDDS-11754 + private static boolean isRatisEnabled = true; public static boolean isSCMHAEnabled(ConfigurationSource conf) { - return conf.getBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, - DefaultConfigManager.getValue(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, - ScmConfigKeys.OZONE_SCM_HA_ENABLE_DEFAULT)); + return isRatisEnabled; + } + + public static void setRatisEnabled(boolean value) { + isRatisEnabled = value; } public static String getPrimordialSCM(ConfigurationSource conf) { diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java index 5f52191e36d..a46d9ab3172 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/upgrade/VersionedDatanodeFeatures.java @@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.container.upgrade; import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; import org.apache.hadoop.hdds.upgrade.HDDSLayoutFeature; import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager; import org.apache.hadoop.ozone.OzoneConsts; @@ -117,9 +117,7 @@ public static String chooseContainerPathID(StorageVolume volume, */ public static String chooseContainerPathID(ConfigurationSource conf, String scmID, String clusterID) { - boolean scmHAEnabled = - conf.getBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, - ScmConfigKeys.OZONE_SCM_HA_ENABLE_DEFAULT); + boolean scmHAEnabled = SCMHAUtils.isSCMHAEnabled(conf); if (isFinalized(HDDSLayoutFeature.SCM_HA) || scmHAEnabled) { return clusterID; } else { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHANodeDetails.java 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHANodeDetails.java index 0c90987f0c2..b21a1830517 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHANodeDetails.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHANodeDetails.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hdds.scm.ha; import com.google.common.base.Preconditions; -import org.apache.hadoop.hdds.conf.ConfigurationException; -import org.apache.hadoop.hdds.conf.DefaultConfigManager; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.ScmUtils; @@ -149,10 +147,7 @@ public static SCMHANodeDetails loadDefaultConfig( } /** Validates SCM HA Config. - For Non Initialized SCM the value is taken directly based on the config - {@link org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY} - which defaults to - {@link org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_ENABLE_DEFAULT} + For Non Initialized SCM the value is true. For Previously Initialized SCM the values are taken from the version file
Ratis SCM -> Non Ratis SCM is not supported. @@ -164,30 +159,8 @@ private static void validateSCMHAConfig(SCMStorageConfig scmStorageConfig, boolean scmHAEnableDefault = state == Storage.StorageState.INITIALIZED ? scmStorageConfig.isSCMHAEnabled() : SCMHAUtils.isSCMHAEnabled(conf); - boolean scmHAEnabled = SCMHAUtils.isSCMHAEnabled(conf); - - if (Storage.StorageState.INITIALIZED.equals(state) && - scmHAEnabled != scmHAEnableDefault) { - String errorMessage = String.format("Current State of SCM: %s", - scmHAEnableDefault ? "SCM is running with Ratis. " - : "SCM is running without Ratis. ") - + "Ratis SCM -> Non Ratis SCM is not supported."; - if (!scmHAEnabled) { - throw new ConfigurationException(String.format("Invalid Config %s " + - "Provided ConfigValue: false, Expected Config Value: true. %s", - ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, errorMessage)); - } else { - LOG.warn("Default/Configured value of config {} conflicts with " + - "the expected value. " + - "Default/Configured: {}. " + - "Expected: {}. " + - "Falling back to the expected value. {}", - ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, - scmHAEnabled, scmHAEnableDefault, errorMessage); - } - } - DefaultConfigManager.setConfigValue(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, - scmHAEnableDefault); + // If we have an initialized cluster, use the value from VERSION file. + SCMHAUtils.setRatisEnabled(scmHAEnableDefault); } public static SCMHANodeDetails loadSCMHAConfig(OzoneConfiguration conf, diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java index 52148c3d683..1a986f2f67c 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java @@ -35,7 +35,6 @@ import org.apache.hadoop.hdds.HddsUtils; import org.apache.hadoop.hdds.annotation.InterfaceAudience; import org.apache.hadoop.hdds.conf.ConfigurationSource; -import org.apache.hadoop.hdds.conf.DefaultConfigManager; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.conf.ReconfigurationHandler; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -1330,8 +1329,7 @@ public static boolean scmInit(OzoneConfiguration conf, // Initialize security if security is enabled later. 
initializeSecurityIfNeeded(conf, scmStorageConfig, selfHostName, true); - if (conf.getBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, - ScmConfigKeys.OZONE_SCM_HA_ENABLE_DEFAULT) && !isSCMHAEnabled) { + if (SCMHAUtils.isSCMHAEnabled(conf) && !isSCMHAEnabled) { SCMRatisServerImpl.initialize(scmStorageConfig.getClusterID(), scmStorageConfig.getScmId(), haDetails.getLocalNodeDetails(), conf); @@ -1346,8 +1344,7 @@ public static boolean scmInit(OzoneConfiguration conf, */ try { - DefaultConfigManager.forceUpdateConfigValue( - ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); + SCMHAUtils.setRatisEnabled(true); StorageContainerManager scm = createSCM(conf); scm.start(); scm.getScmHAManager().getRatisServer().triggerSnapshot(); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/ScmHAUnfinalizedStateValidationAction.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/ScmHAUnfinalizedStateValidationAction.java index f6d66875371..7e086ad5651 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/ScmHAUnfinalizedStateValidationAction.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/upgrade/ScmHAUnfinalizedStateValidationAction.java @@ -23,7 +23,6 @@ import static org.apache.hadoop.ozone.upgrade.UpgradeActionHdds.Component.SCM; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.ha.SCMHAUtils; import org.apache.hadoop.hdds.scm.server.SCMStorageConfig; import org.apache.hadoop.hdds.upgrade.HDDSUpgradeAction; @@ -62,9 +61,8 @@ public static void checkScmHA(OzoneConfiguration conf, if (!versionManager.isAllowed(SCM_HA) && SCMHAUtils.isSCMHAEnabled(conf) && !storageConf.isSCMHAEnabled()) { - throw new UpgradeException(String.format("Configuration %s cannot be " + - "used until SCM upgrade has been finalized", - ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY), + throw new UpgradeException("Ratis cannot be " + + "used until SCM upgrade has been finalized", UpgradeException.ResultCodes.PREFINALIZE_ACTION_VALIDATION_FAILED); } } diff --git a/hadoop-ozone/dev-support/intellij/ozone-site-ha.xml b/hadoop-ozone/dev-support/intellij/ozone-site-ha.xml index ff7883fc55f..1e4f14b257b 100644 --- a/hadoop-ozone/dev-support/intellij/ozone-site-ha.xml +++ b/hadoop-ozone/dev-support/intellij/ozone-site-ha.xml @@ -39,10 +39,6 @@ ozone.metadata.dirs /tmp/metadata - - ozone.scm.ratis.enable - true - ozone.scm.service.ids scm-group diff --git a/hadoop-ozone/dev-support/intellij/ozone-site.xml b/hadoop-ozone/dev-support/intellij/ozone-site.xml index e530b9fe0a9..c06449cee70 100644 --- a/hadoop-ozone/dev-support/intellij/ozone-site.xml +++ b/hadoop-ozone/dev-support/intellij/ozone-site.xml @@ -75,10 +75,6 @@ datanode.replication.port 0 - - ozone.scm.ratis.enable - false - hdds.container.report.interval 60m diff --git a/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config index 3d0cfce1eaa..39d97178e33 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-balancer/docker-config @@ -32,7 +32,6 @@ OZONE-SITE.XML_ozone.scm.nodes.scmservice=scm1,scm2,scm3 OZONE-SITE.XML_ozone.scm.address.scmservice.scm1=scm1 OZONE-SITE.XML_ozone.scm.address.scmservice.scm2=scm2 OZONE-SITE.XML_ozone.scm.address.scmservice.scm3=scm3 -OZONE-SITE.XML_ozone.scm.ratis.enable=true 
OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.container.size=100MB OZONE-SITE.XML_ozone.scm.block.size=20MB diff --git a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config index 92a71eea3c1..7317b2c1aa9 100644 --- a/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozone-ha/docker-config @@ -32,7 +32,6 @@ OZONE-SITE.XML_ozone.scm.nodes.scmservice=scm1,scm2,scm3 OZONE-SITE.XML_ozone.scm.address.scmservice.scm1=scm1 OZONE-SITE.XML_ozone.scm.address.scmservice.scm2=scm2 OZONE-SITE.XML_ozone.scm.address.scmservice.scm3=scm3 -OZONE-SITE.XML_ozone.scm.ratis.enable=true OZONE-SITE.XML_ozone.scm.datanode.id.dir=/data/metadata OZONE-SITE.XML_ozone.scm.container.size=1GB OZONE-SITE.XML_ozone.scm.datanode.ratis.volume.free-space.min=10MB diff --git a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config index a4f030d45f5..01ddca174ec 100644 --- a/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/ozonesecure-ha/docker-config @@ -37,7 +37,6 @@ OZONE-SITE.XML_ozone.scm.nodes.scmservice=scm1,scm2,scm3 OZONE-SITE.XML_ozone.scm.address.scmservice.scm1=scm1.org OZONE-SITE.XML_ozone.scm.address.scmservice.scm2=scm2.org OZONE-SITE.XML_ozone.scm.address.scmservice.scm3=scm3.org -OZONE-SITE.XML_ozone.scm.ratis.enable=true OZONE-SITE.XML_ozone.scm.close.container.wait.duration=5s OZONE-SITE.XML_ozone.om.volume.listall.allowed=false diff --git a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config index bb68e9bf60f..e19062a7259 100644 --- a/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config +++ b/hadoop-ozone/dist/src/main/compose/upgrade/compose/ha/docker-config @@ -29,7 +29,6 @@ OZONE-SITE.XML_ozone.scm.nodes.scmservice=scm1,scm2,scm3 OZONE-SITE.XML_ozone.scm.address.scmservice.scm1=scm1.org OZONE-SITE.XML_ozone.scm.address.scmservice.scm2=scm2.org OZONE-SITE.XML_ozone.scm.address.scmservice.scm3=scm3.org -OZONE-SITE.XML_ozone.scm.ratis.enable=true OZONE-SITE.XML_ozone.scm.primordial.node.id=scm1 OZONE-SITE.XML_ozone.scm.pipeline.creation.interval=30s diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/om-ha.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/om-ha.yaml index 3832bb207e0..4a8d6b17b85 100644 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/om-ha.yaml +++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/om-ha.yaml @@ -49,7 +49,6 @@ description: Enable HA for SCM components OZONE-SITE.XML_ozone.scm.address.scmservice.scm0: scm-0.scm.default.svc.cluster.local OZONE-SITE.XML_ozone.scm.address.scmservice.scm1: scm-1.scm.default.svc.cluster.local OZONE-SITE.XML_ozone.scm.address.scmservice.scm2: scm-2.scm.default.svc.cluster.local - OZONE-SITE.XML_ozone.scm.ratis.enable: "true" OZONE-SITE.XML_ozone.scm.primordial.node.id: scm0 - type: add trigger: diff --git a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/scm-ha.yaml b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/scm-ha.yaml index b7aca3c55de..55a4c52780f 100644 --- a/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/scm-ha.yaml +++ b/hadoop-ozone/dist/src/main/k8s/definitions/ozone/definitions/scm-ha.yaml @@ -49,7 +49,6 @@ description: Enable HA for SCM component 
OZONE-SITE.XML_ozone.scm.address.scmservice.scm0: scm-0.scm.default.svc.cluster.local OZONE-SITE.XML_ozone.scm.address.scmservice.scm1: scm-1.scm.default.svc.cluster.local OZONE-SITE.XML_ozone.scm.address.scmservice.scm2: scm-2.scm.default.svc.cluster.local - OZONE-SITE.XML_ozone.scm.ratis.enable: "true" OZONE-SITE.XML_ozone.scm.primordial.node.id: scm0 - type: add trigger: diff --git a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/config-configmap.yaml b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/config-configmap.yaml index a9315eb2103..dbfdde0f0f8 100644 --- a/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/config-configmap.yaml +++ b/hadoop-ozone/dist/src/main/k8s/examples/ozone-ha/config-configmap.yaml @@ -42,5 +42,4 @@ data: OZONE-SITE.XML_ozone.scm.address.scmservice.scm0: scm-0.scm.default.svc.cluster.local OZONE-SITE.XML_ozone.scm.address.scmservice.scm1: scm-1.scm.default.svc.cluster.local OZONE-SITE.XML_ozone.scm.address.scmservice.scm2: scm-2.scm.default.svc.cluster.local - OZONE-SITE.XML_ozone.scm.ratis.enable: "true" OZONE-SITE.XML_ozone.scm.primordial.node.id: scm0 diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java index b3d9f780888..64db4c19006 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java @@ -691,7 +691,6 @@ protected void initializeConfiguration() throws IOException { Path metaDir = Paths.get(path, "ozone-meta"); Files.createDirectories(metaDir); conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.toString()); - // conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true); conf.setTimeDuration(OMConfigKeys.OZONE_OM_RATIS_MINIMUM_TIMEOUT_KEY, DEFAULT_RATIS_RPC_TIMEOUT_SEC, TimeUnit.SECONDS); diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java index 809c19c972f..6d415c0c153 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneConfigurationFields.java @@ -89,7 +89,6 @@ private void addPropertiesNotInXml() { OMConfigKeys.OZONE_OM_DECOMMISSIONED_NODES_KEY, ScmConfigKeys.OZONE_SCM_NODES_KEY, ScmConfigKeys.OZONE_SCM_ADDRESS_KEY, - ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, ScmConfigKeys.OZONE_CHUNK_READ_NETTY_CHUNKED_NIO_FILE_KEY, OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, OMConfigKeys.OZONE_FS_TRASH_CHECKPOINT_INTERVAL_KEY, From d7616ec22a60e1f9ed9e2eea4ecdccd1a0c98bf1 Mon Sep 17 00:00:00 2001 From: Stephen O'Donnell Date: Sun, 26 Jan 2025 10:26:54 +0000 Subject: [PATCH 130/168] HDDS-12135. 
Set RM default deadline to 12 minutes and datanode offset to 6 minutes (#7747) --- .../scm/container/replication/ReplicationManager.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java index d183c876e95..1675da0fa84 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java @@ -1096,13 +1096,13 @@ public static class ReplicationManagerConfiguration */ @Config(key = "event.timeout", type = ConfigType.TIME, - defaultValue = "10m", + defaultValue = "12m", reconfigurable = true, tags = {SCM, OZONE}, description = "Timeout for the container replication/deletion commands " + "sent to datanodes. After this timeout the command will be " + "retried.") - private long eventTimeout = Duration.ofMinutes(10).toMillis(); + private long eventTimeout = Duration.ofMinutes(12).toMillis(); public void setInterval(Duration interval) { this.interval = interval; } @@ -1118,7 +1118,7 @@ public void setEventTimeout(Duration timeout) { */ @Config(key = "event.timeout.datanode.offset", type = ConfigType.TIME, - defaultValue = "30s", + defaultValue = "6m", reconfigurable = true, tags = {SCM, OZONE}, description = "The amount of time to subtract from " @@ -1126,7 +1126,7 @@ public void setEventTimeout(Duration timeout) { + "datanodes which is less than the SCM timeout. This ensures " + "the datanodes will not process a command after SCM believes it " + "should have expired.") - private long datanodeTimeoutOffset = Duration.ofSeconds(30).toMillis(); + private long datanodeTimeoutOffset = Duration.ofMinutes(6).toMillis(); public long getDatanodeTimeoutOffset() { return datanodeTimeoutOffset; } From ffa097f98af6960beb679d667d1dad9506b3ea69 Mon Sep 17 00:00:00 2001 From: Chia-Chuan Yu Date: Mon, 27 Jan 2025 15:24:57 +0800 Subject: [PATCH 131/168] HDDS-12099. Generate kubernetes Robot report in container (#7754) --- hadoop-ozone/dev-support/checks/kubernetes.sh | 22 -------- .../dev-support/bin/dist-layout-stitching | 2 +- hadoop-ozone/dist/pom.xml | 16 ++++++ hadoop-ozone/dist/src/main/compose/testlib.sh | 35 +------------ .../dist/src/main/k8s/examples/testlib.sh | 5 +- .../dist/src/main/smoketest/testlib.sh | 50 +++++++++++++++++++ pom.xml | 1 + 7 files changed, 73 insertions(+), 58 deletions(-) create mode 100644 hadoop-ozone/dist/src/main/smoketest/testlib.sh diff --git a/hadoop-ozone/dev-support/checks/kubernetes.sh b/hadoop-ozone/dev-support/checks/kubernetes.sh index 4699ed5520e..1f9772f8f43 100755 --- a/hadoop-ozone/dev-support/checks/kubernetes.sh +++ b/hadoop-ozone/dev-support/checks/kubernetes.sh @@ -24,28 +24,6 @@ export KUBECONFIG source "${DIR}/_lib.sh" source "${DIR}/install/flekszible.sh" -# TODO these functions will be removed in HDDS-12099 -install_virtualenv() { - _install_tool virtualenv -} - -_install_virtualenv() { - sudo pip3 install virtualenv -} - -install_robot() { - _install_tool robot venv/bin -} - -_install_robot() { - virtualenv venv - source venv/bin/activate - pip install robotframework -} - -install_virtualenv -install_robot - if [[ "$(uname -s)" = "Darwin" ]]; then echo "Skip installing k3s, not supported on Mac. Make sure a working Kubernetes cluster is available." 
>&2 else diff --git a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching index a902eab5a97..be491a3063e 100755 --- a/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching +++ b/hadoop-ozone/dist/dev-support/bin/dist-layout-stitching @@ -124,7 +124,7 @@ run cp -r "${ROOT}/dev-support/byteman" "share/ozone/" #Copy docker compose files #compose files are preprocessed: properties (eg. project.version) are replaced first by maven. run cp -p -R "${ROOT}/hadoop-ozone/dist/target/compose" . -run cp -p -r "${ROOT}/hadoop-ozone/dist/src/main/smoketest" . +run cp -p -r "${ROOT}/hadoop-ozone/dist/target/smoketest" . run cp -p -r "${ROOT}/hadoop-ozone/dist/target/k8s" kubernetes run cp -p -r "${ROOT}/hadoop-ozone/dist/target/Dockerfile" . diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml index 055169e7c4a..3fe9012d095 100644 --- a/hadoop-ozone/dist/pom.xml +++ b/hadoop-ozone/dist/pom.xml @@ -197,6 +197,22 @@ + + copy-smoketest-files + + copy-resources + + compile + + ${basedir}/target/smoketest + + + src/main/smoketest + true + + + +
true + org.apache.hadoop.shaded @@ -102,7 +103,7 @@ com.google.protobuf - org.apache.hadoop.shaded.com.google.protobuf + ${proto.shaded.prefix}.com.google.protobuf com.google.protobuf.* From 34792eda720842b3c00ea88ed23f8098b26931e8 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Thu, 30 Jan 2025 07:41:46 +0100 Subject: [PATCH 143/168] HDDS-12010. Block ozone repair if service is running (#7758) --- .../hadoop/ozone/repair/RepairTool.java | 64 ++++++++++++++----- .../ozone/repair/TransactionInfoRepair.java | 28 ++++---- .../hadoop/ozone/repair/om/FSORepairTool.java | 10 ++- .../ozone/repair/om/SnapshotChainRepair.java | 10 ++- .../scm/cert/RecoverSCMCertificate.java | 10 ++- 5 files changed, 82 insertions(+), 40 deletions(-) diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java index 1ae033e2e71..3fa1033fb23 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.repair; +import jakarta.annotation.Nullable; import org.apache.hadoop.hdds.cli.AbstractSubcommand; import picocli.CommandLine; @@ -45,35 +46,57 @@ public abstract class RepairTool extends AbstractSubcommand implements Callable< /** Hook method for subclasses for performing actual repair task. */ protected abstract void execute() throws Exception; + /** Which Ozone component should be verified to be offline. */ + @Nullable + protected Component serviceToBeOffline() { + return null; + } + @Override public final Void call() throws Exception { if (!dryRun) { confirmUser(); } - execute(); + if (isServiceStateOK()) { + execute(); + } return null; } - protected boolean checkIfServiceIsRunning(String serviceName) { - String runningEnvVar = String.format("OZONE_%s_RUNNING", serviceName); - String pidEnvVar = String.format("OZONE_%s_PID", serviceName); - String isServiceRunning = System.getenv(runningEnvVar); - String servicePid = System.getenv(pidEnvVar); - if ("true".equals(isServiceRunning)) { - if (!force) { - error("Error: %s is currently running on this host with PID %s. " + - "Stop the service before running the repair tool.", serviceName, servicePid); - return true; - } else { - info("Warning: --force flag used. Proceeding despite %s being detected as running with PID %s.", - serviceName, servicePid); - } - } else { - info("No running %s service detected. Proceeding with repair.", serviceName); + private boolean isServiceStateOK() { + final Component service = serviceToBeOffline(); + + if (service == null) { + return true; // online tool + } + + if (!isServiceRunning(service)) { + info("No running %s service detected. Proceeding with repair.", service); + return true; + } + + String servicePid = getServicePid(service); + + if (force) { + info("Warning: --force flag used. Proceeding despite %s being detected as running with PID %s.", + service, servicePid); + return true; } + + error("Error: %s is currently running on this host with PID %s. 
" + + "Stop the service before running the repair tool.", service, servicePid); + return false; } + private static String getServicePid(Component service) { + return System.getenv(String.format("OZONE_%s_PID", service)); + } + + private static boolean isServiceRunning(Component service) { + return "true".equals(System.getenv(String.format("OZONE_%s_RUNNING", service))); + } + protected boolean isDryRun() { return dryRun; } @@ -117,4 +140,11 @@ private String getConsoleReadLineWithFormat(String currentUser) { .nextLine() .trim(); } + + /** Ozone component for offline tools. */ + protected enum Component { + DATANODE, + OM, + SCM, + } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/TransactionInfoRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/TransactionInfoRepair.java index 4fca8e40a08..8b44c30877d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/TransactionInfoRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/TransactionInfoRepair.java @@ -21,6 +21,7 @@ */ package org.apache.hadoop.ozone.repair; +import jakarta.annotation.Nonnull; import org.apache.hadoop.hdds.cli.HddsVersionProvider; import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition; import org.apache.hadoop.hdds.utils.IOUtils; @@ -69,16 +70,12 @@ public class TransactionInfoRepair extends RepairTool { @Override public void execute() throws Exception { - final Component component = getComponent(); - if (checkIfServiceIsRunning(component.name())) { - return; - } List cfHandleList = new ArrayList<>(); List cfDescList = RocksDBUtils.getColumnFamilyDescriptors( dbPath); + String columnFamilyName = getColumnFamily().getName(); try (ManagedRocksDB db = ManagedRocksDB.open(dbPath, cfDescList, cfHandleList)) { - String columnFamilyName = component.columnFamilyDefinition.getName(); ColumnFamilyHandle transactionInfoCfh = RocksDBUtils.getColumnFamilyHandle(columnFamilyName, cfHandleList); if (transactionInfoCfh == null) { throw new IllegalArgumentException(columnFamilyName + @@ -111,7 +108,9 @@ public void execute() throws Exception { } } - private Component getComponent() { + @Override + @Nonnull + protected Component serviceToBeOffline() { final String parent = spec().parent().name(); switch (parent) { case "om": @@ -123,14 +122,15 @@ private Component getComponent() { } } - private enum Component { - OM(OMDBDefinition.TRANSACTION_INFO_TABLE), - SCM(SCMDBDefinition.TRANSACTIONINFO); - - private final DBColumnFamilyDefinition columnFamilyDefinition; - - Component(DBColumnFamilyDefinition columnFamilyDefinition) { - this.columnFamilyDefinition = columnFamilyDefinition; + private DBColumnFamilyDefinition getColumnFamily() { + Component component = serviceToBeOffline(); + switch (component) { + case OM: + return OMDBDefinition.TRANSACTION_INFO_TABLE; + case SCM: + return SCMDBDefinition.TRANSACTIONINFO; + default: + throw new IllegalStateException("This tool does not support component: " + component); } } } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java index eb5a5dd9a2f..7e22536a1b0 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/FSORepairTool.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.repair.om; +import jakarta.annotation.Nonnull; import 
org.apache.commons.io.FileUtils; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -97,11 +98,14 @@ public class FSORepairTool extends RepairTool { description = "Verbose output. Show all intermediate steps.") private boolean verbose; + @Nonnull + @Override + protected Component serviceToBeOffline() { + return Component.OM; + } + @Override public void execute() throws Exception { - if (checkIfServiceIsRunning("OM")) { - return; - } try { Impl repairTool = new Impl(); repairTool.run(); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotChainRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotChainRepair.java index bafd2f89375..06cb8b1e110 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotChainRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/om/SnapshotChainRepair.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.repair.om; +import jakarta.annotation.Nonnull; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.utils.db.StringCodec; import org.apache.hadoop.hdds.utils.db.managed.ManagedRocksDB; @@ -76,11 +77,14 @@ public class SnapshotChainRepair extends RepairTool { description = "Path previous snapshotId to set for the given snapshot") private UUID pathPreviousSnapshotId; + @Nonnull + @Override + protected Component serviceToBeOffline() { + return Component.OM; + } + @Override public void execute() throws Exception { - if (checkIfServiceIsRunning("OM")) { - return; - } List cfHandleList = new ArrayList<>(); List cfDescList = RocksDBUtils.getColumnFamilyDescriptors(dbPath); diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/cert/RecoverSCMCertificate.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/cert/RecoverSCMCertificate.java index 2fac9d53294..1461e4aa730 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/cert/RecoverSCMCertificate.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/scm/cert/RecoverSCMCertificate.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.repair.scm.cert; +import jakarta.annotation.Nonnull; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.security.SecurityConfig; import org.apache.hadoop.hdds.security.x509.certificate.authority.CAType; @@ -71,11 +72,14 @@ public class RecoverSCMCertificate extends RepairTool { description = "SCM DB Path") private String dbPath; + @Nonnull + @Override + protected Component serviceToBeOffline() { + return Component.SCM; + } + @Override public void execute() throws Exception { - if (checkIfServiceIsRunning("SCM")) { - return; - } dbPath = removeTrailingSlashIfNeeded(dbPath); String tableName = VALID_SCM_CERTS.getName(); DBDefinition dbDefinition = From a2825fd11a56678496b314ef9e91b6b5057f2205 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Thu, 30 Jan 2025 08:00:04 +0100 Subject: [PATCH 144/168] HDDS-12155. 
Create new submodule for ozone shell (#7775) --- .../apache/hadoop/hdds/cli/AbstractMixin.java | 0 .../hadoop/hdds/cli/AbstractSubcommand.java | 0 .../dev-support/findbugsExcludeFile.xml | 16 ++ hadoop-ozone/cli-shell/pom.xml | 193 ++++++++++++++++++ .../ozone/shell/ClearSpaceQuotaOptions.java | 0 .../apache/hadoop/ozone/shell/Handler.java | 0 .../hadoop/ozone/shell/ListOptions.java | 0 .../shell/MandatoryReplicationOptions.java | 0 .../hadoop/ozone/shell/OzoneAddress.java | 0 .../apache/hadoop/ozone/shell/OzoneShell.java | 0 .../org/apache/hadoop/ozone/shell/REPL.java | 0 .../ozone/shell/ReplicationOptions.java | 0 .../ozone/shell/SetSpaceQuotaOptions.java | 0 .../org/apache/hadoop/ozone/shell/Shell.java | 0 .../ozone/shell/ShellReplicationOptions.java | 0 .../hadoop/ozone/shell/StoreTypeOption.java | 0 .../hadoop/ozone/shell/acl/AclHandler.java | 0 .../hadoop/ozone/shell/acl/AclOption.java | 0 .../hadoop/ozone/shell/acl/GetAclHandler.java | 0 .../hadoop/ozone/shell/acl/package-info.java | 0 .../shell/bucket/AddAclBucketHandler.java | 0 .../ozone/shell/bucket/BucketCommands.java | 0 .../ozone/shell/bucket/BucketHandler.java | 0 .../hadoop/ozone/shell/bucket/BucketUri.java | 0 .../ozone/shell/bucket/ClearQuotaHandler.java | 0 .../shell/bucket/CreateBucketHandler.java | 0 .../shell/bucket/DeleteBucketHandler.java | 0 .../shell/bucket/GetAclBucketHandler.java | 0 .../ozone/shell/bucket/InfoBucketHandler.java | 0 .../ozone/shell/bucket/LinkBucketHandler.java | 0 .../ozone/shell/bucket/ListBucketHandler.java | 0 .../shell/bucket/RemoveAclBucketHandler.java | 0 .../shell/bucket/SetAclBucketHandler.java | 0 .../ozone/shell/bucket/SetEncryptionKey.java | 0 .../ozone/shell/bucket/SetQuotaHandler.java | 0 .../bucket/SetReplicationConfigHandler.java | 0 .../shell/bucket/UpdateBucketHandler.java | 0 .../ozone/shell/bucket/package-info.java | 0 .../shell/common/VolumeBucketHandler.java | 0 .../ozone/shell/common/VolumeBucketUri.java | 0 .../ozone/shell/common/package-info.java | 0 .../ozone/shell/keys/AddAclKeyHandler.java | 0 .../ozone/shell/keys/CatKeyHandler.java | 0 .../ozone/shell/keys/ChecksumKeyHandler.java | 0 .../ozone/shell/keys/CopyKeyHandler.java | 0 .../ozone/shell/keys/DeleteKeyHandler.java | 0 .../ozone/shell/keys/GetAclKeyHandler.java | 0 .../ozone/shell/keys/GetKeyHandler.java | 0 .../ozone/shell/keys/InfoKeyHandler.java | 0 .../hadoop/ozone/shell/keys/KeyCommands.java | 0 .../hadoop/ozone/shell/keys/KeyHandler.java | 0 .../hadoop/ozone/shell/keys/KeyUri.java | 0 .../ozone/shell/keys/ListKeyHandler.java | 0 .../ozone/shell/keys/PutKeyHandler.java | 0 .../ozone/shell/keys/RemoveAclKeyHandler.java | 0 .../ozone/shell/keys/RenameKeyHandler.java | 0 .../ozone/shell/keys/RewriteKeyHandler.java | 0 .../ozone/shell/keys/SetAclKeyHandler.java | 0 .../hadoop/ozone/shell/keys/package-info.java | 0 .../hadoop/ozone/shell/package-info.java | 22 ++ .../shell/prefix/AddAclPrefixHandler.java | 0 .../shell/prefix/GetAclPrefixHandler.java | 0 .../ozone/shell/prefix/PrefixCommands.java | 0 .../hadoop/ozone/shell/prefix/PrefixUri.java | 0 .../shell/prefix/RemoveAclPrefixHandler.java | 0 .../shell/prefix/SetAclPrefixHandler.java | 0 .../ozone/shell/prefix/package-info.java | 0 .../ozone/shell/s3/GetS3SecretHandler.java | 0 .../ozone/shell/s3/RevokeS3SecretHandler.java | 0 .../hadoop/ozone/shell/s3/S3Handler.java | 0 .../apache/hadoop/ozone/shell/s3/S3Shell.java | 0 .../ozone/shell/s3/SetS3SecretHandler.java | 0 .../hadoop/ozone/shell/s3/package-info.java | 0 .../shell/snapshot/BucketSnapshotHandler.java 
| 0 .../shell/snapshot/CreateSnapshotHandler.java | 0 .../shell/snapshot/DeleteSnapshotHandler.java | 0 .../shell/snapshot/InfoSnapshotHandler.java | 0 .../snapshot/ListSnapshotDiffHandler.java | 0 .../shell/snapshot/ListSnapshotHandler.java | 0 .../shell/snapshot/RenameSnapshotHandler.java | 0 .../shell/snapshot/SnapshotCommands.java | 0 .../shell/snapshot/SnapshotDiffHandler.java | 0 .../ozone/shell/snapshot/SnapshotUri.java | 0 .../ozone/shell/snapshot/package-info.java | 0 .../shell/tenant/GetUserInfoHandler.java | 0 .../tenant/TenantAssignAdminHandler.java | 0 .../TenantAssignUserAccessIdHandler.java | 0 .../shell/tenant/TenantBucketLinkHandler.java | 0 .../shell/tenant/TenantCreateHandler.java | 0 .../shell/tenant/TenantDeleteHandler.java | 0 .../shell/tenant/TenantGetSecretHandler.java | 0 .../ozone/shell/tenant/TenantHandler.java | 0 .../ozone/shell/tenant/TenantListHandler.java | 0 .../shell/tenant/TenantListUsersHandler.java | 0 .../tenant/TenantRevokeAdminHandler.java | 0 .../TenantRevokeUserAccessIdHandler.java | 0 .../shell/tenant/TenantSetSecretHandler.java | 0 .../ozone/shell/tenant/TenantShell.java | 0 .../shell/tenant/TenantUserCommands.java | 0 .../ozone/shell/tenant/package-info.java | 0 .../ozone/shell/token/CancelTokenHandler.java | 0 .../ozone/shell/token/GetTokenHandler.java | 0 .../ozone/shell/token/PrintTokenHandler.java | 0 .../ozone/shell/token/RenewTokenHandler.java | 0 .../ozone/shell/token/RenewerOption.java | 0 .../ozone/shell/token/TokenCommands.java | 0 .../ozone/shell/token/TokenHandler.java | 0 .../hadoop/ozone/shell/token/TokenOption.java | 0 .../ozone/shell/token/package-info.java | 0 .../shell/volume/AddAclVolumeHandler.java | 0 .../ozone/shell/volume/ClearQuotaHandler.java | 0 .../shell/volume/CreateVolumeHandler.java | 0 .../shell/volume/DeleteVolumeHandler.java | 0 .../shell/volume/GetAclVolumeHandler.java | 0 .../ozone/shell/volume/InfoVolumeHandler.java | 0 .../ozone/shell/volume/ListVolumeHandler.java | 0 .../shell/volume/RemoveAclVolumeHandler.java | 0 .../shell/volume/SetAclVolumeHandler.java | 0 .../ozone/shell/volume/SetQuotaHandler.java | 0 .../shell/volume/UpdateVolumeHandler.java | 0 .../ozone/shell/volume/VolumeCommands.java | 0 .../ozone/shell/volume/VolumeHandler.java | 0 .../hadoop/ozone/shell/volume/VolumeUri.java | 0 .../ozone/shell/volume/package-info.java | 0 .../hadoop/ozone/shell/TestOzoneAddress.java | 0 .../shell/TestOzoneAddressClientCreation.java | 0 .../shell/keys/TestChecksumKeyHandler.java | 0 .../hadoop/ozone/shell/keys/package-info.java | 0 .../hadoop/ozone/shell/package-info.java | 0 .../runConfigurations/OzoneShell-ha.xml | 2 +- .../intellij/runConfigurations/OzoneShell.xml | 2 +- hadoop-ozone/dist/pom.xml | 4 + .../dist/src/main/license/jar-report.txt | 1 + hadoop-ozone/dist/src/shell/ozone/ozone | 20 +- hadoop-ozone/integration-test/pom.xml | 4 + hadoop-ozone/pom.xml | 6 + hadoop-ozone/tools/pom.xml | 4 + 137 files changed, 263 insertions(+), 11 deletions(-) rename hadoop-hdds/{tools => common}/src/main/java/org/apache/hadoop/hdds/cli/AbstractMixin.java (100%) rename hadoop-hdds/{tools => common}/src/main/java/org/apache/hadoop/hdds/cli/AbstractSubcommand.java (100%) create mode 100644 hadoop-ozone/cli-shell/dev-support/findbugsExcludeFile.xml create mode 100644 hadoop-ozone/cli-shell/pom.xml rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/ClearSpaceQuotaOptions.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/Handler.java (100%) 
rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/ListOptions.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/MandatoryReplicationOptions.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/OzoneShell.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/REPL.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/ReplicationOptions.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/SetSpaceQuotaOptions.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/Shell.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/ShellReplicationOptions.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/StoreTypeOption.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/acl/AclHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/acl/AclOption.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/acl/GetAclHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/acl/package-info.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/bucket/AddAclBucketHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketUri.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/bucket/ClearQuotaHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/bucket/DeleteBucketHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/bucket/GetAclBucketHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/bucket/InfoBucketHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/bucket/LinkBucketHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/bucket/ListBucketHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/bucket/RemoveAclBucketHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetAclBucketHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetEncryptionKey.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetQuotaHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetReplicationConfigHandler.java (100%) rename 
hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/bucket/UpdateBucketHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/bucket/package-info.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/common/VolumeBucketHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/common/VolumeBucketUri.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/common/package-info.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/keys/AddAclKeyHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/keys/CatKeyHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/keys/ChecksumKeyHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/keys/CopyKeyHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/keys/GetAclKeyHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/keys/GetKeyHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/keys/InfoKeyHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyUri.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/keys/ListKeyHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/keys/RemoveAclKeyHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/keys/RenameKeyHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/keys/RewriteKeyHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/keys/SetAclKeyHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/keys/package-info.java (100%) create mode 100644 hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/package-info.java rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/prefix/AddAclPrefixHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/prefix/GetAclPrefixHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/prefix/PrefixCommands.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/prefix/PrefixUri.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/prefix/RemoveAclPrefixHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/prefix/SetAclPrefixHandler.java (100%) rename 
hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/prefix/package-info.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/s3/GetS3SecretHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/s3/RevokeS3SecretHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/s3/S3Handler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/s3/S3Shell.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/s3/SetS3SecretHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/s3/package-info.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/snapshot/BucketSnapshotHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/snapshot/CreateSnapshotHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/snapshot/DeleteSnapshotHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/snapshot/InfoSnapshotHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/snapshot/RenameSnapshotHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotDiffHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotUri.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/snapshot/package-info.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/GetUserInfoHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantAssignAdminHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantAssignUserAccessIdHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantBucketLinkHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantCreateHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantDeleteHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantGetSecretHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantListHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantListUsersHandler.java (100%) rename hadoop-ozone/{tools => 
cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantRevokeAdminHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantRevokeUserAccessIdHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantSetSecretHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantShell.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantUserCommands.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/package-info.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/token/CancelTokenHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/token/GetTokenHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/token/PrintTokenHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/token/RenewTokenHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/token/RenewerOption.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/token/TokenCommands.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/token/TokenHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/token/TokenOption.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/token/package-info.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/AddAclVolumeHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/ClearQuotaHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/CreateVolumeHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/GetAclVolumeHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/InfoVolumeHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/ListVolumeHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/RemoveAclVolumeHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/SetAclVolumeHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/SetQuotaHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/UpdateVolumeHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/VolumeCommands.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/VolumeHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/VolumeUri.java (100%) rename hadoop-ozone/{tools => 
cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/package-info.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddress.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/test/java/org/apache/hadoop/ozone/shell/keys/TestChecksumKeyHandler.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/test/java/org/apache/hadoop/ozone/shell/keys/package-info.java (100%) rename hadoop-ozone/{tools => cli-shell}/src/test/java/org/apache/hadoop/ozone/shell/package-info.java (100%) diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AbstractMixin.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/AbstractMixin.java similarity index 100% rename from hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AbstractMixin.java rename to hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/AbstractMixin.java diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AbstractSubcommand.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/AbstractSubcommand.java similarity index 100% rename from hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/AbstractSubcommand.java rename to hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/cli/AbstractSubcommand.java diff --git a/hadoop-ozone/cli-shell/dev-support/findbugsExcludeFile.xml b/hadoop-ozone/cli-shell/dev-support/findbugsExcludeFile.xml new file mode 100644 index 00000000000..ee5ed59808b --- /dev/null +++ b/hadoop-ozone/cli-shell/dev-support/findbugsExcludeFile.xml @@ -0,0 +1,16 @@ + + + + diff --git a/hadoop-ozone/cli-shell/pom.xml b/hadoop-ozone/cli-shell/pom.xml new file mode 100644 index 00000000000..92059864f29 --- /dev/null +++ b/hadoop-ozone/cli-shell/pom.xml @@ -0,0 +1,193 @@ + + + + 4.0.0 + + org.apache.ozone + ozone + 2.0.0-SNAPSHOT + + ozone-cli-shell + 2.0.0-SNAPSHOT + jar + Apache Ozone CLI Shell + Apache Ozone CLI Shell + + + false + + + + + com.fasterxml.jackson.core + jackson-annotations + + + com.fasterxml.jackson.core + jackson-databind + + + com.google.guava + guava + + + commons-codec + commons-codec + + + info.picocli + picocli + + + info.picocli + picocli-shell-jline3 + + + jakarta.xml.bind + jakarta.xml.bind-api + + + org.apache.commons + commons-lang3 + + + org.apache.httpcomponents + httpclient + + + org.apache.ozone + hdds-client + + + org.apache.ozone + hdds-common + + + org.apache.ozone + hdds-config + + + org.apache.ozone + ozone-client + + + org.apache.ozone + ozone-common + + + org.apache.ozone + ozone-filesystem-common + + + org.apache.ozone + ozone-interface-client + + + org.apache.ratis + ratis-common + + + org.jline + jline + + + org.slf4j + slf4j-api + + + org.apache.ozone + ozone-filesystem + runtime + + + org.slf4j + slf4j-reload4j + runtime + + + + + org.apache.ozone + hdds-hadoop-dependency-test + test + + + org.apache.ozone + hdds-test-utils + test + + + + + + com.github.spotbugs + spotbugs-maven-plugin + + ${project.basedir}/dev-support/findbugsExcludeFile.xml + true + 2048 + + + + org.apache.maven.plugins + maven-compiler-plugin + + + + org.kohsuke.metainf-services + metainf-services + ${metainf-services.version} + + + info.picocli + picocli-codegen + ${picocli.version} + + + + org.kohsuke.metainf_services.AnnotationProcessorImpl + picocli.codegen.aot.graalvm.processor.NativeImageConfigGeneratorProcessor + + + 
-Aproject=${project.groupId}/${project.artifactId} + + + + + org.apache.maven.plugins + maven-enforcer-plugin + + + ban-annotations + + + + + Only selected annotation processors are enabled, see configuration of maven-compiler-plugin. + + org.apache.hadoop.hdds.conf.Config + org.apache.hadoop.hdds.conf.ConfigGroup + org.apache.hadoop.hdds.scm.metadata.Replicate + org.apache.hadoop.ozone.om.request.validation.RequestFeatureValidator + + + + + + + + + + diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/ClearSpaceQuotaOptions.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/ClearSpaceQuotaOptions.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/ClearSpaceQuotaOptions.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/ClearSpaceQuotaOptions.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Handler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/Handler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Handler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/Handler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/ListOptions.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/ListOptions.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/ListOptions.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/ListOptions.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/MandatoryReplicationOptions.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/MandatoryReplicationOptions.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/MandatoryReplicationOptions.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/MandatoryReplicationOptions.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/OzoneAddress.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneShell.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/OzoneShell.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/OzoneShell.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/OzoneShell.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/REPL.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/REPL.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/REPL.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/REPL.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/ReplicationOptions.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/ReplicationOptions.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/ReplicationOptions.java rename to 
hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/ReplicationOptions.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/SetSpaceQuotaOptions.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/SetSpaceQuotaOptions.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/SetSpaceQuotaOptions.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/SetSpaceQuotaOptions.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Shell.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/Shell.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/Shell.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/Shell.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/ShellReplicationOptions.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/ShellReplicationOptions.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/ShellReplicationOptions.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/ShellReplicationOptions.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/StoreTypeOption.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/StoreTypeOption.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/StoreTypeOption.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/StoreTypeOption.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/acl/AclHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/acl/AclHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/acl/AclHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/acl/AclHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/acl/AclOption.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/acl/AclOption.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/acl/AclOption.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/acl/AclOption.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/acl/GetAclHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/acl/GetAclHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/acl/GetAclHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/acl/GetAclHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/acl/package-info.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/acl/package-info.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/acl/package-info.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/acl/package-info.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/AddAclBucketHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/AddAclBucketHandler.java similarity index 
100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/AddAclBucketHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/AddAclBucketHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketCommands.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketUri.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketUri.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketUri.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/BucketUri.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/ClearQuotaHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/ClearQuotaHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/ClearQuotaHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/ClearQuotaHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/CreateBucketHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/DeleteBucketHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/DeleteBucketHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/DeleteBucketHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/DeleteBucketHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/GetAclBucketHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/GetAclBucketHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/GetAclBucketHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/GetAclBucketHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/InfoBucketHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/InfoBucketHandler.java similarity index 100% rename from 
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/InfoBucketHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/InfoBucketHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/LinkBucketHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/LinkBucketHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/LinkBucketHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/LinkBucketHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/ListBucketHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/ListBucketHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/ListBucketHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/ListBucketHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/RemoveAclBucketHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/RemoveAclBucketHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/RemoveAclBucketHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/RemoveAclBucketHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetAclBucketHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetAclBucketHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetAclBucketHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetAclBucketHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetEncryptionKey.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetEncryptionKey.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetEncryptionKey.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetEncryptionKey.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetQuotaHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetQuotaHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetQuotaHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetQuotaHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetReplicationConfigHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetReplicationConfigHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetReplicationConfigHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetReplicationConfigHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/UpdateBucketHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/UpdateBucketHandler.java similarity index 
100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/UpdateBucketHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/UpdateBucketHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/package-info.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/package-info.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/package-info.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/bucket/package-info.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/common/VolumeBucketHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/common/VolumeBucketHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/common/VolumeBucketHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/common/VolumeBucketHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/common/VolumeBucketUri.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/common/VolumeBucketUri.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/common/VolumeBucketUri.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/common/VolumeBucketUri.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/common/package-info.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/common/package-info.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/common/package-info.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/common/package-info.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/AddAclKeyHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/AddAclKeyHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/AddAclKeyHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/AddAclKeyHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/CatKeyHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/CatKeyHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/CatKeyHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/CatKeyHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/ChecksumKeyHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/ChecksumKeyHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/ChecksumKeyHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/ChecksumKeyHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/CopyKeyHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/CopyKeyHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/CopyKeyHandler.java rename to 
hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/CopyKeyHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/DeleteKeyHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/GetAclKeyHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/GetAclKeyHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/GetAclKeyHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/GetAclKeyHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/GetKeyHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/GetKeyHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/GetKeyHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/GetKeyHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/InfoKeyHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/InfoKeyHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/InfoKeyHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/InfoKeyHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyCommands.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyUri.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyUri.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyUri.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/KeyUri.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/ListKeyHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/ListKeyHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/ListKeyHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/ListKeyHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java 
b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/RemoveAclKeyHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/RemoveAclKeyHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/RemoveAclKeyHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/RemoveAclKeyHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/RenameKeyHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/RenameKeyHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/RenameKeyHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/RenameKeyHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/RewriteKeyHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/RewriteKeyHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/RewriteKeyHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/RewriteKeyHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/SetAclKeyHandler.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/SetAclKeyHandler.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/SetAclKeyHandler.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/SetAclKeyHandler.java diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/package-info.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/package-info.java similarity index 100% rename from hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/package-info.java rename to hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/keys/package-info.java diff --git a/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/package-info.java b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/package-info.java new file mode 100644 index 00000000000..a57cc4ac6fc --- /dev/null +++ b/hadoop-ozone/cli-shell/src/main/java/org/apache/hadoop/ozone/shell/package-info.java @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * A simple CLI for Ozone.
+ */
+package org.apache.hadoop.ozone.shell;
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/prefix/AddAclPrefixHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/prefix/GetAclPrefixHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/prefix/PrefixCommands.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/prefix/PrefixUri.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/prefix/RemoveAclPrefixHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/prefix/SetAclPrefixHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/prefix/package-info.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/s3/GetS3SecretHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/s3/RevokeS3SecretHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/s3/S3Handler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/s3/S3Shell.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/s3/SetS3SecretHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/s3/package-info.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/snapshot/BucketSnapshotHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/snapshot/CreateSnapshotHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/snapshot/DeleteSnapshotHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/snapshot/InfoSnapshotHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotDiffHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/snapshot/ListSnapshotHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/snapshot/RenameSnapshotHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotCommands.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotDiffHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/snapshot/SnapshotUri.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/snapshot/package-info.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/GetUserInfoHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantAssignAdminHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantAssignUserAccessIdHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantBucketLinkHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantCreateHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantDeleteHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantGetSecretHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantListHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantListUsersHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantRevokeAdminHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantRevokeUserAccessIdHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantSetSecretHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantShell.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/TenantUserCommands.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/tenant/package-info.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/token/CancelTokenHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/token/GetTokenHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/token/PrintTokenHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/token/RenewTokenHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/token/RenewerOption.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/token/TokenCommands.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/token/TokenHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/token/TokenOption.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/token/package-info.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/AddAclVolumeHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/ClearQuotaHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/CreateVolumeHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/DeleteVolumeHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/GetAclVolumeHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/InfoVolumeHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/ListVolumeHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/RemoveAclVolumeHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/SetAclVolumeHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/SetQuotaHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/UpdateVolumeHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/VolumeCommands.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/VolumeHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/VolumeUri.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/main/java/org/apache/hadoop/ozone/shell/volume/package-info.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddress.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/test/java/org/apache/hadoop/ozone/shell/keys/TestChecksumKeyHandler.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/test/java/org/apache/hadoop/ozone/shell/keys/package-info.java (100%)
 rename hadoop-ozone/{tools => cli-shell}/src/test/java/org/apache/hadoop/ozone/shell/package-info.java (100%)
diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneShell-ha.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneShell-ha.xml
index e0eef05ae9e..bb10aa85d46 100644
--- a/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneShell-ha.xml
+++ b/hadoop-ozone/dev-support/intellij/runConfigurations/OzoneShell-ha.xml
@@ -17,7 +17,7 @@
+
+      org.apache.ozone
+      ozone-cli-shell
+
       org.apache.ozone
       ozone-common
diff --git a/hadoop-ozone/dist/src/main/license/jar-report.txt b/hadoop-ozone/dist/src/main/license/jar-report.txt
index be48c1d1fe2..135b1d204cb 100644
--- a/hadoop-ozone/dist/src/main/license/jar-report.txt
+++ b/hadoop-ozone/dist/src/main/license/jar-report.txt
@@ -217,6 +217,7 @@ share/ozone/lib/opentracing-util.jar
 share/ozone/lib/orc-core.jar
 share/ozone/lib/osgi-resource-locator.jar
 share/ozone/lib/ozone-client.jar
+share/ozone/lib/ozone-cli-shell.jar
 share/ozone/lib/ozone-common.jar
 share/ozone/lib/ozone-csi.jar
 share/ozone/lib/ozone-datanode.jar
diff --git a/hadoop-ozone/dist/src/shell/ozone/ozone b/hadoop-ozone/dist/src/shell/ozone/ozone
index d3d226a8b89..d868f0217e4 100755
--- a/hadoop-ozone/dist/src/shell/ozone/ozone
+++ b/hadoop-ozone/dist/src/shell/ozone/ozone
@@ -146,11 +146,11 @@
       OZONE_CLASSNAME=org.apache.hadoop.ozone.shell.OzoneShell
       ozone_deprecate_envvar HDFS_OM_SH_OPTS OZONE_SH_OPTS
       OZONE_SH_OPTS="${OZONE_SH_OPTS} ${RATIS_OPTS} ${OZONE_MODULE_ACCESS_ARGS}"
-      OZONE_RUN_ARTIFACT_NAME="ozone-tools"
+      OZONE_RUN_ARTIFACT_NAME="ozone-cli-shell"
     ;;
     s3)
       OZONE_CLASSNAME=org.apache.hadoop.ozone.shell.s3.S3Shell
-      OZONE_RUN_ARTIFACT_NAME="ozone-tools"
+      OZONE_RUN_ARTIFACT_NAME="ozone-cli-shell"
     ;;
     scm)
       OZONE_SUBCMD_SUPPORTDAEMONIZATION="true"
@@ -271,13 +271,15 @@ function check_running_ozone_services
 ## @audience     private
 function ozone_suppress_shell_log
 {
-  if [[ "${OZONE_RUN_ARTIFACT_NAME}" == "ozone-tools" ]] \
-    && [[ "${OZONE_CLASSNAME}" != "org.apache.hadoop.ozone.freon.Freon" ]] \
-    && [[ -z "${OZONE_ORIGINAL_LOGLEVEL}" ]] \
-    && [[ -z "${OZONE_ORIGINAL_ROOT_LOGGER}" ]]; then
-    OZONE_LOGLEVEL=OFF
-    OZONE_ROOT_LOGGER="${OZONE_LOGLEVEL},console"
-    OZONE_OPTS="${OZONE_OPTS} -Dslf4j.internal.verbosity=ERROR"
+  if [[ "${OZONE_RUN_ARTIFACT_NAME}" =~ ozone-cli-.* ]] \
+    || [[ "${OZONE_RUN_ARTIFACT_NAME}" == "ozone-tools" ]]; then
+    if [[ "${OZONE_CLASSNAME}" != "org.apache.hadoop.ozone.freon.Freon" ]] \
+      && [[ -z "${OZONE_ORIGINAL_LOGLEVEL}" ]] \
+      && [[ -z "${OZONE_ORIGINAL_ROOT_LOGGER}" ]]; then
+      OZONE_LOGLEVEL=OFF
+      OZONE_ROOT_LOGGER="${OZONE_LOGLEVEL},console"
+      OZONE_OPTS="${OZONE_OPTS} -Dslf4j.internal.verbosity=ERROR"
+    fi
   fi
 }
diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml
index a7366ff7e5d..de43a673299 100644
--- a/hadoop-ozone/integration-test/pom.xml
+++ b/hadoop-ozone/integration-test/pom.xml
@@ -34,6 +34,10 @@
       org.apache.ozone
       hdds-tools
+
+      org.apache.ozone
+      ozone-cli-shell
+
       org.apache.ozone
       ozone-client
diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml
index 3f73ca96460..f70bbb32f6d 100644
--- a/hadoop-ozone/pom.xml
+++ b/hadoop-ozone/pom.xml
@@ -25,6 +25,7 @@
   Apache Ozone
   Apache Ozone Project
+  cli-shell
   client
   common
   csi
@@ -149,6 +150,11 @@
       hdds-tools
       ${hdds.version}
+
+      org.apache.ozone
+      ozone-cli-shell
+      ${ozone.version}
+
       org.apache.ozone
       ozone-client
diff --git a/hadoop-ozone/tools/pom.xml b/hadoop-ozone/tools/pom.xml
index b3ee3ff5793..8bd9d8905c0 100644
--- a/hadoop-ozone/tools/pom.xml
+++ b/hadoop-ozone/tools/pom.xml
@@ -158,6 +158,10 @@
       org.apache.ozone
       hdds-tools
+
+      org.apache.ozone
+      ozone-cli-shell
+
       org.apache.ozone
       ozone-client

From c7872f7d655aac5f5a2fba0b37e662a543531d30 Mon Sep 17 00:00:00 2001
From: Siyao Meng <50227127+smengcl@users.noreply.github.com>
Date: Thu, 30 Jan 2025 00:14:30 -0800
Subject: [PATCH 145/168] HDDS-12162.
 Log available space of HddsVolume and DbVolume upon Datanode startup (#7777)

---
 .../java/org/apache/hadoop/hdds/fs/SpaceUsageSource.java | 5 +++++
 .../hadoop/ozone/container/common/volume/DbVolume.java   | 4 ++--
 .../hadoop/ozone/container/common/volume/HddsVolume.java | 7 ++++---
 3 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageSource.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageSource.java
index a367cfbdc06..46fd76b8bde 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageSource.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageSource.java
@@ -82,5 +82,10 @@ public long getUsedSpace() {
     public SpaceUsageSource snapshot() {
       return this; // immutable
     }
+
+    @Override
+    public String toString() {
+      return "capacity=" + capacity + ", used=" + used + ", available=" + available;
+    }
   }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/DbVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/DbVolume.java
index 2a762b23874..ac5da4d176b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/DbVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/DbVolume.java
@@ -57,9 +57,9 @@ protected DbVolume(Builder b) throws IOException {
     this.hddsDbStorePathMap = new HashMap<>();

     if (!b.getFailedVolume() && getVolumeInfo().isPresent()) {
-      LOG.info("Creating DbVolume: {} of storage type : {} capacity : {}",
+      LOG.info("Creating DbVolume: {} of storage type: {}, {}",
           getStorageDir(), b.getStorageType(),
-          getVolumeInfo().get().getCapacity());
+          getCurrentUsage());

       initialize();
     }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
index 5fced0e39b3..b48fed963c6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
@@ -131,9 +131,10 @@ private HddsVolume(Builder b) throws IOException {
       this.volumeInfoMetrics =
           new VolumeInfoMetrics(b.getVolumeRootStr(), this);

-      LOG.info("Creating HddsVolume: {} of storage type : {} capacity : {}",
-          getStorageDir(), b.getStorageType(),
-          getVolumeInfo().get().getCapacity());
+      LOG.info("Creating HddsVolume: {} of storage type: {}, {}",
+          getStorageDir(),
+          b.getStorageType(),
+          getCurrentUsage());

       initialize();
     } else {

From fb2738cabf55fcfb88f3bb458d1dc1c6e09c6c03 Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com>
Date: Thu, 30 Jan 2025 10:31:33 +0100
Subject: [PATCH 146/168] HDDS-12143.
 Generate list of integration check splits dynamically (#7766)

---
 .github/workflows/ci.yml                  | 18 +++++----------
 dev-support/ci/integration_suites.sh      | 27 +++++++++++++++++++++
 hadoop-ozone/dev-support/checks/native.sh |  2 +-
 pom.xml                                   | 24 ++++++++++----------
 4 files changed, 46 insertions(+), 25 deletions(-)
 create mode 100755 dev-support/ci/integration_suites.sh

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 7d0f911ed3e..18ea0017f44 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -43,6 +43,7 @@ jobs:
       GITHUB_CONTEXT: ${{ toJson(github) }}
     outputs:
       acceptance-suites: ${{ steps.acceptance-suites.outputs.suites }}
+      integration-suites: ${{ steps.integration-suites.outputs.suites }}
       needs-basic-check: ${{ steps.categorize-basic-checks.outputs.needs-basic-check }}
       needs-native-check: ${{ steps.categorize-basic-checks.outputs.needs-native-check }}
       basic-checks: ${{ steps.categorize-basic-checks.outputs.basic-checks }}
@@ -97,6 +98,9 @@ jobs:
       - name: Acceptance suites
         id: acceptance-suites
         run: dev-support/ci/acceptance_suites.sh
+      - name: Integration suites
+        id: integration-suites
+        run: dev-support/ci/integration_suites.sh
       - name: Categorize Basic Checks
         id: categorize-basic-checks
         env:
@@ -591,17 +595,7 @@ jobs:
     if: needs.build-info.outputs.needs-integration-tests == 'true'
     strategy:
       matrix:
-        profile:
-          - client
-          - container
-          - filesystem
-          - hdds
-          - om
-          - ozone
-          - recon
-          - shell
-          - snapshot
-          - flaky
+        profile: ${{ fromJson(needs.build-info.outputs.integration-suites) }}
       fail-fast: false
     steps:
       - name: Checkout project
@@ -639,7 +633,7 @@
             args="$args -DskipShade"
           fi

-          hadoop-ozone/dev-support/checks/integration.sh -P${{ matrix.profile }} ${args}
+          hadoop-ozone/dev-support/checks/integration.sh -Ptest-${{ matrix.profile }} ${args}
         env:
           DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }}
       - name: Summary of failures
diff --git a/dev-support/ci/integration_suites.sh b/dev-support/ci/integration_suites.sh
new file mode 100755
index 00000000000..266cf4dc818
--- /dev/null
+++ b/dev-support/ci/integration_suites.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# shellcheck source=dev-support/ci/lib/_script_init.sh
+. dev-support/ci/lib/_script_init.sh
+
+# output test suites without test- prefix
+SUITES=$(grep -o 'test-[^<]\+' pom.xml \
+  | sort -u | cut -f2 -d'-')
+
+initialization::ga_output suites \
+  "$(initialization::parameters_to_json ${SUITES})"
diff --git a/hadoop-ozone/dev-support/checks/native.sh b/hadoop-ozone/dev-support/checks/native.sh
index b6996d689c3..f9d1c1b23a9 100755
--- a/hadoop-ozone/dev-support/checks/native.sh
+++ b/hadoop-ozone/dev-support/checks/native.sh
@@ -19,5 +19,5 @@ DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
 CHECK=native

-source "${DIR}/junit.sh" -Pnative -Drocks_tools_native \
+source "${DIR}/junit.sh" -Pnative-tests -Drocks_tools_native \
   "$@"
diff --git a/pom.xml b/pom.xml
index 8f77960ff33..c51b874f28a 100644
--- a/pom.xml
+++ b/pom.xml
@@ -2165,7 +2165,7 @@
-      client
+      test-client
@@ -2182,7 +2182,7 @@
-      container
+      test-container
@@ -2200,7 +2200,7 @@
-      om
+      test-om
@@ -2220,7 +2220,7 @@
-      snapshot
+      test-snapshot
@@ -2237,7 +2237,7 @@
-      filesystem
+      test-filesystem
@@ -2254,7 +2254,7 @@
-      hdds
+      test-hdds
@@ -2274,7 +2274,7 @@
-      ozone
+      test-ozone
@@ -2301,7 +2301,7 @@
-      recon
+      test-recon
@@ -2318,7 +2318,7 @@
-      shell
+      test-shell
@@ -2337,7 +2337,7 @@
-      stable
+      stable-tests
@@ -2352,7 +2352,7 @@
-      flaky
+      test-flaky
@@ -2368,7 +2368,7 @@
-      native
+      native-tests

From 4fc4a9a460d418613be25969601baa4e015b8d85 Mon Sep 17 00:00:00 2001
From: len548 <63490262+len548@users.noreply.github.com>
Date: Thu, 30 Jan 2025 13:25:20 +0100
Subject: [PATCH 147/168] HDDS-12040. `ozone freon cr` fails with NPE in ReplicationSupervisor (#7776)

---
 .../replication/ReplicationSupervisor.java | 14 ++++++-
 .../freon/ClosedContainerReplicator.java   | 40 ++++++++++++++-----
 2 files changed, 42 insertions(+), 12 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java
index 9513cac84ef..8374e45e171 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/ReplicationSupervisor.java
@@ -232,14 +232,24 @@ private ReplicationSupervisor(StateContext context, ExecutorService executor,
    * Queue an asynchronous download of the given container.
*/ public void addTask(AbstractReplicationTask task) { + if (queueHasRoomFor(task)) { + initCounters(task); + addToQueue(task); + } + } + + private boolean queueHasRoomFor(AbstractReplicationTask task) { final int max = maxQueueSize; if (getTotalInFlightReplications() >= max) { LOG.warn("Ignored {} command for container {} in Replication Supervisor" + "as queue reached max size of {}.", task.getClass(), task.getContainerId(), max); - return; + return false; } + return true; + } + public void initCounters(AbstractReplicationTask task) { if (requestCounter.get(task.getMetricName()) == null) { synchronized (this) { if (requestCounter.get(task.getMetricName()) == null) { @@ -255,7 +265,9 @@ public void addTask(AbstractReplicationTask task) { } } } + } + private void addToQueue(AbstractReplicationTask task) { if (inFlight.add(task)) { if (task.getPriority() != ReplicationCommandPriority.LOW) { // Low priority tasks are not included in the replication queue sizes diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java index 1c4f3601b3c..f641d4384ba 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ClosedContainerReplicator.java @@ -25,13 +25,15 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState; import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; import org.apache.hadoop.hdds.scm.container.ContainerInfo; -import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; +import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.utils.HddsServerUtil; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics; import org.apache.hadoop.ozone.container.common.impl.ContainerSet; import org.apache.hadoop.ozone.container.common.interfaces.Handler; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration; +import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil; +import org.apache.hadoop.ozone.container.common.utils.StorageVolumeUtil; import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet; import org.apache.hadoop.ozone.container.common.volume.StorageVolume; import org.apache.hadoop.ozone.container.metadata.WitnessedContainerMetadataStore; @@ -44,7 +46,10 @@ import org.apache.hadoop.ozone.container.replication.ReplicationSupervisor; import org.apache.hadoop.ozone.container.replication.ReplicationTask; import org.apache.hadoop.ozone.container.replication.SimpleContainerDownloader; +import org.apache.hadoop.ozone.container.upgrade.VersionedDatanodeFeatures; import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.kohsuke.MetaInfServices; import picocli.CommandLine.Command; import picocli.CommandLine.Option; @@ -90,6 +95,9 @@ public class ClosedContainerReplicator extends BaseFreonGenerator implements private List replicationTasks; + private static final Logger LOG = + LoggerFactory.getLogger(ClosedContainerReplicator.class); + @Override public Void call() throws Exception { try { @@ -124,16 +132,9 @@ public Void replicate() throws Exception { replicationTasks = new ArrayList<>(); for (ContainerInfo container : containerInfos) { - - final 
ContainerWithPipeline containerWithPipeline = - containerOperationClient - .getContainerWithPipeline(container.getContainerID()); - if (container.getState() == LifeCycleState.CLOSED) { - - final List datanodesWithContainer = - containerWithPipeline.getPipeline().getNodes(); - + final Pipeline pipeline = containerOperationClient.getPipeline(container.getPipelineID().getProtobuf()); + final List datanodesWithContainer = pipeline.getNodes(); final List datanodeUUIDs = datanodesWithContainer .stream().map(DatanodeDetails::getUuidString) @@ -183,6 +184,8 @@ private void checkDestinationDirectory(String dirUrl) throws IOException { private void initializeReplicationSupervisor( ConfigurationSource conf, int queueSize) throws IOException { + String scmID = UUID.randomUUID().toString(); + String clusterID = UUID.randomUUID().toString(); String fakeDatanodeUuid = datanode; if (fakeDatanodeUuid.isEmpty()) { @@ -198,6 +201,20 @@ private void initializeReplicationSupervisor( MutableVolumeSet volumeSet = new MutableVolumeSet(fakeDatanodeUuid, conf, null, StorageVolume.VolumeType.DATA_VOLUME, null); + if (VersionedDatanodeFeatures.SchemaV3.isFinalizedAndEnabled(conf)) { + MutableVolumeSet dbVolumeSet = + HddsServerUtil.getDatanodeDbDirs(conf).isEmpty() ? null : + new MutableVolumeSet(fakeDatanodeUuid, conf, null, + StorageVolume.VolumeType.DB_VOLUME, null); + // load rocksDB with readOnly mode, otherwise it will fail. + HddsVolumeUtil.loadAllHddsVolumeDbStore( + volumeSet, dbVolumeSet, false, LOG); + + for (StorageVolume volume : volumeSet.getVolumesList()) { + StorageVolumeUtil.checkVolume(volume, scmID, clusterID, conf, LOG, dbVolumeSet); + } + } + Map handlers = new HashMap<>(); for (ContainerType containerType : ContainerType.values()) { @@ -211,7 +228,7 @@ private void initializeReplicationSupervisor( metrics, containerReplicaProto -> { }); - handler.setClusterID(UUID.randomUUID().toString()); + handler.setClusterID(clusterID); handlers.put(containerType, handler); } @@ -238,6 +255,7 @@ private void replicateContainer(long counter) throws Exception { timer.time(() -> { final ReplicationTask replicationTask = replicationTasks.get((int) counter); + supervisor.initCounters(replicationTask); supervisor.new TaskRunner(replicationTask).run(); return null; }); From dd0821d6d709cda9bbb05a3c809cea06176557f1 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 31 Jan 2025 05:08:32 +0100 Subject: [PATCH 148/168] HDDS-12144. 
Remove unsupported replication types from config description (#7764) --- .../org/apache/hadoop/hdds/client/ReplicationType.java | 1 + hadoop-hdds/common/src/main/resources/ozone-default.xml | 7 +++---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java index 64969eac422..88c89a288b7 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationType.java @@ -26,6 +26,7 @@ public enum ReplicationType { RATIS, STAND_ALONE, + @Deprecated CHAINED, EC; diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index dfd058f5d70..4693392a217 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -1369,9 +1369,8 @@ specified when writing the key. The default is used if replication is not specified when creating key or no default replication set at bucket. Supported values: - For Standalone: 1 - For Ratis: 3 - For Erasure Coding(EC) supported format: + For RATIS: 1, 3 + For EC (Erasure Coding) supported format: {ECCodec}-{DataBlocks}-{ParityBlocks}-{ChunkSize} ECCodec: Codec for encoding stripe. Supported values : XOR, RS (Reed Solomon) DataBlocks: Number of data blocks in a stripe. @@ -1389,7 +1388,7 @@ Default replication type to be used while writing key into ozone. The value can be specified when writing the key, default is used when nothing is specified when creating key or no default value set at bucket. - Supported values: RATIS, STAND_ALONE, CHAINED and EC. + Supported values: RATIS, EC. From 6d65d4fbe921d9044dca3cd08264176c8ac1724b Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Fri, 31 Jan 2025 06:33:55 +0100 Subject: [PATCH 149/168] HDDS-11277. Remove dependency on hadoop-hdfs in Ozone client (#7781) --- hadoop-hdds/hadoop-dependency-client/pom.xml | 92 -------------------- 1 file changed, 92 deletions(-) diff --git a/hadoop-hdds/hadoop-dependency-client/pom.xml b/hadoop-hdds/hadoop-dependency-client/pom.xml index d3df46061fc..6c2964c094d 100644 --- a/hadoop-hdds/hadoop-dependency-client/pom.xml +++ b/hadoop-hdds/hadoop-dependency-client/pom.xml @@ -203,98 +203,6 @@
- - org.apache.hadoop - hadoop-hdfs - ${hadoop.version} - compile - - - ch.qos.reload4j - reload4j - - - com.fasterxml.jackson.core - jackson-databind - - - com.google.guava - guava - - - com.google.protobuf - protobuf-java - - - com.sun.jersey - jersey-core - - - com.sun.jersey - jersey-server - - - commons-cli - commons-cli - - - commons-codec - commons-codec - - - commons-daemon - commons-daemon - - - commons-io - commons-io - - - commons-logging - commons-logging - - - io.netty - netty - - - io.netty - netty-all - - - javax.servlet - javax.servlet-api - - - log4j - log4j - - - org.apache.htrace - htrace-core4 - - - org.eclipse.jetty - jetty-server - - - org.eclipse.jetty - jetty-util - - - org.eclipse.jetty - jetty-util-ajax - - - org.fusesource.leveldbjni - leveldbjni-all - - - org.slf4j - slf4j-reload4j - - - org.apache.hadoop hadoop-hdfs-client From 182d106c401051a517d8a8027b3e40c5ee6fe96f Mon Sep 17 00:00:00 2001 From: Sarveksha Yeshavantha Raju <79865743+sarvekshayr@users.noreply.github.com> Date: Fri, 31 Jan 2025 13:06:12 +0530 Subject: [PATCH 150/168] HDDS-12132. Parameterize testUpdateTransactionInfoTable for SCM (#7768) --- .../ozone/shell/TestOzoneRepairShell.java | 42 ++++++++++++++----- .../hadoop/ozone/repair/RepairTool.java | 2 +- .../ozone/repair/TransactionInfoRepair.java | 5 +-- 3 files changed, 34 insertions(+), 15 deletions(-) diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java index 7446bd0afa2..aa2ff697354 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneRepairShell.java @@ -18,18 +18,23 @@ package org.apache.hadoop.ozone.shell; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.client.OzoneClient; import org.apache.hadoop.ozone.debug.OzoneDebug; import org.apache.hadoop.ozone.om.OMStorage; import org.apache.hadoop.ozone.repair.OzoneRepair; +import org.apache.hadoop.ozone.repair.RepairTool.Component; +import org.apache.hadoop.ozone.repair.TransactionInfoRepair; import org.apache.ozone.test.GenericTestUtils; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; import picocli.CommandLine; import java.io.File; @@ -37,6 +42,7 @@ import java.util.regex.Pattern; import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_NAME; +import static org.apache.hadoop.ozone.OzoneConsts.SCM_DB_NAME; import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY; import static org.apache.ozone.test.IntLambda.withTextFromSystemIn; import static org.assertj.core.api.Assertions.assertThat; @@ -52,7 +58,6 @@ public class TestOzoneRepairShell { private static MiniOzoneCluster cluster = null; private static OzoneConfiguration conf = null; private static String om; - private static final String TRANSACTION_INFO_TABLE_TERM_INDEX_PATTERN = "([0-9]+#[0-9]+)"; @BeforeAll @@ -80,20 +85,23 @@ static void cleanup() { IOUtils.closeQuietly(cluster); } - @Test 
- public void testUpdateTransactionInfoTable() throws Exception { + @ParameterizedTest + @EnumSource(value = Component.class, names = {"OM", "SCM"}) + public void testUpdateTransactionInfoTable(Component component) throws Exception { CommandLine cmd = new OzoneRepair().getCmd(); - String dbPath = new File(OMStorage.getOmDbDir(conf) + "/" + OM_DB_NAME).getPath(); + String dbPath = getDbPath(component); + String componentLowerCase = component.name().toLowerCase(); cluster.getOzoneManager().stop(); + cluster.getStorageContainerManager().stop(); - String cmdOut = scanTransactionInfoTable(dbPath); + String cmdOut = scanTransactionInfoTable(dbPath, component); String[] originalHighestTermIndex = parseScanOutput(cmdOut); String testTerm = "1111"; String testIndex = "1111"; int exitCode = withTextFromSystemIn("y") - .execute(() -> cmd.execute("om", "update-transaction", + .execute(() -> cmd.execute(componentLowerCase, "update-transaction", "--db", dbPath, "--term", testTerm, "--index", testIndex)); @@ -105,23 +113,35 @@ public void testUpdateTransactionInfoTable() throws Exception { String.format("The highest transaction info has been updated to: (t:%s, i:%s)", testTerm, testIndex) ); - String cmdOut2 = scanTransactionInfoTable(dbPath); + String cmdOut2 = scanTransactionInfoTable(dbPath, component); assertThat(cmdOut2).contains(testTerm + "#" + testIndex); withTextFromSystemIn("y") - .execute(() -> cmd.execute("om", "update-transaction", + .execute(() -> cmd.execute(componentLowerCase, "update-transaction", "--db", dbPath, "--term", originalHighestTermIndex[0], "--index", originalHighestTermIndex[1])); cluster.getOzoneManager().restart(); try (OzoneClient ozoneClient = cluster.newClient()) { - ozoneClient.getObjectStore().createVolume("vol1"); + ozoneClient.getObjectStore().createVolume("vol-" + componentLowerCase); + } + } + + private String getDbPath(Component component) { + switch (component) { + case OM: + return new File(OMStorage.getOmDbDir(conf) + "/" + OM_DB_NAME).getPath(); + case SCM: + return new File(ServerUtils.getScmDbDir(conf) + "/" + SCM_DB_NAME).getPath(); + default: + throw new IllegalStateException("Unknown component: " + component); } } - private String scanTransactionInfoTable(String dbPath) { + private String scanTransactionInfoTable(String dbPath, Component component) { CommandLine debugCmd = new OzoneDebug().getCmd(); - debugCmd.execute("ldb", "--db", dbPath, "scan", "--column_family", "transactionInfoTable"); + debugCmd.execute("ldb", "--db", dbPath, "scan", "--column_family", + TransactionInfoRepair.getColumnFamily(component).getName()); return out.get(); } diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java index 3fa1033fb23..aac0680a10d 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/RepairTool.java @@ -142,7 +142,7 @@ private String getConsoleReadLineWithFormat(String currentUser) { } /** Ozone component for offline tools. 
*/ - protected enum Component { + public enum Component { DATANODE, OM, SCM, diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/TransactionInfoRepair.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/TransactionInfoRepair.java index 8b44c30877d..07ee93c0b73 100644 --- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/TransactionInfoRepair.java +++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/repair/TransactionInfoRepair.java @@ -73,7 +73,7 @@ public void execute() throws Exception { List cfHandleList = new ArrayList<>(); List cfDescList = RocksDBUtils.getColumnFamilyDescriptors( dbPath); - String columnFamilyName = getColumnFamily().getName(); + String columnFamilyName = getColumnFamily(serviceToBeOffline()).getName(); try (ManagedRocksDB db = ManagedRocksDB.open(dbPath, cfDescList, cfHandleList)) { ColumnFamilyHandle transactionInfoCfh = RocksDBUtils.getColumnFamilyHandle(columnFamilyName, cfHandleList); @@ -122,8 +122,7 @@ protected Component serviceToBeOffline() { } } - private DBColumnFamilyDefinition getColumnFamily() { - Component component = serviceToBeOffline(); + public static DBColumnFamilyDefinition getColumnFamily(Component component) { switch (component) { case OM: return OMDBDefinition.TRANSACTION_INFO_TABLE; From 3eabfe1e189efa27b3b55d0e09547b5fb80a5c90 Mon Sep 17 00:00:00 2001 From: Abhishek Pal <43001336+devabhishekpal@users.noreply.github.com> Date: Fri, 31 Jan 2025 22:41:18 +0530 Subject: [PATCH 151/168] HDDS-12085. Add manual refresh button for DU page (#7780) --- .../duBreadcrumbNav/duBreadcrumbNav.tsx | 8 +++--- .../src/v2/components/plots/duPieChart.tsx | 2 +- .../src/v2/pages/diskUsage/diskUsage.less | 1 + .../src/v2/pages/diskUsage/diskUsage.tsx | 25 ++++++++++++++----- 4 files changed, 25 insertions(+), 11 deletions(-) diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duBreadcrumbNav/duBreadcrumbNav.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duBreadcrumbNav/duBreadcrumbNav.tsx index ac09b6bb161..4a1c88ba65e 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duBreadcrumbNav/duBreadcrumbNav.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duBreadcrumbNav/duBreadcrumbNav.tsx @@ -164,10 +164,10 @@ const DUBreadcrumbNav: React.FC = ({ return ( ) } diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/duPieChart.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/duPieChart.tsx index 2601905a142..57fb1cce214 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/duPieChart.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/plots/duPieChart.tsx @@ -203,7 +203,7 @@ const DUPieChart: React.FC = ({ ); } diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/diskUsage/diskUsage.less b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/diskUsage/diskUsage.less index ebeff236a1f..97d2a8a843b 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/diskUsage/diskUsage.less +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/diskUsage/diskUsage.less @@ -35,6 +35,7 @@ margin-bottom: 10px; 
width: 83vw; height: 20%; + flex: 0.98; .breadcrumb-nav { font-size: 0.9vw; diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/diskUsage/diskUsage.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/diskUsage/diskUsage.tsx index 57d7a612c34..ee6f8c6c6c5 100644 --- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/diskUsage/diskUsage.tsx +++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/pages/diskUsage/diskUsage.tsx @@ -19,10 +19,10 @@ import React, { useRef, useState } from 'react'; import { AxiosError } from 'axios'; import { - Alert + Alert, Button, Tooltip } from 'antd'; import { - InfoCircleFilled + InfoCircleFilled, ReloadOutlined, } from '@ant-design/icons'; import { ValueType } from 'react-select'; @@ -113,10 +113,23 @@ const DiskUsage: React.FC<{}> = () => { showIcon={true} closable={false} />
- +
+ + +
Date: Sat, 1 Feb 2025 08:22:38 +0100 Subject: [PATCH 152/168] HDDS-12165. Refactor VolumeInfoMetrics to use getCurrentUsage (#7784) --- .../common/volume/VolumeInfoMetrics.java | 111 ++++++++---------- .../common/volume/TestHddsVolume.java | 12 +- 2 files changed, 52 insertions(+), 71 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java index cd31b8063d3..4e553d2566e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfoMetrics.java @@ -18,11 +18,18 @@ package org.apache.hadoop.ozone.container.common.volume; +import org.apache.hadoop.hdds.fs.SpaceUsageSource; +import org.apache.hadoop.metrics2.MetricsCollector; +import org.apache.hadoop.metrics2.MetricsInfo; +import org.apache.hadoop.metrics2.MetricsRecordBuilder; +import org.apache.hadoop.metrics2.MetricsSource; import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MutableRate; +import org.apache.hadoop.metrics2.lib.Interns; +import org.apache.hadoop.metrics2.lib.MetricsRegistry; import org.apache.hadoop.ozone.OzoneConsts; @@ -31,20 +38,37 @@ */ @Metrics(about = "Ozone Volume Information Metrics", context = OzoneConsts.OZONE) -public class VolumeInfoMetrics { - - private String metricsSourceName = VolumeInfoMetrics.class.getSimpleName(); +public class VolumeInfoMetrics implements MetricsSource { + + private static final String SOURCE_BASENAME = + VolumeInfoMetrics.class.getSimpleName(); + + private static final MetricsInfo CAPACITY = + Interns.info("Capacity", "Capacity"); + private static final MetricsInfo AVAILABLE = + Interns.info("Available", "Available Space"); + private static final MetricsInfo USED = + Interns.info("Used", "Used Space"); + private static final MetricsInfo RESERVED = + Interns.info("Reserved", "Reserved Space"); + private static final MetricsInfo TOTAL_CAPACITY = + Interns.info("TotalCapacity", "Total Capacity"); + + private final MetricsRegistry registry; + private final String metricsSourceName; private final HddsVolume volume; @Metric("Returns the RocksDB compact times of the Volume") private MutableRate dbCompactLatency; - private long containers; /** * @param identifier Typically, path to volume root. E.g. /data/hdds */ - public VolumeInfoMetrics(String identifier, HddsVolume ref) { - this.metricsSourceName += '-' + identifier; - this.volume = ref; + public VolumeInfoMetrics(String identifier, HddsVolume volume) { + this.volume = volume; + + metricsSourceName = SOURCE_BASENAME + '-' + identifier; + registry = new MetricsRegistry(metricsSourceName); + init(); } @@ -88,63 +112,6 @@ public String getVolumeType() { return volume.getType().name(); } - public String getMetricsSourceName() { - return metricsSourceName; - } - - /** - * Test conservative avail space. 
- * |----used----| (avail) |++++++++reserved++++++++| - * |<------- capacity ------->| - * |<------------------- Total capacity -------------->| - * A) avail = capacity - used - * B) capacity = used + avail - * C) Total capacity = used + avail + reserved - */ - - /** - * Return the Storage type for the Volume. - */ - @Metric("Returns the Used space") - public long getUsed() { - return volume.getVolumeInfo().map(VolumeInfo::getScmUsed) - .orElse(0L); - } - - /** - * Return the Total Available capacity of the Volume. - */ - @Metric("Returns the Available space") - public long getAvailable() { - return volume.getVolumeInfo().map(VolumeInfo::getAvailable) - .orElse(0L); - } - - /** - * Return the Total Reserved of the Volume. - */ - @Metric("Fetches the Reserved Space") - public long getReserved() { - return volume.getVolumeInfo().map(VolumeInfo::getReservedInBytes) - .orElse(0L); - } - - /** - * Return the Total capacity of the Volume. - */ - @Metric("Returns the Capacity of the Volume") - public long getCapacity() { - return getUsed() + getAvailable(); - } - - /** - * Return the Total capacity of the Volume. - */ - @Metric("Returns the Total Capacity of the Volume") - public long getTotalCapacity() { - return (getUsed() + getAvailable() + getReserved()); - } - @Metric("Returns the Committed bytes of the Volume") public long getCommitted() { return volume.getCommittedBytes(); @@ -161,4 +128,20 @@ public void dbCompactTimesNanoSecondsIncr(long time) { public long getContainers() { return volume.getContainers(); } + + @Override + public void getMetrics(MetricsCollector collector, boolean all) { + MetricsRecordBuilder builder = collector.addRecord(metricsSourceName); + registry.snapshot(builder, all); + volume.getVolumeInfo().ifPresent(volumeInfo -> { + SpaceUsageSource usage = volumeInfo.getCurrentUsage(); + long reserved = volumeInfo.getReservedInBytes(); + builder + .addGauge(CAPACITY, usage.getCapacity()) + .addGauge(AVAILABLE, usage.getAvailable()) + .addGauge(USED, usage.getUsedSpace()) + .addGauge(RESERVED, reserved) + .addGauge(TOTAL_CAPACITY, usage.getCapacity() + reserved); + }); + } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java index b6a6d2566f3..99056dd79bd 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java @@ -37,6 +37,8 @@ import org.apache.hadoop.hdds.fs.SpaceUsageSource; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult; +import org.apache.hadoop.metrics2.MetricsCollector; +import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl; import org.apache.hadoop.ozone.OzoneConfigKeys; import static org.apache.hadoop.hdds.fs.MockSpaceUsagePersistence.inMemory; @@ -504,13 +506,9 @@ public void testFailedVolumeSpace() throws IOException { VolumeInfoMetrics volumeInfoMetrics = volume.getVolumeInfoStats(); try { - // In case of failed volume all stats should return 0. 
- assertEquals(0, volumeInfoMetrics.getUsed()); - assertEquals(0, volumeInfoMetrics.getAvailable()); - assertEquals(0, volumeInfoMetrics.getCapacity()); - assertEquals(0, volumeInfoMetrics.getReserved()); - assertEquals(0, volumeInfoMetrics.getTotalCapacity()); - assertEquals(0, volumeInfoMetrics.getCommitted()); + // In case of failed volume, metrics should not throw + MetricsCollector collector = new MetricsCollectorImpl(); + volumeInfoMetrics.getMetrics(collector, true); } finally { // Shutdown the volume. volume.shutdown(); From 6e8d443ab69f63ae629690c1b9f81264839f7b22 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 1 Feb 2025 12:56:46 +0100 Subject: [PATCH 153/168] HDDS-12181. Bump jline to 3.29.0 (#7789) --- pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pom.xml b/pom.xml index c51b874f28a..f4d6d14f325 100644 --- a/pom.xml +++ b/pom.xml @@ -129,7 +129,7 @@ 9.4.57.v20241219 1.4.0 3.9.12 - 3.28.0 + 3.29.0 0.10.4 3.1.20 2.12.7 From d9c7d1deedc9468578e10771873cf0ea246099b0 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Sat, 1 Feb 2025 19:14:05 +0100 Subject: [PATCH 154/168] HDDS-12176. Trivial dependency cleanup.(#7787) --- hadoop-hdds/common/pom.xml | 6 -- hadoop-hdds/framework/pom.xml | 2 - hadoop-hdds/hadoop-dependency-client/pom.xml | 2 - hadoop-hdds/hadoop-dependency-server/pom.xml | 4 - hadoop-hdds/hadoop-dependency-test/pom.xml | 2 - hadoop-hdds/interface-client/pom.xml | 3 +- hadoop-hdds/interface-server/pom.xml | 3 +- hadoop-hdds/pom.xml | 51 +++++------ hadoop-hdds/rocks-native/pom.xml | 4 +- hadoop-hdds/rocksdb-checkpoint-differ/pom.xml | 2 +- hadoop-hdds/server-scm/pom.xml | 1 - hadoop-hdds/tools/pom.xml | 2 - hadoop-ozone/csi/pom.xml | 3 +- hadoop-ozone/datanode/pom.xml | 1 - hadoop-ozone/dist/pom.xml | 2 + .../network-tests/pom.xml | 1 + hadoop-ozone/httpfsgateway/pom.xml | 4 - hadoop-ozone/integration-test/pom.xml | 1 - hadoop-ozone/interface-client/pom.xml | 2 +- hadoop-ozone/ozone-manager/pom.xml | 9 -- hadoop-ozone/pom.xml | 53 +++++------ hadoop-ozone/recon/pom.xml | 4 +- hadoop-ozone/s3-secret-store/pom.xml | 2 - hadoop-ozone/s3gateway/pom.xml | 1 - pom.xml | 88 ++++++++++++++++--- 25 files changed, 138 insertions(+), 115 deletions(-) diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml index 35fe32713b1..d9e5f6b9658 100644 --- a/hadoop-hdds/common/pom.xml +++ b/hadoop-hdds/common/pom.xml @@ -54,12 +54,10 @@ com.google.guava guava - compile com.google.protobuf protobuf-java - compile commons-collections @@ -84,8 +82,6 @@ io.grpc grpc-api - ${io.grpc.version} - compile com.google.code.findbugs @@ -186,7 +182,6 @@ org.bouncycastle bcpkix-jdk18on - ${bouncycastle.version} org.bouncycastle @@ -333,7 +328,6 @@ kr.motd.maven os-maven-plugin - ${os-maven-plugin.version} diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml index e90b655148b..b2f5907cc68 100644 --- a/hadoop-hdds/framework/pom.xml +++ b/hadoop-hdds/framework/pom.xml @@ -69,7 +69,6 @@ commons-fileupload commons-fileupload - ${commons-fileupload.version} commons-io @@ -170,7 +169,6 @@ org.apache.ozone rocksdb-checkpoint-differ - ${hdds.version} org.apache.ratis diff --git a/hadoop-hdds/hadoop-dependency-client/pom.xml b/hadoop-hdds/hadoop-dependency-client/pom.xml index 6c2964c094d..e9682a374eb 100644 --- a/hadoop-hdds/hadoop-dependency-client/pom.xml +++ b/hadoop-hdds/hadoop-dependency-client/pom.xml @@ -42,7 +42,6 @@ 
org.apache.hadoop hadoop-common - ${hadoop.version} ch.qos.reload4j @@ -206,7 +205,6 @@ org.apache.hadoop hadoop-hdfs-client - ${hadoop.version} com.squareup.okhttp diff --git a/hadoop-hdds/hadoop-dependency-server/pom.xml b/hadoop-hdds/hadoop-dependency-server/pom.xml index 324b21ef668..8b327746780 100644 --- a/hadoop-hdds/hadoop-dependency-server/pom.xml +++ b/hadoop-hdds/hadoop-dependency-server/pom.xml @@ -47,7 +47,6 @@ org.apache.hadoop hadoop-auth - ${hadoop.version} ch.qos.reload4j @@ -78,7 +77,6 @@ org.apache.hadoop hadoop-common - ${hadoop.version} ch.qos.reload4j @@ -153,8 +151,6 @@ org.apache.hadoop hadoop-hdfs - ${hadoop.version} - compile ch.qos.reload4j diff --git a/hadoop-hdds/hadoop-dependency-test/pom.xml b/hadoop-hdds/hadoop-dependency-test/pom.xml index 6a2b8aa4e06..7385085703e 100644 --- a/hadoop-hdds/hadoop-dependency-test/pom.xml +++ b/hadoop-hdds/hadoop-dependency-test/pom.xml @@ -42,7 +42,6 @@ org.apache.hadoop hadoop-common - ${hadoop.version} test-jar @@ -54,7 +53,6 @@ org.apache.hadoop hadoop-hdfs - ${hadoop.version} test-jar diff --git a/hadoop-hdds/interface-client/pom.xml b/hadoop-hdds/interface-client/pom.xml index da6dec5cda4..4ba24837e6d 100644 --- a/hadoop-hdds/interface-client/pom.xml +++ b/hadoop-hdds/interface-client/pom.xml @@ -48,7 +48,6 @@ org.apache.ratis ratis-thirdparty-misc - ${ratis.thirdparty.version} @@ -128,8 +127,8 @@ + org.apache.maven.plugins maven-antrun-plugin - ${maven-antrun-plugin.version} diff --git a/hadoop-hdds/interface-server/pom.xml b/hadoop-hdds/interface-server/pom.xml index 83aa5f72e36..94e52f61a48 100644 --- a/hadoop-hdds/interface-server/pom.xml +++ b/hadoop-hdds/interface-server/pom.xml @@ -36,7 +36,6 @@ com.google.protobuf protobuf-java - compile @@ -108,8 +107,8 @@ + org.apache.maven.plugins maven-antrun-plugin - ${maven-antrun-plugin.version} diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml index a2cd95066a7..fe42204ce7d 100644 --- a/hadoop-hdds/pom.xml +++ b/hadoop-hdds/pom.xml @@ -68,6 +68,12 @@ hdds-common ${hdds.version} + + org.apache.ozone + hdds-common + ${hdds.version} + test-jar + org.apache.ozone hdds-config @@ -78,6 +84,12 @@ hdds-container-service ${hdds.version} + + org.apache.ozone + hdds-container-service + ${hdds.version} + test-jar + org.apache.ozone hdds-docs @@ -98,6 +110,11 @@ hdds-hadoop-dependency-server ${hdds.version} + + org.apache.ozone + hdds-hadoop-dependency-test + ${hdds.version} + org.apache.ozone hdds-interface-admin @@ -125,56 +142,40 @@ org.apache.ozone - hdds-server-framework - ${hdds.version} - - - org.apache.ozone - hdds-server-scm - ${hdds.version} - - - org.apache.ozone - hdds-tools - ${hdds.version} + hdds-rocks-native + ${hdds.rocks.native.version} + test-jar org.apache.ozone - rocksdb-checkpoint-differ + hdds-server-framework ${hdds.version} org.apache.ozone - hdds-common + hdds-server-scm ${hdds.version} - test-jar - test org.apache.ozone - hdds-container-service + hdds-server-scm ${hdds.version} test-jar - test org.apache.ozone - hdds-hadoop-dependency-test + hdds-test-utils ${hdds.version} - test org.apache.ozone - hdds-server-scm + hdds-tools ${hdds.version} - test-jar - test org.apache.ozone - hdds-test-utils + rocksdb-checkpoint-differ ${hdds.version} - test diff --git a/hadoop-hdds/rocks-native/pom.xml b/hadoop-hdds/rocks-native/pom.xml index 087dc8c0235..47d94d60df2 100644 --- a/hadoop-hdds/rocks-native/pom.xml +++ b/hadoop-hdds/rocks-native/pom.xml @@ -207,7 +207,6 @@ org.apache.maven.plugins maven-patch-plugin - 1.1.1 
${basedir}/src/main/patches/rocks-native.patch 1 @@ -224,8 +223,8 @@ + org.apache.maven.plugins maven-antrun-plugin - ${maven-antrun-plugin.version} unzip-artifact @@ -308,7 +307,6 @@ org.apache.maven.plugins maven-jar-plugin - ${maven-jar-plugin.version} **/*.class diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml index cb7ff3acd59..ea392b659c0 100644 --- a/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml +++ b/hadoop-hdds/rocksdb-checkpoint-differ/pom.xml @@ -101,7 +101,6 @@ org.apache.ozone hdds-rocks-native - ${project.version} test-jar test @@ -129,6 +128,7 @@ + org.apache.maven.plugins maven-enforcer-plugin diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml index 27dafa29b2b..140c77557d5 100644 --- a/hadoop-hdds/server-scm/pom.xml +++ b/hadoop-hdds/server-scm/pom.xml @@ -49,7 +49,6 @@ com.google.protobuf protobuf-java - compile commons-collections diff --git a/hadoop-hdds/tools/pom.xml b/hadoop-hdds/tools/pom.xml index cc47e325cf1..3eef19f323b 100644 --- a/hadoop-hdds/tools/pom.xml +++ b/hadoop-hdds/tools/pom.xml @@ -98,7 +98,6 @@ org.apache.ratis ratis-tools - ${ratis.version} org.apache.ratis @@ -117,7 +116,6 @@ org.slf4j slf4j-reload4j - ${slf4j.version} org.xerial diff --git a/hadoop-ozone/csi/pom.xml b/hadoop-ozone/csi/pom.xml index 905ef0f4adb..4ff4764eb18 100644 --- a/hadoop-ozone/csi/pom.xml +++ b/hadoop-ozone/csi/pom.xml @@ -39,7 +39,6 @@ com.google.guava guava - ${guava.version} com.google.protobuf @@ -247,6 +246,7 @@ + org.apache.maven.plugins maven-enforcer-plugin @@ -283,7 +283,6 @@ kr.motd.maven os-maven-plugin - ${os-maven-plugin.version} diff --git a/hadoop-ozone/datanode/pom.xml b/hadoop-ozone/datanode/pom.xml index 1c6bef22fc2..3decba7b5ab 100644 --- a/hadoop-ozone/datanode/pom.xml +++ b/hadoop-ozone/datanode/pom.xml @@ -47,7 +47,6 @@ org.apache.ozone hdds-hadoop-dependency-server - compile com.sun.xml.bind diff --git a/hadoop-ozone/dist/pom.xml b/hadoop-ozone/dist/pom.xml index f8a93778072..deff11e0684 100644 --- a/hadoop-ozone/dist/pom.xml +++ b/hadoop-ozone/dist/pom.xml @@ -151,6 +151,7 @@ + org.apache.maven.plugins maven-resources-plugin @@ -223,6 +224,7 @@ here. 
As the dependencies will be handled in a separated way with separated classpath definitions--> + org.apache.maven.plugins maven-enforcer-plugin diff --git a/hadoop-ozone/fault-injection-test/network-tests/pom.xml b/hadoop-ozone/fault-injection-test/network-tests/pom.xml index 7cebddbf093..6c34fa1fe2f 100644 --- a/hadoop-ozone/fault-injection-test/network-tests/pom.xml +++ b/hadoop-ozone/fault-injection-test/network-tests/pom.xml @@ -39,6 +39,7 @@ + org.apache.maven.plugins maven-resources-plugin diff --git a/hadoop-ozone/httpfsgateway/pom.xml b/hadoop-ozone/httpfsgateway/pom.xml index a0e15dba147..4bb4aa24b0d 100644 --- a/hadoop-ozone/httpfsgateway/pom.xml +++ b/hadoop-ozone/httpfsgateway/pom.xml @@ -55,8 +55,6 @@ com.googlecode.json-simple json-simple - 1.1.1 - compile junit @@ -215,7 +213,6 @@ org.apache.maven.plugins maven-checkstyle-plugin - ${maven-checkstyle-plugin.version} false @@ -276,7 +273,6 @@ org.apache.maven.plugins maven-antrun-plugin - ${maven-antrun-plugin.version} create-web-xmls diff --git a/hadoop-ozone/integration-test/pom.xml b/hadoop-ozone/integration-test/pom.xml index de43a673299..3e332d63470 100644 --- a/hadoop-ozone/integration-test/pom.xml +++ b/hadoop-ozone/integration-test/pom.xml @@ -77,7 +77,6 @@ org.assertj assertj-core - ${assertj.version} org.hamcrest diff --git a/hadoop-ozone/interface-client/pom.xml b/hadoop-ozone/interface-client/pom.xml index cc8a4e2fee2..383d22a4140 100644 --- a/hadoop-ozone/interface-client/pom.xml +++ b/hadoop-ozone/interface-client/pom.xml @@ -134,8 +134,8 @@ + org.apache.maven.plugins maven-antrun-plugin - ${maven-antrun-plugin.version} diff --git a/hadoop-ozone/ozone-manager/pom.xml b/hadoop-ozone/ozone-manager/pom.xml index 31ae740c7e2..3df66eb5033 100644 --- a/hadoop-ozone/ozone-manager/pom.xml +++ b/hadoop-ozone/ozone-manager/pom.xml @@ -180,13 +180,10 @@ org.apache.ranger ranger-intg - ${ranger.version} org.apache.ranger ranger-plugins-common - ${ranger.version} - compile @@ -279,12 +276,10 @@ org.aspectj aspectjrt - ${aspectj.version} org.aspectj aspectjweaver - ${aspectj.version} org.bouncycastle @@ -294,12 +289,10 @@ org.codehaus.jackson jackson-core-asl - ${jackson1.version} org.codehaus.jackson jackson-jaxrs - ${jackson-jaxr.version} org.codehaus.jackson @@ -314,7 +307,6 @@ org.codehaus.jackson jackson-mapper-asl - ${jackson1.version} org.eclipse.jetty @@ -467,7 +459,6 @@ dev.aspectj aspectj-maven-plugin - ${aspectj-plugin.version} 1.8 1.8 diff --git a/hadoop-ozone/pom.xml b/hadoop-ozone/pom.xml index f70bbb32f6d..620bc5f41eb 100644 --- a/hadoop-ozone/pom.xml +++ b/hadoop-ozone/pom.xml @@ -63,11 +63,23 @@ hdds-client ${hdds.version} + + org.apache.ozone + hdds-client + ${hdds.version} + test-jar + org.apache.ozone hdds-common ${hdds.version} + + org.apache.ozone + hdds-common + ${hdds.version} + test-jar + org.apache.ozone hdds-config @@ -104,6 +116,11 @@ hdds-hadoop-dependency-server ${hdds.version} + + org.apache.ozone + hdds-hadoop-dependency-test + ${hdds.version} + org.apache.ozone hdds-interface-admin @@ -145,6 +162,11 @@ ${hdds.version} test-jar + + org.apache.ozone + hdds-test-utils + ${hdds.version} + org.apache.ozone hdds-tools @@ -253,6 +275,11 @@ ozone-recon ${ozone.version} + + org.apache.ozone + ozone-reconcodegen + ${ozone.version} + org.apache.ozone ozone-s3-secret-store @@ -273,32 +300,6 @@ rocksdb-checkpoint-differ ${hdds.version} - - org.apache.ozone - hdds-client - ${hdds.version} - test-jar - test - - - org.apache.ozone - hdds-common - ${hdds.version} - test-jar - test - - - org.apache.ozone - 
hdds-hadoop-dependency-test - ${hdds.version} - test - - - org.apache.ozone - hdds-test-utils - ${hdds.version} - test - diff --git a/hadoop-ozone/recon/pom.xml b/hadoop-ozone/recon/pom.xml index 87e75e934b4..7006a5d7b75 100644 --- a/hadoop-ozone/recon/pom.xml +++ b/hadoop-ozone/recon/pom.xml @@ -182,7 +182,6 @@ org.apache.ozone ozone-reconcodegen - ${ozone.version} org.apache.ratis @@ -405,7 +404,6 @@ com.github.eirslett frontend-maven-plugin - ${frontend-maven-plugin.version} false target @@ -451,8 +449,8 @@ + org.apache.maven.plugins maven-clean-plugin - ${maven-clean-plugin.version} diff --git a/hadoop-ozone/s3-secret-store/pom.xml b/hadoop-ozone/s3-secret-store/pom.xml index decc23ba8af..d44d9e0cf3e 100644 --- a/hadoop-ozone/s3-secret-store/pom.xml +++ b/hadoop-ozone/s3-secret-store/pom.xml @@ -40,12 +40,10 @@ org.apache.ozone ozone-common - compile org.apache.ozone ozone-manager - compile org.slf4j diff --git a/hadoop-ozone/s3gateway/pom.xml b/hadoop-ozone/s3gateway/pom.xml index 63e21359823..ef5440caf87 100644 --- a/hadoop-ozone/s3gateway/pom.xml +++ b/hadoop-ozone/s3gateway/pom.xml @@ -171,7 +171,6 @@ org.apache.ozone ozone-common - compile org.apache.ozone diff --git a/pom.xml b/pom.xml index f4d6d14f325..55898f7d7d2 100644 --- a/pom.xml +++ b/pom.xml @@ -135,6 +135,7 @@ 2.12.7 3.11.10 0.1.55 + 1.1.1 2.1 1.1.1 5.11.4 @@ -155,6 +156,7 @@ 3.1.3 3.4.2 3.11.1 + 1.1.1 1.6.1 3.3.0 3.3.0 @@ -405,6 +407,11 @@ compile-testing ${compile-testing.version} + + com.googlecode.json-simple + json-simple + ${json-simple.version} + com.jcraft jsch @@ -465,6 +472,11 @@ commons-daemon ${commons-daemon.version} + + commons-fileupload + commons-fileupload + ${commons-fileupload.version} + commons-io commons-io @@ -762,6 +774,12 @@ hadoop-hdfs ${hadoop.version} + + org.apache.hadoop + hadoop-hdfs + ${hadoop.version} + test-jar + org.apache.hadoop hadoop-hdfs-client @@ -778,6 +796,11 @@ ${hadoop.version} test-jar + + org.apache.hadoop + hadoop-mapreduce-client-jobclient + ${hadoop.version} + org.apache.hadoop hadoop-minikdc @@ -833,6 +856,16 @@ log4j-core ${log4j2.version} + + org.apache.ranger + ranger-intg + ${ranger.version} + + + org.apache.ranger + ranger-plugins-common + ${ranger.version} + org.apache.ratis ratis-client @@ -919,6 +952,16 @@ + + org.aspectj + aspectjrt + ${aspectj.version} + + + org.aspectj + aspectjweaver + ${aspectj.version} + org.assertj assertj-core @@ -939,6 +982,21 @@ bcutil-jdk18on ${bouncycastle.version} + + org.codehaus.jackson + jackson-core-asl + ${jackson1.version} + + + org.codehaus.jackson + jackson-jaxrs + ${jackson-jaxr.version} + + + org.codehaus.jackson + jackson-mapper-asl + ${jackson1.version} + org.codehaus.woodstox stax2-api @@ -1020,7 +1078,6 @@ org.glassfish.jersey.containers jersey-container-servlet ${jersey2.version} - compile org.glassfish.hk2 @@ -1082,6 +1139,11 @@ hamcrest ${hamcrest.version} + + org.jacoco + org.jacoco.core + ${jacoco.version} + org.javassist javassist @@ -1216,18 +1278,6 @@ snakeyaml ${snakeyaml.version} - - org.jacoco - org.jacoco.core - ${jacoco.version} - provided - - - org.apache.hadoop - hadoop-mapreduce-client-jobclient - ${hadoop.version} - test - @@ -1259,6 +1309,11 @@ frontend-maven-plugin ${frontend-maven-plugin.version} + + dev.aspectj + aspectj-maven-plugin + ${aspectj-plugin.version} + org.codehaus.mojo build-helper-maven-plugin @@ -1296,6 +1351,7 @@ + org.apache.maven.plugins maven-clean-plugin ${maven-clean-plugin.version} @@ -1307,6 +1363,11 @@ false + + org.apache.maven.plugins + maven-patch-plugin + 
${maven-patch-plugin.version} + org.apache.maven.plugins maven-shade-plugin @@ -1684,6 +1745,7 @@ + org.apache.maven.plugins maven-clean-plugin From 8f16a3b94ba138b46ed2ea514a1653c1f6fe5208 Mon Sep 17 00:00:00 2001 From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com> Date: Sun, 2 Feb 2025 13:05:17 +0100 Subject: [PATCH 155/168] HDDS-12163. Reduce number of individual getCapacity/getAvailable/getUsedSpace calls (#7790) --- .../container/common/impl/HddsDispatcher.java | 7 ++--- .../common/volume/AvailableSpaceFilter.java | 7 +++-- .../volume/CapacityVolumeChoosingPolicy.java | 4 +-- .../common/volume/MutableVolumeSet.java | 8 ++++-- .../common/volume/StorageVolume.java | 14 ---------- .../container/common/volume/VolumeInfo.java | 22 --------------- .../container/common/volume/VolumeUsage.java | 24 ++++------------ .../TestCapacityVolumeChoosingPolicy.java | 6 ++-- .../common/volume/TestHddsVolume.java | 28 +++++++++++-------- .../volume/TestReservedVolumeSpace.java | 4 +-- .../TestRoundRobinVolumeChoosingPolicy.java | 4 +-- .../common/volume/TestVolumeSet.java | 2 +- .../ozoneimpl/TestOzoneContainer.java | 2 +- 13 files changed, 46 insertions(+), 86 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java index d1ea73fbfd8..c4552dee01b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java @@ -622,12 +622,11 @@ private boolean isVolumeFull(Container container) { .orElse(Boolean.FALSE); if (isOpen) { HddsVolume volume = container.getContainerData().getVolume(); - SpaceUsageSource precomputedVolumeSpace = - volume.getCurrentUsage(); - long volumeCapacity = precomputedVolumeSpace.getCapacity(); + SpaceUsageSource usage = volume.getCurrentUsage(); + long volumeCapacity = usage.getCapacity(); long volumeFreeSpaceToSpare = freeSpaceCalculator.get(volumeCapacity); - long volumeFree = precomputedVolumeSpace.getAvailable(); + long volumeFree = usage.getAvailable(); long volumeCommitted = volume.getCommittedBytes(); long volumeAvailable = volumeFree - volumeCommitted; return (volumeAvailable <= volumeFreeSpaceToSpare); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java index 0d82a38d1c7..bb74a051af1 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/AvailableSpaceFilter.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.ozone.container.common.volume; +import org.apache.hadoop.hdds.fs.SpaceUsageSource; + import java.util.HashMap; import java.util.Map; import java.util.function.Predicate; @@ -38,8 +40,9 @@ public class AvailableSpaceFilter implements Predicate { @Override public boolean test(HddsVolume vol) { - long volumeCapacity = vol.getCapacity(); - long free = vol.getAvailable(); + SpaceUsageSource usage = vol.getCurrentUsage(); + long volumeCapacity = usage.getCapacity(); + long free = usage.getAvailable(); long committed = 
vol.getCommittedBytes(); long available = free - committed; long volumeFreeSpaceToSpare = diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java index 14afce020b2..f7204ad6065 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/CapacityVolumeChoosingPolicy.java @@ -91,9 +91,9 @@ public HddsVolume chooseVolume(List volumes, HddsVolume firstVolume = volumesWithEnoughSpace.get(firstIndex); HddsVolume secondVolume = volumesWithEnoughSpace.get(secondIndex); - long firstAvailable = firstVolume.getAvailable() + long firstAvailable = firstVolume.getCurrentUsage().getAvailable() - firstVolume.getCommittedBytes(); - long secondAvailable = secondVolume.getAvailable() + long secondAvailable = secondVolume.getCurrentUsage().getAvailable() - secondVolume.getCommittedBytes(); return firstAvailable < secondAvailable ? secondVolume : firstVolume; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java index 9afea8e6b0c..4a85880105c 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java @@ -33,6 +33,7 @@ import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory; +import org.apache.hadoop.hdds.fs.SpaceUsageSource; import org.apache.hadoop.hdds.utils.HddsServerUtil; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport; @@ -479,9 +480,10 @@ public StorageLocationReport[] getStorageReport() { if (volumeInfo.isPresent()) { try { rootDir = volumeInfo.get().getRootDir(); - scmUsed = volumeInfo.get().getScmUsed(); - remaining = volumeInfo.get().getAvailable(); - capacity = volumeInfo.get().getCapacity(); + SpaceUsageSource usage = volumeInfo.get().getCurrentUsage(); + scmUsed = usage.getUsedSpace(); + remaining = usage.getAvailable(); + capacity = usage.getCapacity(); committed = (volume instanceof HddsVolume) ? 
((HddsVolume) volume).getCommittedBytes() : 0; failed = false; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java index b85ac15c54e..db241da6f6a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java @@ -447,25 +447,11 @@ public String getVolumeRootDir() { return volumeInfo.map(VolumeInfo::getRootDir).orElse(null); } - public long getCapacity() { - return volumeInfo.map(VolumeInfo::getCapacity).orElse(0L); - } - - public long getAvailable() { - return volumeInfo.map(VolumeInfo::getAvailable).orElse(0L); - - } - public SpaceUsageSource getCurrentUsage() { return volumeInfo.map(VolumeInfo::getCurrentUsage) .orElse(SpaceUsageSource.UNKNOWN); } - public long getUsedSpace() { - return volumeInfo.map(VolumeInfo::getScmUsed).orElse(0L); - - } - public File getStorageDir() { return this.storageDir; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java index 3d1be9791ec..fb99d35b62f 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java @@ -152,24 +152,6 @@ private VolumeInfo(Builder b) throws IOException { usage = new VolumeUsage(checkParams, b.conf); } - public long getCapacity() { - return usage.getCapacity(); - } - - /** - *
-   * {@code
-   * Calculate available space use method A.
-   * |----used----|   (avail)   |++++++++reserved++++++++|
-   * |<-     capacity         ->|
-   * A) avail = capacity - used
-   * }
-   * 
- */ - public long getAvailable() { - return usage.getAvailable(); - } - public SpaceUsageSource getCurrentUsage() { return usage.getCurrentUsage(); } @@ -186,10 +168,6 @@ public void refreshNow() { usage.refreshNow(); } - public long getScmUsed() { - return usage.getUsedSpace(); - } - void shutdownUsageThread() { usage.shutdown(); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java index 34ba66c91bb..838395d6ea6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java @@ -65,18 +65,6 @@ SpaceUsageSource realUsage() { return source.snapshot(); } - public long getCapacity() { - return getCurrentUsage().getCapacity(); - } - - public long getAvailable() { - return getCurrentUsage().getAvailable(); - } - - public long getUsedSpace() { - return getCurrentUsage().getUsedSpace(); - } - /** *
    * {@code
@@ -114,15 +102,13 @@ public void decrementUsedSpace(long reclaimedSpace) {
    * so there could be that DU value > totalUsed when there are deletes.
    * @return other used space
    */
-  private static long getOtherUsed(SpaceUsageSource precomputedVolumeSpace) {
-    long totalUsed = precomputedVolumeSpace.getCapacity() -
-        precomputedVolumeSpace.getAvailable();
-    return Math.max(totalUsed - precomputedVolumeSpace.getUsedSpace(), 0L);
+  private static long getOtherUsed(SpaceUsageSource usage) {
+    long totalUsed = usage.getCapacity() - usage.getAvailable();
+    return Math.max(totalUsed - usage.getUsedSpace(), 0L);
   }
 
-  private long getRemainingReserved(
-      SpaceUsageSource precomputedVolumeSpace) {
-    return Math.max(reservedInBytes - getOtherUsed(precomputedVolumeSpace), 0L);
+  private long getRemainingReserved(SpaceUsageSource usage) {
+    return Math.max(reservedInBytes - getOtherUsed(usage), 0L);
   }
 
   public synchronized void start() {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java
index 1eba25c3571..1a53291e0e3 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestCapacityVolumeChoosingPolicy.java
@@ -104,9 +104,9 @@ public void testCapacityVolumeChoosingPolicy() throws Exception {
     HddsVolume hddsVolume2 = volumes.get(1);
     HddsVolume hddsVolume3 = volumes.get(2);
 
-    assertEquals(100L, hddsVolume1.getAvailable());
-    assertEquals(200L, hddsVolume2.getAvailable());
-    assertEquals(300L, hddsVolume3.getAvailable());
+    assertEquals(100L, hddsVolume1.getCurrentUsage().getAvailable());
+    assertEquals(200L, hddsVolume2.getCurrentUsage().getAvailable());
+    assertEquals(300L, hddsVolume3.getCurrentUsage().getAvailable());
 
     Map<HddsVolume, Integer> chooseCount = new HashMap<>();
     chooseCount.put(hddsVolume1, 0);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
index 99056dd79bd..ea275109480 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
@@ -248,7 +248,7 @@ public void testShutdown() throws Exception {
     HddsVolume volume = volumeBuilder.build();
 
     assertEquals(initialUsedSpace, savedUsedSpace.get());
-    assertEquals(expectedUsedSpace, volume.getUsedSpace());
+    assertEquals(expectedUsedSpace, volume.getCurrentUsage().getUsedSpace());
 
     // Shutdown the volume.
     volume.shutdown();
@@ -264,10 +264,12 @@ public void testShutdown() throws Exception {
     StorageSize size = StorageSize.parse(RESERVED_SPACE);
     long reservedSpaceInBytes = (long) size.getUnit().toBytes(size.getValue());
 
+    SpaceUsageSource reportedUsage = volume.getCurrentUsage();
+
     assertEquals(spaceUsage.getCapacity(),
-        volume.getCapacity() + reservedSpaceInBytes);
+        reportedUsage.getCapacity() + reservedSpaceInBytes);
     assertEquals(spaceUsage.getAvailable(),
-        volume.getAvailable() + reservedSpaceInBytes);
+        reportedUsage.getAvailable() + reservedSpaceInBytes);
   }
 
   /**
@@ -300,8 +302,9 @@ public void testReportUsedBiggerThanActualUsed() throws IOException {
 
     HddsVolume volume = volumeBuilder.build();
 
-    assertEquals(400, volume.getCapacity());
-    assertEquals(100, volume.getAvailable());
+    SpaceUsageSource usage = volume.getCurrentUsage();
+    assertEquals(400, usage.getCapacity());
+    assertEquals(100, usage.getAvailable());
 
     // Shutdown the volume.
     volume.shutdown();
@@ -327,8 +330,9 @@ public void testReportUsedSmallerThanActualUsed() throws IOException {
 
     HddsVolume volume = volumeBuilder.build();
 
-    assertEquals(400, volume.getCapacity());
-    assertEquals(190, volume.getAvailable());
+    SpaceUsageSource usage = volume.getCurrentUsage();
+    assertEquals(400, usage.getCapacity());
+    assertEquals(190, usage.getAvailable());
 
     // Shutdown the volume.
     volume.shutdown();
@@ -353,8 +357,9 @@ public void testOverUsedReservedSpace() throws IOException {
 
     HddsVolume volume = volumeBuilder.build();
 
-    assertEquals(400, volume.getCapacity());
-    assertEquals(300, volume.getAvailable());
+    SpaceUsageSource usage = volume.getCurrentUsage();
+    assertEquals(400, usage.getCapacity());
+    assertEquals(300, usage.getAvailable());
 
     // Shutdown the volume.
     volume.shutdown();
@@ -379,8 +384,9 @@ public void testOverUsedHddsSpace() throws IOException {
 
     HddsVolume volume = volumeBuilder.build();
 
-    assertEquals(400, volume.getCapacity());
-    assertEquals(0, volume.getAvailable());
+    SpaceUsageSource usage = volume.getCurrentUsage();
+    assertEquals(400, usage.getCapacity());
+    assertEquals(0, usage.getAvailable());
 
     // Shutdown the volume.
     volume.shutdown();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java
index 5e0a31944f7..393b3870159 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestReservedVolumeSpace.java
@@ -70,7 +70,7 @@ public void testDefaultConfig() throws Exception {
 
     // Gets the total capacity reported by Ozone, which may be limited to less than the volume's real capacity by the
     // DU reserved configurations.
-    long volumeCapacity = hddsVolume.getCapacity();
+    long volumeCapacity = hddsVolume.getCurrentUsage().getCapacity();
     VolumeUsage usage = hddsVolume.getVolumeInfo().get().getUsageForTesting();
 
     // Gets the actual total capacity without accounting for DU reserved space configurations.
@@ -94,7 +94,7 @@ public void testVolumeCapacityAfterReserve() throws Exception {
     float percentage = conf.getFloat(HDDS_DATANODE_DIR_DU_RESERVED_PERCENT,
         HDDS_DATANODE_DIR_DU_RESERVED_PERCENT_DEFAULT);
 
-    long volumeCapacity = hddsVolume.getCapacity();
+    long volumeCapacity = hddsVolume.getCurrentUsage().getCapacity();
     VolumeUsage usage = hddsVolume.getVolumeInfo().get().getUsageForTesting();
 
     //Gets the actual total capacity
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
index 1df26365531..a95cb405790 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
@@ -93,8 +93,8 @@ public void testRRVolumeChoosingPolicy() throws Exception {
     HddsVolume hddsVolume1 = volumes.get(0);
     HddsVolume hddsVolume2 = volumes.get(1);
 
-    assertEquals(100L, hddsVolume1.getAvailable());
-    assertEquals(200L, hddsVolume2.getAvailable());
+    assertEquals(100L, hddsVolume1.getCurrentUsage().getAvailable());
+    assertEquals(200L, hddsVolume2.getCurrentUsage().getAvailable());
 
     // Test two rounds of round-robin choosing
     assertEquals(hddsVolume1, policy.chooseVolume(volumes, 0));
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
index 68e687fefad..054e7a9fb23 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
@@ -220,7 +220,7 @@ public void testShutdown() throws Exception {
     for (StorageVolume volume : volumesList) {
       assertNotNull(volume.getVolumeInfo().get()
               .getUsageForTesting());
-      volume.getAvailable();
+      volume.getCurrentUsage();
     }
   }
 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 2f2cbc81e90..c2e8393a8d4 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -234,7 +234,7 @@ public void testContainerCreateDiskFull(ContainerTestVersionInfo versionInfo)
       volume.format(clusterId);
 
       // eat up all available space except size of 1 container
-      volume.incCommittedBytes(volume.getAvailable() - containerSize);
+      volume.incCommittedBytes(volume.getCurrentUsage().getAvailable() - containerSize);
       // eat up 10 bytes more, now available space is less than 1 container
       volume.incCommittedBytes(10);
     }

From 8cbf459e4f845d037d85947f396abec897eacfcc Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com>
Date: Mon, 3 Feb 2025 07:51:43 +0100
Subject: [PATCH 156/168] HDDS-12142. Save logs from build check (#7782)

---
 .github/workflows/ci.yml                   | 16 ++++++++++++++-
 hadoop-ozone/dev-support/checks/_build.sh  | 16 ++++++++++++++-
 hadoop-ozone/dev-support/checks/build.sh   |  3 ++-
 hadoop-ozone/dev-support/checks/compile.sh | 23 ++++++++++++++++++++++
 hadoop-ozone/dev-support/checks/repro.sh   | 17 +++-------------
 5 files changed, 58 insertions(+), 17 deletions(-)
 create mode 100755 hadoop-ozone/dev-support/checks/compile.sh

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 18ea0017f44..fa852843e7d 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -151,6 +151,13 @@ jobs:
         run: hadoop-ozone/dev-support/checks/build.sh -Pdist -Psrc -Dmaven.javadoc.skip=true ${{ inputs.ratis_args }}
         env:
           DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }}
+      - name: Archive build results
+        uses: actions/upload-artifact@v4
+        if: ${{ !cancelled() }}
+        with:
+          name: ${{ github.job }}
+          path: target/${{ github.job }}
+        continue-on-error: true
       - name: Store binaries for tests
         uses: actions/upload-artifact@v4
         with:
@@ -227,10 +234,17 @@ jobs:
           distribution: 'temurin'
           java-version: ${{ matrix.java }}
       - name: Compile Ozone using Java ${{ matrix.java }}
-        run: hadoop-ozone/dev-support/checks/build.sh -Pdist -DskipRecon -Dmaven.javadoc.failOnWarnings=${{ matrix.java != 8 }} -Djavac.version=${{ matrix.java }} ${{ inputs.ratis_args }}
+        run: hadoop-ozone/dev-support/checks/compile.sh -Pdist -DskipRecon -Dmaven.javadoc.failOnWarnings=${{ matrix.java != 8 }} -Djavac.version=${{ matrix.java }} ${{ inputs.ratis_args }}
         env:
           OZONE_WITH_COVERAGE: false
           DEVELOCITY_ACCESS_KEY: ${{ secrets.DEVELOCITY_ACCESS_KEY }}
+      - name: Archive build results
+        uses: actions/upload-artifact@v4
+        if: ${{ !cancelled() }}
+        with:
+          name: ${{ github.job }}-${{ matrix.java }}
+          path: target/${{ github.job }}
+        continue-on-error: true
   basic:
     needs:
       - build-info
diff --git a/hadoop-ozone/dev-support/checks/_build.sh b/hadoop-ozone/dev-support/checks/_build.sh
index b1f23a9ba8a..fa2ff3224ca 100755
--- a/hadoop-ozone/dev-support/checks/_build.sh
+++ b/hadoop-ozone/dev-support/checks/_build.sh
@@ -13,11 +13,20 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+
+set -u -o pipefail
+
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
 cd "$DIR/../../.." || exit 1
 
+: ${CHECK:="build"}
+: ${ERROR_PATTERN:="\[ERROR\]"}
 : ${OZONE_WITH_COVERAGE:="false"}
 
+BASE_DIR="$(pwd -P)"
+REPORT_DIR=${OUTPUT_DIR:-"${BASE_DIR}/target/${CHECK}"}
+REPORT_FILE="$REPORT_DIR/summary.txt"
+
 MAVEN_OPTIONS='-V -B -DskipTests -DskipDocs --no-transfer-progress'
 
 if [[ "${OZONE_WITH_COVERAGE}" == "true" ]]; then
@@ -27,5 +36,10 @@ else
 fi
 
 export MAVEN_OPTS="-Xmx4096m ${MAVEN_OPTS:-}"
-mvn ${MAVEN_OPTIONS} clean "$@"
+mvn ${MAVEN_OPTIONS} clean "$@" | tee output.log
 rc=$?
+
+mkdir -p "$REPORT_DIR" # after `mvn clean`
+mv output.log "$REPORT_DIR"/
+
+source "${DIR}/_post_process.sh"
diff --git a/hadoop-ozone/dev-support/checks/build.sh b/hadoop-ozone/dev-support/checks/build.sh
index ac524f755e3..11adbb9b8c0 100755
--- a/hadoop-ozone/dev-support/checks/build.sh
+++ b/hadoop-ozone/dev-support/checks/build.sh
@@ -14,7 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-set -eu -o pipefail
+set -u -o pipefail
 
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+
 source "${DIR}"/_build.sh install "$@"
diff --git a/hadoop-ozone/dev-support/checks/compile.sh b/hadoop-ozone/dev-support/checks/compile.sh
new file mode 100755
index 00000000000..cdae70546e7
--- /dev/null
+++ b/hadoop-ozone/dev-support/checks/compile.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -u -o pipefail
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+
+CHECK=compile
+
+source "${DIR}"/_build.sh verify "$@"
diff --git a/hadoop-ozone/dev-support/checks/repro.sh b/hadoop-ozone/dev-support/checks/repro.sh
index 8d3db0fa7e9..cbc707c1274 100755
--- a/hadoop-ozone/dev-support/checks/repro.sh
+++ b/hadoop-ozone/dev-support/checks/repro.sh
@@ -19,19 +19,8 @@
 set -u -o pipefail
 
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-cd "$DIR/../../.." || exit 1
 
-BASE_DIR="$(pwd -P)"
-REPORT_DIR=${OUTPUT_DIR:-"${BASE_DIR}/target/repro"}
+CHECK=repro
+ERROR_PATTERN='ERROR.*mismatch'
 
-rc=0
-source "${DIR}"/_build.sh verify artifact:compare "$@" | tee output.log
-
-mkdir -p "$REPORT_DIR"
-mv output.log "$REPORT_DIR"/
-
-REPORT_FILE="$REPORT_DIR/summary.txt"
-grep 'ERROR.*mismatch' "${REPORT_DIR}/output.log" > "${REPORT_FILE}"
-
-ERROR_PATTERN="\[ERROR\]"
-source "${DIR}/_post_process.sh"
+source "${DIR}"/_build.sh verify artifact:compare "$@"

From cd996f0468158f8ec653ee0ece21e37f0e0ec4b7 Mon Sep 17 00:00:00 2001
From: Abhishek Pal <43001336+devabhishekpal@users.noreply.github.com>
Date: Mon, 3 Feb 2025 12:33:28 +0530
Subject: [PATCH 157/168] HDDS-12073. Don't show Source Bucket and Volume if
 null in DU metadata (#7760)

---
 .../v2/components/duMetadata/duMetadata.tsx   | 19 +++++++++++++++++--
 1 file changed, 17 insertions(+), 2 deletions(-)

diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duMetadata/duMetadata.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duMetadata/duMetadata.tsx
index 5cae2fbc87e..b5dc002c1fc 100644
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duMetadata/duMetadata.tsx
+++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/v2/components/duMetadata/duMetadata.tsx
@@ -137,8 +137,7 @@ const DUMetadata: React.FC = ({
      */
     const selectedInfoKeys = [
       'bucketName', 'bucketLayout', 'encInfo', 'fileName', 'keyName',
-      'name', 'owner', 'sourceBucket', 'sourceVolume', 'storageType',
-      'usedNamespace', 'volumeName', 'volume'
+      'name', 'owner', 'storageType', 'usedNamespace', 'volumeName', 'volume'
     ] as const;
     const objectInfo: ObjectInfo = summaryResponse.objectInfo ?? {};
 
@@ -155,6 +154,22 @@ const DUMetadata: React.FC = ({
       }
     });
 
+    // Source Volume and Source Bucket are present for linked buckets and volumes.
+    // If it is not linked it will be null and should not be shown
+    if (objectInfo?.sourceBucket !== undefined && objectInfo?.sourceBucket !== null) {
+      data.push({
+        key: 'Source Bucket',
+        value: objectInfo.sourceBucket
+      });
+    }
+
+    if(objectInfo?.sourceVolume !== undefined && objectInfo?.sourceVolume !== null) {
+      data.push({
+        key: 'Source Volume',
+        value: objectInfo.sourceVolume
+      });
+    }
+
     if (objectInfo?.creationTime !== undefined && objectInfo?.creationTime !== -1) {
       data.push({
         key: 'Creation Time',

From 24aab04e5c6c3ddd1ec051ce48b8d2a035b4d3ca Mon Sep 17 00:00:00 2001
From: Sadanand Shenoy 
Date: Mon, 3 Feb 2025 13:20:46 +0530
Subject: [PATCH 158/168] HDDS-11508. Decouple delete batch limits from Ratis
 request size for DirectoryDeletingService. (#7365)

---
 .../src/main/resources/ozone-default.xml      |  8 ---
 .../apache/hadoop/hdds/utils/db/Table.java    | 56 +++++++++++++++++
 .../hadoop/hdds/utils/db/TypedTable.java      | 16 ++++-
 .../apache/hadoop/ozone/om/OMConfigKeys.java  |  7 ---
 .../TestDirectoryDeletingServiceWithFSO.java  | 21 +++----
 .../hadoop/fs/ozone/TestRootedDDSWithFSO.java |  1 -
 ...napshotDeletingServiceIntegrationTest.java |  1 -
 ...estReconInsightsForDeletedDirectories.java | 37 ++++++-----
 .../hadoop/ozone/om/DeleteKeysResult.java     | 53 ++++++++++++++++
 .../apache/hadoop/ozone/om/KeyManager.java    | 10 ++-
 .../hadoop/ozone/om/KeyManagerImpl.java       | 49 ++++++++++-----
 .../org/apache/hadoop/ozone/om/OMMetrics.java |  1 -
 .../service/AbstractKeyDeletingService.java   | 60 ++++++++----------
 .../om/service/DirectoryDeletingService.java  | 52 +++++-----------
 .../service/TestDirectoryDeletingService.java | 62 +------------------
 15 files changed, 233 insertions(+), 201 deletions(-)
 create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DeleteKeysResult.java

diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 4693392a217..f7ff9089b92 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -3686,14 +3686,6 @@
       be defined with postfix (ns,ms,s,m,h,d)
     
   
-  <property>
-    <name>ozone.path.deleting.limit.per.task</name>
-    <value>6000</value>
-    <tag>OZONE, PERFORMANCE, OM</tag>
-    <description>A maximum number of paths(dirs/files) to be deleted by
-      directory deleting service per time interval.
-    </description>
-  </property>
   
     ozone.snapshot.filtering.limit.per.task
     2
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
index c7055267052..0c435066b8e 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
@@ -336,6 +336,14 @@ interface KeyValue<KEY, VALUE> {
     KEY getKey() throws IOException;
 
     VALUE getValue() throws IOException;
+
+    default byte[] getRawKey() throws IOException {
+      return null;
+    }
+
+    default byte[] getRawValue() throws IOException {
+      return null;
+    }
   }
 
   static <K, V> KeyValue<K, V> newKeyValue(K key, V value) {
@@ -375,6 +383,54 @@ public int hashCode() {
     };
   }
 
+  static <K, V> KeyValue<K, V> newKeyValue(K key, V value, byte[] rawKey, byte[] rawValue) {
+    return new KeyValue<K, V>() {
+      @Override
+      public K getKey() {
+        return key;
+      }
+
+      @Override
+      public V getValue() {
+        return value;
+      }
+
+      @Override
+      public byte[] getRawKey() throws IOException {
+        return rawKey;
+      }
+
+      @Override
+      public byte[] getRawValue() throws IOException {
+        return rawValue;
+      }
+
+      @Override
+      public String toString() {
+        return "(key=" + key + ", value=" + value + ")";
+      }
+
+      @Override
+      public boolean equals(Object obj) {
+        if (!(obj instanceof KeyValue)) {
+          return false;
+        }
+        KeyValue kv = (KeyValue) obj;
+        try {
+          return getKey().equals(kv.getKey()) && getValue().equals(kv.getValue());
+        } catch (IOException e) {
+          throw new RuntimeException(e);
+        }
+      }
+
+      @Override
+      public int hashCode() {
+        return Objects.hash(getKey(), getValue());
+      }
+    };
+  }
+
+
   /** A {@link TableIterator} to iterate {@link KeyValue}s. */
   interface KeyValueIterator<KEY, VALUE>
       extends TableIterator<KEY, KeyValue<KEY, VALUE>> {
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
index 539bf8a29c4..9609b5bfc28 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
@@ -573,6 +573,14 @@ public KEY getKey() throws IOException {
     public VALUE getValue() throws IOException {
       return decodeValue(rawKeyValue.getValue());
     }
+
+    public byte[] getRawKey() throws IOException {
+      return rawKeyValue.getKey();
+    }
+
+    public byte[] getRawValue() throws IOException {
+      return rawKeyValue.getValue();
+    }
   }
 
   RawIterator<CodecBuffer> newCodecBufferTableIterator(
@@ -597,9 +605,11 @@ public CodecBuffer get() {
       @Override
       KeyValue<KEY, VALUE> convert(KeyValue<CodecBuffer> raw)
           throws IOException {
-        final KEY key = keyCodec.fromCodecBuffer(raw.getKey());
-        final VALUE value = valueCodec.fromCodecBuffer(raw.getValue());
-        return Table.newKeyValue(key, value);
+        CodecBuffer keyCodecBuffer = raw.getKey();
+        final KEY key = keyCodec.fromCodecBuffer(keyCodecBuffer);
+        CodecBuffer valueCodecBuffer = raw.getValue();
+        final VALUE value = valueCodec.fromCodecBuffer(valueCodecBuffer);
+        return Table.newKeyValue(key, value, keyCodecBuffer.getArray(), valueCodecBuffer.getArray());
       }
     };
   }
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
index e274d822b63..0a1fd9f681c 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java
@@ -410,13 +410,6 @@ private OMConfigKeys() {
   public static final String
       OZONE_SNAPSHOT_DIRECTORY_SERVICE_TIMEOUT_DEFAULT = "300s";
 
-  public static final String OZONE_PATH_DELETING_LIMIT_PER_TASK =
-      "ozone.path.deleting.limit.per.task";
-  // default is 6000 taking account of 32MB buffer size, and assuming
-  // 4KB size (considering acls, key/file name, and other meata)  * 6000
-  // resulting 24MB
-  public static final int OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT = 6000;
-
   public static final String OZONE_THREAD_NUMBER_DIR_DELETION =
       "ozone.thread.number.dir.deletion";
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java
index 6cced078488..f31982e0f0c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java
@@ -90,7 +90,6 @@
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
 import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.anyInt;
 import static org.mockito.ArgumentMatchers.anyList;
 import static org.mockito.ArgumentMatchers.anyLong;
 import static org.mockito.ArgumentMatchers.eq;
@@ -116,7 +115,6 @@ public class TestDirectoryDeletingServiceWithFSO {
   public static void init() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.setInt(OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL, 2000);
-    conf.setInt(OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK, 5);
     conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100,
         TimeUnit.MILLISECONDS);
     conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_INTERVAL, 1000, TimeUnit.MILLISECONDS);
@@ -155,7 +153,7 @@ public static void teardown() {
   }
 
   @AfterEach
-  public void cleanup() {
+  public void cleanup() throws InterruptedException, TimeoutException {
     assertDoesNotThrow(() -> {
       Path root = new Path("/");
       FileStatus[] fileStatuses = fs.listStatus(root);
@@ -273,8 +271,6 @@ public void testDeleteWithLargeSubPathsThanBatchSize() throws Exception {
     assertTableRowCount(dirTable, 1);
 
     assertSubPathsCount(dirDeletingService::getMovedFilesCount, 15);
-    // 15 subDir + 3 parentDir
-    assertSubPathsCount(dirDeletingService::getMovedDirsCount, 18);
     assertSubPathsCount(dirDeletingService::getDeletedDirsCount, 19);
 
     assertEquals(15, metrics.getNumSubFilesSentForPurge());
@@ -335,7 +331,7 @@ public void testDeleteWithMultiLevels() throws Exception {
     assertTableRowCount(dirTable, 0);
 
     assertSubPathsCount(dirDeletingService::getMovedFilesCount, 3);
-    assertSubPathsCount(dirDeletingService::getMovedDirsCount, 2);
+    assertSubPathsCount(dirDeletingService::getMovedDirsCount, 0);
     assertSubPathsCount(dirDeletingService::getDeletedDirsCount, 5);
     assertEquals(5, metrics.getNumDirsSentForPurge());
     assertEquals(5, metrics.getNumDirsPurged());
@@ -431,7 +427,8 @@ public void testDeleteWithMultiLevelsBlockDoubleBuffer() throws Exception {
     omDoubleBuffer.stopDaemon();
 
     OzoneVolume volume = client.getObjectStore().getVolume(volumeName);
-    OzoneBucket bucket = volume.getBucket(bucketName);    long volumeId = metadataManager.getVolumeId(volumeName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+    long volumeId = metadataManager.getVolumeId(volumeName);
 
     // manually delete dir and add to deleted table. namespace count occupied "1" as manual deletion do not reduce
     long bucketId = metadataManager.getBucketId(volumeName, bucketName);
@@ -629,13 +626,14 @@ public void testAOSKeyDeletingWithSnapshotCreateParallelExecution()
     assertTableRowCount(deletedDirTable, initialDeletedCount + 1);
     assertTableRowCount(renameTable, initialRenameCount + 1);
     Mockito.doAnswer(i -> {
-      List<OzoneManagerProtocolProtos.PurgePathRequest> purgePathRequestList = i.getArgument(5);
+      List<OzoneManagerProtocolProtos.PurgePathRequest> purgePathRequestList = i.getArgument(4);
       for (OzoneManagerProtocolProtos.PurgePathRequest purgeRequest : purgePathRequestList) {
         Assertions.assertNotEquals(deletePathKey, purgeRequest.getDeletedDir());
       }
-      return i.callRealMethod();
-    }).when(service).optimizeDirDeletesAndSubmitRequest(anyLong(), anyLong(), anyLong(),
-        anyLong(), anyList(), anyList(), eq(null), anyLong(), anyInt(), Mockito.any(), any(), anyLong());
+      return null;
+    }).when(service).optimizeDirDeletesAndSubmitRequest(anyLong(), anyLong(),
+        anyLong(), anyList(), anyList(), eq(null), anyLong(), anyLong(), Mockito.any(), any(),
+        anyLong());
 
     Mockito.doAnswer(i -> {
       store.createSnapshot(testVolumeName, testBucketName, snap2);
@@ -783,6 +781,7 @@ public void testDirDeletedTableCleanUpForSnapshot() throws Exception {
     assertSubPathsCount(dirDeletingService::getDeletedDirsCount, 0);
 
     // Manual cleanup deletedDirTable for next tests
+    client.getObjectStore().deleteSnapshot(volumeName, bucketName, "snap1");
     cleanupTables();
   }
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java
index 4a3bd85aa31..84d949d3945 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedDDSWithFSO.java
@@ -85,7 +85,6 @@ public class TestRootedDDSWithFSO {
   public static void init() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.setInt(OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL, 1);
-    conf.setInt(OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK, 5);
     conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100,
         TimeUnit.MILLISECONDS);
     conf.setBoolean(OZONE_ACL_ENABLED, true);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java
index 9bafe148aee..5575b3dc6d9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestSnapshotDeletingServiceIntegrationTest.java
@@ -134,7 +134,6 @@ public void setup() throws Exception {
     conf.setTimeDuration(OZONE_SNAPSHOT_DELETING_SERVICE_TIMEOUT,
         10000, TimeUnit.MILLISECONDS);
     conf.setInt(OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL, 500);
-    conf.setInt(OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK, 5);
     conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 500,
         TimeUnit.MILLISECONDS);
     conf.setBoolean(OZONE_ACL_ENABLED, true);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java
index 80f06b2ef54..359c6d45e1b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconInsightsForDeletedDirectories.java
@@ -52,6 +52,8 @@
 
 import javax.ws.rs.core.Response;
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicLong;
@@ -60,7 +62,6 @@
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK;
 import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -86,7 +87,6 @@ public class TestReconInsightsForDeletedDirectories {
   public static void init() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     conf.setInt(OZONE_DIR_DELETING_SERVICE_INTERVAL, 1000000);
-    conf.setInt(OZONE_PATH_DELETING_LIMIT_PER_TASK, 0);
     conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 10000000,
         TimeUnit.MILLISECONDS);
     conf.setBoolean(OZONE_ACL_ENABLED, true);
@@ -124,7 +124,7 @@ public static void teardown() {
   }
 
   @AfterEach
-  public void cleanup() {
+  public void cleanup() throws IOException {
     assertDoesNotThrow(() -> {
       Path root = new Path("/");
       FileStatus[] fileStatuses = fs.listStatus(root);
@@ -417,24 +417,31 @@ private void cleanupTables() throws IOException {
     OMMetadataManager metadataManager =
         cluster.getOzoneManager().getMetadataManager();
 
-    try (TableIterator it = metadataManager.getDeletedDirTable()
-        .iterator()) {
-      removeAllFromDB(it);
+    Table deletedDirTable =
+        metadataManager.getDeletedDirTable();
+    try (TableIterator> it = deletedDirTable.iterator()) {
+      removeAllFromDB(it, deletedDirTable);
     }
-    try (TableIterator it = metadataManager.getFileTable().iterator()) {
-      removeAllFromDB(it);
+    Table fileTable = metadataManager.getFileTable();
+    try (TableIterator> it = fileTable.iterator()) {
+      removeAllFromDB(it, fileTable);
     }
-    try (TableIterator it = metadataManager.getDirectoryTable()
-        .iterator()) {
-      removeAllFromDB(it);
+    Table directoryTable =
+        metadataManager.getDirectoryTable();
+    try (TableIterator> it = directoryTable.iterator()) {
+      removeAllFromDB(it, directoryTable);
     }
   }
 
-  private static void removeAllFromDB(TableIterator iterator)
-      throws IOException {
+  private static void removeAllFromDB(
+      TableIterator> iterator,
+      Table table) throws IOException {
+    List<String> keysToDelete = new ArrayList<>();
     while (iterator.hasNext()) {
-      iterator.next();
-      iterator.removeFromDB();
+      keysToDelete.add(iterator.next().getKey());
+    }
+    for (String keyToDelete : keysToDelete) {
+      table.delete(keyToDelete);
     }
   }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DeleteKeysResult.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DeleteKeysResult.java
new file mode 100644
index 00000000000..171525c149b
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DeleteKeysResult.java
@@ -0,0 +1,53 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om;
+
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+
+import java.util.List;
+
+/**
+ * Used in {@link org.apache.hadoop.ozone.om.service.DirectoryDeletingService}
+ * to capture the result of each delete task.
+ */
+public class DeleteKeysResult {
+
+  private List<OmKeyInfo> keysToDelete;
+  private long consumedSize;
+
+  private boolean processedKeys;
+
+  public DeleteKeysResult(List<OmKeyInfo> keysToDelete,
+      long consumedSize, boolean processedKeys) {
+    this.keysToDelete = keysToDelete;
+    this.consumedSize = consumedSize;
+    this.processedKeys = processedKeys;
+  }
+
+  public List<OmKeyInfo> getKeysToDelete() {
+    return keysToDelete;
+  }
+
+  public long getConsumedSize() {
+    return consumedSize;
+  }
+
+  public boolean isProcessedKeys() {
+    return processedKeys;
+  }
+}
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
index 9f6d8b81c10..db3d47dfcdb 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java
@@ -277,23 +277,21 @@ default List> getDeletedDirEntries(String volu
    * Returns all sub directories under the given parent directory.
    *
    * @param parentInfo
-   * @param numEntries
    * @return list of dirs
    * @throws IOException
    */
-  List<OmKeyInfo> getPendingDeletionSubDirs(long volumeId, long bucketId,
-      OmKeyInfo parentInfo, long numEntries) throws IOException;
+  DeleteKeysResult getPendingDeletionSubDirs(long volumeId, long bucketId,
+      OmKeyInfo parentInfo, long remainingBufLimit) throws IOException;
 
   /**
    * Returns all sub files under the given parent directory.
    *
    * @param parentInfo
-   * @param numEntries
    * @return list of files
    * @throws IOException
    */
-  List<OmKeyInfo> getPendingDeletionSubFiles(long volumeId,
-      long bucketId, OmKeyInfo parentInfo, long numEntries)
+  DeleteKeysResult getPendingDeletionSubFiles(long volumeId,
+      long bucketId, OmKeyInfo parentInfo, long remainingBufLimit)
           throws IOException;
 
   /**
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 8e3bbb47c3c..4fe509d7e9c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -2072,8 +2072,8 @@ public Table.KeyValue getPendingDeletionDir()
   }
 
   @Override
-  public List<OmKeyInfo> getPendingDeletionSubDirs(long volumeId, long bucketId,
-      OmKeyInfo parentInfo, long numEntries) throws IOException {
+  public DeleteKeysResult getPendingDeletionSubDirs(long volumeId, long bucketId,
+      OmKeyInfo parentInfo, long remainingBufLimit) throws IOException {
     String seekDirInDB = metadataManager.getOzonePathKey(volumeId, bucketId,
         parentInfo.getObjectID(), "");
     long countEntries = 0;
@@ -2082,31 +2082,38 @@ public List getPendingDeletionSubDirs(long volumeId, long bucketId,
     try (TableIterator<String, ? extends Table.KeyValue<String, OmDirectoryInfo>>
         iterator = dirTable.iterator()) {
-      return gatherSubDirsWithIterator(parentInfo, numEntries,
-          seekDirInDB, countEntries, iterator);
+      return gatherSubDirsWithIterator(parentInfo,
+          seekDirInDB, countEntries, iterator, remainingBufLimit);
     }
 
   }
 
-  private List<OmKeyInfo> gatherSubDirsWithIterator(OmKeyInfo parentInfo,
-      long numEntries, String seekDirInDB,
+  private DeleteKeysResult gatherSubDirsWithIterator(OmKeyInfo parentInfo,
+       String seekDirInDB,
       long countEntries,
       TableIterator> iterator)
+          ? extends Table.KeyValue> iterator, long remainingBufLimit)
       throws IOException {
     List<OmKeyInfo> directories = new ArrayList<>();
     iterator.seek(seekDirInDB);
+    long consumedSize = 0;
+    boolean processedSubDirs = false;
 
-    while (iterator.hasNext() && numEntries - countEntries > 0) {
+    while (iterator.hasNext() && remainingBufLimit > 0) {
       Table.KeyValue<String, OmDirectoryInfo> entry = iterator.next();
       OmDirectoryInfo dirInfo = entry.getValue();
+      long objectSerializedSize = entry.getRawValue().length;
       if (!OMFileRequest.isImmediateChild(dirInfo.getParentObjectID(),
           parentInfo.getObjectID())) {
+        processedSubDirs = true;
         break;
       }
       if (!metadataManager.getDirectoryTable().isExist(entry.getKey())) {
         continue;
       }
+      if (remainingBufLimit - objectSerializedSize < 0) {
+        break;
+      }
       String dirName = OMFileRequest.getAbsolutePath(parentInfo.getKeyName(),
           dirInfo.getName());
       OmKeyInfo omKeyInfo = OMFileRequest.getOmKeyInfo(
@@ -2114,19 +2121,24 @@ private List gatherSubDirsWithIterator(OmKeyInfo parentInfo,
           dirName);
       directories.add(omKeyInfo);
       countEntries++;
+      remainingBufLimit -= objectSerializedSize;
+      consumedSize += objectSerializedSize;
     }
 
-    return directories;
+    processedSubDirs = processedSubDirs || (!iterator.hasNext());
+
+    return new DeleteKeysResult(directories, consumedSize, processedSubDirs);
   }
 
   @Override
-  public List<OmKeyInfo> getPendingDeletionSubFiles(long volumeId,
-      long bucketId, OmKeyInfo parentInfo, long numEntries)
+  public DeleteKeysResult getPendingDeletionSubFiles(long volumeId,
+      long bucketId, OmKeyInfo parentInfo, long remainingBufLimit)
           throws IOException {
     List<OmKeyInfo> files = new ArrayList<>();
     String seekFileInDB = metadataManager.getOzonePathKey(volumeId, bucketId,
         parentInfo.getObjectID(), "");
-    long countEntries = 0;
+    long consumedSize = 0;
+    boolean processedSubFiles = false;
 
     Table<String, OmKeyInfo> fileTable = metadataManager.getFileTable();
     try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
@@ -2134,27 +2146,34 @@ public List getPendingDeletionSubFiles(long volumeId,
 
       iterator.seek(seekFileInDB);
 
-      while (iterator.hasNext() && numEntries - countEntries > 0) {
+      while (iterator.hasNext() && remainingBufLimit > 0) {
         Table.KeyValue<String, OmKeyInfo> entry = iterator.next();
         OmKeyInfo fileInfo = entry.getValue();
+        long objectSerializedSize = entry.getRawValue().length;
         if (!OMFileRequest.isImmediateChild(fileInfo.getParentObjectID(),
             parentInfo.getObjectID())) {
+          processedSubFiles = true;
           break;
         }
         if (!metadataManager.getFileTable().isExist(entry.getKey())) {
           continue;
         }
+        if (remainingBufLimit - objectSerializedSize < 0) {
+          break;
+        }
         fileInfo.setFileName(fileInfo.getKeyName());
         String fullKeyPath = OMFileRequest.getAbsolutePath(
             parentInfo.getKeyName(), fileInfo.getKeyName());
         fileInfo.setKeyName(fullKeyPath);
 
         files.add(fileInfo);
-        countEntries++;
+        remainingBufLimit -= objectSerializedSize;
+        consumedSize += objectSerializedSize;
       }
+      processedSubFiles = processedSubFiles || (!iterator.hasNext());
     }
 
-    return files;
+    return new DeleteKeysResult(files, consumedSize, processedSubFiles);
   }
 
   public boolean isBucketFSOptimized(String volName, String buckName)
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
index de4241b7ac4..394ae021005 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java
@@ -243,7 +243,6 @@ public class OMMetrics implements OmMetadataReaderMetrics {
   private @Metric MutableCounterLong ecKeyCreateFailsTotal;
   private @Metric MutableCounterLong ecBucketCreateTotal;
   private @Metric MutableCounterLong ecBucketCreateFailsTotal;
-
   private final DBCheckpointMetrics dbCheckpointMetrics;
 
   public OMMetrics() {
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java
index a3d7ccb6618..6369d708a2c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/AbstractKeyDeletingService.java
@@ -30,6 +30,7 @@
 import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.ozone.lock.BootstrapStateHandler;
 import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
+import org.apache.hadoop.ozone.om.DeleteKeysResult;
 import org.apache.hadoop.ozone.om.DeletingServiceMetrics;
 import org.apache.hadoop.ozone.om.KeyManager;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
@@ -348,9 +349,9 @@ private OzoneManagerProtocolProtos.PurgePathRequest wrapPurgeRequest(
   }
 
   protected PurgePathRequest prepareDeleteDirRequest(
-      long remainNum, OmKeyInfo pendingDeletedDirInfo, String delDirName,
+      OmKeyInfo pendingDeletedDirInfo, String delDirName,
       List<Pair<String, OmKeyInfo>> subDirList,
-      KeyManager keyManager) throws IOException {
+      KeyManager keyManager, long remainingBufLimit) throws IOException {
     // step-0: Get one pending deleted directory
     if (LOG.isDebugEnabled()) {
       LOG.debug("Pending deleted dir name: {}",
@@ -362,10 +363,11 @@ protected PurgePathRequest prepareDeleteDirRequest(
     final long bucketId = Long.parseLong(keys[2]);
 
     // step-1: get all sub directories under the deletedDir
-    List<OmKeyInfo> subDirs = keyManager
-        .getPendingDeletionSubDirs(volumeId, bucketId,
-            pendingDeletedDirInfo, remainNum);
-    remainNum = remainNum - subDirs.size();
+    DeleteKeysResult subDirDeleteResult =
+        keyManager.getPendingDeletionSubDirs(volumeId, bucketId,
+            pendingDeletedDirInfo, remainingBufLimit);
+    List<OmKeyInfo> subDirs = subDirDeleteResult.getKeysToDelete();
+    remainingBufLimit -= subDirDeleteResult.getConsumedSize();
 
     OMMetadataManager omMetadataManager = keyManager.getMetadataManager();
     for (OmKeyInfo dirInfo : subDirs) {
@@ -378,10 +380,10 @@ protected PurgePathRequest prepareDeleteDirRequest(
     }
 
     // step-2: get all sub files under the deletedDir
-    List<OmKeyInfo> subFiles = keyManager
-        .getPendingDeletionSubFiles(volumeId, bucketId,
-            pendingDeletedDirInfo, remainNum);
-    remainNum = remainNum - subFiles.size();
+    DeleteKeysResult subFileDeleteResult =
+        keyManager.getPendingDeletionSubFiles(volumeId, bucketId,
+            pendingDeletedDirInfo, remainingBufLimit);
+    List<OmKeyInfo> subFiles = subFileDeleteResult.getKeysToDelete();
 
     if (LOG.isDebugEnabled()) {
       for (OmKeyInfo fileInfo : subFiles) {
@@ -389,50 +391,39 @@ protected PurgePathRequest prepareDeleteDirRequest(
       }
     }
 
-    // step-3: Since there is a boundary condition of 'numEntries' in
-    // each batch, check whether the sub paths count reached batch size
-    // limit. If count reached limit then there can be some more child
-    // paths to be visited and will keep the parent deleted directory
-    // for one more pass.
-    String purgeDeletedDir = remainNum > 0 ? delDirName : null;
+    // step-3: If both sub-dirs and sub-files are exhausted under a parent
+    // directory, only then delete the parent.
+    String purgeDeletedDir = subDirDeleteResult.isProcessedKeys() &&
+        subFileDeleteResult.isProcessedKeys() ? delDirName :  null;
     return wrapPurgeRequest(volumeId, bucketId,
         purgeDeletedDir, subFiles, subDirs);
   }
 
   @SuppressWarnings("checkstyle:ParameterNumber")
-  public long optimizeDirDeletesAndSubmitRequest(long remainNum,
+  public void optimizeDirDeletesAndSubmitRequest(
       long dirNum, long subDirNum, long subFileNum,
       List<Pair<String, OmKeyInfo>> allSubDirList,
       List<PurgePathRequest> purgePathRequestList,
       String snapTableKey, long startTime,
-      int remainingBufLimit, KeyManager keyManager,
+      long remainingBufLimit, KeyManager keyManager,
       UUID expectedPreviousSnapshotId, long rnCnt) {
 
-    long limit = remainNum;
     // Optimization to handle delete sub-dir and keys to remove quickly
     // This case will be useful to handle when depth of directory is high
     int subdirDelNum = 0;
     int subDirRecursiveCnt = 0;
     int consumedSize = 0;
-    while (remainNum > 0 && subDirRecursiveCnt < allSubDirList.size()) {
+    while (subDirRecursiveCnt < allSubDirList.size() && remainingBufLimit > 0) {
       try {
         Pair stringOmKeyInfoPair
             = allSubDirList.get(subDirRecursiveCnt);
         PurgePathRequest request = prepareDeleteDirRequest(
-            remainNum, stringOmKeyInfoPair.getValue(),
-            stringOmKeyInfoPair.getKey(), allSubDirList,
-            keyManager);
-        if (isBufferLimitCrossed(remainingBufLimit, consumedSize,
-            request.getSerializedSize())) {
-          // ignore further add request
-          break;
-        }
+            stringOmKeyInfoPair.getValue(),
+            stringOmKeyInfoPair.getKey(), allSubDirList, keyManager,
+            remainingBufLimit);
         consumedSize += request.getSerializedSize();
+        remainingBufLimit -= consumedSize;
         purgePathRequestList.add(request);
-        // reduce remain count for self, sub-files, and sub-directories
-        remainNum = remainNum - 1;
-        remainNum = remainNum - request.getDeletedSubFilesCount();
-        remainNum = remainNum - request.getMarkDeletedSubDirsCount();
         // Count up the purgeDeletedDir, subDirs and subFiles
         if (request.getDeletedDir() != null
             && !request.getDeletedDir().isEmpty()) {
@@ -461,13 +452,12 @@ public long optimizeDirDeletesAndSubmitRequest(long remainNum,
       LOG.info("Number of dirs deleted: {}, Number of sub-dir " +
               "deleted: {}, Number of sub-files moved:" +
               " {} to DeletedTable, Number of sub-dirs moved {} to " +
-              "DeletedDirectoryTable, limit per iteration: {}, iteration elapsed: {}ms, " +
+              "DeletedDirectoryTable, iteration elapsed: {}ms, " +
               " totalRunCount: {}",
-          dirNum, subdirDelNum, subFileNum, (subDirNum - subdirDelNum), limit,
+          dirNum, subdirDelNum, subFileNum, (subDirNum - subdirDelNum),
           timeTakenInIteration, rnCnt);
       metrics.incrementDirectoryDeletionTotalMetrics(dirNum + subdirDelNum, subDirNum, subFileNum);
     }
-    return remainNum;
   }
 
   /**
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java
index 05555439acf..1828ee73c08 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java
@@ -52,9 +52,6 @@
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT;
-
 /**
  * This is a background service to delete orphan directories and its
  * sub paths(sub-dirs and sub-files).
@@ -80,11 +77,7 @@ public class DirectoryDeletingService extends AbstractKeyDeletingService {
   // from parent directory info from deleted directory table concurrently
   // and send deletion requests.
   private final int dirDeletingCorePoolSize;
-  private static final int MIN_ERR_LIMIT_PER_TASK = 1000;
-
-  // Number of items(dirs/files) to be batched in an iteration.
-  private final long pathLimitPerTask;
-  private final int ratisByteLimit;
+  private int ratisByteLimit;
   private final AtomicBoolean suspended;
   private AtomicBoolean isRunningOnAOS;
 
@@ -97,9 +90,6 @@ public DirectoryDeletingService(long interval, TimeUnit unit,
       OzoneConfiguration configuration, int dirDeletingServiceCorePoolSize) {
     super(DirectoryDeletingService.class.getSimpleName(), interval, unit,
         dirDeletingServiceCorePoolSize, serviceTimeout, ozoneManager, null);
-    this.pathLimitPerTask = configuration
-        .getInt(OZONE_PATH_DELETING_LIMIT_PER_TASK,
-            OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT);
     int limit = (int) configuration.getStorageSize(
         OMConfigKeys.OZONE_OM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT,
         OMConfigKeys.OZONE_OM_RATIS_LOG_APPENDER_QUEUE_BYTE_LIMIT_DEFAULT,
@@ -145,6 +135,10 @@ public void resume() {
     suspended.set(false);
   }
 
+  public void setRatisByteLimit(int ratisByteLimit) {
+    this.ratisByteLimit = ratisByteLimit;
+  }
+
   @Override
   public BackgroundTaskQueue getTasks() {
     BackgroundTaskQueue queue = new BackgroundTaskQueue();
@@ -221,11 +215,11 @@ public BackgroundTaskResult call() {
           long dirNum = 0L;
           long subDirNum = 0L;
           long subFileNum = 0L;
-          long remainNum = pathLimitPerTask;
+          long remainingBufLimit = ratisByteLimit;
           int consumedSize = 0;
           List purgePathRequestList = new ArrayList<>();
           List> allSubDirList =
-              new ArrayList<>((int) remainNum);
+              new ArrayList<>();
 
           Table.KeyValue pendingDeletedDirInfo;
           // This is to avoid race condition b/w purge request and snapshot chain updation. For AOS taking the global
@@ -236,7 +230,7 @@ public BackgroundTaskResult call() {
                     .getLatestGlobalSnapshotId();
 
             long startTime = Time.monotonicNow();
-            while (remainNum > 0) {
+            while (remainingBufLimit > 0) {
               pendingDeletedDirInfo = getPendingDeletedDirInfo();
               if (pendingDeletedDirInfo == null) {
                 break;
@@ -247,31 +241,14 @@ public BackgroundTaskResult call() {
                 continue;
               }
 
-              PurgePathRequest request = prepareDeleteDirRequest(remainNum,
+              PurgePathRequest request = prepareDeleteDirRequest(
                   pendingDeletedDirInfo.getValue(),
                   pendingDeletedDirInfo.getKey(), allSubDirList,
-                  getOzoneManager().getKeyManager());
-              if (isBufferLimitCrossed(ratisByteLimit, consumedSize,
-                  request.getSerializedSize())) {
-                if (purgePathRequestList.size() != 0) {
-                  // if message buffer reaches max limit, avoid sending further
-                  remainNum = 0;
-                  break;
-                }
-                // if directory itself is having a lot of keys / files,
-                // reduce capacity to minimum level
-                remainNum = MIN_ERR_LIMIT_PER_TASK;
-                request = prepareDeleteDirRequest(remainNum,
-                    pendingDeletedDirInfo.getValue(),
-                    pendingDeletedDirInfo.getKey(), allSubDirList,
-                    getOzoneManager().getKeyManager());
-              }
+                  getOzoneManager().getKeyManager(), remainingBufLimit);
+
               consumedSize += request.getSerializedSize();
+              remainingBufLimit -= consumedSize;
               purgePathRequestList.add(request);
-              // reduce remain count for self, sub-files, and sub-directories
-              remainNum = remainNum - 1;
-              remainNum = remainNum - request.getDeletedSubFilesCount();
-              remainNum = remainNum - request.getMarkDeletedSubDirsCount();
               // Count up the purgeDeletedDir, subDirs and subFiles
               if (request.getDeletedDir() != null && !request.getDeletedDir()
                   .isEmpty()) {
@@ -280,9 +257,10 @@ public BackgroundTaskResult call() {
               subDirNum += request.getMarkDeletedSubDirsCount();
               subFileNum += request.getDeletedSubFilesCount();
             }
-            optimizeDirDeletesAndSubmitRequest(remainNum, dirNum, subDirNum,
+
+            optimizeDirDeletesAndSubmitRequest(dirNum, subDirNum,
                 subFileNum, allSubDirList, purgePathRequestList, null,
-                startTime, ratisByteLimit - consumedSize,
+                startTime, remainingBufLimit,
                 getOzoneManager().getKeyManager(), expectedPreviousSnapshotId,
                 rnCnt);
 
diff --git a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java
index 681b24b8e42..de5965d1c21 100644
--- a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java
+++ b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestDirectoryDeletingService.java
@@ -50,10 +50,6 @@
 
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_PER_TASK;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_THREAD_NUMBER_DIR_DELETION_DEFAULT;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_THREAD_NUMBER_DIR_DELETION;
 import static org.assertj.core.api.Assertions.assertThat;
 import static org.junit.jupiter.api.Assertions.assertTrue;
 
@@ -161,64 +157,8 @@ public void testDeleteDirectoryCrossingSizeLimit() throws Exception {
         (DirectoryDeletingService) keyManager.getDirDeletingService();
     GenericTestUtils.waitFor(
         () -> dirDeletingService.getMovedFilesCount() >= 1000
-            && dirDeletingService.getMovedFilesCount() < 2000,
+            && dirDeletingService.getMovedFilesCount() <= 2000,
         500, 60000);
     assertThat(dirDeletingService.getRunCount().get()).isGreaterThanOrEqualTo(1);
   }
-
-  @Test
-  public void testDeleteDirectoryFlatDirsHavingNoChilds() throws Exception {
-    OzoneConfiguration conf = createConfAndInitValues();
-    OmTestManagers omTestManagers
-        = new OmTestManagers(conf);
-    KeyManager keyManager = omTestManagers.getKeyManager();
-    writeClient = omTestManagers.getWriteClient();
-    om = omTestManagers.getOzoneManager();
-
-    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
-        om.getMetadataManager(), BucketLayout.FILE_SYSTEM_OPTIMIZED);
-    String bucketKey = om.getMetadataManager().getBucketKey(volumeName, bucketName);
-    OmBucketInfo bucketInfo = om.getMetadataManager().getBucketTable().get(bucketKey);
-
-    int dirCreatesCount = OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT * 2 + 100;
-    long parentId = 1;
-    OmDirectoryInfo baseDir = new OmDirectoryInfo.Builder().setName("dir_base")
-        .setCreationTime(Time.now()).setModificationTime(Time.now())
-        .setObjectID(parentId).setParentObjectID(bucketInfo.getObjectID())
-        .setUpdateID(0).build();
-    OMRequestTestUtils.addDirKeyToDirTable(true, baseDir, volumeName, bucketName,
-        1L, om.getMetadataManager());
-    for (int i = 0; i < dirCreatesCount; ++i) {
-      OmDirectoryInfo dir1 = new OmDirectoryInfo.Builder().setName("dir" + i)
-          .setCreationTime(Time.now()).setModificationTime(Time.now()).setParentObjectID(parentId)
-          .setObjectID(i + 100).setUpdateID(i).build();
-      OMRequestTestUtils.addDirKeyToDirTable(true, dir1, volumeName, bucketName,
-          1L, om.getMetadataManager());
-    }
-
-    DirectoryDeletingService dirDeletingService = keyManager.getDirDeletingService();
-    long[] delDirCnt = new long[2];
-    delDirCnt[0] = dirDeletingService.getDeletedDirsCount();
-
-    OmKeyArgs delArgs = new OmKeyArgs.Builder()
-        .setVolumeName(volumeName).setBucketName(bucketName).setKeyName("dir_base")
-        .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE))
-        .setDataSize(0).setRecursive(true).build();
-    writeClient.deleteKey(delArgs);
-    int pathDelLimit = conf.getInt(OZONE_PATH_DELETING_LIMIT_PER_TASK,
-        OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT);
-    int numThread = conf.getInt(OZONE_THREAD_NUMBER_DIR_DELETION,
-        OZONE_THREAD_NUMBER_DIR_DELETION_DEFAULT);
-
-    // check if difference between each run should not cross the directory deletion limit
-    // and wait till all dir is removed
-    GenericTestUtils.waitFor(() -> {
-      delDirCnt[1] = dirDeletingService.getDeletedDirsCount();
-      assertTrue(
-          delDirCnt[1] - delDirCnt[0] <= ((long) pathDelLimit * numThread),
-          "base: " + delDirCnt[0] + ", new: " + delDirCnt[1]);
-      delDirCnt[0] = delDirCnt[1];
-      return dirDeletingService.getDeletedDirsCount() >= dirCreatesCount;
-    }, 500, 300000);
-  }
 }

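Note: the DirectoryDeletingService changes above replace the per-task path-count limit
(ozone.path.deleting.limit.per.task) with a byte budget derived from the Ratis log
appender queue limit, tracked through the new DeleteKeysResult. The standalone sketch
below is not part of the patch and uses simplified stand-in names; it only illustrates
the accounting pattern: each entry's serialized size is charged against the remaining
budget, and the parent directory may be purged only once its children are fully drained.

    import java.nio.charset.StandardCharsets;
    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;

    final class BudgetedDeleteSketch {
      /** Simplified stand-in for DeleteKeysResult. */
      static final class Result {
        final List<String> keys;     // entries gathered in this pass
        final long consumedSize;     // serialized bytes charged to the budget
        final boolean processedAll;  // true only if nothing is left under the parent
        Result(List<String> keys, long consumedSize, boolean processedAll) {
          this.keys = keys;
          this.consumedSize = consumedSize;
          this.processedAll = processedAll;
        }
      }

      /** Gather entries until the byte budget runs out or the parent is exhausted. */
      static Result gather(Iterator<String> entries, long remainingBufLimit) {
        List<String> collected = new ArrayList<>();
        long consumed = 0;
        while (entries.hasNext() && remainingBufLimit > 0) {
          String entry = entries.next();
          long size = entry.getBytes(StandardCharsets.UTF_8).length;
          if (remainingBufLimit - size < 0) {
            break;  // entry would overflow the budget; pick it up in the next run
          }
          collected.add(entry);
          consumed += size;
          remainingBufLimit -= size;
        }
        // The parent directory may be purged only when no children remain.
        return new Result(collected, consumed, !entries.hasNext());
      }
    }
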
From bd02526cc1a3759c340e5c38363d268cd931f3bc Mon Sep 17 00:00:00 2001
From: Sadanand Shenoy 
Date: Mon, 3 Feb 2025 21:31:15 +0530
Subject: [PATCH 159/168] HDDS-12186. Avoid array allocation for table
 iterator. (#7797)

---
 .../apache/hadoop/hdds/utils/db/Table.java    | 19 +++++--------------
 .../hadoop/hdds/utils/db/TypedTable.java      |  8 +++-----
 .../hadoop/ozone/om/KeyManagerImpl.java       |  4 ++--
 3 files changed, 10 insertions(+), 21 deletions(-)

diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
index 0c435066b8e..f519bca5b10 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/Table.java
@@ -337,12 +337,8 @@ interface KeyValue {
 
     VALUE getValue() throws IOException;
 
-    default byte[] getRawKey() throws IOException {
-      return null;
-    }
-
-    default byte[] getRawValue() throws IOException {
-      return null;
+    default int getRawSize()  throws IOException {
+      return 0;
     }
   }
 
@@ -383,7 +379,7 @@ public int hashCode() {
     };
   }
 
-  static <K, V> KeyValue<K, V> newKeyValue(K key, V value, byte[] rawKey, byte[] rawValue) {
+  static <K, V> KeyValue<K, V> newKeyValue(K key, V value, int rawSize) {
     return new KeyValue() {
       @Override
       public K getKey() {
@@ -396,13 +392,8 @@ public V getValue() {
       }
 
       @Override
-      public byte[] getRawKey() throws IOException {
-        return rawKey;
-      }
-
-      @Override
-      public byte[] getRawValue() throws IOException {
-        return rawValue;
+      public int getRawSize() throws IOException {
+        return rawSize;
       }
 
       @Override
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
index 9609b5bfc28..d7bd554a0cd 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
@@ -605,11 +605,9 @@ public CodecBuffer get() {
       @Override
       KeyValue convert(KeyValue raw)
           throws IOException {
-        CodecBuffer keyCodecBuffer = raw.getKey();
-        final KEY key = keyCodec.fromCodecBuffer(keyCodecBuffer);
-        CodecBuffer valueCodecBuffer = raw.getValue();
-        final VALUE value = valueCodec.fromCodecBuffer(valueCodecBuffer);
-        return Table.newKeyValue(key, value, keyCodecBuffer.getArray(), valueCodecBuffer.getArray());
+        final KEY key = keyCodec.fromCodecBuffer(raw.getKey());
+        final VALUE value = valueCodec.fromCodecBuffer(raw.getValue());
+        return Table.newKeyValue(key, value, raw.getRawSize());
       }
     };
   }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 4fe509d7e9c..6bad74b6d42 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -2102,7 +2102,7 @@ private DeleteKeysResult gatherSubDirsWithIterator(OmKeyInfo parentInfo,
     while (iterator.hasNext() && remainingBufLimit > 0) {
       Table.KeyValue entry = iterator.next();
       OmDirectoryInfo dirInfo = entry.getValue();
-      long objectSerializedSize = entry.getRawValue().length;
+      long objectSerializedSize = entry.getRawSize();
       if (!OMFileRequest.isImmediateChild(dirInfo.getParentObjectID(),
           parentInfo.getObjectID())) {
         processedSubDirs = true;
@@ -2149,7 +2149,7 @@ public DeleteKeysResult getPendingDeletionSubFiles(long volumeId,
       while (iterator.hasNext() && remainingBufLimit > 0) {
         Table.KeyValue entry = iterator.next();
         OmKeyInfo fileInfo = entry.getValue();
-        long objectSerializedSize = entry.getRawValue().length;
+        long objectSerializedSize = entry.getRawSize();
         if (!OMFileRequest.isImmediateChild(fileInfo.getParentObjectID(),
             parentInfo.getObjectID())) {
           processedSubFiles = true;

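Note: HDDS-12186 above trades the raw key/value byte arrays on Table.KeyValue for a
single cached size, so callers that only need the serialized length (such as the
byte-budget accounting in KeyManagerImpl) no longer force an array copy per entry.
A minimal sketch of the resulting consumer pattern follows; KV is an assumed stand-in,
not the actual Ozone interface.

    import java.io.IOException;

    interface KV<K, V> {
      K getKey() throws IOException;
      V getValue() throws IOException;
      // Mirrors the patched default: 0 when the raw size is not tracked.
      default int getRawSize() throws IOException {
        return 0;
      }
    }

    final class RawSizeSketch {
      // Charge each entry's serialized size against a budget without
      // materializing the value's byte[] (previously getRawValue().length).
      static <K, V> long charge(Iterable<KV<K, V>> entries, long budget)
          throws IOException {
        for (KV<K, V> e : entries) {
          long size = e.getRawSize();
          if (budget - size < 0) {
            break;
          }
          budget -= size;
        }
        return budget;
      }
    }
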
From e79b19868541832ff7f1ffbaf810d99f17fa3ad1 Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com>
Date: Tue, 4 Feb 2025 07:28:54 +0100
Subject: [PATCH 160/168] HDDS-12186. (addendum) Avoid array allocation for
 table iterator (#7799)

---
 .../java/org/apache/hadoop/hdds/utils/db/TypedTable.java     | 3 ++-
 .../org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java   | 5 ++++-
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
index d7bd554a0cd..f144e2c03c5 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/TypedTable.java
@@ -605,9 +605,10 @@ public CodecBuffer get() {
       @Override
       KeyValue convert(KeyValue raw)
           throws IOException {
+        final int rawSize = raw.getValue().readableBytes();
         final KEY key = keyCodec.fromCodecBuffer(raw.getKey());
         final VALUE value = valueCodec.fromCodecBuffer(raw.getValue());
-        return Table.newKeyValue(key, value, raw.getRawSize());
+        return Table.newKeyValue(key, value, rawSize);
       }
     };
   }
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
index 8095c1cbb1f..2ef49660508 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
@@ -600,8 +600,11 @@ static void assertIterator(int expectedCount, String prefix,
     try (Table.KeyValueIterator i = table.iterator(prefix)) {
       int keyCount = 0;
       for (; i.hasNext(); keyCount++) {
+        Table.KeyValue<String, String> entry = i.next();
         assertEquals(prefix,
-            i.next().getKey().substring(0, PREFIX_LENGTH));
+            entry.getKey().substring(0, PREFIX_LENGTH));
+        assertEquals(entry.getValue().getBytes(StandardCharsets.UTF_8).length,
+            entry.getRawSize());
       }
       assertEquals(expectedCount, keyCount);
 

From cfe56dee0e85ed7cd1efa7edba72228668c2e292 Mon Sep 17 00:00:00 2001
From: Aryan Gupta <44232823+aryangupta1998@users.noreply.github.com>
Date: Tue, 4 Feb 2025 12:13:55 +0530
Subject: [PATCH 161/168] HDDS-11714. resetDeletedBlockRetryCount with --all
 may fail and can cause long db lock in large cluster. (#7665)

---
 .../hdds/scm/block/DeletedBlockLog.java       |  4 +-
 .../hdds/scm/block/DeletedBlockLogImpl.java   | 46 +++++++++++++++----
 .../hdds/scm/TestStorageContainerManager.java |  2 +-
 3 files changed, 40 insertions(+), 12 deletions(-)

diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
index 5ec68c78d74..5e5aa38df74 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java
@@ -53,11 +53,11 @@ DatanodeDeletedBlockTransactions getTransactions(
       throws IOException;
 
   /**
-   * Return the failed transactions in the log. A transaction is
+   * Return the failed transactions in batches in the log. A transaction is
    * considered to be failed if it has been sent more than MAX_RETRY limit
    * and its count is reset to -1.
    *
-   * @param count Maximum num of returned transactions, if < 0. return all.
+   * @param count Number of failed transactions to be returned.
    * @param startTxId The least transaction id to start with.
    * @return a list of failed deleted block transactions.
    * @throws IOException
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
index a83ce085dd8..cfcf485752e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
@@ -19,7 +19,6 @@
 
 import java.io.IOException;
 import java.time.Duration;
-import java.util.HashSet;
 import java.util.List;
 import java.util.UUID;
 import java.util.Set;
@@ -31,6 +30,7 @@
 import java.util.stream.Collectors;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Lists;
 import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus;
@@ -53,7 +53,6 @@
 import org.apache.hadoop.hdds.utils.db.Table;
 import org.apache.hadoop.hdds.utils.db.TableIterator;
 
-import com.google.common.collect.Lists;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY_DEFAULT;
 import static org.apache.hadoop.hdds.scm.block.SCMDeletedBlockTransactionStatusManager.SCMDeleteBlocksCommandStatusManager.CmdStatus;
@@ -176,19 +175,48 @@ public void incrementCount(List txIDs)
    */
   @Override
   public int resetCount(List<Long> txIDs) throws IOException {
-    lock.lock();
+    final int batchSize = 1000;
+    int totalProcessed = 0;
+
     try {
-      if (txIDs == null || txIDs.isEmpty()) {
-        txIDs = getFailedTransactions(LIST_ALL_FAILED_TRANSACTIONS, 0).stream()
-            .map(DeletedBlocksTransaction::getTxID)
-            .collect(Collectors.toList());
+      if (txIDs != null && !txIDs.isEmpty()) {
+        return resetRetryCount(txIDs);
       }
+
+      // If txIDs are null or empty, fetch all failed transactions in batches
+      long startTxId = 0;
+      List<DeletedBlocksTransaction> batch;
+
+      do {
+        // Fetch the batch of failed transactions
+        batch = getFailedTransactions(batchSize, startTxId);
+        if (batch.isEmpty()) {
+          break;
+        }
+
+        List<Long> batchTxIDs = batch.stream().map(DeletedBlocksTransaction::getTxID).collect(Collectors.toList());
+        totalProcessed += resetRetryCount(new ArrayList<>(batchTxIDs));
+        // Update startTxId to continue from the last processed transaction
+        startTxId = batch.get(batch.size() - 1).getTxID() + 1;
+      } while (!batch.isEmpty());
+
+    } catch (Exception e) {
+      throw new IOException("Error during transaction reset", e);
+    }
+    return totalProcessed;
+  }
+
+  private int resetRetryCount(List<Long> txIDs) throws IOException {
+    int totalProcessed;
+    lock.lock();
+    try {
       transactionStatusManager.resetRetryCount(txIDs);
-      return deletedBlockLogStateManager.resetRetryCountOfTransactionInDB(
-          new ArrayList<>(new HashSet<>(txIDs)));
+      totalProcessed = deletedBlockLogStateManager.resetRetryCountOfTransactionInDB(new ArrayList<>(
+          txIDs));
     } finally {
       lock.unlock();
     }
+    return totalProcessed;
   }
 
   private DeletedBlocksTransaction constructNewTransaction(
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
index c7e6e96284a..71f7a14ce83 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
@@ -297,7 +297,7 @@ private void testBlockDeletionTransactions(MiniOzoneCluster cluster) throws Exce
 
     // Verify a few TX gets created in the TX log.
     assertThat(delLog.getNumOfValidTransactions()).isGreaterThan(0);
-
+    
     // These blocks cannot be found in the container, skip deleting them
     // eventually these TX will success.
     GenericTestUtils.waitFor(() -> {

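Note: HDDS-11714 above avoids holding the deleted-block-log lock while every failed
transaction is loaded and reset in one shot; instead it pages through failed
transactions 1000 at a time and resets each batch under the lock separately. The
sketch below captures just that pattern; fetchBatch and resetBatch are assumed
stand-ins for getFailedTransactions and resetRetryCountOfTransactionInDB.

    import java.util.List;
    import java.util.concurrent.locks.ReentrantLock;
    import java.util.function.BiFunction;
    import java.util.function.ToIntFunction;

    final class BatchedResetSketch {
      private final ReentrantLock lock = new ReentrantLock();

      int resetAllFailed(BiFunction<Integer, Long, List<Long>> fetchBatch,
                         ToIntFunction<List<Long>> resetBatch) {
        final int batchSize = 1000;
        long startTxId = 0;
        int total = 0;
        List<Long> batch;
        do {
          // Fetch the next page of failed transaction ids outside the lock.
          batch = fetchBatch.apply(batchSize, startTxId);
          if (batch.isEmpty()) {
            break;
          }
          lock.lock();
          try {
            total += resetBatch.applyAsInt(batch);  // reset only this batch under the lock
          } finally {
            lock.unlock();
          }
          // Resume after the last processed transaction id.
          startTxId = batch.get(batch.size() - 1) + 1;
        } while (!batch.isEmpty());
        return total;
      }
    }
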
From 260434fe465de8e9dd922c3d292ba7d62843f63e Mon Sep 17 00:00:00 2001
From: "Doroszlai, Attila" <6454655+adoroszlai@users.noreply.github.com>
Date: Tue, 4 Feb 2025 22:48:37 +0100
Subject: [PATCH 162/168] HDDS-12183. Reuse cluster across safe test classes
 (#7793)

---
 .../hadoop/fs/ozone/TestOzoneFsHAURLs.java    |  51 ++-----
 .../contract/AbstractOzoneContractTest.java   |  62 +-------
 .../ozone/contract/TestOzoneContractFSO.java  |   4 +-
 .../contract/TestOzoneContractLegacy.java     |   4 +-
 .../hdds/scm/TestAllocateContainer.java       |  26 ++--
 .../hdds/scm/TestContainerReportWithKeys.java |  59 ++------
 .../hdds/scm/TestContainerSmallFile.java      |  32 ++--
 .../TestGetCommittedBlockLengthAndPutKey.java |  30 ++--
 .../hdds/scm/TestSCMNodeManagerMXBean.java    |  36 +----
 .../pipeline/TestPipelineManagerMXBean.java   |  25 +---
 .../apache/hadoop/ozone/TestCpuMetrics.java   |  18 +--
 .../ozone/TestGetClusterTreeInformation.java  |  33 ++--
 .../metrics/TestDatanodeQueueMetrics.java     |  51 +------
 .../ozone/freon/TestDNRPCLoadGenerator.java   |  56 ++-----
 .../hadoop/ozone/om/TestObjectStore.java      |  33 ++--
 .../ozone/om/TestObjectStoreWithFSO.java      |  42 ++----
 .../ozone/om/TestOmBlockVersioning.java       |  44 ++----
 .../om/TestOzoneManagerRestInterface.java     |  24 ++-
 .../hadoop/ozone/shell/TestScmAdminHA.java    |  41 ++---
 .../apache/ozone/test/ClusterForTests.java    |  96 ++++++++++++
 .../java/org/apache/ozone/test/HATests.java   |  83 +++++++++++
 .../org/apache/ozone/test/NonHATests.java     | 141 ++++++++++++++++++
 .../ozone/test/TestOzoneIntegrationHA.java    |  29 ++++
 .../ozone/test/TestOzoneIntegrationNonHA.java |  31 ++++
 .../org/apache/ozone/test/package-info.java   |  21 +++
 25 files changed, 569 insertions(+), 503 deletions(-)
 create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/ClusterForTests.java
 create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/HATests.java
 create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/NonHATests.java
 create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/TestOzoneIntegrationHA.java
 create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/TestOzoneIntegrationNonHA.java
 create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/package-info.java

diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java
index 4f14ede8fa5..a07ee5276ab 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java
@@ -23,33 +23,28 @@
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.ha.ConfUtils;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.OzoneManager;
-import org.apache.hadoop.ozone.om.helpers.BucketLayout;
-import org.apache.ozone.test.GenericTestUtils;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.ozone.test.GenericTestUtils;
+import org.apache.ozone.test.HATests;
 import org.apache.ratis.util.LifeCycle;
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.BeforeEach;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
 import org.junit.jupiter.api.Timeout;
-import org.junit.jupiter.api.io.TempDir;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.File;
 import java.util.Optional;
 import java.util.OptionalInt;
 
@@ -62,8 +57,9 @@
 /**
  * Test client-side URI handling with Ozone Manager HA.
  */
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
 @Timeout(300)
-public class TestOzoneFsHAURLs {
+public abstract class TestOzoneFsHAURLs implements HATests.TestCase {
 
   /**
     * Set a timeout for each test.
@@ -72,10 +68,9 @@ public class TestOzoneFsHAURLs {
       TestOzoneFsHAURLs.class);
 
   private OzoneConfiguration conf;
-  private static MiniOzoneHAClusterImpl cluster;
-  private static String omServiceId;
-  private static OzoneManager om;
-  private static int numOfOMs;
+  private MiniOzoneHAClusterImpl cluster;
+  private String omServiceId;
+  private OzoneManager om;
 
   private String volumeName;
   private String bucketName;
@@ -85,7 +80,7 @@ public class TestOzoneFsHAURLs {
       "fs." + OzoneConsts.OZONE_URI_SCHEME + ".impl";
   private static final String O3FS_IMPL_VALUE =
       "org.apache.hadoop.fs.ozone.OzoneFileSystem";
-  private static OzoneClient client;
+  private OzoneClient client;
 
   private static final String OFS_IMPL_KEY =
       "fs." + OzoneConsts.OZONE_OFS_URI_SCHEME + ".impl";
@@ -95,25 +90,10 @@ public class TestOzoneFsHAURLs {
 
 
   @BeforeAll
-  static void initClass(@TempDir File tempDir) throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    omServiceId = "om-service-test1";
-    numOfOMs = 3;
-    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempDir.getAbsolutePath());
-    conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 3);
-
-    conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT,
-        BucketLayout.LEGACY.name());
-    conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true);
-
-    // Start the cluster
-    MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf);
-    builder.setOMServiceId(omServiceId)
-        .setNumOfOzoneManagers(numOfOMs)
-        .setNumDatanodes(5);
-    cluster = builder.build();
-    cluster.waitForClusterToBeReady();
-    client = OzoneClientFactory.getRpcClient(omServiceId, conf);
+  void initClass() throws Exception {
+    cluster = cluster();
+    omServiceId = cluster.getOzoneManager().getOMServiceId();
+    client = cluster.newClient();
 
     om = cluster.getOzoneManager();
   }
@@ -149,11 +129,8 @@ public void init() throws Exception {
   }
 
   @AfterAll
-  public static void shutdown() {
+  void cleanup() {
     IOUtils.closeQuietly(client);
-    if (cluster != null) {
-      cluster.shutdown();
-    }
   }
 
   /**
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java
index bce96251873..9d47d7f41fa 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/AbstractOzoneContractTest.java
@@ -29,25 +29,18 @@
 import org.apache.hadoop.fs.contract.AbstractContractSeekTest;
 import org.apache.hadoop.fs.contract.AbstractContractUnbufferTest;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
-import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig;
-import org.apache.hadoop.hdds.utils.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.om.helpers.BucketLayout;
 import org.apache.hadoop.tools.contract.AbstractContractDistCpTest;
-import org.junit.jupiter.api.AfterAll;
-import org.junit.jupiter.api.BeforeAll;
+import org.apache.ozone.test.ClusterForTests;
 import org.junit.jupiter.api.Nested;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.TestInstance;
 
 import java.io.IOException;
-import java.time.Duration;
 
 import static org.apache.hadoop.fs.contract.ContractTestUtils.cleanup;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED;
 import static org.apache.hadoop.ozone.OzoneConsts.OZONE_OFS_URI_SCHEME;
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT;
 import static org.assertj.core.api.Assumptions.assumeThat;
@@ -62,67 +55,28 @@
  * but can tweak configuration by also overriding {@link #createOzoneConfig()}.
  */
 @TestInstance(TestInstance.Lifecycle.PER_CLASS)
-abstract class AbstractOzoneContractTest {
+abstract class AbstractOzoneContractTest extends ClusterForTests {
 
   private static final String CONTRACT_XML = "contract/ozone.xml";
 
-  private MiniOzoneCluster cluster;
-
   /**
    * This must be implemented by all subclasses.
    * @return the FS contract
    */
   abstract AbstractFSContract createOzoneContract(Configuration conf);
 
-  /**
-   * Creates the base configuration for contract tests.  This can be tweaked
-   * in subclasses by overriding {@link #createOzoneConfig()}.
-   */
-  protected static OzoneConfiguration createBaseConfiguration() {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    DatanodeRatisServerConfig ratisServerConfig =
-        conf.getObject(DatanodeRatisServerConfig.class);
-    ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
-    ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10));
-    conf.setFromObject(ratisServerConfig);
-
-    RatisClientConfig.RaftConfig raftClientConfig =
-        conf.getObject(RatisClientConfig.RaftConfig.class);
-    raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3));
-    raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10));
-    conf.setFromObject(raftClientConfig);
-
+  @Override
+  protected OzoneConfiguration createOzoneConfig() {
+    OzoneConfiguration conf = createBaseConfiguration();
     conf.addResource(CONTRACT_XML);
-
-    conf.setBoolean(OZONE_HBASE_ENHANCEMENTS_ALLOWED, true);
-    conf.setBoolean("ozone.client.hbase.enhancements.allowed", true);
-    conf.setBoolean(OZONE_FS_HSYNC_ENABLED, true);
-
     return conf;
   }
 
-  /**
-   * Hook method that allows tweaking the configuration.
-   */
-  OzoneConfiguration createOzoneConfig() {
-    return createBaseConfiguration();
-  }
-
-  MiniOzoneCluster getCluster() {
-    return cluster;
-  }
-
-  @BeforeAll
-  void setup() throws Exception {
-    cluster = MiniOzoneCluster.newBuilder(createOzoneConfig())
+  @Override
+  protected MiniOzoneCluster createCluster() throws Exception {
+    return MiniOzoneCluster.newBuilder(createOzoneConfig())
         .setNumDatanodes(5)
         .build();
-    cluster.waitForClusterToBeReady();
-  }
-
-  @AfterAll
-  void teardown() {
-    IOUtils.closeQuietly(cluster);
   }
 
   @Nested
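The per-class setup and teardown removed above move into the new ClusterForTests base class added later in this patch series. A condensed, illustrative sketch of that lifecycle follows; the class name here is deliberately different (ClusterForTestsSketch) and the generic bound is inferred, so treat it as a paraphrase of the patch rather than its verbatim content:

```java
import org.apache.hadoop.hdds.utils.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.TestInstance;

// Condensed sketch of the shared-cluster lifecycle; the real ClusterForTests
// class also carries the Ratis timeout and HBase/hsync settings removed above.
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
abstract class ClusterForTestsSketch<C extends MiniOzoneCluster> {

  private C cluster;

  /** Subclasses decide how the cluster is built (datanode count, HA, ...). */
  protected abstract C createCluster() throws Exception;

  protected C getCluster() {
    return cluster;
  }

  @BeforeAll
  void startCluster() throws Exception {
    cluster = createCluster();
    cluster.waitForClusterToBeReady();  // started once for the whole test class
  }

  @AfterAll
  void shutdownCluster() {
    IOUtils.closeQuietly(cluster);      // single teardown instead of one per test class
  }
}
```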
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractFSO.java
index b45e68d85eb..ab893ef5779 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractFSO.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractFSO.java
@@ -30,8 +30,8 @@
 class TestOzoneContractFSO extends AbstractOzoneContractTest {
 
   @Override
-  OzoneConfiguration createOzoneConfig() {
-    OzoneConfiguration conf = createBaseConfiguration();
+  protected OzoneConfiguration createOzoneConfig() {
+    OzoneConfiguration conf = super.createOzoneConfig();
     conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, FILE_SYSTEM_OPTIMIZED.name());
     return conf;
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractLegacy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractLegacy.java
index 97ced88fcde..c23cebd41bb 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractLegacy.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/TestOzoneContractLegacy.java
@@ -30,8 +30,8 @@
 class TestOzoneContractLegacy extends AbstractOzoneContractTest {
 
   @Override
-  OzoneConfiguration createOzoneConfig() {
-    OzoneConfiguration conf = createBaseConfiguration();
+  protected OzoneConfiguration createOzoneConfig() {
+    OzoneConfiguration conf = super.createOzoneConfig();
     conf.set(OZONE_DEFAULT_BUCKET_LAYOUT, LEGACY.name());
     return conf;
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestAllocateContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestAllocateContainer.java
index 2b64d397eae..94a5ed7c242 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestAllocateContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestAllocateContainer.java
@@ -21,43 +21,37 @@
 import static org.junit.jupiter.api.Assertions.assertThrows;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
+import org.apache.ozone.test.NonHATests;
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.BeforeAll;
 import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
 import org.junit.jupiter.api.Timeout;
 
 /**
  * Test allocate container calls.
  */
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
 @Timeout(300)
-public class TestAllocateContainer {
+public abstract class TestAllocateContainer implements NonHATests.TestCase {
 
-  private static MiniOzoneCluster cluster;
-  private static OzoneConfiguration conf;
-  private static StorageContainerLocationProtocolClientSideTranslatorPB
+  private OzoneConfiguration conf;
+  private StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
-  private static XceiverClientManager xceiverClientManager;
 
   @BeforeAll
-  public static void init() throws Exception {
-    conf = new OzoneConfiguration();
-    cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build();
-    cluster.waitForClusterToBeReady();
+  void init() throws Exception {
+    conf = cluster().getConf();
     storageContainerLocationClient =
-        cluster.getStorageContainerLocationClient();
-    xceiverClientManager = new XceiverClientManager(conf);
+        cluster().getStorageContainerLocationClient();
   }
 
   @AfterAll
-  public static void shutdown() throws InterruptedException {
-    if (cluster != null) {
-      cluster.shutdown();
-    }
+  void cleanup() {
     IOUtils.cleanupWithLogger(null, storageContainerLocationClient);
   }
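Each test class converted in this patch follows the same pattern: it implements NonHATests.TestCase, reaches the shared cluster only through cluster(), and is registered inside NonHATests as a @Nested subclass that supplies getCluster(). A short illustrative sketch with a hypothetical test name (ExampleMigratedTest is not part of the patch):

```java
import static org.junit.jupiter.api.Assertions.assertNotNull;

import org.apache.ozone.test.NonHATests;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestInstance;

// The migrated test no longer builds its own MiniOzoneCluster; it only
// declares that it needs one via the NonHATests.TestCase interface.
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
abstract class ExampleMigratedTest implements NonHATests.TestCase {

  @Test
  void clusterIsShared() {
    // Every test method sees the same running cluster instance.
    assertNotNull(cluster().getStorageContainerManager());
  }
}

// NonHATests (added later in this series) provides the cluster once per run
// and wires each migrated test in as a @Nested inner class, for example:
//
//   @Nested
//   class ExampleMigrated extends ExampleMigratedTest {
//     @Override
//     public MiniOzoneCluster cluster() {
//       return getCluster();
//     }
//   }
```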
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerReportWithKeys.java
index 43df6bf051d..85105809068 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerReportWithKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerReportWithKeys.java
@@ -21,32 +21,27 @@
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
 import org.apache.hadoop.hdds.utils.IOUtils;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
+import org.apache.ozone.test.NonHATests;
 import org.junit.jupiter.api.AfterAll;
 import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.TestInstance;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.junit.jupiter.api.Test;
 import org.junit.jupiter.api.Timeout;
-import java.io.IOException;
 import java.util.HashMap;
 import java.util.Set;
-import java.util.concurrent.TimeUnit;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -55,40 +50,23 @@
 /**
  * This class tests container report with DN container state info.
  */
-@Timeout(value = 300, unit = TimeUnit.SECONDS)
-public class TestContainerReportWithKeys {
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+@Timeout(300)
+public abstract class TestContainerReportWithKeys implements NonHATests.TestCase {
   private static final Logger LOG = LoggerFactory.getLogger(
       TestContainerReportWithKeys.class);
-  private static MiniOzoneCluster cluster = null;
-  private static OzoneClient client;
-  private static OzoneConfiguration conf;
-  private static StorageContainerManager scm;
+  private OzoneClient client;
+  private StorageContainerManager scm;
 
-  /**
-   * Create a MiniDFSCluster for testing.
-   * 

- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ @BeforeAll - public static void init() throws Exception { - conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); - client = OzoneClientFactory.getRpcClient(conf); - scm = cluster.getStorageContainerManager(); + void init() throws Exception { + client = cluster().newClient(); + scm = cluster().getStorageContainerManager(); } - /** - * Shutdown MiniDFSCluster. - */ @AfterAll - public static void shutdown() { + void cleanup() { IOUtils.closeQuietly(client); - if (cluster != null) { - cluster.shutdown(); - } } @Test @@ -121,7 +99,7 @@ public void testContainerReportKeyWrite() throws Exception { OmKeyLocationInfo keyInfo = - cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions() + cluster().getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions() .get(0).getBlocksLatestVersionOnly().get(0); @@ -137,13 +115,4 @@ public void testContainerReportKeyWrite() throws Exception { cinfo.getNumberOfKeys(), cinfo.getUsedBytes()); } - - private static ContainerData getContainerData(long containerID) { - ContainerData containerData; - ContainerSet containerManager = cluster.getHddsDatanodes().get(0) - .getDatanodeStateMachine().getContainer().getContainerSet(); - containerData = - containerManager.getContainer(containerID).getContainerData(); - return containerData; - } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerSmallFile.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerSmallFile.java index 87728f6ce10..24accb66fe2 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerSmallFile.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerSmallFile.java @@ -18,24 +18,24 @@ package org.apache.hadoop.hdds.scm; import org.apache.hadoop.hdds.client.BlockID; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.common.SCMTestUtils; +import org.apache.ozone.test.NonHATests; import org.apache.ratis.thirdparty.com.google.protobuf.ByteString; import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; import static java.nio.charset.StandardCharsets.UTF_8; @@ -47,33 +47,25 @@ /** * Test Container calls. 
*/ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) @Timeout(300) -public class TestContainerSmallFile { +public abstract class TestContainerSmallFile implements NonHATests.TestCase { - private static MiniOzoneCluster cluster; - private static OzoneConfiguration ozoneConfig; - private static StorageContainerLocationProtocolClientSideTranslatorPB + private OzoneConfiguration ozoneConfig; + private StorageContainerLocationProtocolClientSideTranslatorPB storageContainerLocationClient; - private static XceiverClientManager xceiverClientManager; + private XceiverClientManager xceiverClientManager; @BeforeAll - public static void init() throws Exception { - ozoneConfig = new OzoneConfiguration(); - ozoneConfig.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, - SCMContainerPlacementCapacity.class, PlacementPolicy.class); - cluster = MiniOzoneCluster.newBuilder(ozoneConfig).setNumDatanodes(3) - .build(); - cluster.waitForClusterToBeReady(); - storageContainerLocationClient = cluster + void init() throws Exception { + ozoneConfig = cluster().getConf(); + storageContainerLocationClient = cluster() .getStorageContainerLocationClient(); xceiverClientManager = new XceiverClientManager(ozoneConfig); } @AfterAll - public static void shutdown() throws InterruptedException { - if (cluster != null) { - cluster.shutdown(); - } + void cleanup() { IOUtils.cleanupWithLogger(null, storageContainerLocationClient); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestGetCommittedBlockLengthAndPutKey.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestGetCommittedBlockLengthAndPutKey.java index 43fc45efd09..0003c5f9039 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestGetCommittedBlockLengthAndPutKey.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestGetCommittedBlockLengthAndPutKey.java @@ -26,18 +26,18 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.pipeline.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException; -import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.ContainerTestHelper; import org.apache.hadoop.ozone.container.common.SCMTestUtils; import org.apache.ozone.test.GenericTestUtils; +import org.apache.ozone.test.NonHATests; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -53,34 +53,26 @@ /** * Test Container calls. 
*/ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) @Timeout(300) -public class TestGetCommittedBlockLengthAndPutKey { +public abstract class TestGetCommittedBlockLengthAndPutKey implements NonHATests.TestCase { private static final Logger LOG = LoggerFactory.getLogger(TestGetCommittedBlockLengthAndPutKey.class); - private static MiniOzoneCluster cluster; - private static OzoneConfiguration ozoneConfig; - private static StorageContainerLocationProtocolClientSideTranslatorPB + private OzoneConfiguration ozoneConfig; + private StorageContainerLocationProtocolClientSideTranslatorPB storageContainerLocationClient; - private static XceiverClientManager xceiverClientManager; + private XceiverClientManager xceiverClientManager; @BeforeAll - public static void init() throws Exception { - ozoneConfig = new OzoneConfiguration(); - ozoneConfig.setClass(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, - SCMContainerPlacementCapacity.class, PlacementPolicy.class); - cluster = - MiniOzoneCluster.newBuilder(ozoneConfig).setNumDatanodes(3).build(); - cluster.waitForClusterToBeReady(); + void init() throws Exception { + ozoneConfig = cluster().getConf(); storageContainerLocationClient = - cluster.getStorageContainerLocationClient(); + cluster().getStorageContainerLocationClient(); xceiverClientManager = new XceiverClientManager(ozoneConfig); } @AfterAll - public static void shutdown() throws InterruptedException { - if (cluster != null) { - cluster.shutdown(); - } + void cleanup() { IOUtils.cleanupWithLogger(null, storageContainerLocationClient); } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMNodeManagerMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMNodeManagerMXBean.java index dcc9b3e8e37..88475583a09 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMNodeManagerMXBean.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestSCMNodeManagerMXBean.java @@ -18,28 +18,24 @@ package org.apache.hadoop.hdds.scm; +import org.apache.ozone.test.NonHATests; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.server.StorageContainerManager; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; import javax.management.MBeanServer; import javax.management.ObjectName; import javax.management.openmbean.CompositeData; import javax.management.openmbean.TabularData; -import java.io.IOException; import java.lang.management.ManagementFactory; import java.util.HashMap; import java.util.Iterator; import java.util.Map; -import java.util.concurrent.TimeoutException; -import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertInstanceOf; @@ -48,36 +44,20 @@ /** * Class which tests the SCMNodeManagerInfo Bean. 
*/ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) @Timeout(300) -public class TestSCMNodeManagerMXBean { +public abstract class TestSCMNodeManagerMXBean implements NonHATests.TestCase { public static final Logger LOG = LoggerFactory.getLogger(TestSCMMXBean.class); - private static int numOfDatanodes = 3; - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf; - private static StorageContainerManager scm; - private static MBeanServer mbs; + private StorageContainerManager scm; + private MBeanServer mbs; @BeforeAll - public static void init() throws IOException, TimeoutException, - InterruptedException { - conf = new OzoneConfiguration(); - conf.set(OZONE_SCM_STALENODE_INTERVAL, "60000ms"); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(numOfDatanodes) - .build(); - cluster.waitForClusterToBeReady(); - scm = cluster.getStorageContainerManager(); + void init() { + scm = cluster().getStorageContainerManager(); mbs = ManagementFactory.getPlatformMBeanServer(); } - @AfterAll - public static void cleanup() { - if (cluster != null) { - cluster.shutdown(); - } - } - @Test public void testDiskUsage() throws Exception { ObjectName bean = new ObjectName( diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java index 4a9efceeb7b..d78a472f850 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerMXBean.java @@ -18,10 +18,8 @@ package org.apache.hadoop.hdds.scm.pipeline; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.ozone.test.GenericTestUtils; -import org.junit.jupiter.api.AfterEach; +import org.apache.ozone.test.NonHATests; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; @@ -30,36 +28,26 @@ import javax.management.ObjectName; import javax.management.openmbean.CompositeData; import javax.management.openmbean.TabularData; -import java.io.IOException; import java.lang.management.ManagementFactory; import java.util.Map; -import java.util.concurrent.TimeoutException; import static org.junit.jupiter.api.Assertions.assertInstanceOf; - /** * Test cases to verify the metrics exposed by SCMPipelineManager via MXBean. */ @Timeout(3000) -public class TestPipelineManagerMXBean { +public abstract class TestPipelineManagerMXBean implements NonHATests.TestCase { - private MiniOzoneCluster cluster; private MBeanServer mbs; @BeforeEach - public void init() - throws IOException, TimeoutException, InterruptedException { - OzoneConfiguration conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); + void init() { mbs = ManagementFactory.getPlatformMBeanServer(); } /** * Verifies SCMPipelineManagerInfo metrics. 
- * - * @throws Exception */ @Test public void testPipelineInfo() throws Exception { @@ -68,7 +56,7 @@ public void testPipelineInfo() throws Exception { GenericTestUtils.waitFor(() -> { try { - Map pipelineStateCount = cluster + Map pipelineStateCount = cluster() .getStorageContainerManager().getPipelineManager().getPipelineInfo(); final TabularData data = (TabularData) mbs.getAttribute( bean, "PipelineInfo"); @@ -95,9 +83,4 @@ private Integer getMetricsCount(TabularData data, String state) { } return null; } - - @AfterEach - public void teardown() { - cluster.shutdown(); - } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestCpuMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestCpuMetrics.java index e49a378a15c..7d8641c05f3 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestCpuMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestCpuMetrics.java @@ -22,13 +22,11 @@ import static org.assertj.core.api.Assertions.assertThat; import java.io.IOException; -import java.util.concurrent.TimeoutException; import okhttp3.OkHttpClient; import okhttp3.Request; import okhttp3.Response; import org.apache.hadoop.hdds.HddsUtils; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.junit.jupiter.api.BeforeAll; +import org.apache.ozone.test.NonHATests; import org.junit.jupiter.api.Test; /** @@ -37,25 +35,15 @@ *

  * jvm_metrics_cpu_system_load
  *
  * jvm_metrics_cpu_jvm_load

*/ -public class TestCpuMetrics { +public abstract class TestCpuMetrics implements NonHATests.TestCase { - private static MiniOzoneCluster cluster; private final OkHttpClient httpClient = new OkHttpClient(); - @BeforeAll - public static void setup() throws InterruptedException, TimeoutException, - IOException { - OzoneConfiguration conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(1).build(); - cluster.waitForClusterToBeReady(); - } - @Test public void testCpuMetrics() throws IOException { // given String scmHttpServerUrl = "http://localhost:" + - HddsUtils.getPortNumberFromConfigKeys(cluster.getConf(), + HddsUtils.getPortNumberFromConfigKeys(cluster().getConf(), OZONE_SCM_HTTP_ADDRESS_KEY).getAsInt(); Request prometheusMetricsRequest = new Request.Builder() .url(scmHttpServerUrl + "/prom") diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestGetClusterTreeInformation.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestGetClusterTreeInformation.java index 9becc8b2591..b0c81b54a93 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestGetClusterTreeInformation.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestGetClusterTreeInformation.java @@ -20,9 +20,10 @@ import org.apache.hadoop.hdds.scm.net.InnerNode; import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.proxy.SCMBlockLocationFailoverProxyProvider; +import org.apache.ozone.test.HATests; import org.junit.jupiter.api.BeforeAll; -import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; import org.slf4j.Logger; @@ -31,7 +32,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration; import java.io.IOException; -import java.util.concurrent.TimeoutException; import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -41,34 +41,19 @@ * This class is to test the serialization/deserialization of cluster tree * information from SCM. 
*/ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) @Timeout(300) -public class TestGetClusterTreeInformation { +public abstract class TestGetClusterTreeInformation implements HATests.TestCase { public static final Logger LOG = LoggerFactory.getLogger(TestGetClusterTreeInformation.class); - private static int numOfDatanodes = 3; - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf; - private static StorageContainerManager scm; + private OzoneConfiguration conf; + private StorageContainerManager scm; @BeforeAll - public static void init() throws IOException, TimeoutException, - InterruptedException { - conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newHABuilder(conf) - .setNumOfOzoneManagers(3) - .setNumOfStorageContainerManagers(3) - .setNumDatanodes(numOfDatanodes) - .build(); - cluster.waitForClusterToBeReady(); - scm = cluster.getStorageContainerManager(); - } - - @AfterAll - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } + void init() { + conf = cluster().getConf(); + scm = cluster().getStorageContainerManager(); } @Test diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java index 2f18326f7b1..544b49a8c74 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestDatanodeQueueMetrics.java @@ -17,19 +17,12 @@ package org.apache.hadoop.ozone.container.metrics; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; -import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; import org.apache.hadoop.ozone.container.common.statemachine.DatanodeQueueMetrics; -import org.junit.jupiter.api.BeforeEach; +import org.apache.ozone.test.NonHATests; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; import static org.apache.commons.text.WordUtils.capitalize; import static org.apache.hadoop.ozone.container.common.statemachine.DatanodeQueueMetrics.COMMAND_DISPATCHER_QUEUE_PREFIX; @@ -41,44 +34,9 @@ /** * Test for queue metrics of datanodes. */ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) @Timeout(300) -public class TestDatanodeQueueMetrics { - - private MiniOzoneHAClusterImpl cluster = null; - private OzoneConfiguration conf; - private String omServiceId; - private static int numOfOMs = 3; - private String scmServiceId; - private static int numOfSCMs = 3; - - private static final Logger LOG = LoggerFactory - .getLogger(TestDatanodeQueueMetrics.class); - - /** - * Create a MiniDFSCluster for testing. - *

- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ - @BeforeEach - public void init() throws Exception { - conf = new OzoneConfiguration(); - conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, "10s"); - omServiceId = "om-service-test1"; - scmServiceId = "scm-service-test1"; - MiniOzoneHAClusterImpl.Builder builder = MiniOzoneCluster.newHABuilder(conf); - builder.setOMServiceId(omServiceId) - .setSCMServiceId(scmServiceId) - .setNumOfStorageContainerManagers(numOfSCMs) - .setNumOfOzoneManagers(numOfOMs) - .setNumDatanodes(1); - cluster = builder.build(); - cluster.waitForClusterToBeReady(); - } - /** - * Set a timeout for each test. - */ +public abstract class TestDatanodeQueueMetrics implements NonHATests.TestCase { @Test public void testQueueMetrics() { @@ -89,7 +47,6 @@ public void testQueueMetrics() { assertThat(getGauge(COMMAND_DISPATCHER_QUEUE_PREFIX + typeSize)) .isGreaterThanOrEqualTo(0); } - } private long getGauge(String metricName) { diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDNRPCLoadGenerator.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDNRPCLoadGenerator.java index 33d59f101eb..e16139c63b8 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDNRPCLoadGenerator.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestDNRPCLoadGenerator.java @@ -18,27 +18,24 @@ package org.apache.hadoop.ozone.freon; -import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; import org.apache.hadoop.hdds.scm.XceiverClientCreator; import org.apache.hadoop.hdds.scm.XceiverClientFactory; import org.apache.hadoop.hdds.scm.XceiverClientSpi; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB; import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls; -import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.OzoneConsts; import org.apache.hadoop.ozone.container.common.SCMTestUtils; -import org.junit.jupiter.api.AfterAll; +import org.apache.ozone.test.NonHATests; import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; import picocli.CommandLine; -import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; import java.util.List; @@ -49,32 +46,16 @@ /** * Tests Freon, with MiniOzoneCluster and validate data. 
*/ -public class TestDNRPCLoadGenerator { +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +public abstract class TestDNRPCLoadGenerator implements NonHATests.TestCase { - private static MiniOzoneCluster cluster = null; - private static ContainerWithPipeline container; - - private static void startCluster(OzoneConfiguration conf) throws Exception { - DatanodeRatisServerConfig ratisServerConfig = - conf.getObject(DatanodeRatisServerConfig.class); - ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); - ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10)); - conf.setFromObject(ratisServerConfig); - - RatisClientConfig.RaftConfig raftClientConfig = - conf.getObject(RatisClientConfig.RaftConfig.class); - raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); - raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10)); - conf.setFromObject(raftClientConfig); - - cluster = MiniOzoneCluster.newBuilder(conf) - .setNumDatanodes(5).build(); - cluster.waitForClusterToBeReady(); - cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.THREE, - 180000); + private ContainerWithPipeline container; + @BeforeAll + void init() throws Exception { + OzoneConfiguration conf = cluster().getConf(); StorageContainerLocationProtocolClientSideTranslatorPB - storageContainerLocationClient = cluster + storageContainerLocationClient = cluster() .getStorageContainerLocationClient(); container = storageContainerLocationClient.allocateContainer( @@ -87,23 +68,6 @@ private static void startCluster(OzoneConfiguration conf) throws Exception { } } - static void shutdownCluster() { - if (cluster != null) { - cluster.shutdown(); - } - } - - @BeforeAll - public static void init() throws Exception { - OzoneConfiguration conf = new OzoneConfiguration(); - startCluster(conf); - } - - @AfterAll - public static void shutdown() { - shutdownCluster(); - } - private static Stream provideParameters() { return Stream.of( Arguments.of(true, true), @@ -117,7 +81,7 @@ private static Stream provideParameters() { @MethodSource("provideParameters") public void test(boolean readOnly, boolean ratis) { DNRPCLoadGenerator randomKeyGenerator = - new DNRPCLoadGenerator(cluster.getConf()); + new DNRPCLoadGenerator(cluster().getConf()); CommandLine cmd = new CommandLine(randomKeyGenerator); List cmdArgs = new ArrayList<>(Arrays.asList( "--container-id", Long.toString(container.getContainerInfo().getContainerID()), diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStore.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStore.java index 5997d5758a2..82e6405135f 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStore.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStore.java @@ -18,7 +18,6 @@ import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.client.BucketArgs; import org.apache.hadoop.ozone.client.ObjectStore; import org.apache.hadoop.ozone.client.OzoneBucket; @@ -26,9 +25,11 @@ import org.apache.hadoop.ozone.client.OzoneVolume; import org.apache.hadoop.ozone.om.exceptions.OMException; import org.apache.hadoop.ozone.om.helpers.BucketLayout; +import org.apache.ozone.test.NonHATests; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import 
org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; import java.io.IOException; @@ -40,35 +41,21 @@ /** * Tests to verify Object store without prefix enabled. */ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) @Timeout(1200) -public class TestObjectStore { - private static MiniOzoneCluster cluster = null; - private static OzoneConfiguration conf; - private static OzoneClient client; +public abstract class TestObjectStore implements NonHATests.TestCase { + private OzoneConfiguration conf; + private OzoneClient client; - /** - * Create a MiniOzoneCluster for testing. - *

- * - * @throws IOException - */ @BeforeAll - public static void init() throws Exception { - conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); - client = cluster.newClient(); + void init() throws Exception { + conf = cluster().getConf(); + client = cluster().newClient(); } - /** - * Shutdown MiniOzoneCluster. - */ @AfterAll - public static void shutdown() { + void cleanup() { IOUtils.closeQuietly(client); - if (cluster != null) { - cluster.shutdown(); - } } @Test diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java index 5e3a3aa1980..87b1a735266 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java @@ -48,10 +48,12 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; import org.apache.hadoop.util.StringUtils; import org.apache.ozone.test.GenericTestUtils; +import org.apache.ozone.test.NonHATests; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; import java.io.IOException; @@ -83,30 +85,22 @@ /** * Tests to verify Object store with prefix enabled cases. */ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) @Timeout(1200) -public class TestObjectStoreWithFSO { +public abstract class TestObjectStoreWithFSO implements NonHATests.TestCase { private static final Path ROOT = new Path(OZONE_URI_DELIMITER); - private static MiniOzoneCluster cluster = null; - private static OzoneConfiguration conf; - private static String volumeName; - private static String bucketName; - private static FileSystem fs; - private static OzoneClient client; + private MiniOzoneCluster cluster; + private OzoneConfiguration conf; + private String volumeName; + private String bucketName; + private FileSystem fs; + private OzoneClient client; - /** - * Create a MiniDFSCluster for testing. - *

- * - * @throws IOException - */ @BeforeAll - public static void init() throws Exception { - conf = new OzoneConfiguration(); - conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT, - BucketLayout.FILE_SYSTEM_OPTIMIZED.name()); - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); + void init() throws Exception { + conf = new OzoneConfiguration(cluster().getConf()); + cluster = cluster(); client = cluster.newClient(); // create a volume and a bucket to be used by OzoneFileSystem OzoneBucket bucket = TestDataUtil @@ -147,7 +141,7 @@ protected void deleteRootDir() throws IOException { } } - private static void deleteRootRecursively(FileStatus[] fileStatuses) + private void deleteRootRecursively(FileStatus[] fileStatuses) throws IOException { for (FileStatus fStatus : fileStatuses) { fs.delete(fStatus.getPath(), true); @@ -829,14 +823,8 @@ public BucketLayout getBucketLayout() { return BucketLayout.FILE_SYSTEM_OPTIMIZED; } - /** - * Shutdown MiniDFSCluster. - */ @AfterAll - public static void shutdown() { + void cleanup() { IOUtils.closeQuietly(client); - if (cluster != null) { - cluster.shutdown(); - } } } diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java index 247f7e75103..03eba1e5ef7 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmBlockVersioning.java @@ -16,15 +16,12 @@ */ package org.apache.hadoop.ozone.om; -import java.io.IOException; import java.util.ArrayList; import java.util.List; import org.apache.hadoop.hdds.utils.IOUtils; import org.apache.hadoop.hdds.client.StandaloneReplicationConfig; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList; -import org.apache.hadoop.ozone.MiniOzoneCluster; import org.apache.hadoop.ozone.TestDataUtil; import org.apache.hadoop.ozone.client.OzoneBucket; import org.apache.hadoop.ozone.client.OzoneClient; @@ -40,50 +37,35 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; import static org.junit.jupiter.api.Assertions.assertEquals; +import org.apache.ozone.test.NonHATests; import org.junit.jupiter.api.AfterAll; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; /** * This class tests the versioning of blocks from OM side. */ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) @Timeout(300) -public class TestOmBlockVersioning { - - private static MiniOzoneCluster cluster = null; - private static OzoneClient client; - private static OzoneConfiguration conf; - private static OzoneManager ozoneManager; - private static OzoneManagerProtocol writeClient; - - /** - * Create a MiniDFSCluster for testing. - *

- * Ozone is made active by setting OZONE_ENABLED = true - * - * @throws IOException - */ +public abstract class TestOmBlockVersioning implements NonHATests.TestCase { + + private OzoneClient client; + private OzoneManager ozoneManager; + private OzoneManagerProtocol writeClient; + @BeforeAll - public static void init() throws Exception { - conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); - client = cluster.newClient(); - ozoneManager = cluster.getOzoneManager(); + void init() throws Exception { + client = cluster().newClient(); + ozoneManager = cluster().getOzoneManager(); writeClient = client.getObjectStore() .getClientProxy().getOzoneManagerClient(); } - /** - * Shutdown MiniDFSCluster. - */ @AfterAll - public static void shutdown() { + void cleanup() { IOUtils.closeQuietly(client); - if (cluster != null) { - cluster.shutdown(); - } } @Test diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java index 2cca0619afe..b7486aec200 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestInterface.java @@ -38,9 +38,10 @@ import java.util.List; import java.util.Map; -import org.junit.jupiter.api.AfterAll; +import org.apache.ozone.test.NonHATests; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.Timeout; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -51,24 +52,17 @@ /** * This class is to test the REST interface exposed by OzoneManager. 
*/ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) @Timeout(300) -public class TestOzoneManagerRestInterface { +public abstract class TestOzoneManagerRestInterface implements NonHATests.TestCase { - private static MiniOzoneCluster cluster; - private static OzoneConfiguration conf; + private MiniOzoneCluster cluster; + private OzoneConfiguration conf; @BeforeAll - public static void setUp() throws Exception { - conf = new OzoneConfiguration(); - cluster = MiniOzoneCluster.newBuilder(conf).build(); - cluster.waitForClusterToBeReady(); - } - - @AfterAll - public static void tearDown() throws Exception { - if (cluster != null) { - cluster.shutdown(); - } + void setup() { + conf = cluster().getConf(); + cluster = cluster(); } @Test diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java index 8691e5ede38..90aebee5e37 100644 --- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java @@ -19,47 +19,26 @@ import java.net.InetSocketAddress; -import org.apache.hadoop.ozone.admin.OzoneAdmin; -import org.apache.hadoop.hdds.conf.OzoneConfiguration; -import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.ozone.MiniOzoneCluster; -import org.junit.jupiter.api.AfterAll; +import org.apache.hadoop.ozone.admin.OzoneAdmin; +import org.apache.ozone.test.HATests; import org.junit.jupiter.api.BeforeAll; import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; /** * This class tests ozone admin scm commands. */ -public class TestScmAdminHA { - private static OzoneAdmin ozoneAdmin; - private static OzoneConfiguration conf; - private static String omServiceId; - private static int numOfOMs; - private static MiniOzoneCluster cluster; +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +public abstract class TestScmAdminHA implements HATests.TestCase { + + private OzoneAdmin ozoneAdmin; + private MiniOzoneCluster cluster; @BeforeAll - public static void init() throws Exception { + void init() { ozoneAdmin = new OzoneAdmin(); - conf = new OzoneConfiguration(); - - // Init HA cluster - omServiceId = "om-service-test1"; - numOfOMs = 3; - cluster = MiniOzoneCluster.newHABuilder(conf) - .setOMServiceId(omServiceId) - .setNumOfOzoneManagers(numOfOMs) - .build(); - conf.setQuietMode(false); - // enable ratis for Scm. - conf.setBoolean(ScmConfigKeys.HDDS_CONTAINER_RATIS_ENABLED_KEY, true); - cluster.waitForClusterToBeReady(); - } - - @AfterAll - public static void shutdown() { - if (cluster != null) { - cluster.shutdown(); - } + cluster = cluster(); } @Test diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/ClusterForTests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/ClusterForTests.java new file mode 100644 index 00000000000..c09bff04c56 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/ClusterForTests.java @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ozone.test; + +import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig; +import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig; +import org.apache.hadoop.hdds.utils.IOUtils; +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.TestInstance; + +import java.time.Duration; + +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_HSYNC_ENABLED; +import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_HBASE_ENHANCEMENTS_ALLOWED; + +/** + * Base class for Ozone integration tests. Manages lifecycle of {@link MiniOzoneCluster}. + *

+ * Subclasses can tweak configuration by overriding {@link #createOzoneConfig()}. + */ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +public abstract class ClusterForTests { + + private C cluster; + + /** + * Creates the base configuration for tests. This can be tweaked + * in subclasses by overriding {@link #createOzoneConfig()}. + */ + protected static OzoneConfiguration createBaseConfiguration() { + OzoneConfiguration conf = new OzoneConfiguration(); + DatanodeRatisServerConfig ratisServerConfig = + conf.getObject(DatanodeRatisServerConfig.class); + ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3)); + ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10)); + conf.setFromObject(ratisServerConfig); + + RatisClientConfig.RaftConfig raftClientConfig = + conf.getObject(RatisClientConfig.RaftConfig.class); + raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3)); + raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10)); + conf.setFromObject(raftClientConfig); + + conf.setBoolean(OZONE_HBASE_ENHANCEMENTS_ALLOWED, true); + conf.setBoolean("ozone.client.hbase.enhancements.allowed", true); + conf.setBoolean(OZONE_FS_HSYNC_ENABLED, true); + + return conf; + } + + /** + * Hook method that allows tweaking the configuration. + */ + protected OzoneConfiguration createOzoneConfig() { + return createBaseConfiguration(); + } + + /** + * Hook method to create cluster with different parameters. + */ + protected abstract C createCluster() throws Exception; + + protected C getCluster() { + return cluster; + } + + @BeforeAll + void startCluster() throws Exception { + cluster = createCluster(); + cluster.waitForClusterToBeReady(); + } + + @AfterAll + void shutdownCluster() { + IOUtils.closeQuietly(cluster); + } + +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/HATests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/HATests.java new file mode 100644 index 00000000000..f01a9ed3a04 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/HATests.java @@ -0,0 +1,83 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ozone.test; + +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; +import org.junit.jupiter.api.Nested; +import org.junit.jupiter.api.TestInstance; + +import java.util.UUID; + +/** + * Group tests to be run with a single HA cluster. + *

+ * Specific tests are implemented in separate classes, and they are subclasses + * here as {@link Nested} inner classes. This allows running all tests in the + * same cluster. + */ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +public abstract class HATests extends ClusterForTests { + + /** Hook method for subclasses. */ + MiniOzoneHAClusterImpl.Builder newClusterBuilder() { + return MiniOzoneCluster.newHABuilder(createOzoneConfig()) + .setOMServiceId("om-" + UUID.randomUUID()) + .setNumOfOzoneManagers(3) + .setSCMServiceId("scm-" + UUID.randomUUID()) + .setNumOfStorageContainerManagers(3); + } + + /** Test cases which need HA cluster should implement this. */ + public interface TestCase { + MiniOzoneHAClusterImpl cluster(); + } + + @Nested + class OzoneFsHAURLs extends org.apache.hadoop.fs.ozone.TestOzoneFsHAURLs { + @Override + public MiniOzoneHAClusterImpl cluster() { + return getCluster(); + } + } + + @Nested + class GetClusterTreeInformation extends org.apache.hadoop.ozone.TestGetClusterTreeInformation { + @Override + public MiniOzoneHAClusterImpl cluster() { + return getCluster(); + } + } + + @Nested + class DatanodeQueueMetrics extends org.apache.hadoop.ozone.container.metrics.TestDatanodeQueueMetrics { + @Override + public MiniOzoneHAClusterImpl cluster() { + return getCluster(); + } + } + + @Nested + class ScmAdminHA extends org.apache.hadoop.ozone.shell.TestScmAdminHA { + @Override + public MiniOzoneHAClusterImpl cluster() { + return getCluster(); + } + } + +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/NonHATests.java b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/NonHATests.java new file mode 100644 index 00000000000..8e0e3286881 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/NonHATests.java @@ -0,0 +1,141 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ozone.test; + +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.junit.jupiter.api.Nested; +import org.junit.jupiter.api.TestInstance; + +/** + * Group tests to be run with a single non-HA cluster. + *

+ * Specific tests are implemented in separate classes, and they are subclasses + * here as {@link Nested} inner classes. This allows running all tests in the + * same cluster. + */ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +public abstract class NonHATests extends ClusterForTests { + + /** Hook method for subclasses. */ + MiniOzoneCluster.Builder newClusterBuilder() { + return MiniOzoneCluster.newBuilder(createOzoneConfig()) + .setNumDatanodes(5); + } + + /** Test cases for non-HA cluster should implement this. */ + public interface TestCase { + MiniOzoneCluster cluster(); + } + + @Nested + class AllocateContainer extends org.apache.hadoop.hdds.scm.TestAllocateContainer { + @Override + public MiniOzoneCluster cluster() { + return getCluster(); + } + } + + @Nested + class ContainerReportWithKeys extends org.apache.hadoop.hdds.scm.TestContainerReportWithKeys { + @Override + public MiniOzoneCluster cluster() { + return getCluster(); + } + } + + @Nested + class ContainerSmallFile extends org.apache.hadoop.hdds.scm.TestContainerSmallFile { + @Override + public MiniOzoneCluster cluster() { + return getCluster(); + } + } + + @Nested + class GetCommittedBlockLengthAndPutKey extends org.apache.hadoop.hdds.scm.TestGetCommittedBlockLengthAndPutKey { + @Override + public MiniOzoneCluster cluster() { + return getCluster(); + } + } + + @Nested + class SCMNodeManagerMXBean extends org.apache.hadoop.hdds.scm.TestSCMNodeManagerMXBean { + @Override + public MiniOzoneCluster cluster() { + return getCluster(); + } + } + + @Nested + class PipelineManagerMXBean extends org.apache.hadoop.hdds.scm.pipeline.TestPipelineManagerMXBean { + @Override + public MiniOzoneCluster cluster() { + return getCluster(); + } + } + + @Nested + class CpuMetrics extends org.apache.hadoop.ozone.TestCpuMetrics { + @Override + public MiniOzoneCluster cluster() { + return getCluster(); + } + } + + @Nested + class DNRPCLoadGenerator extends org.apache.hadoop.ozone.freon.TestDNRPCLoadGenerator { + @Override + public MiniOzoneCluster cluster() { + return getCluster(); + } + } + + @Nested + class ObjectStore extends org.apache.hadoop.ozone.om.TestObjectStore { + @Override + public MiniOzoneCluster cluster() { + return getCluster(); + } + } + + @Nested + class ObjectStoreWithFSO extends org.apache.hadoop.ozone.om.TestObjectStoreWithFSO { + @Override + public MiniOzoneCluster cluster() { + return getCluster(); + } + } + + @Nested + class OmBlockVersioning extends org.apache.hadoop.ozone.om.TestOmBlockVersioning { + @Override + public MiniOzoneCluster cluster() { + return getCluster(); + } + } + + @Nested + class OzoneManagerRestInterface extends org.apache.hadoop.ozone.om.TestOzoneManagerRestInterface { + @Override + public MiniOzoneCluster cluster() { + return getCluster(); + } + } + +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/TestOzoneIntegrationHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/TestOzoneIntegrationHA.java new file mode 100644 index 00000000000..287328f6749 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/TestOzoneIntegrationHA.java @@ -0,0 +1,29 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ozone.test; + +import org.apache.hadoop.ozone.MiniOzoneHAClusterImpl; + +/** Test Ozone with HA cluster. */ +public class TestOzoneIntegrationHA extends HATests { + @Override + protected MiniOzoneHAClusterImpl createCluster() throws Exception { + return newClusterBuilder() + .build(); + } +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/TestOzoneIntegrationNonHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/TestOzoneIntegrationNonHA.java new file mode 100644 index 00000000000..793580b17d0 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/TestOzoneIntegrationNonHA.java @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.ozone.test; + +import org.apache.hadoop.ozone.MiniOzoneCluster; +import org.junit.jupiter.api.TestInstance; + +/** Test Ozone with non-HA cluster. */ +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +public class TestOzoneIntegrationNonHA extends NonHATests { + protected MiniOzoneCluster createCluster() throws Exception { + return newClusterBuilder() + .build(); + } + +} diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/package-info.java b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/package-info.java new file mode 100644 index 00000000000..2309ac44d87 --- /dev/null +++ b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/package-info.java @@ -0,0 +1,21 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/** + * Contains test cluster definitions. + */ +package org.apache.ozone.test; From 7efa081c70c12aa8e0a42b65ef6cb8c9cbabbb35 Mon Sep 17 00:00:00 2001 From: oneonestar Date: Wed, 5 Feb 2025 06:52:13 +0900 Subject: [PATCH 163/168] HDDS-12203. Initialize block length before skip (#7809) --- .../apache/hadoop/hdds/scm/storage/MultipartInputStream.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/MultipartInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/MultipartInputStream.java index 5f00e83e81b..368696be590 100644 --- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/MultipartInputStream.java +++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/MultipartInputStream.java @@ -220,6 +220,11 @@ public synchronized void unbuffer() { @Override public synchronized long skip(long n) throws IOException { + checkOpen(); + if (!initialized) { + initialize(); + } + if (n <= 0) { return 0; } From bb860d2402c5efc9370388fdda36bacb2f476f56 Mon Sep 17 00:00:00 2001 From: Peter Lee Date: Wed, 5 Feb 2025 16:55:28 +0800 Subject: [PATCH 164/168] HDDS-12202. OpsCreate and OpsAppend metrics not incremented (#7811) --- .../main/java/org/apache/ozone/fs/http/server/FSOperations.java | 2 ++ 1 file changed, 2 insertions(+) diff --git a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/FSOperations.java b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/FSOperations.java index 59945f829eb..553f8ba451d 100644 --- a/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/FSOperations.java +++ b/hadoop-ozone/httpfsgateway/src/main/java/org/apache/ozone/fs/http/server/FSOperations.java @@ -455,6 +455,7 @@ public Void execute(FileSystem fs) throws IOException { OutputStream os = fs.append(path, bufferSize); long bytes = copyBytes(is, os); HttpFSServerWebApp.get().getMetrics().incrBytesWritten(bytes); + HttpFSServerWebApp.get().getMetrics().incrOpsAppend(); return null; } @@ -667,6 +668,7 @@ public Void execute(FileSystem fs) throws IOException { null); long bytes = copyBytes(is, os); HttpFSServerWebApp.get().getMetrics().incrBytesWritten(bytes); + HttpFSServerWebApp.get().getMetrics().incrOpsCreate(); return null; } From 1a9cf6cfe0f288da76d5adbe7066998bae39ba33 Mon Sep 17 00:00:00 2001 From: sreejasahithi <115860222+sreejasahithi@users.noreply.github.com> Date: Wed, 5 Feb 2025 15:47:32 +0530 Subject: [PATCH 165/168] HDDS-12200. 
Fix grammar in OM HA, EC and Snapshot doc (#7806) --- hadoop-hdds/docs/content/feature/ErasureCoding.md | 6 +++--- hadoop-hdds/docs/content/feature/OM-HA.md | 2 +- hadoop-hdds/docs/content/feature/Snapshot.md | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/hadoop-hdds/docs/content/feature/ErasureCoding.md b/hadoop-hdds/docs/content/feature/ErasureCoding.md index c4d3739f1dc..c174da2bc64 100644 --- a/hadoop-hdds/docs/content/feature/ErasureCoding.md +++ b/hadoop-hdds/docs/content/feature/ErasureCoding.md @@ -31,7 +31,7 @@ approach which is expensive. The Apache Ozone supports `RATIS/THREE` replication The Ozone default replication scheme `RATIS/THREE` has 200% overhead in storage space and other resources (e.g., network bandwidth). However, for warm and cold datasets with relatively low I/O activities, additional -block replicas rarely accessed during normal operations, but still consume the same +block replicas are rarely accessed during normal operations, but still consume the same amount of resources as the first replica. Therefore, a natural improvement is to use Erasure Coding (EC) in place of replication, @@ -200,7 +200,7 @@ We can also reset the EC Replication Config with the following command. ozone sh bucket set-replication-config --type EC --replication rs-3-2-1024k ``` -Once we reset, only newly created keys take effect of this new setting. Prior created keys in the bucket stay with same older setting. +Once we reset, only newly created keys will take effect of this new setting. Prior created keys in the bucket stay with same older setting. #### Setting EC Replication Config While Creating Keys/Files @@ -238,7 +238,7 @@ storage applications. Enabling ISA-L allows significantly improve EC performance To enable ISA-L you will also require Hadoop native libraries (libhadoop.so). #### Installation -Both libraries should be placed to the directory specified by the java.library.path property or set by `LD_LIBRARY_PATH` environment variable. +Both libraries should be placed in the directory specified by the java.library.path property or set by `LD_LIBRARY_PATH` environment variable. The default value of java.library.path depends on the OS and Java version. For example, on Linux with OpenJDK 8 it is `/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib`. #### Verification diff --git a/hadoop-hdds/docs/content/feature/OM-HA.md b/hadoop-hdds/docs/content/feature/OM-HA.md index cf8ca4351f3..1a0a46481d6 100644 --- a/hadoop-hdds/docs/content/feature/OM-HA.md +++ b/hadoop-hdds/docs/content/feature/OM-HA.md @@ -107,7 +107,7 @@ The details of this approach are discussed in a separate [design doc]({{< ref "d To convert a non-HA OM to be HA or to add new OM nodes to existing HA OM ring, new OM node(s) need to be bootstrapped. Before bootstrapping a new OM node, all the existing OM's on-disk configuration file (ozone-site.xml) must be updated with the configuration details -of the new OM such as nodeId, address, port etc. Note that the existing OM's need not be restarted. They will reload the configuration from disk when +of the new OM such as nodeId, address, port etc. Note that the existing OMs need not be restarted. They will reload the configuration from disk when they receive a bootstrap request from the bootstrapping node. 
To bootstrap an OM, the following command needs to be run: diff --git a/hadoop-hdds/docs/content/feature/Snapshot.md b/hadoop-hdds/docs/content/feature/Snapshot.md index 143a1a5f918..8e81b8e3301 100644 --- a/hadoop-hdds/docs/content/feature/Snapshot.md +++ b/hadoop-hdds/docs/content/feature/Snapshot.md @@ -34,7 +34,7 @@ Snapshot feature for Apache Ozone object store allows users to take point-in-tim ## Snapshot APIs Snapshot feature is available through 'ozone fs' and 'ozone sh' CLI. This feature can also be programmatically accessed from Ozone `ObjectStore` Java client. The feature provides following functionalities: -* Create Snapshot: Create an instantenous snapshot for a given bucket +* Create Snapshot: Create an instantaneous snapshot for a given bucket ```shell ozone sh snapshot create [-hV] [] ``` @@ -71,7 +71,7 @@ Ozone also provides SnapshotDiff API. Whenever a user issues a SnapshotDiff betw ---------- ### Cluster and Hardware Configuration -Snapshot feature places additional demands on the cluster in terms of CPU, memory and storage. Cluster nodes running Ozone Managers and Ozone Datanodes should be configured with extra storage capacity depending on the number of active snapshots that the user wants to keep. Ozone Snapshots consume incremental amount of space per snapshot. e.g. if the active object store has 100 GB data (before replication) and a snapshot is taken, then the 100 GB of space will be locked in that snapshot. If the active object store consumes another 10 GB of space (before replication) subsequently then overall space requirement would be 100 GB + 10 GB = 110 GB in total (before replication). This is because common keys between Ozone snapshots and the active object store will share the storage space. +Snapshot feature places additional demands on the cluster in terms of CPU, memory and storage. Cluster nodes running Ozone Managers and Ozone Datanodes should be configured with extra storage capacity depending on the number of active snapshots that the user wants to keep. Ozone Snapshots consume an incremental amount of space per snapshot. e.g. if the active object store has 100 GB data (before replication) and a snapshot is taken, then the 100 GB of space will be locked in that snapshot. If the active object store consumes another 10 GB of space (before replication) subsequently then overall space requirement would be 100 GB + 10 GB = 110 GB in total (before replication). This is because common keys between Ozone snapshots and the active object store will share the storage space. Similarly, nodes running Ozone Manager should be configured with extra memory depending on how many snapshots are concurrently read from. This also depends on how many concurrent SnapshotDiff jobs are expected in the cluster. By default, an Ozone Manager allows 10 concurrent SnapshotDiff jobs at a time, which can be increased in config. From 182bd53bef9c3233330c792feffc796277b14c43 Mon Sep 17 00:00:00 2001 From: oneonestar Date: Wed, 5 Feb 2025 20:24:43 +0900 Subject: [PATCH 166/168] HDDS-12195. 
Implement skip() in OzoneFSInputStream (#7801) --- .../java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java index f873b43ae98..9eaa6fbb1f8 100644 --- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java +++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSInputStream.java @@ -104,6 +104,11 @@ public long getPos() throws IOException { return ((Seekable) inputStream).getPos(); } + @Override + public long skip(long n) throws IOException { + return inputStream.skip(n); + } + @Override public boolean seekToNewSource(long targetPos) throws IOException { return false; From d8c94fb9a90eba68cc655f9690461b5f42d209c4 Mon Sep 17 00:00:00 2001 From: Gargi Jaiswal <134698352+Gargi-jais11@users.noreply.github.com> Date: Wed, 5 Feb 2025 18:57:09 +0530 Subject: [PATCH 167/168] HDDS-12212. Fix grammar in decommissioning and observability documentation (#7815) --- hadoop-hdds/docs/content/feature/Decommission.md | 2 +- hadoop-hdds/docs/content/feature/Observability.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdds/docs/content/feature/Decommission.md b/hadoop-hdds/docs/content/feature/Decommission.md index 8058c0c0902..233caf42891 100644 --- a/hadoop-hdds/docs/content/feature/Decommission.md +++ b/hadoop-hdds/docs/content/feature/Decommission.md @@ -70,7 +70,7 @@ ozone admin datanode recommission [-hV] [-id=] Ozone Manager (OM) decommissioning is the process in which you gracefully remove one of the OM from the OM HA Ring. To decommission an OM and remove the node from the OM HA ring, the following steps need to be executed. -1. Add the _OM NodeId_ of the to be decommissioned OM node to the _ozone.om.decommissioned.nodes._ property in _ozone-site.xml_ of all +1. Add the _OM NodeId_ of the OM Node to be decommissioned to the _ozone.om.decommissioned.nodes._ property in _ozone-site.xml_ of all other OMs. 2. Run the following command to decommission an OM node. ```shell diff --git a/hadoop-hdds/docs/content/feature/Observability.md b/hadoop-hdds/docs/content/feature/Observability.md index b122f4bc83c..d46e7d704f7 100644 --- a/hadoop-hdds/docs/content/feature/Observability.md +++ b/hadoop-hdds/docs/content/feature/Observability.md @@ -56,7 +56,7 @@ scrape_configs: ## Grafana -Once Prometheus is up and running, Grana can be configured to monitor and visualize Ozone metrics. +Once Prometheus is up and running, Grafana can be configured to monitor and visualize Ozone metrics. ### Add Prometheus as a data source From 98c765402ef8f79d8b48231379f46450e85125a1 Mon Sep 17 00:00:00 2001 From: Ritesh H Shukla Date: Wed, 5 Feb 2025 07:00:45 -0800 Subject: [PATCH 168/168] HDDS-12112. 
Fix interval used for Chunk Read/Write Dashboard (#7724) --- .../Datanode Chunk Read_Write Dashboard.json | 85 +++++++++---------- 1 file changed, 40 insertions(+), 45 deletions(-) diff --git a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Datanode Chunk Read_Write Dashboard.json b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Datanode Chunk Read_Write Dashboard.json index 44749f902cf..bef1c957fd1 100644 --- a/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Datanode Chunk Read_Write Dashboard.json +++ b/hadoop-ozone/dist/src/main/compose/common/grafana/dashboards/Datanode Chunk Read_Write Dashboard.json @@ -1,34 +1,4 @@ { - "__inputs": [ - { - "label": "prometheus", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" - } - ], - "__elements": {}, - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "11.1.4" - }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "1.0.0" - }, - { - "type": "panel", - "id": "timeseries", - "name": "Time series", - "version": "" - } - ], "annotations": { "list": [ { @@ -48,10 +18,11 @@ "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, - "id": null, + "id": 1, "links": [], "panels": [ { + "collapsed": false, "gridPos": { "h": 1, "w": 24, @@ -59,6 +30,7 @@ "y": 0 }, "id": 12, + "panels": [], "title": "Volume Metrics", "type": "row" }, @@ -78,6 +50,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -140,6 +113,7 @@ "sort": "none" } }, + "pluginVersion": "11.4.0", "targets": [ { "datasource": { @@ -147,7 +121,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(volume_io_stats_read_bytes[$__interval])", + "expr": "rate(volume_io_stats_read_bytes[$__rate_interval])", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -176,6 +150,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -237,6 +212,7 @@ "sort": "none" } }, + "pluginVersion": "11.4.0", "targets": [ { "datasource": { @@ -244,7 +220,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(volume_io_stats_read_op_count[$__interval])", + "expr": "rate(volume_io_stats_read_op_count[$__rate_interval])", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -273,6 +249,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -335,6 +312,7 @@ "sort": "none" } }, + "pluginVersion": "11.4.0", "targets": [ { "datasource": { @@ -342,7 +320,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(volume_io_stats_write_bytes[$__interval])", + "expr": "rate(volume_io_stats_write_bytes[$__rate_interval])", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -371,6 +349,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -432,6 +411,7 @@ "sort": "none" } }, + "pluginVersion": "11.4.0", "targets": [ { "datasource": { @@ -439,7 +419,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(volume_io_stats_write_op_count[$__interval])", + "expr": 
"rate(volume_io_stats_write_op_count[$__rate_interval])", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -481,6 +461,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -543,6 +524,7 @@ "sort": "none" } }, + "pluginVersion": "11.4.0", "targets": [ { "datasource": { @@ -550,7 +532,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(storage_container_metrics_bytes_write_chunk[$__interval])", + "expr": "rate(storage_container_metrics_bytes_write_chunk[$__rate_interval])", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -579,6 +561,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -641,6 +624,7 @@ "sort": "none" } }, + "pluginVersion": "11.4.0", "targets": [ { "datasource": { @@ -648,7 +632,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "sum(rate(storage_container_metrics_bytes_write_chunk[$__interval]))", + "expr": "sum(rate(storage_container_metrics_bytes_write_chunk[$__rate_interval]))", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -677,6 +661,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -738,6 +723,7 @@ "sort": "none" } }, + "pluginVersion": "11.4.0", "targets": [ { "datasource": { @@ -745,7 +731,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(storage_container_metrics_num_write_chunk[$__interval])", + "expr": "rate(storage_container_metrics_num_write_chunk[$__rate_interval])", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -774,6 +760,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -835,6 +822,7 @@ "sort": "none" } }, + "pluginVersion": "11.4.0", "targets": [ { "datasource": { @@ -842,7 +830,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(storage_container_metrics_bytes_put_block[$__interval])", + "expr": "rate(storage_container_metrics_bytes_put_block[$__rate_interval])", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -884,6 +872,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -946,6 +935,7 @@ "sort": "none" } }, + "pluginVersion": "11.4.0", "targets": [ { "datasource": { @@ -953,7 +943,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(storage_container_metrics_bytes_read_chunk[$__interval])", + "expr": "rate(storage_container_metrics_bytes_read_chunk[$__rate_interval])", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -982,6 +972,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -1044,6 +1035,7 @@ "sort": "none" } }, + "pluginVersion": "11.4.0", "targets": [ { "datasource": { @@ -1051,7 +1043,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "sum(rate(storage_container_metrics_bytes_read_chunk[$__interval]))", + "expr": "sum(rate(storage_container_metrics_bytes_read_chunk[$__rate_interval]))", "fullMetaSearch": false, "includeNullMetadata": 
true, "instant": false, @@ -1080,6 +1072,7 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, + "barWidthFactor": 0.6, "drawStyle": "line", "fillOpacity": 0, "gradientMode": "none", @@ -1141,6 +1134,7 @@ "sort": "none" } }, + "pluginVersion": "11.4.0", "targets": [ { "datasource": { @@ -1148,7 +1142,7 @@ }, "disableTextWrap": false, "editorMode": "builder", - "expr": "rate(storage_container_metrics_num_read_chunk[$__interval])", + "expr": "rate(storage_container_metrics_num_read_chunk[$__rate_interval])", "fullMetaSearch": false, "includeNullMetadata": true, "instant": false, @@ -1162,20 +1156,21 @@ "type": "timeseries" } ], + "preload": false, "refresh": "", - "schemaVersion": 39, + "schemaVersion": 40, "tags": [], "templating": { "list": [] }, "time": { - "from": "now-7d", + "from": "now-5m", "to": "now" }, "timepicker": {}, "timezone": "browser", "title": "Datanode Chunk Read/Write Dashboard", "uid": "edj2lc6lfn5s0a", - "version": 4, + "version": 7, "weekStart": "" } \ No newline at end of file