From f08f32f16e9c4b9823d6c9da3f793ce837dc0f1f Mon Sep 17 00:00:00 2001 From: Gargi Jaiswal Date: Fri, 6 Feb 2026 15:18:16 +0530 Subject: [PATCH 1/3] set default permissions as 700 for ozone metadata and data directory --- .../apache/hadoop/hdds/scm/ScmConfigKeys.java | 2 + .../src/main/resources/ozone-default.xml | 22 ++++-- .../common/volume/MutableVolumeSet.java | 11 +++ .../common/volume/StorageVolume.java | 32 +++++++++ .../common/volume/TestHddsVolume.java | 39 ++++++++++ .../hadoop/hdds/server/ServerUtils.java | 32 +++++++++ .../hadoop/hdds/server/TestServerUtils.java | 72 +++++++++++++++++++ 7 files changed, 203 insertions(+), 7 deletions(-) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java index 23400b1a06b4..69a4d0411f2b 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java @@ -221,6 +221,8 @@ public final class ScmConfigKeys { public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876; public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877; public static final String HDDS_DATANODE_DIR_KEY = "hdds.datanode.dir"; + public static final String HDDS_DATANODE_DATA_DIR_PERMISSIONS = + "hdds.datanode.data.dir.permissions"; public static final String HDDS_DATANODE_DIR_DU_RESERVED = "hdds.datanode.dir.du.reserved"; public static final String HDDS_DATANODE_DIR_DU_RESERVED_PERCENT = diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml index d076e1e78d33..d757c53a16f8 100644 --- a/hadoop-hdds/common/src/main/resources/ozone-default.xml +++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml @@ -178,6 +178,14 @@ tagged explicitly. 
+ + hdds.datanode.data.dir.permissions + 700 + Permissions for the datanode data directories where actual file blocks are stored. + The permissions can be either octal or symbolic. If the default permissions are not set + then the default value of 700 will be used. + + hdds.datanode.container.db.dir @@ -674,11 +682,11 @@ ozone.om.db.dirs.permissions - 750 + 700 Permissions for the metadata directories for Ozone Manager. The permissions have to be octal or symbolic. If the default permissions are not set - then the default value of 750 will be used. + then the default value of 700 will be used. @@ -705,7 +713,7 @@ ozone.metadata.dirs.permissions - 750 + 700 Permissions for the metadata directories for fallback location for SCM, OM, Recon and DataNodes to store their metadata. The permissions have to be octal or symbolic. This is the fallback used in case the @@ -750,11 +758,11 @@ ozone.scm.db.dirs.permissions - 750 + 700 Permissions for the metadata directories for Storage Container Manager. The permissions can either be octal or symbolic. If the default permissions are not set - then the default value of 750 will be used. + then the default value of 700 will be used. @@ -3337,11 +3345,11 @@ ozone.recon.db.dirs.permissions - 750 + 700 Permissions for the metadata directories for Recon. The permissions can either be octal or symbolic. If the default permissions are not set - then the default value of 750 will be used. + then the default value of 700 will be used. 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java index 65d2fb70166c..17159e3fdff8 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java @@ -33,6 +33,8 @@ import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdds.conf.ConfigurationSource; import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.hdds.utils.HddsServerUtil; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport; @@ -179,6 +181,15 @@ private void initializeVolumeSet() throws IOException { throw new IOException("Failed to create storage dir " + volume.getStorageDir()); } + + // Ensure permissions are set on the storage directory + // (permissions are also set in StorageVolume.initializeImpl(), + // but this ensures they're set even if directory already existed + // from a previous run with incorrect permissions) + if (volumeType == StorageVolume.VolumeType.DATA_VOLUME) { + ServerUtils.setDataDirectoryPermissions(volume.getStorageDir(), conf, + ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS); + } volumeMap.put(volume.getStorageDir().getPath(), volume); volumeStateMap.get(volume.getStorageType()).add(volume); volumeHealthMetrics.incrementHealthyVolumes(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java index 
b39468318311..38e0eb7d536b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java @@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.container.common.volume; import static org.apache.hadoop.ozone.container.common.HDDSVolumeLayoutVersion.getLatestVersion; +import static org.apache.hadoop.ozone.container.common.volume.HddsVolume.HDDS_VOLUME_DIR; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; @@ -40,6 +41,8 @@ import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory; import org.apache.hadoop.hdds.fs.SpaceUsageCheckParams; import org.apache.hadoop.hdds.fs.SpaceUsageSource; +import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.server.ServerUtils; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; import org.apache.hadoop.hdfs.server.datanode.checker.Checkable; import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult; @@ -226,16 +229,22 @@ protected void initializeImpl() throws IOException { if (!getStorageDir().mkdirs()) { throw new IOException("Cannot create directory " + getStorageDir()); } + // Set permissions on storage directory (e.g., hdds subdirectory) + setStorageDirPermissions(); setState(VolumeState.NOT_FORMATTED); createVersionFile(); break; case NOT_FORMATTED: // Version File does not exist. Create it. + // Ensure permissions are correct even if directory already existed + setStorageDirPermissions(); createVersionFile(); break; case NOT_INITIALIZED: // Version File exists. // Verify its correctness and update property fields. 
+ // Ensure permissions are correct even if directory already existed + setStorageDirPermissions(); readVersionFile(); setState(VolumeState.NORMAL); break; @@ -461,6 +470,10 @@ public boolean getFailedVolume() { public StorageType getStorageType() { return this.storageType; } + + public String getStorageDirStr() { + return this.storageDirStr; + } } public String getVolumeRootDir() { @@ -768,6 +781,15 @@ private static SpaceUsageCheckParams getSpaceUsageCheckParams(Builder b, Supplie throw new IOException("Unable to create the volume root dir at " + root); } + // Set permissions on volume root directory immediately after creation/check + // (for data volumes, we want to ensure the root has secure permissions, + // even if the directory already existed from a previous run) + // This follows the same pattern as metadata directories in getDirectoryFromConfig() + if (b.conf != null && root.exists() && HDDS_VOLUME_DIR.equals(b.getStorageDirStr())) { + ServerUtils.setDataDirectoryPermissions(root, b.conf, + ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS); + } + SpaceUsageCheckFactory usageCheckFactory = b.usageCheckFactory; if (usageCheckFactory == null) { usageCheckFactory = SpaceUsageCheckFactory.create(b.conf); @@ -775,4 +797,14 @@ private static SpaceUsageCheckParams getSpaceUsageCheckParams(Builder b, Supplie return usageCheckFactory.paramsFor(root, exclusionProvider); } + + /** + * Sets permissions on the storage directory (e.g., hdds subdirectory). 
+ */ + private void setStorageDirPermissions() { + if (conf != null && getStorageDir().exists()) { + ServerUtils.setDataDirectoryPermissions(getStorageDir(), conf, + ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS); + } + } } diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java index 0bd576732a0b..1dd927b6f481 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java @@ -31,10 +31,14 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; +import java.nio.file.attribute.PosixFilePermission; +import java.nio.file.attribute.PosixFilePermissions; import java.time.Duration; import java.util.Properties; +import java.util.Set; import java.util.UUID; import java.util.concurrent.atomic.AtomicLong; +import java.util.stream.Stream; import org.apache.commons.io.FileUtils; import org.apache.hadoop.fs.StorageType; import org.apache.hadoop.hdds.conf.OzoneConfiguration; @@ -56,6 +60,9 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.io.TempDir; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; /** * Unit tests for {@link HddsVolume}. 
@@ -605,4 +612,36 @@ private MutableVolumeSet createDbVolumeSet() throws IOException { dbVolumeSet.getVolumesList().get(0).createWorkingDir(CLUSTER_ID, null); return dbVolumeSet; } + + private static Stream<Arguments> dataDirectoryPermissionsProvider() { + return Stream.of( + Arguments.of("700", "rwx------"), + Arguments.of("750", "rwxr-x---"), + Arguments.of("755", "rwxr-xr-x") + ); + } + + @ParameterizedTest + @MethodSource("dataDirectoryPermissionsProvider") + public void testDataDirectoryPermissions(String permissionValue, + String expectedPermissionString) throws Exception { + // Set permission configuration + CONF.set(ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS, permissionValue); + + // Create a new volume + StorageVolume volume = volumeBuilder.build(); + volume.format(CLUSTER_ID); + + // Verify storage directory (hdds subdirectory) has correct permissions + File storageDir = volume.getStorageDir(); + assertTrue(storageDir.exists()); + Path storageDirPath = storageDir.toPath(); + Set<PosixFilePermission> expectedPermissions = + PosixFilePermissions.fromString(expectedPermissionString); + Set<PosixFilePermission> actualPermissions = + Files.getPosixFilePermissions(storageDirPath); + + assertEquals(expectedPermissions, actualPermissions, + "Storage directory should have " + permissionValue + " permissions"); + } } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java index d6864d4f15db..20b9e915af7d 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java @@ -254,6 +254,38 @@ public static String getPermissions(String key, ConfigurationSource conf) { "Invalid configuration value for key: " + key); } + /** + * Sets directory permissions based on configuration.
+ * + * @param dir The directory to set permissions on + * @param conf Configuration source + * @param permissionConfigKey The configuration key for permissions + * @throws RuntimeException If setting permissions fails + */ + public static void setDataDirectoryPermissions(File dir, ConfigurationSource conf, + String permissionConfigKey) { + if (dir == null || !dir.exists()) { + return; + } + try { + String permissionValue = conf.get(permissionConfigKey); + if (permissionValue == null) { + LOG.warn("Permission configuration key {} not found, skipping permission " + + "setting for directory {}", permissionConfigKey, dir); + return; + } + String symbolicPermission = getSymbolicPermission(permissionValue); + Path path = dir.toPath(); + Files.setPosixFilePermissions(path, + PosixFilePermissions.fromString(symbolicPermission)); + LOG.debug("Set permissions {} on directory {}", symbolicPermission, dir); + } catch (Exception e) { + LOG.warn("Failed to set directory permissions for {}: {}", dir, + e.getMessage()); + // Don't throw exception to avoid failing startup, just log warning + } + } + /** * Checks and creates Ozone Metadir Path if it does not exist. 
* diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java index 1eb633f455ed..39c618421b01 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java @@ -439,4 +439,76 @@ public void testColocatedComponentsWithSharedMetadataDirForSnapshots() { FileUtils.deleteQuietly(metaDir); } } + + @Test + public void testSetDataDirectoryPermissionsWithOctal() throws IOException { + File testDir = new File(folder.toFile(), "testDir"); + assertTrue(testDir.mkdirs()); + + OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS, "700"); + + ServerUtils.setDataDirectoryPermissions(testDir, conf, + ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS); + + Path dirPath = testDir.toPath(); + Set<PosixFilePermission> expectedPermissions = + PosixFilePermissions.fromString("rwx------"); + Set<PosixFilePermission> actualPermissions = + Files.getPosixFilePermissions(dirPath); + + assertEquals(expectedPermissions, actualPermissions); + } + + @Test + public void testSetDataDirectoryPermissionsWithSymbolic() throws IOException { + File testDir = new File(folder.toFile(), "testDir2"); + assertTrue(testDir.mkdirs()); + + OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS, "rwx------"); + + ServerUtils.setDataDirectoryPermissions(testDir, conf, + ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS); + + Path dirPath = testDir.toPath(); + Set<PosixFilePermission> expectedPermissions = + PosixFilePermissions.fromString("rwx------"); + Set<PosixFilePermission> actualPermissions = + Files.getPosixFilePermissions(dirPath); + + assertEquals(expectedPermissions, actualPermissions); + } + + @Test + public void testSetDataDirectoryPermissionsWithDefaultValue() throws IOException { + File testDir = new
File(folder.toFile(), "testDir3"); + assertTrue(testDir.mkdirs()); + + OzoneConfiguration conf = new OzoneConfiguration(); + // Don't explicitly set the permission config key - should use default value (700) + + // Should use default value from ozone-default.xml (700) + ServerUtils.setDataDirectoryPermissions(testDir, conf, + ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS); + + // Permissions should be set to default value (700 = rwx------) + Path dirPath = testDir.toPath(); + Set<PosixFilePermission> expectedPermissions = + PosixFilePermissions.fromString("rwx------"); + Set<PosixFilePermission> actualPermissions = + Files.getPosixFilePermissions(dirPath); + assertEquals(expectedPermissions, actualPermissions); + } + + @Test + public void testSetDataDirectoryPermissionsWithNonExistentDir() { + File nonExistentDir = new File(folder.toFile(), "nonExistent"); + OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS, "700"); + + // Should not throw exception for non-existent directory + ServerUtils.setDataDirectoryPermissions(nonExistentDir, conf, + ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS); + } } From 524d913d65b5f60811ed14f49e23ad100a90a13f Mon Sep 17 00:00:00 2001 From: Gargi Jaiswal Date: Fri, 6 Feb 2026 16:54:38 +0530 Subject: [PATCH 2/3] skip setting permissions on readonly volumes --- .../hadoop/hdds/server/ServerUtils.java | 7 +++++ .../hadoop/hdds/server/TestServerUtils.java | 31 +++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java index 20b9e915af7d..bfe38c2cb236 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java @@ -267,6 +267,13 @@ public static void setDataDirectoryPermissions(File dir, ConfigurationSource con if (dir ==
null || !dir.exists()) { return; } + // Don't attempt to set permissions on non-writable directories + // This allows volume initialization to fail naturally for read-only volumes + if (!dir.canWrite()) { + LOG.debug("Skipping permission setting for non-writable directory {}", dir); + return; + } + try { String permissionValue = conf.get(permissionConfigKey); if (permissionValue == null) { diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java index 39c618421b01..56fb7eae6ca4 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java +++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java @@ -511,4 +511,35 @@ public void testSetDataDirectoryPermissionsWithNonExistentDir() { ServerUtils.setDataDirectoryPermissions(nonExistentDir, conf, ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS); } + + @Test + public void testSetDataDirectoryPermissionsSkipsReadOnlyDir() throws IOException { + // Create a directory and set it to read-only + File readOnlyDir = new File(folder.toFile(), "readOnlyDir"); + assertTrue(readOnlyDir.mkdirs()); + + // Set initial permissions and make it read-only + Path dirPath = readOnlyDir.toPath(); + Set<PosixFilePermission> readOnlyPermissions = + PosixFilePermissions.fromString("r-xr-xr-x"); + Files.setPosixFilePermissions(dirPath, readOnlyPermissions); + + // Verify directory is read-only + assertFalse(readOnlyDir.canWrite()); + + // Configure system to use 700 permissions + OzoneConfiguration conf = new OzoneConfiguration(); + conf.set(ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS, "700"); + + // Call setDataDirectoryPermissions on read-only directory + // Should skip permission setting and not throw exception + ServerUtils.setDataDirectoryPermissions(readOnlyDir, conf, + ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS); + + // Verify permissions were
NOT changed (still read-only) + Set<PosixFilePermission> actualPermissions = + Files.getPosixFilePermissions(dirPath); + assertEquals(readOnlyPermissions, actualPermissions); + assertFalse(readOnlyDir.canWrite()); + } } From 3ff06fadebef1f9c5094975144b126ff0119102c Mon Sep 17 00:00:00 2001 From: Gargi Jaiswal Date: Tue, 10 Feb 2026 11:29:35 +0530 Subject: [PATCH 3/3] address review comments --- .../common/volume/StorageVolume.java | 3 ++- .../hadoop/hdds/server/ServerUtils.java | 4 ++-- .../hadoop/hdds/server/TestServerUtils.java | 21 +++++++++++++++ 3 files changed, 25 insertions(+), 3 deletions(-) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java index 38e0eb7d536b..5260f8468930 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java @@ -802,7 +802,8 @@ private static SpaceUsageCheckParams getSpaceUsageCheckParams(Builder b, Supplie * Sets permissions on the storage directory (e.g., hdds subdirectory).
*/ private void setStorageDirPermissions() { - if (conf != null && getStorageDir().exists()) { + if (conf != null && getStorageDir().exists() + && HDDS_VOLUME_DIR.equals(getStorageDir().getName())) { ServerUtils.setDataDirectoryPermissions(getStorageDir(), conf, ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS); } diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java index bfe38c2cb236..481df512496f 100644 --- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java +++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java @@ -260,7 +260,6 @@ public static String getPermissions(String key, ConfigurationSource conf) { * @param dir The directory to set permissions on * @param conf Configuration source * @param permissionConfigKey The configuration key for permissions - * @throws RuntimeException If setting permissions fails */ public static void setDataDirectoryPermissions(File dir, ConfigurationSource conf, String permissionConfigKey) { @@ -285,7 +284,8 @@ public static void setDataDirectoryPermissions(File dir, ConfigurationSource con Path path = dir.toPath(); Files.setPosixFilePermissions(path, PosixFilePermissions.fromString(symbolicPermission)); - LOG.debug("Set permissions {} on directory {}", symbolicPermission, dir); + LOG.debug("Set permissions {} on directory {} using config key {}", + symbolicPermission, dir, permissionConfigKey); } catch (Exception e) { LOG.warn("Failed to set directory permissions for {}: {}", dir, e.getMessage()); diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java index 56fb7eae6ca4..7a50a09cb21f 100644 --- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java +++ 
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java @@ -80,6 +80,27 @@ public void testGetPermissions() { } + @Test + public void testGetPermissionsWithDefaults() { + // Create an OzoneConfiguration without explicitly setting permissions + // Should fall back to default values from ozone-default.xml (700) + OzoneConfiguration conf = new OzoneConfiguration(); + + // Test getPermissions for different config names and verify they use defaults + assertEquals("700", + ServerUtils.getPermissions(ReconConfigKeys.OZONE_RECON_DB_DIR, conf), + "Should use default 700 for Recon DB dirs"); + assertEquals("700", + ServerUtils.getPermissions(ScmConfigKeys.OZONE_SCM_DB_DIRS, conf), + "Should use default 700 for SCM DB dirs"); + assertEquals("700", + ServerUtils.getPermissions(OzoneConfigKeys.OZONE_METADATA_DIRS, conf), + "Should use default 700 for metadata dirs"); + assertEquals("700", + ServerUtils.getPermissions(OzoneConfigKeys.OZONE_OM_DB_DIRS, conf), + "Should use default 700 for OM DB dirs"); + } + @Test public void testGetDirectoryFromConfigWithOctalPermissions() throws IOException {