diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 23400b1a06b4..69a4d0411f2b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -221,6 +221,8 @@ public final class ScmConfigKeys {
public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876;
public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877;
public static final String HDDS_DATANODE_DIR_KEY = "hdds.datanode.dir";
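+  /** Permissions (octal, e.g. "700", or symbolic, e.g. "rwx------") applied to datanode data directories. */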
+ public static final String HDDS_DATANODE_DATA_DIR_PERMISSIONS =
+ "hdds.datanode.data.dir.permissions";
public static final String HDDS_DATANODE_DIR_DU_RESERVED =
"hdds.datanode.dir.du.reserved";
public static final String HDDS_DATANODE_DIR_DU_RESERVED_PERCENT =
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index d076e1e78d33..d757c53a16f8 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -178,6 +178,14 @@
      tagged explicitly.
    </description>
  </property>
+  <property>
+    <name>hdds.datanode.data.dir.permissions</name>
+    <value>700</value>
+    <description>Permissions for the datanode data directories where actual file blocks are stored.
+      The permissions can be either octal or symbolic. If the permissions are not set
+      then the default value of 700 will be used.
+    </description>
+  </property>
  <property>
    <name>hdds.datanode.container.db.dir</name>
@@ -674,11 +682,11 @@
  <property>
    <name>ozone.om.db.dirs.permissions</name>
-    <value>750</value>
+    <value>700</value>
    <description>Permissions for the metadata directories for Ozone Manager. The
      permissions have to be octal or symbolic. If the default permissions are not set
-      then the default value of 750 will be used.
+      then the default value of 700 will be used.
    </description>
  </property>
@@ -705,7 +713,7 @@
  <property>
    <name>ozone.metadata.dirs.permissions</name>
-    <value>750</value>
+    <value>700</value>
    <description>Permissions for the metadata directories for fallback location for SCM, OM, Recon and DataNodes
      to store their metadata. The permissions have to be octal or symbolic. This is the fallback used in case the
@@ -750,11 +758,11 @@
  <property>
    <name>ozone.scm.db.dirs.permissions</name>
-    <value>750</value>
+    <value>700</value>
    <description>Permissions for the metadata directories for Storage Container Manager. The
      permissions can either be octal or symbolic. If the default permissions are not set
-      then the default value of 750 will be used.
+      then the default value of 700 will be used.
    </description>
  </property>
@@ -3337,11 +3345,11 @@
  <property>
    <name>ozone.recon.db.dirs.permissions</name>
-    <value>750</value>
+    <value>700</value>
    <description>Permissions for the metadata directories for Recon. The
      permissions can either be octal or symbolic. If the default permissions are not set
-      then the default value of 750 will be used.
+      then the default value of 700 will be used.
    </description>
  </property>
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
index 65d2fb70166c..17159e3fdff8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
@@ -33,6 +33,8 @@
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.server.ServerUtils;
import org.apache.hadoop.hdds.utils.HddsServerUtil;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
@@ -179,6 +181,15 @@ private void initializeVolumeSet() throws IOException {
throw new IOException("Failed to create storage dir " +
volume.getStorageDir());
}
+
+ // Ensure permissions are set on the storage directory.
+ // (Permissions are also set in StorageVolume.initializeImpl(), but this
+ // covers the case where the directory already existed from a previous
+ // run with incorrect permissions.)
+ if (volumeType == StorageVolume.VolumeType.DATA_VOLUME) {
+ ServerUtils.setDataDirectoryPermissions(volume.getStorageDir(), conf,
+ ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS);
+ }
volumeMap.put(volume.getStorageDir().getPath(), volume);
volumeStateMap.get(volume.getStorageType()).add(volume);
volumeHealthMetrics.incrementHealthyVolumes();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
index b39468318311..5260f8468930 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/StorageVolume.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.container.common.volume;
import static org.apache.hadoop.ozone.container.common.HDDSVolumeLayoutVersion.getLatestVersion;
+import static org.apache.hadoop.ozone.container.common.volume.HddsVolume.HDDS_VOLUME_DIR;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
@@ -40,6 +41,8 @@
import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
import org.apache.hadoop.hdds.fs.SpaceUsageCheckParams;
import org.apache.hadoop.hdds.fs.SpaceUsageSource;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.server.ServerUtils;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.datanode.checker.Checkable;
import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
@@ -226,16 +229,22 @@ protected void initializeImpl() throws IOException {
if (!getStorageDir().mkdirs()) {
throw new IOException("Cannot create directory " + getStorageDir());
}
+ // Set permissions on storage directory (e.g., hdds subdirectory)
+ setStorageDirPermissions();
setState(VolumeState.NOT_FORMATTED);
createVersionFile();
break;
case NOT_FORMATTED:
// Version File does not exist. Create it.
+ // Ensure permissions are correct even if the directory already existed
+ setStorageDirPermissions();
createVersionFile();
break;
case NOT_INITIALIZED:
// Version File exists.
// Verify its correctness and update property fields.
+ // Ensure permissions are correct even if the directory already existed
+ setStorageDirPermissions();
readVersionFile();
setState(VolumeState.NORMAL);
break;
@@ -461,6 +470,10 @@ public boolean getFailedVolume() {
public StorageType getStorageType() {
return this.storageType;
}
+
+ public String getStorageDirStr() {
+ return this.storageDirStr;
+ }
}
public String getVolumeRootDir() {
@@ -768,6 +781,15 @@ private static SpaceUsageCheckParams getSpaceUsageCheckParams(Builder b, Supplie
throw new IOException("Unable to create the volume root dir at " + root);
}
+ // Set permissions on volume root directory immediately after creation/check
+ // (for data volumes, we want to ensure the root has secure permissions,
+ // even if the directory already existed from a previous run)
+ // This follows the same pattern as metadata directories in getDirectoryFromConfig()
+ if (b.conf != null && root.exists() && HDDS_VOLUME_DIR.equals(b.getStorageDirStr())) {
+ ServerUtils.setDataDirectoryPermissions(root, b.conf,
+ ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS);
+ }
+
SpaceUsageCheckFactory usageCheckFactory = b.usageCheckFactory;
if (usageCheckFactory == null) {
usageCheckFactory = SpaceUsageCheckFactory.create(b.conf);
@@ -775,4 +797,15 @@ private static SpaceUsageCheckParams getSpaceUsageCheckParams(Builder b, Supplie
return usageCheckFactory.paramsFor(root, exclusionProvider);
}
+
+ /**
+ * Sets permissions on the storage directory (e.g., hdds subdirectory).
+ */
+ private void setStorageDirPermissions() {
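+ // Only data volumes are affected: their storage dir is named "hdds"
+ // (HDDS_VOLUME_DIR); other volume types keep their existing permissions.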
+ if (conf != null && getStorageDir().exists()
+ && HDDS_VOLUME_DIR.equals(getStorageDir().getName())) {
+ ServerUtils.setDataDirectoryPermissions(getStorageDir(), conf,
+ ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS);
+ }
+ }
}
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
index 0bd576732a0b..1dd927b6f481 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
@@ -31,10 +31,14 @@
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
+import java.nio.file.attribute.PosixFilePermission;
+import java.nio.file.attribute.PosixFilePermissions;
import java.time.Duration;
import java.util.Properties;
+import java.util.Set;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Stream;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -56,6 +60,9 @@
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
+import org.junit.jupiter.params.ParameterizedTest;
+import org.junit.jupiter.params.provider.Arguments;
+import org.junit.jupiter.params.provider.MethodSource;
/**
* Unit tests for {@link HddsVolume}.
@@ -605,4 +612,36 @@ private MutableVolumeSet createDbVolumeSet() throws IOException {
dbVolumeSet.getVolumesList().get(0).createWorkingDir(CLUSTER_ID, null);
return dbVolumeSet;
}
+
+ private static Stream<Arguments> dataDirectoryPermissionsProvider() {
+ return Stream.of(
+ Arguments.of("700", "rwx------"),
+ Arguments.of("750", "rwxr-x---"),
+ Arguments.of("755", "rwxr-xr-x")
+ );
+ }
+
+ @ParameterizedTest
+ @MethodSource("dataDirectoryPermissionsProvider")
+ public void testDataDirectoryPermissions(String permissionValue,
+ String expectedPermissionString) throws Exception {
+ // Set permission configuration
+ CONF.set(ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS, permissionValue);
+
+ // Create a new volume
+ StorageVolume volume = volumeBuilder.build();
+ volume.format(CLUSTER_ID);
+
+ // Verify storage directory (hdds subdirectory) has correct permissions
+ File storageDir = volume.getStorageDir();
+ assertTrue(storageDir.exists());
+ Path storageDirPath = storageDir.toPath();
+ Set<PosixFilePermission> expectedPermissions =
+ PosixFilePermissions.fromString(expectedPermissionString);
+ Set<PosixFilePermission> actualPermissions =
+ Files.getPosixFilePermissions(storageDirPath);
+
+ assertEquals(expectedPermissions, actualPermissions,
+ "Storage directory should have " + permissionValue + " permissions");
+ }
}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java
index d6864d4f15db..481df512496f 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java
@@ -254,6 +254,45 @@ public static String getPermissions(String key, ConfigurationSource conf) {
"Invalid configuration value for key: " + key);
}
+ /**
+ * Sets directory permissions based on configuration.
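+ * The configured value may be octal (e.g. "700") or symbolic (e.g. "rwx------").
+ * Best-effort: failures are logged as warnings rather than thrown, so volume
+ * initialization is not blocked.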
+ *
+ * @param dir The directory to set permissions on
+ * @param conf Configuration source
+ * @param permissionConfigKey The configuration key for permissions
+ */
+ public static void setDataDirectoryPermissions(File dir, ConfigurationSource conf,
+ String permissionConfigKey) {
+ if (dir == null || !dir.exists()) {
+ return;
+ }
+ // Don't attempt to set permissions on non-writable directories
+ // This allows volume initialization to fail naturally for read-only volumes
+ if (!dir.canWrite()) {
+ LOG.debug("Skipping permission setting for non-writable directory {}", dir);
+ return;
+ }
+
+ try {
+ String permissionValue = conf.get(permissionConfigKey);
+ if (permissionValue == null) {
+ LOG.warn("Permission configuration key {} not found, skipping permission " +
+ "setting for directory {}", permissionConfigKey, dir);
+ return;
+ }
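+ // getSymbolicPermission normalizes either form to symbolic,
+ // e.g. "700" becomes "rwx------".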
+ String symbolicPermission = getSymbolicPermission(permissionValue);
+ Path path = dir.toPath();
+ Files.setPosixFilePermissions(path,
+ PosixFilePermissions.fromString(symbolicPermission));
+ LOG.debug("Set permissions {} on directory {} using config key {}",
+ symbolicPermission, dir, permissionConfigKey);
+ } catch (Exception e) {
+ LOG.warn("Failed to set directory permissions for {}: {}", dir,
+ e.getMessage());
+ // Don't throw exception to avoid failing startup, just log warning
+ }
+ }
+
/**
* Checks and creates Ozone Metadir Path if it does not exist.
*
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java
index 1eb633f455ed..7a50a09cb21f 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java
@@ -80,6 +80,27 @@ public void testGetPermissions() {
}
+ @Test
+ public void testGetPermissionsWithDefaults() {
+ // Create an OzoneConfiguration without explicitly setting permissions
+ // Should fall back to default values from ozone-default.xml (700)
+ OzoneConfiguration conf = new OzoneConfiguration();
+
+ // Test getPermissions for different config names and verify they use defaults
+ assertEquals("700",
+ ServerUtils.getPermissions(ReconConfigKeys.OZONE_RECON_DB_DIR, conf),
+ "Should use default 700 for Recon DB dirs");
+ assertEquals("700",
+ ServerUtils.getPermissions(ScmConfigKeys.OZONE_SCM_DB_DIRS, conf),
+ "Should use default 700 for SCM DB dirs");
+ assertEquals("700",
+ ServerUtils.getPermissions(OzoneConfigKeys.OZONE_METADATA_DIRS, conf),
+ "Should use default 700 for metadata dirs");
+ assertEquals("700",
+ ServerUtils.getPermissions(OzoneConfigKeys.OZONE_OM_DB_DIRS, conf),
+ "Should use default 700 for OM DB dirs");
+ }
+
@Test
public void testGetDirectoryFromConfigWithOctalPermissions()
throws IOException {
@@ -439,4 +460,107 @@ public void testColocatedComponentsWithSharedMetadataDirForSnapshots() {
FileUtils.deleteQuietly(metaDir);
}
}
+
+ @Test
+ public void testSetDataDirectoryPermissionsWithOctal() throws IOException {
+ File testDir = new File(folder.toFile(), "testDir");
+ assertTrue(testDir.mkdirs());
+
+ OzoneConfiguration conf = new OzoneConfiguration();
+ conf.set(ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS, "700");
+
+ ServerUtils.setDataDirectoryPermissions(testDir, conf,
+ ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS);
+
+ Path dirPath = testDir.toPath();
+ Set<PosixFilePermission> expectedPermissions =
+ PosixFilePermissions.fromString("rwx------");
+ Set<PosixFilePermission> actualPermissions =
+ Files.getPosixFilePermissions(dirPath);
+
+ assertEquals(expectedPermissions, actualPermissions);
+ }
+
+ @Test
+ public void testSetDataDirectoryPermissionsWithSymbolic() throws IOException {
+ File testDir = new File(folder.toFile(), "testDir2");
+ assertTrue(testDir.mkdirs());
+
+ OzoneConfiguration conf = new OzoneConfiguration();
+ conf.set(ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS, "rwx------");
+
+ ServerUtils.setDataDirectoryPermissions(testDir, conf,
+ ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS);
+
+ Path dirPath = testDir.toPath();
+ Set<PosixFilePermission> expectedPermissions =
+ PosixFilePermissions.fromString("rwx------");
+ Set<PosixFilePermission> actualPermissions =
+ Files.getPosixFilePermissions(dirPath);
+
+ assertEquals(expectedPermissions, actualPermissions);
+ }
+
+ @Test
+ public void testSetDataDirectoryPermissionsWithDefaultValue() throws IOException {
+ File testDir = new File(folder.toFile(), "testDir3");
+ assertTrue(testDir.mkdirs());
+
+ OzoneConfiguration conf = new OzoneConfiguration();
+ // Don't explicitly set the permission config key - should use default value (700)
+
+ // Should use default value from ozone-default.xml (700)
+ ServerUtils.setDataDirectoryPermissions(testDir, conf,
+ ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS);
+
+ // Permissions should be set to default value (700 = rwx------)
+ Path dirPath = testDir.toPath();
+ Set<PosixFilePermission> expectedPermissions =
+ PosixFilePermissions.fromString("rwx------");
+ Set<PosixFilePermission> actualPermissions =
+ Files.getPosixFilePermissions(dirPath);
+ assertEquals(expectedPermissions, actualPermissions);
+ }
+
+ @Test
+ public void testSetDataDirectoryPermissionsWithNonExistentDir() {
+ File nonExistentDir = new File(folder.toFile(), "nonExistent");
+ OzoneConfiguration conf = new OzoneConfiguration();
+ conf.set(ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS, "700");
+
+ // Should not throw exception for non-existent directory
+ ServerUtils.setDataDirectoryPermissions(nonExistentDir, conf,
+ ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS);
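+
+ // The call should be a no-op: the directory must still not exist.
+ assertFalse(nonExistentDir.exists());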
+ }
+
+ @Test
+ public void testSetDataDirectoryPermissionsSkipsReadOnlyDir() throws IOException {
+ // Create a directory and set it to read-only
+ File readOnlyDir = new File(folder.toFile(), "readOnlyDir");
+ assertTrue(readOnlyDir.mkdirs());
+
+ // Set initial permissions and make it read-only
+ Path dirPath = readOnlyDir.toPath();
+ Set<PosixFilePermission> readOnlyPermissions =
+ PosixFilePermissions.fromString("r-xr-xr-x");
+ Files.setPosixFilePermissions(dirPath, readOnlyPermissions);
+
+ // Verify directory is read-only
+ assertFalse(readOnlyDir.canWrite());
+
+ // Configure system to use 700 permissions
+ OzoneConfiguration conf = new OzoneConfiguration();
+ conf.set(ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS, "700");
+
+ // Call setDataDirectoryPermissions on read-only directory
+ // Should skip permission setting and not throw exception
+ ServerUtils.setDataDirectoryPermissions(readOnlyDir, conf,
+ ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS);
+
+ // Verify permissions were NOT changed (still read-only)
+ Set<PosixFilePermission> actualPermissions =
+ Files.getPosixFilePermissions(dirPath);
+ assertEquals(readOnlyPermissions, actualPermissions);
+ assertFalse(readOnlyDir.canWrite());
+ }
}