Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -221,6 +221,8 @@ public final class ScmConfigKeys {
public static final int OZONE_SCM_HTTP_BIND_PORT_DEFAULT = 9876;
public static final int OZONE_SCM_HTTPS_BIND_PORT_DEFAULT = 9877;
public static final String HDDS_DATANODE_DIR_KEY = "hdds.datanode.dir";
public static final String HDDS_DATANODE_DATA_DIR_PERMISSIONS =
"hdds.datanode.data.dir.permissions";
public static final String HDDS_DATANODE_DIR_DU_RESERVED =
"hdds.datanode.dir.du.reserved";
public static final String HDDS_DATANODE_DIR_DU_RESERVED_PERCENT =
Expand Down
22 changes: 15 additions & 7 deletions hadoop-hdds/common/src/main/resources/ozone-default.xml
Original file line number Diff line number Diff line change
Expand Up @@ -178,6 +178,14 @@
tagged explicitly.
</description>
</property>
<property>
<name>hdds.datanode.data.dir.permissions</name>
<value>700</value>
<description>Permissions for the datanode data directories where actual file blocks are stored.
The permissions can be either octal or symbolic. If this property is not set,
the default value of 700 will be used.
</description>
</property>
<property>
<name>hdds.datanode.container.db.dir</name>
<value/>
Expand Down Expand Up @@ -674,11 +682,11 @@
</property>
<property>
<name>ozone.om.db.dirs.permissions</name>
<value>750</value>
<value>700</value>
<description>
Permissions for the metadata directories for Ozone Manager. The
permissions have to be octal or symbolic. If the default permissions are not set
then the default value of 750 will be used.
then the default value of 700 will be used.
</description>
</property>
<property>
Expand All @@ -705,7 +713,7 @@
</property>
<property>
<name>ozone.metadata.dirs.permissions</name>
<value>750</value>
<value>700</value>
<description>
Permissions for the metadata directories for fallback location for SCM, OM, Recon and DataNodes
to store their metadata. The permissions have to be octal or symbolic. This is the fallback used in case the
Expand Down Expand Up @@ -750,11 +758,11 @@
</property>
<property>
<name>ozone.scm.db.dirs.permissions</name>
<value>750</value>
<value>700</value>
<description>
Permissions for the metadata directories for Storage Container Manager. The
permissions can either be octal or symbolic. If the default permissions are not set
then the default value of 750 will be used.
then the default value of 700 will be used.
</description>
</property>
<property>
Expand Down Expand Up @@ -3337,11 +3345,11 @@
</property>
<property>
<name>ozone.recon.db.dirs.permissions</name>
<value>750</value>
<value>700</value>
<description>
Permissions for the metadata directories for Recon. The
permissions can either be octal or symbolic. If the default permissions are not set
then the default value of 750 will be used.
then the default value of 700 will be used.
</description>
</property>
<property>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,8 @@
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.server.ServerUtils;
import org.apache.hadoop.hdds.utils.HddsServerUtil;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
Expand Down Expand Up @@ -179,6 +181,15 @@ private void initializeVolumeSet() throws IOException {
throw new IOException("Failed to create storage dir " +
volume.getStorageDir());
}

// Ensure permissions are set on the storage directory
// (permissions are also set in StorageVolume.initializeImpl(),
// but this ensures they're set even if directory already existed
// from a previous run with incorrect permissions)
if (volumeType == StorageVolume.VolumeType.DATA_VOLUME) {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is this required only for the data volume, or for the other volume types as well?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The volumeType == StorageVolume.VolumeType.DATA_VOLUME check is correct. Permission setting is only for data volumes.
META_VOLUME (MetadataVolume) - Stores Ratis metadata which is managed by Ratis.
DbVolume is for db instance storage. So not sure we can use same config hdds.datanode.dir.data.permissions over here as well. However the parent hdds/ directory is the HddsVolume storage dir and gets permissions from hdds.datanode.data.dir.permissions (default 700). So container.db is already protected by that parent hierarchy.

ServerUtils.setDataDirectoryPermissions(volume.getStorageDir(), conf,
ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS);
}
volumeMap.put(volume.getStorageDir().getPath(), volume);
volumeStateMap.get(volume.getStorageType()).add(volume);
volumeHealthMetrics.incrementHealthyVolumes();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.container.common.volume;

import static org.apache.hadoop.ozone.container.common.HDDSVolumeLayoutVersion.getLatestVersion;
import static org.apache.hadoop.ozone.container.common.volume.HddsVolume.HDDS_VOLUME_DIR;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
Expand All @@ -40,6 +41,8 @@
import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
import org.apache.hadoop.hdds.fs.SpaceUsageCheckParams;
import org.apache.hadoop.hdds.fs.SpaceUsageSource;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.server.ServerUtils;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.hdfs.server.datanode.checker.Checkable;
import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
Expand Down Expand Up @@ -226,16 +229,22 @@ protected void initializeImpl() throws IOException {
if (!getStorageDir().mkdirs()) {
throw new IOException("Cannot create directory " + getStorageDir());
}
// Set permissions on storage directory (e.g., hdds subdirectory)
setStorageDirPermissions();
setState(VolumeState.NOT_FORMATTED);
createVersionFile();
break;
case NOT_FORMATTED:
// Version File does not exist. Create it.
// Ensure permissions are correct even if directory already existed
setStorageDirPermissions();
createVersionFile();
break;
case NOT_INITIALIZED:
// Version File exists.
// Verify its correctness and update property fields.
// Ensure permissions are correct even if directory already existed
setStorageDirPermissions();
readVersionFile();
setState(VolumeState.NORMAL);
break;
Expand Down Expand Up @@ -461,6 +470,10 @@ public boolean getFailedVolume() {
/**
 * Returns the {@link StorageType} (e.g. DISK, SSD) configured for this volume.
 */
public StorageType getStorageType() {
return this.storageType;
}

/**
 * Returns the configured storage directory name for this builder
 * (e.g. the "hdds" subdirectory name). Used when constructing the volume
 * to decide whether data-directory permissions should be applied to the
 * volume root.
 */
public String getStorageDirStr() {
return this.storageDirStr;
}
}

public String getVolumeRootDir() {
Expand Down Expand Up @@ -768,11 +781,31 @@ private static SpaceUsageCheckParams getSpaceUsageCheckParams(Builder b, Supplie
throw new IOException("Unable to create the volume root dir at " + root);
}

// Set permissions on volume root directory immediately after creation/check
// (for data volumes, we want to ensure the root has secure permissions,
// even if the directory already existed from a previous run)
// This follows the same pattern as metadata directories in getDirectoryFromConfig()
if (b.conf != null && root.exists() && HDDS_VOLUME_DIR.equals(b.getStorageDirStr())) {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

this is called here also, and even during loading root also, check if this is duplicate OR for different path.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

These are not duplicates - they set permissions on different paths. Line 789 sets permissions on the volume root (e.g., /data/hdds1), while line 807 sets permissions on the storage directory (e.g., /data/hdds1/hdds). Both levels need secure permissions for defense-in-depth.

ServerUtils.setDataDirectoryPermissions(root, b.conf,
ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS);
}

SpaceUsageCheckFactory usageCheckFactory = b.usageCheckFactory;
if (usageCheckFactory == null) {
usageCheckFactory = SpaceUsageCheckFactory.create(b.conf);
}

return usageCheckFactory.paramsFor(root, exclusionProvider);
}

/**
 * Applies the configured data-directory permissions to this volume's
 * storage directory, but only when the directory is the hdds data
 * subdirectory and already exists on disk.
 */
private void setStorageDirPermissions() {
  if (conf == null) {
    return;
  }
  boolean isHddsDataDir = HDDS_VOLUME_DIR.equals(getStorageDir().getName());
  if (isHddsDataDir && getStorageDir().exists()) {
    ServerUtils.setDataDirectoryPermissions(getStorageDir(), conf,
        ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS);
  }
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -31,10 +31,14 @@
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.time.Duration;
import java.util.Properties;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Stream;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
Expand All @@ -56,6 +60,9 @@
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.Arguments;
import org.junit.jupiter.params.provider.MethodSource;

/**
* Unit tests for {@link HddsVolume}.
Expand Down Expand Up @@ -605,4 +612,36 @@ private MutableVolumeSet createDbVolumeSet() throws IOException {
dbVolumeSet.getVolumesList().get(0).createWorkingDir(CLUSTER_ID, null);
return dbVolumeSet;
}

/**
 * Supplies (configured permission value, expected POSIX symbolic form)
 * pairs for the data-directory permission test.
 */
private static Stream<Arguments> dataDirectoryPermissionsProvider() {
  return Stream.of(
          new String[] {"700", "rwx------"},
          new String[] {"750", "rwxr-x---"},
          new String[] {"755", "rwxr-xr-x"})
      .map(pair -> Arguments.of(pair[0], pair[1]));
}

@ParameterizedTest
@MethodSource("dataDirectoryPermissionsProvider")
public void testDataDirectoryPermissions(String permissionValue,
    String expectedPermissionString) throws Exception {
  // Set the permission configuration for this case. CONF is shared across
  // tests, so restore it in the finally block to avoid leaking this value
  // into other tests in the class.
  CONF.set(ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS, permissionValue);
  try {
    // Create and format a new volume; this creates the storage directory
    // and applies the configured permissions.
    StorageVolume volume = volumeBuilder.build();
    volume.format(CLUSTER_ID);

    // Verify the storage directory (hdds subdirectory) has the expected
    // POSIX permissions.
    File storageDir = volume.getStorageDir();
    assertTrue(storageDir.exists());
    Path storageDirPath = storageDir.toPath();
    Set<PosixFilePermission> expectedPermissions =
        PosixFilePermissions.fromString(expectedPermissionString);
    Set<PosixFilePermission> actualPermissions =
        Files.getPosixFilePermissions(storageDirPath);

    assertEquals(expectedPermissions, actualPermissions,
        "Storage directory should have " + permissionValue + " permissions");
  } finally {
    CONF.unset(ScmConfigKeys.HDDS_DATANODE_DATA_DIR_PERMISSIONS);
  }
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -254,6 +254,45 @@ public static String getPermissions(String key, ConfigurationSource conf) {
"Invalid configuration value for key: " + key);
}

/**
 * Sets POSIX permissions on the given directory based on a configured
 * permission value (octal such as "700", or symbolic such as "rwx------").
 *
 * <p>Failures are logged and swallowed so that service startup does not
 * fail solely because permissions could not be adjusted (for example on
 * non-POSIX file systems).
 *
 * @param dir The directory to set permissions on; ignored if null or absent
 * @param conf Configuration source; ignored if null
 * @param permissionConfigKey The configuration key for permissions
 */
public static void setDataDirectoryPermissions(File dir, ConfigurationSource conf,
    String permissionConfigKey) {
  // Guard against a null conf as well as a null/missing dir; previously a
  // null conf raised an NPE that was swallowed with a misleading warning.
  if (dir == null || conf == null || !dir.exists()) {
    return;
  }
  // Don't attempt to set permissions on non-writable directories.
  // This allows volume initialization to fail naturally for read-only volumes.
  if (!dir.canWrite()) {
    LOG.debug("Skipping permission setting for non-writable directory {}", dir);
    return;
  }

  try {
    String permissionValue = conf.get(permissionConfigKey);
    if (permissionValue == null) {
      LOG.warn("Permission configuration key {} not found, skipping permission " +
          "setting for directory {}", permissionConfigKey, dir);
      return;
    }
    String symbolicPermission = getSymbolicPermission(permissionValue);
    Path path = dir.toPath();
    Files.setPosixFilePermissions(path,
        PosixFilePermissions.fromString(symbolicPermission));
    LOG.debug("Set permissions {} on directory {} using config key {}",
        symbolicPermission, dir, permissionConfigKey);
  } catch (Exception e) {
    // Log the full exception so the root cause (e.g. an unsupported file
    // system) is not lost; deliberately do not rethrow, to avoid failing
    // startup over a permission adjustment.
    LOG.warn("Failed to set directory permissions for {}", dir, e);
  }
}

/**
* Checks and creates Ozone Metadir Path if it does not exist.
*
Expand Down
Loading