diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneStoragePolicy.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneStoragePolicy.java
new file mode 100644
index 000000000000..47f6601bb607
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/OzoneStoragePolicy.java
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.client;
+
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StoragePolicyProto;
+
+/**
+ * Enum defining different storage policies using StorageTier.
+ */
+public enum OzoneStoragePolicy implements StoragePolicy {
+
+ HOT("Hot", StorageTier.SSD, StorageTier.DISK),
+ WARM("Warm", StorageTier.DISK, StorageTier.EMPTY),
+ COLD("Cold", StorageTier.ARCHIVE, StorageTier.EMPTY);
+
+ private final String name;
+ private final StorageTier creationTier;
+ private final StorageTier creationFallbackTier;
+
+ OzoneStoragePolicy(String name, StorageTier creationTier,
+ StorageTier creationFallbackTier) {
+ this.name = name;
+ this.creationTier = creationTier;
+ this.creationFallbackTier = creationFallbackTier;
+ }
+
+ @Override
+ public String getName() {
+ return name;
+ }
+
+ @Override
+ public StorageTier getCreationTier() {
+ return creationTier;
+ }
+
+ @Override
+ public StorageTier getCreationFallbackTier() {
+ return creationFallbackTier;
+ }
+
+ /**
+ * Converts the current StoragePolicyType to its protobuf representation.
+ * @return the corresponding StoragePolicyProto.
+ */
+ public StoragePolicyProto toProto() {
+ switch (this) {
+ case HOT:
+ return StoragePolicyProto.HOT;
+ case WARM:
+ return StoragePolicyProto.WARM;
+ case COLD:
+ return StoragePolicyProto.COLD;
+ default:
+ throw new IllegalArgumentException(
+ "Error: StoragePolicyType not found, type=" + this);
+ }
+ }
+
+ /**
+ * Converts a protobuf StoragePolicyProto to the corresponding StoragePolicyType.
+ * @param proto the StoragePolicyProto to convert.
+ * @return the corresponding StoragePolicyType.
+ */
+ public static OzoneStoragePolicy fromProto(StoragePolicyProto proto) {
+ if (proto == null) {
+ throw new IllegalArgumentException("StoragePolicyProto cannot be null");
+ }
+ switch (proto) {
+ case HOT:
+ return HOT;
+ case WARM:
+ return WARM;
+ case COLD:
+ return COLD;
+ default:
+ throw new IllegalArgumentException("Error: StoragePolicyProto not found, proto=" + proto);
+ }
+ }
+
+ @Override
+ public String toString() {
+ return "OzoneStoragePolicy{"
+ + "name=" + name
+ + ", creationTier=" + creationTier
+ + ", creationFallbackTier=" + creationFallbackTier
+ + '}';
+ }
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/StoragePolicy.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/StoragePolicy.java
new file mode 100644
index 000000000000..237e3f96a955
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/StoragePolicy.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.client;
+
+/**
+ * Interface for storage policies that define how to select storage tiers for data replication.
+ *
+ * <p>A storage policy specifies the preferred and fallback storage tiers for placing
+ * block replicas.
+ */
+public interface StoragePolicy {
+
+  /**
+   * Retrieves the name of the storage policy.
+   *
+   * @return a string representing the name of the storage policy.
+   */
+  String getName();
+
+  /**
+   * Retrieves the preferred storage tier used for placing data replicas.
+   *
+   * <p>This is the preferred storage tier where new data is initially stored
+   * according to the specified storage policy.
+   *
+   * @return the default {@link StorageTier} used for data placement.
+   */
+  StorageTier getCreationTier();
+
+  /**
+   * Retrieves the fallback storage tier used during the creation of new data replicas.
+   *
+   * <p>If the preferred storage tier is unavailable, this fallback tier is used to
+   * ensure that new data can still be reliably stored.
+   *
+   * @return the fallback {@link StorageTier} used for data placement.
+   */
+  StorageTier getCreationFallbackTier();
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/StorageTier.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/StorageTier.java
new file mode 100644
index 000000000000..3869a9058140
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/StorageTier.java
@@ -0,0 +1,170 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.client;
+
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.EnumMap;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTierProto;
+
+/**
+ * Ozone specific storage tiers.
+ *
+ * <p>A tier maps to the {@link StorageType}s expected on each replica node.
+ * For a given {@link ReplicationConfig} the per-node expansion is precomputed
+ * once for the standard Ratis/Standalone replication configs and served from a
+ * static cache.
+ */
+public enum StorageTier {
+  SSD("SSD", StorageType.SSD),
+  DISK("DISK", StorageType.DISK),
+  ARCHIVE("ARCHIVE", StorageType.ARCHIVE),
+  EMPTY("EMPTY");
+
+  private final String tierName;
+  private final List<StorageType> storageTypes;
+  private final boolean uniformStorageType;
+  // Precomputed mapping: tier -> (replication config -> per-node storage types).
+  private static final Map<StorageTier, Map<ReplicationConfig, List<StorageType>>>
+      CACHE = new EnumMap<>(StorageTier.class);
+
+  // Constructor for a tier with no backing storage type (e.g. EMPTY).
+  StorageTier(String tierName) {
+    this.tierName = tierName;
+    this.storageTypes = Collections.emptyList();
+    this.uniformStorageType = true;
+  }
+
+  // Constructor for uniform storage tiers
+  StorageTier(String tierName, StorageType uniformStorageType) {
+    this.tierName = tierName;
+    this.storageTypes = Collections.singletonList(uniformStorageType);
+    this.uniformStorageType = true;
+  }
+
+  // Constructor for non-uniform storage tiers
+  StorageTier(String tierName, StorageType... storageTypes) {
+    this.tierName = tierName;
+    long distinctCount = Arrays.stream(storageTypes).distinct().count();
+    if (distinctCount <= 1) {
+      throw new IllegalArgumentException("StorageTier '" + tierName +
+          "' requires at least two different StorageType instances," +
+          " but only " + distinctCount +
+          " StorageType were provided.");
+    }
+    this.storageTypes = Arrays.asList(storageTypes);
+    this.uniformStorageType = false;
+  }
+
+  static {
+    // Precompute storage type mappings for each replication config
+    for (StorageTier tier : StorageTier.values()) {
+      if (!tier.isUniformStorageType()) {
+        // Non-uniform tiers have no precomputable expansion yet; calling
+        // computeStorageTypes here would throw and abort class initialization.
+        continue;
+      }
+      Map<ReplicationConfig, List<StorageType>> tierCache = new HashMap<>();
+      List<ReplicationConfig> replicationConfigs = Arrays.asList(
+          RatisReplicationConfig.getInstance(ONE),
+          RatisReplicationConfig.getInstance(THREE),
+          StandaloneReplicationConfig.getInstance(ONE),
+          StandaloneReplicationConfig.getInstance(THREE)
+      );
+
+      for (ReplicationConfig config : replicationConfigs) {
+        tierCache.put(config, tier.computeStorageTypes(config));
+      }
+      CACHE.put(tier, tierCache);
+    }
+  }
+
+  /**
+   * Converts this tier to its protobuf representation.
+   *
+   * @return the corresponding StorageTierProto.
+   * @throws IllegalStateException if this tier (e.g. EMPTY) has no proto mapping.
+   */
+  public StorageTierProto toProto() {
+    switch (this) {
+    case SSD:
+      return StorageTierProto.SSD_TIER;
+    case DISK:
+      return StorageTierProto.DISK_TIER;
+    case ARCHIVE:
+      return StorageTierProto.ARCHIVE_TIER;
+    default:
+      throw new IllegalStateException(
+          "Illegal StorageTier: " + this);
+    }
+  }
+
+  /**
+   * Converts a protobuf StorageTierProto to the corresponding StorageTier.
+   *
+   * @param tier the proto value to convert; must not be null.
+   * @return the corresponding StorageTier.
+   * @throws IllegalStateException if the proto value is null or has no mapping.
+   */
+  public static StorageTier fromProto(StorageTierProto tier) {
+    if (tier == null) {
+      throw new IllegalStateException("StorageTierProto cannot be null");
+    }
+    switch (tier) {
+    case SSD_TIER:
+      return SSD;
+    case DISK_TIER:
+      return DISK;
+    case ARCHIVE_TIER:
+      return ARCHIVE;
+    default:
+      throw new IllegalStateException(
+          "Illegal StorageTierProto: " + tier);
+    }
+  }
+
+  public String getTierName() {
+    return tierName;
+  }
+
+  public boolean isUniformStorageType() {
+    return uniformStorageType;
+  }
+
+  /**
+   * Computes the list of StorageTypes based on replication configuration.
+   *
+   * @param replicationConfig The replication configuration.
+   * @return The list of StorageTypes for the given tier and replication configuration.
+   */
+  private List<StorageType> computeStorageTypes(
+      ReplicationConfig replicationConfig) {
+    if (isUniformStorageType()) {
+      if (storageTypes.isEmpty()) {
+        return Collections.emptyList();
+      }
+      int numberOfNodes = replicationConfig.getRequiredNodes();
+      // nCopies is immutable, so callers cannot mutate the cached list.
+      return Collections.nCopies(numberOfNodes, storageTypes.get(0));
+    } else {
+      throw new UnsupportedOperationException(
+          "Unsupported non-uniform StorageTier: " + getTierName() +
+          " for ReplicationConfig: " + replicationConfig);
+    }
+  }
+
+  /**
+   * Maps a StorageTier to its corresponding StorageType based on replication type.
+   *
+   * @param replicationConfig The replication configuration.
+   * @return The list of StorageTypes corresponding to the given tier and replication configuration.
+   * @throws IllegalArgumentException if the replication configuration is not supported.
+   */
+  public List<StorageType> getStorageTypes(
+      ReplicationConfig replicationConfig) {
+    Map<ReplicationConfig, List<StorageType>> tierCache = CACHE.get(this);
+
+    if (tierCache != null) {
+      List<StorageType> cachedStorageType = tierCache.get(replicationConfig);
+      if (cachedStorageType != null) {
+        return cachedStorageType;
+      }
+    }
+
+    throw new IllegalArgumentException("Unsupported ReplicationConfig: " +
+        replicationConfig + " for StorageTier: " + getTierName());
+  }
+
+}
diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
index 59c46499a18c..a6933aec4350 100644
--- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
@@ -326,6 +326,18 @@ enum ReplicationFactor {
ZERO = 0; // Invalid Factor
}
+enum StorageTierProto {
+ DISK_TIER = 1;
+ SSD_TIER = 2;
+ ARCHIVE_TIER = 3;
+}
+
+enum StoragePolicyProto {
+ HOT = 1;
+ WARM = 2;
+ COLD = 3;
+}
+
message ECReplicationConfig {
required int32 data = 1;
required int32 parity = 2;
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/StorageType.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/StorageType.java
index 4f99e699ae8f..c455302427b5 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/StorageType.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/StorageType.java
@@ -21,7 +21,13 @@
/**
* Ozone specific storage types.
+ *
+ * @deprecated Ozone buckets should not have a StorageType attribute.
+ * This class is specific to {@code OzoneBucket} and is planned for removal in future versions.
+ * It is recommended to use {@link org.apache.hadoop.fs.StorageType} instead for
+ * any storage type requirements.
*/
+@Deprecated
public enum StorageType {
RAM_DISK,
SSD,
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/hdds/protocol/StoragePolicyTest.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/hdds/protocol/StoragePolicyTest.java
new file mode 100644
index 000000000000..428185a2d0f4
--- /dev/null
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/hdds/protocol/StoragePolicyTest.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.protocol;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import org.apache.hadoop.hdds.client.OzoneStoragePolicy;
+import org.apache.hadoop.hdds.client.StorageTier;
+import org.junit.jupiter.api.Test;
+
+class StoragePolicyTest {
+
+  @Test
+  void testStoragePolicyTypeProperties() {
+    // Each policy exposes its display name, creation tier, and fallback tier.
+    assertPolicy(OzoneStoragePolicy.HOT, "Hot", StorageTier.SSD, StorageTier.DISK);
+    assertPolicy(OzoneStoragePolicy.WARM, "Warm", StorageTier.DISK, StorageTier.EMPTY);
+    assertPolicy(OzoneStoragePolicy.COLD, "Cold", StorageTier.ARCHIVE, StorageTier.EMPTY);
+  }
+
+  // Shared assertion for one policy's name/tier/fallback triple.
+  private static void assertPolicy(OzoneStoragePolicy policy, String expectedName,
+      StorageTier expectedTier, StorageTier expectedFallback) {
+    assertEquals(expectedName, policy.getName());
+    assertEquals(expectedTier, policy.getCreationTier());
+    assertEquals(expectedFallback, policy.getCreationFallbackTier());
+  }
+}
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/hdds/protocol/StorageTierTest.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/hdds/protocol/StorageTierTest.java
new file mode 100644
index 000000000000..58907bc26509
--- /dev/null
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/hdds/protocol/StorageTierTest.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.protocol;
+
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.THREE;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+import java.util.Arrays;
+import java.util.List;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.client.StandaloneReplicationConfig;
+import org.apache.hadoop.hdds.client.StorageTier;
+import org.junit.jupiter.api.Test;
+
+class StorageTierTest {
+
+  @Test
+  void testUniformStorageType() {
+    assertTrue(StorageTier.SSD.isUniformStorageType());
+    assertTrue(StorageTier.DISK.isUniformStorageType());
+    assertTrue(StorageTier.ARCHIVE.isUniformStorageType());
+  }
+
+  @Test
+  void testGetStorageTypesWithReplicationConfig() {
+    ReplicationConfig ratisOne = RatisReplicationConfig.getInstance(ONE);
+    ReplicationConfig ratisThree = RatisReplicationConfig.getInstance(THREE);
+    ReplicationConfig standaloneOne =
+        StandaloneReplicationConfig.getInstance(ONE);
+    ReplicationConfig standaloneThree =
+        StandaloneReplicationConfig.getInstance(THREE);
+
+    // Assert uniform storage types
+    Arrays.asList(StorageTier.SSD, StorageTier.DISK, StorageTier.ARCHIVE, StorageTier.EMPTY)
+        .forEach(tier -> assertTrue(tier.isUniformStorageType()));
+
+    for (StorageTier tier : StorageTier.values()) {
+      if (!tier.isUniformStorageType()) {
+        // Skip only this tier; a 'return' here would silently end the test
+        // early and skip all remaining tiers.
+        continue;
+      }
+      for (ReplicationConfig replicationConfig : Arrays.asList(ratisOne, ratisThree,
+          standaloneOne, standaloneThree)) {
+        List<StorageType> storageTypes = tier.getStorageTypes(replicationConfig);
+        if (tier.equals(StorageTier.EMPTY)) {
+          // EMPTY has no backing storage type, so it expands to nothing.
+          assertEquals(0, storageTypes.size());
+        } else {
+          assertStorageTypes(tier, storageTypes, replicationConfig.getRequiredNodes());
+        }
+      }
+    }
+  }
+
+  // Verifies one storage type per required node, all matching the tier's type.
+  private void assertStorageTypes(StorageTier tier, List<StorageType> storageTypes,
+      int expectedSize) {
+    assertEquals(expectedSize, storageTypes.size());
+    StorageType expectedType = null;
+    switch (tier) {
+    case SSD:
+      expectedType = StorageType.SSD;
+      break;
+    case DISK:
+      expectedType = StorageType.DISK;
+      break;
+    case ARCHIVE:
+      expectedType = StorageType.ARCHIVE;
+      break;
+    default:
+      break;
+    }
+    if (expectedType != null) {
+      for (StorageType storageType : storageTypes) {
+        assertEquals(expectedType, storageType);
+      }
+    }
+  }
+}
diff --git a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
index 22c71cc29852..3875ed30979a 100644
--- a/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
+++ b/hadoop-ozone/interface-client/src/main/proto/OmClientProtocol.proto
@@ -769,7 +769,7 @@ message BucketInfo {
required string bucketName = 2;
repeated OzoneAclInfo acls = 3;
required bool isVersionEnabled = 4 [default = false];
- required hadoop.hdds.StorageTypeProto storageType = 5 [default = DISK];
+ required hadoop.hdds.StorageTypeProto storageType = 5 [deprecated = true, default = DISK];
optional uint64 creationTime = 6;
repeated hadoop.hdds.KeyValue metadata = 7;
optional BucketEncryptionInfoProto beinfo = 8;