diff --git a/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/AbstractDiskBalancerSubCommand.java b/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/AbstractDiskBalancerSubCommand.java index 1c92bd831cd8..266795fcb085 100644 --- a/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/AbstractDiskBalancerSubCommand.java +++ b/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/AbstractDiskBalancerSubCommand.java @@ -23,6 +23,7 @@ import java.util.List; import java.util.Map; import java.util.concurrent.Callable; +import java.util.stream.Collectors; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.scm.cli.ContainerOperationClient; @@ -42,6 +43,9 @@ public abstract class AbstractDiskBalancerSubCommand implements Callable { // Track if we're in batch mode to run commands on all in-service datanodes private boolean isBatchMode = false; + // Pre-fetched datanode address names for batch mode (address -> "hostname (ip:port)"); null in non-batch + private Map datanodeDisplayNames = null; + @Override public Void call() throws Exception { // Check if DiskBalancer is enabled in configuration @@ -74,6 +78,11 @@ public Void call() throws Exception { return null; } + // Remove duplicates while preserving order + List deduplicatedDatanodes = targetDatanodes.stream() + .distinct() + .collect(Collectors.toList()); + // Track if we're using batch mode for display isBatchMode = options.isInServiceDatanodes(); @@ -83,7 +92,7 @@ public Void call() throws Exception { List jsonResults = new ArrayList<>(); // Execute commands and collect results - for (String dn : targetDatanodes) { + for (String dn : deduplicatedDatanodes) { try { Object result = executeCommand(dn); successNodes.add(dn); @@ -145,6 +154,7 @@ private List getTargetDatanodes() { if (options.isInServiceDatanodes()) { return 
getAllInServiceDatanodes(); } else { + datanodeDisplayNames = null; // Non-batch: use user input as-is, no SCM for formatting return options.getDatanodes(); } } @@ -154,9 +164,10 @@ private List getTargetDatanodes() { */ private List getAllInServiceDatanodes() { try (ScmClient scmClient = new ContainerOperationClient(new OzoneConfiguration())) { - return DiskBalancerSubCommandUtil.getAllOperableNodesClientRpcAddress(scmClient); + datanodeDisplayNames = DiskBalancerSubCommandUtil.getAllOperableNodesClientRpcAddress(scmClient); + return new ArrayList<>(datanodeDisplayNames.keySet()); } catch (IOException e) { - System.err.println("Error querying SCM for in-service datanodes: %n" + e.getMessage()); + System.err.printf("Error querying SCM for in-service datanodes. %n%s%n", e.getMessage()); return null; } } @@ -220,7 +231,9 @@ protected Map getConfigurationMap() { */ private Map createErrorResult(String datanode, String errorMsg) { Map errorResult = new LinkedHashMap<>(); - errorResult.put("datanode", datanode); + // Format datanode string with hostname if available + String formattedDatanode = formatDatanodeDisplayName(datanode); + errorResult.put("datanode", formattedDatanode); errorResult.put("action", getActionName()); errorResult.put("status", "failure"); errorResult.put("errorMsg", errorMsg); @@ -233,5 +246,20 @@ private Map createErrorResult(String datanode, String errorMsg) return errorResult; } + + /** + * Format a datanode address for display. + * In batch mode, uses pre-fetched display names from the SCM query. + * In non-batch mode, returns the user's input as-is (no SCM call). 
+ * + * @param address the datanode address in "ip:port" format + * @return formatted string "hostname (ip:port)" in batch mode, or address as-is in non-batch + */ + protected String formatDatanodeDisplayName(String address) { + if (datanodeDisplayNames != null) { + return datanodeDisplayNames.getOrDefault(address, address); + } + return address; + } } diff --git a/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerReportSubcommand.java b/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerReportSubcommand.java index 9f1d5347860c..200c9b2cd329 100644 --- a/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerReportSubcommand.java +++ b/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerReportSubcommand.java @@ -17,6 +17,8 @@ package org.apache.hadoop.hdds.scm.cli.datanode; +import static java.util.stream.Collectors.toList; + import java.io.IOException; import java.util.ArrayList; import java.util.LinkedHashMap; @@ -72,8 +74,10 @@ protected void displayResults(List successNodes, List failedNode // Display error messages for failed nodes if (!failedNodes.isEmpty()) { - System.err.printf("Failed to get DiskBalancer report from nodes: [%s]%n", - String.join(", ", failedNodes)); + System.err.printf("Failed to get DiskBalancer report from nodes: [%s]%n", + String.join(", ", failedNodes.stream() + .map(this::formatDatanodeDisplayName) + .collect(toList()))); } // Display consolidated report for successful nodes @@ -91,15 +95,18 @@ private String generateReport(List protos) { Double.compare(b.getCurrentVolumeDensitySum(), a.getCurrentVolumeDensitySum())); StringBuilder formatBuilder = new StringBuilder("Report result:%n" + - "%-50s %s%n"); + "%-60s %s%n"); List contentList = new ArrayList<>(); contentList.add("Datanode"); contentList.add("VolumeDensity"); for (DatanodeDiskBalancerInfoProto proto : sortedProtos) { - 
formatBuilder.append("%-50s %s%n"); - contentList.add(proto.getNode().getHostName()); + formatBuilder.append("%-60s %s%n"); + // Format datanode string with hostname and IP address + String formattedDatanode = DiskBalancerSubCommandUtil.getDatanodeHostAndIp( + proto.getNode()); + contentList.add(formattedDatanode); contentList.add(String.valueOf(proto.getCurrentVolumeDensitySum())); } @@ -121,7 +128,10 @@ protected String getActionName() { private Map createReportResult( HddsProtos.DatanodeDiskBalancerInfoProto report) { Map result = new LinkedHashMap<>(); - result.put("datanode", report.getNode().getHostName()); + // Format datanode string with hostname and IP address + String formattedDatanode = DiskBalancerSubCommandUtil.getDatanodeHostAndIp( + report.getNode()); + result.put("datanode", formattedDatanode); result.put("action", "report"); result.put("status", "success"); result.put("volumeDensity", report.getCurrentVolumeDensitySum()); diff --git a/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerStartSubcommand.java b/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerStartSubcommand.java index 8aefc0dca9f4..a6fd395f8f1b 100644 --- a/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerStartSubcommand.java +++ b/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerStartSubcommand.java @@ -17,6 +17,8 @@ package org.apache.hadoop.hdds.scm.cli.datanode; +import static java.util.stream.Collectors.toList; + import java.io.IOException; import java.util.LinkedHashMap; import java.util.List; @@ -65,7 +67,9 @@ protected Object executeCommand(String hostName) throws IOException { diskBalancerProxy.startDiskBalancer(config); Map result = new LinkedHashMap<>(); - result.put("datanode", hostName); + // Format datanode string with hostname if available + String formattedDatanode = formatDatanodeDisplayName(hostName); + 
result.put("datanode", formattedDatanode); result.put("action", "start"); result.put("status", "success"); Map configMap = getConfigurationMap(); @@ -107,7 +111,9 @@ protected void displayResults(List successNodes, if (isBatchMode()) { if (!failedNodes.isEmpty()) { System.err.printf("Failed to start DiskBalancer on nodes: [%s]%n", - String.join(", ", failedNodes)); + String.join(", ", failedNodes.stream() + .map(this::formatDatanodeDisplayName) + .collect(toList()))); } else { System.out.println("Started DiskBalancer on all IN_SERVICE nodes."); } @@ -115,11 +121,15 @@ protected void displayResults(List successNodes, // Detailed message for specific nodes if (!successNodes.isEmpty()) { System.out.printf("Started DiskBalancer on nodes: [%s]%n", - String.join(", ", successNodes)); + String.join(", ", successNodes.stream() + .map(this::formatDatanodeDisplayName) + .collect(toList()))); } if (!failedNodes.isEmpty()) { System.err.printf("Failed to start DiskBalancer on nodes: [%s]%n", - String.join(", ", failedNodes)); + String.join(", ", failedNodes.stream() + .map(this::formatDatanodeDisplayName) + .collect(toList()))); } } } diff --git a/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerStatusSubcommand.java b/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerStatusSubcommand.java index 34d4d1e1bc76..f84a5b9a78e2 100644 --- a/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerStatusSubcommand.java +++ b/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerStatusSubcommand.java @@ -17,6 +17,8 @@ package org.apache.hadoop.hdds.scm.cli.datanode; +import static java.util.stream.Collectors.toList; + import java.io.IOException; import java.util.ArrayList; import java.util.LinkedHashMap; @@ -72,8 +74,10 @@ protected void displayResults(List successNodes, List failedNode // Display error messages for failed nodes if 
(!failedNodes.isEmpty()) { - System.err.printf("Failed to get DiskBalancer status from nodes: [%s]%n", - String.join(", ", failedNodes)); + System.err.printf("Failed to get DiskBalancer status from nodes: [%s]%n", + String.join(", ", failedNodes.stream() + .map(this::formatDatanodeDisplayName) + .collect(toList()))); } // Display consolidated status for successful nodes @@ -86,7 +90,7 @@ protected void displayResults(List successNodes, List failedNode private String generateStatus(List protos) { StringBuilder formatBuilder = new StringBuilder("Status result:%n" + - "%-35s %-15s %-15s %-15s %-12s %-20s %-12s %-12s %-15s %-18s %-20s%n"); + "%-60s %-12s %-15s %-15s %-12s %-20s %-12s %-12s %-15s %-18s %-20s%n"); List contentList = new ArrayList<>(); contentList.add("Datanode"); @@ -102,12 +106,15 @@ private String generateStatus(List protos) { contentList.add("EstTimeLeft(min)"); for (HddsProtos.DatanodeDiskBalancerInfoProto proto : protos) { - formatBuilder.append("%-35s %-15s %-15s %-15s %-12s %-20s %-12s %-12s %-15s %-18s %-20s%n"); + formatBuilder.append("%-60s %-12s %-15s %-15s %-12s %-20s %-12s %-12s %-15s %-18s %-20s%n"); long estimatedTimeLeft = calculateEstimatedTimeLeft(proto); long bytesMovedMB = (long) Math.ceil(proto.getBytesMoved() / (1024.0 * 1024.0)); long bytesToMoveMB = (long) Math.ceil(proto.getBytesToMove() / (1024.0 * 1024.0)); - contentList.add(proto.getNode().getHostName()); + // Format datanode string with hostname and IP address + String formattedDatanode = DiskBalancerSubCommandUtil.getDatanodeHostAndIp( + proto.getNode()); + contentList.add(formattedDatanode); contentList.add(proto.getRunningStatus().name()); contentList.add( String.format("%.4f", proto.getDiskBalancerConf().getThreshold())); @@ -149,7 +156,10 @@ protected String getActionName() { */ private Map createStatusResult(DatanodeDiskBalancerInfoProto status) { Map result = new LinkedHashMap<>(); - result.put("datanode", status.getNode().getHostName()); + // Format datanode string 
with hostname and IP address + String formattedDatanode = DiskBalancerSubCommandUtil.getDatanodeHostAndIp( + status.getNode()); + result.put("datanode", formattedDatanode); result.put("action", "status"); result.put("status", "success"); result.put("serviceStatus", status.getRunningStatus().name()); diff --git a/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerStopSubcommand.java b/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerStopSubcommand.java index ba3355cb9d5f..dcb79480756a 100644 --- a/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerStopSubcommand.java +++ b/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerStopSubcommand.java @@ -17,6 +17,8 @@ package org.apache.hadoop.hdds.scm.cli.datanode; +import static java.util.stream.Collectors.toList; + import java.io.IOException; import java.util.List; import java.util.Map; @@ -41,7 +43,9 @@ protected Object executeCommand(String hostName) throws IOException { try { diskBalancerProxy.stopDiskBalancer(); Map result = new java.util.LinkedHashMap<>(); - result.put("datanode", hostName); + // Format datanode string with hostname if available + String formattedDatanode = formatDatanodeDisplayName(hostName); + result.put("datanode", formattedDatanode); result.put("action", "stop"); result.put("status", "success"); return result; @@ -61,7 +65,9 @@ protected void displayResults(List successNodes, List failedNode // Simpler message for batch mode if (!failedNodes.isEmpty()) { System.err.printf("Failed to stop DiskBalancer on nodes: [%s]%n", - String.join(", ", failedNodes)); + String.join(", ", failedNodes.stream() + .map(this::formatDatanodeDisplayName) + .collect(toList()))); } else { System.out.println("Stopped DiskBalancer on all IN_SERVICE nodes."); } @@ -69,11 +75,15 @@ protected void displayResults(List successNodes, List failedNode // Detailed message for 
specific nodes if (!successNodes.isEmpty()) { System.out.printf("Stopped DiskBalancer on nodes: [%s]%n", - String.join(", ", successNodes)); + String.join(", ", successNodes.stream() + .map(this::formatDatanodeDisplayName) + .collect(toList()))); } if (!failedNodes.isEmpty()) { System.err.printf("Failed to stop DiskBalancer on nodes: [%s]%n", - String.join(", ", failedNodes)); + String.join(", ", failedNodes.stream() + .map(this::formatDatanodeDisplayName) + .collect(toList()))); } } } diff --git a/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerSubCommandUtil.java b/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerSubCommandUtil.java index b5e0b4e57dde..3f3fb16331c3 100644 --- a/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerSubCommandUtil.java +++ b/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerSubCommandUtil.java @@ -22,8 +22,9 @@ import java.io.IOException; import java.net.InetSocketAddress; -import java.util.ArrayList; +import java.util.LinkedHashMap; import java.util.List; +import java.util.Map; import org.apache.hadoop.hdds.conf.OzoneConfiguration; import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port; @@ -48,7 +49,7 @@ private DiskBalancerSubCommandUtil() { *

The address can be provided in two formats: *

    *
  • "hostname:port" - Uses the specified port
  • - *
  • "hostname" - Uses the default CLIENT_RPC port (19864)
  • + *
  • "hostname" - Uses the default CLIENT_RPC port (HDDS_DATANODE_CLIENT_PORT_DEFAULT)
  • *
* * @param address the datanode address in "host:port" or "host" format @@ -61,45 +62,33 @@ public static DiskBalancerProtocol getSingleNodeDiskBalancerProxy( UserGroupInformation user = UserGroupInformation.getCurrentUser(); // Parse address and add default port if not specified - InetSocketAddress nodeAddr = parseAddress(address, HDDS_DATANODE_CLIENT_PORT_DEFAULT); - - return new DiskBalancerProtocolClientSideTranslatorPB( - nodeAddr, user, ozoneConf); - } - - /** - * Parses a datanode address string and returns an InetSocketAddress. - * If the address doesn't contain a port, uses the provided default port. - * - * @param address the address string (e.g., "host:port" or "host") - * @param defaultPort the default port to use if not specified - * @return InetSocketAddress with the parsed or default port - */ - private static InetSocketAddress parseAddress(String address, int defaultPort) { + InetSocketAddress nodeAddr; if (address.contains(":")) { // Port is specified, use NetUtils to parse - return NetUtils.createSocketAddr(address); + nodeAddr = NetUtils.createSocketAddr(address); } else { // Port not specified, use default - return NetUtils.createSocketAddr(address, defaultPort); + nodeAddr = NetUtils.createSocketAddr(address, HDDS_DATANODE_CLIENT_PORT_DEFAULT); } + return new DiskBalancerProtocolClientSideTranslatorPB( + nodeAddr, user, ozoneConf); } /** - * Retrieves all IN_SERVICE datanode addresses from SCM. + * Retrieves all IN_SERVICE datanode addresses with their hostnames from SCM. * Used for batch operations with --in-service-datanodes flag. 
- * + * * @param scmClient the SCM client - * @return list of datanode addresses in "ip:port" format + * @return map of address (ip:port) to display string (hostname (ip:port) or ip:port) * @throws IOException if SCM query fails */ - public static List getAllOperableNodesClientRpcAddress( + public static Map getAllOperableNodesClientRpcAddress( ScmClient scmClient) throws IOException { List nodes = scmClient.queryNode( NodeOperationalState.IN_SERVICE, HddsProtos.NodeState.HEALTHY, HddsProtos.QueryScope.CLUSTER, ""); - List addresses = new ArrayList<>(); + Map addressToDisplay = new LinkedHashMap<>(); for (HddsProtos.Node node : nodes) { DatanodeDetails details = DatanodeDetails.getFromProtoBuf(node.getNodeID()); @@ -108,8 +97,13 @@ public static List getAllOperableNodesClientRpcAddress( } Port port = details.getPort(Port.Name.CLIENT_RPC); if (port != null) { - // Use IP address for reliable connection (hostnames with underscores may not be valid) - addresses.add(details.getIpAddress() + ":" + port.getValue()); + String address = details.getIpAddress() + ":" + port.getValue(); + // Format the display string: "hostname (ip:port)" or "ip:port" + String hostname = details.getHostName(); + String display = (hostname != null && !hostname.isEmpty() + && !hostname.equals(details.getIpAddress())) ? hostname + " (" + address + ")" + : address; + addressToDisplay.put(address, display); } else { System.out.printf("host: %s(%s) %s port not found%n", details.getHostName(), details.getIpAddress(), @@ -117,7 +111,31 @@ public static List getAllOperableNodesClientRpcAddress( } } - return addresses; + return addressToDisplay; } -} + /** + * Returns a formatted string combining hostname and IP address from DatanodeDetailsProto. + * If hostname is null or empty, returns just "ip:port". 
+ * + * @param nodeProto the DatanodeDetailsProto from the diskbalancer info + * @return formatted string "hostname (ip:port)" or "ip:port" if hostname is not available + */ + public static String getDatanodeHostAndIp(HddsProtos.DatanodeDetailsProto nodeProto) { + String hostname = nodeProto.getHostName(); + String ipAddress = nodeProto.getIpAddress(); + int port = nodeProto.getPortsList().stream() + .filter(p -> p.getName().equals( + DatanodeDetails.Port.Name.CLIENT_RPC.name())) + .mapToInt(HddsProtos.Port::getValue) + .findFirst() + .orElse(HDDS_DATANODE_CLIENT_PORT_DEFAULT); // Default port if not found + + // Format the output string + String addressPort = ipAddress + ":" + port; + if (hostname != null && !hostname.isEmpty() && !hostname.equals(ipAddress)) { + return hostname + " (" + addressPort + ")"; + } + return addressPort; + } +} diff --git a/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerUpdateSubcommand.java b/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerUpdateSubcommand.java index 825a81c8aca0..9777ce78fab8 100644 --- a/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerUpdateSubcommand.java +++ b/hadoop-ozone/cli-admin/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/DiskBalancerUpdateSubcommand.java @@ -17,6 +17,8 @@ package org.apache.hadoop.hdds.scm.cli.datanode; +import static java.util.stream.Collectors.toList; + import java.io.IOException; import java.util.LinkedHashMap; import java.util.List; @@ -74,7 +76,9 @@ protected Object executeCommand(String hostName) throws IOException { diskBalancerProxy.updateDiskBalancerConfiguration(config); Map result = new LinkedHashMap<>(); - result.put("datanode", hostName); + // Format datanode string with hostname if available + String formattedDatanode = formatDatanodeDisplayName(hostName); + result.put("datanode", formattedDatanode); result.put("action", "update"); 
result.put("status", "success"); Map configMap = getConfigurationMap(); @@ -117,7 +121,9 @@ protected void displayResults(List successNodes, // Simpler message for batch mode if (!failedNodes.isEmpty()) { System.err.printf("Failed to update DiskBalancer configuration on nodes: [%s]%n", - String.join(", ", failedNodes)); + String.join(", ", failedNodes.stream() + .map(this::formatDatanodeDisplayName) + .collect(toList()))); } else { System.out.println("Updated DiskBalancer configuration on all IN_SERVICE nodes."); } @@ -125,11 +131,15 @@ protected void displayResults(List successNodes, // Detailed message for specific nodes if (!successNodes.isEmpty()) { System.out.printf("Updated DiskBalancer configuration on nodes: [%s]%n", - String.join(", ", successNodes)); + String.join(", ", successNodes.stream() + .map(this::formatDatanodeDisplayName) + .collect(toList()))); } if (!failedNodes.isEmpty()) { System.err.printf("Failed to update DiskBalancer configuration on nodes: [%s]%n", - String.join(", ", failedNodes)); + String.join(", ", failedNodes.stream() + .map(this::formatDatanodeDisplayName) + .collect(toList()))); } } } diff --git a/hadoop-ozone/cli-admin/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDiskBalancerSubCommands.java b/hadoop-ozone/cli-admin/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDiskBalancerSubCommands.java index 105cb5d8566b..760408679204 100644 --- a/hadoop-ozone/cli-admin/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDiskBalancerSubCommands.java +++ b/hadoop-ozone/cli-admin/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestDiskBalancerSubCommands.java @@ -17,6 +17,7 @@ package org.apache.hadoop.hdds.scm.cli.datanode; +import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DATANODE_CLIENT_PORT_DEFAULT; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyString; @@ -35,13 +36,17 @@ import 
java.io.UnsupportedEncodingException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.LinkedHashMap; import java.util.List; +import java.util.Map; import java.util.Random; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.apache.hadoop.hdds.HddsConfigKeys; import org.apache.hadoop.hdds.conf.OzoneConfiguration; +import org.apache.hadoop.hdds.protocol.DatanodeDetails; import org.apache.hadoop.hdds.protocol.DiskBalancerProtocol; +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDiskBalancerInfoProto; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DiskBalancerConfigurationProto; @@ -73,9 +78,9 @@ public void setup() throws UnsupportedEncodingException { // Create shared list of in-service datanodes inServiceDatanodes = new ArrayList<>(); - inServiceDatanodes.add("host-1:19864"); - inServiceDatanodes.add("host-2:19864"); - inServiceDatanodes.add("host-3:19864"); + inServiceDatanodes.add("host-1:" + HDDS_DATANODE_CLIENT_PORT_DEFAULT); + inServiceDatanodes.add("host-2:" + HDDS_DATANODE_CLIENT_PORT_DEFAULT); + inServiceDatanodes.add("host-3:" + HDDS_DATANODE_CLIENT_PORT_DEFAULT); // Create shared mock protocol mockProtocol = mock(DiskBalancerProtocol.class); @@ -130,13 +135,49 @@ private DiskBalancerMocks setupAllMocks() { MockedStatic mockedUtil = mockStatic(DiskBalancerSubCommandUtil.class); + Map addressToDisplay = new LinkedHashMap<>(); + for (String addr : inServiceDatanodes) { + addressToDisplay.put(addr, addr); + } mockedUtil.when(() -> DiskBalancerSubCommandUtil .getAllOperableNodesClientRpcAddress(any())) - .thenReturn(inServiceDatanodes); + .thenReturn(addressToDisplay); mockedUtil.when(() -> DiskBalancerSubCommandUtil .getSingleNodeDiskBalancerProxy(anyString())) .thenReturn(mockProtocol); - + // Mock 
getDatanodeHostAndIp(HddsProtos.DatanodeDetailsProto) to format the output + mockedUtil.when(() -> DiskBalancerSubCommandUtil + .getDatanodeHostAndIp(any(HddsProtos.DatanodeDetailsProto.class))) + .thenAnswer(invocation -> { + HddsProtos.DatanodeDetailsProto proto = invocation.getArgument(0); + return proto.getHostName() + " (" + proto.getIpAddress() + ":" + + HDDS_DATANODE_CLIENT_PORT_DEFAULT + ")"; + }); + // Mock getDatanodeHostAndIp(String, String, int) to format the output + // Return value is used by Mockito internally for mock setup + mockedUtil.when(() -> { + @SuppressWarnings("RV_RETURN_VALUE_IGNORED_NO_SIDE_EFFECT") + String ignored = DiskBalancerSubCommandUtil + .getDatanodeHostAndIp(any(DatanodeDetailsProto.class)); + // Use the value to avoid "ignored return value" static analysis warnings. + System.out.println(ignored); + }).thenAnswer(invocation -> { + DatanodeDetailsProto proto = invocation.getArgument(0); + String hostname = proto.getHostName(); + String ipAddress = proto.getIpAddress(); + int port = proto.getPortsList().stream() + .filter(p -> p.getName().equals( + DatanodeDetails.Port.Name.CLIENT_RPC.name())) + .mapToInt(HddsProtos.Port::getValue) + .findFirst() + .orElse(HDDS_DATANODE_CLIENT_PORT_DEFAULT); + String addressPort = ipAddress + ":" + port; + if (hostname != null && !hostname.isEmpty() && !hostname.equals(ipAddress)) { + return hostname + " (" + addressPort + ")"; + } + return addressPort; + }); + return new DiskBalancerMocks(mockedConf, mockedClient, mockedUtil); } @@ -202,6 +243,25 @@ public void testStartDiskBalancerWithMultipleNodes() throws Exception { } } + @Test + public void testStartDiskBalancerWithDuplicateHostnames() throws Exception { + DiskBalancerStartSubcommand cmd = new DiskBalancerStartSubcommand(); + doNothing().when(mockProtocol).startDiskBalancer(any(DiskBalancerConfigurationProto.class)); + + try (DiskBalancerMocks mocks = setupAllMocks()) { + + CommandLine c = new CommandLine(cmd); + c.parseArgs("host-1", 
"host-1", "host-2"); + cmd.call(); + + // output should show each host only once + String output = outContent.toString(DEFAULT_ENCODING); + Pattern p = Pattern.compile("Started DiskBalancer on nodes: \\[host-1, host-2\\]"); + Matcher m = p.matcher(output); + assertTrue(m.find()); + } + } + @Test public void testStartDiskBalancerWithStdin() throws Exception { DiskBalancerStartSubcommand cmd = new DiskBalancerStartSubcommand(); @@ -633,6 +693,10 @@ private DatanodeDiskBalancerInfoProto createStatusProto(String hostname, DatanodeDetailsProto nodeProto = DatanodeDetailsProto.newBuilder() .setHostName(hostname) .setIpAddress("127.0.0.1") + .addPorts(HddsProtos.Port.newBuilder() + .setName("CLIENT_RPC") + .setValue(HDDS_DATANODE_CLIENT_PORT_DEFAULT) + .build()) .build(); DiskBalancerConfigurationProto configProto = DiskBalancerConfigurationProto.newBuilder() @@ -684,6 +748,10 @@ private DatanodeDiskBalancerInfoProto generateRandomReportProto(String hostname) DatanodeDetailsProto nodeProto = DatanodeDetailsProto.newBuilder() .setHostName(hostname) .setIpAddress("127.0.0.1") + .addPorts(HddsProtos.Port.newBuilder() + .setName("CLIENT_RPC") + .setValue(HDDS_DATANODE_CLIENT_PORT_DEFAULT) + .build()) .build(); return DatanodeDiskBalancerInfoProto.newBuilder()