/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hdfs;

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICES;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;

import java.io.IOException;
import java.io.PrintStream;
import java.io.UnsupportedEncodingException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.security.SecureRandom;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.Date;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Random;
import java.util.Set;

import javax.net.SocketFactory;

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.commons.cli.ParseException;
import org.apache.commons.cli.PosixParser;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.web.SWebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.ToolRunner;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Charsets;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.primitives.SignedBytes;
import com.google.protobuf.BlockingService;

@InterfaceAudience.Private
public class DFSUtil {
  public static final Log LOG = LogFactory.getLog(DFSUtil.class.getName());

  public static final byte[] EMPTY_BYTES = {};

  /** Compare two byte arrays by lexicographical order. */
  public static int compareBytes(byte[] left, byte[] right) {
    if (left == null) {
      left = EMPTY_BYTES;
    }
    if (right == null) {
      right = EMPTY_BYTES;
    }
    return SignedBytes.lexicographicalComparator().compare(left, right);
  }
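
  // Illustrative usage (a sketch; the byte arrays are hypothetical UTF-8
  // path components):
  //   DFSUtil.compareBytes(new byte[] {'a'}, new byte[] {'b'}) < 0  // "a" < "b"
  //   DFSUtil.compareBytes(null, DFSUtil.EMPTY_BYTES) == 0          // null == empty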

  private DFSUtil() { /* Hidden constructor */ }

  private static final ThreadLocal<Random> RANDOM = new ThreadLocal<Random>() {
    @Override
    protected Random initialValue() {
      return new Random();
    }
  };

  private static final ThreadLocal<SecureRandom> SECURE_RANDOM = new ThreadLocal<SecureRandom>() {
    @Override
    protected SecureRandom initialValue() {
      return new SecureRandom();
    }
  };

  /** @return a per-thread pseudo-random number generator. */
  public static Random getRandom() {
    return RANDOM.get();
  }

  /** @return a per-thread cryptographically secure random number generator. */
  public static SecureRandom getSecureRandom() {
    return SECURE_RANDOM.get();
  }

  /** Shuffle the elements in the given array in place. */
  public static <T> T[] shuffle(final T[] array) {
    if (array != null && array.length > 0) {
      final Random random = getRandom();
      for (int n = array.length; n > 1; ) {
        final int randomIndex = random.nextInt(n);
        n--;
        if (n != randomIndex) {
          final T tmp = array[randomIndex];
          array[randomIndex] = array[n];
          array[n] = tmp;
        }
      }
    }
    return array;
  }
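
  // The loop above is a Fisher-Yates shuffle. Illustrative usage (a sketch;
  // the array contents are hypothetical):
  //   DatanodeInfo[] nodes = ...;
  //   DFSUtil.shuffle(nodes); // nodes is now a uniformly random permutation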

  /**
   * Comparator for sorting DatanodeInfo[] based on decommissioned state.
   * Decommissioned nodes are moved to the end of the array on sorting with
   * this comparator.
   */
  public static final Comparator<DatanodeInfo> DECOM_COMPARATOR =
      new Comparator<DatanodeInfo>() {
        @Override
        public int compare(DatanodeInfo a, DatanodeInfo b) {
          return a.isDecommissioned() == b.isDecommissioned() ? 0 :
              a.isDecommissioned() ? 1 : -1;
        }
      };

  /**
   * Comparator for sorting DatanodeInfo[] based on decommissioned/stale states.
   * Decommissioned/stale nodes are moved to the end of the array on sorting
   * with this comparator.
   */
  @InterfaceAudience.Private
  public static class DecomStaleComparator implements Comparator<DatanodeInfo> {
    private final long staleInterval;

    /**
     * Constructor of DecomStaleComparator
     *
     * @param interval
     *          The time interval for marking datanodes as stale is passed from
     *          outside, since the interval may be changed dynamically
     */
    public DecomStaleComparator(long interval) {
      this.staleInterval = interval;
    }

    @Override
    public int compare(DatanodeInfo a, DatanodeInfo b) {
      // Decommissioned nodes will still be moved to the end of the list
      if (a.isDecommissioned()) {
        return b.isDecommissioned() ? 0 : 1;
      } else if (b.isDecommissioned()) {
        return -1;
      }
      // Stale nodes will be moved behind the normal nodes
      boolean aStale = a.isStale(staleInterval);
      boolean bStale = b.isStale(staleInterval);
      return aStale == bStale ? 0 : (aStale ? 1 : -1);
    }
  }
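
  // Illustrative usage (a sketch; the stale interval is hypothetical):
  //   DatanodeInfo[] nodes = ...;
  //   Arrays.sort(nodes, new DFSUtil.DecomStaleComparator(30 * 1000L));
  //   // result: live nodes first, then stale, then decommissioned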

  /**
   * Address matcher for matching an address to a local address.
   */
  static final AddressMatcher LOCAL_ADDRESS_MATCHER = new AddressMatcher() {
    @Override
    public boolean match(InetSocketAddress s) {
      return NetUtils.isLocalAddress(s.getAddress());
    }
  };

  /**
   * Whether the pathname is valid. Currently prohibits relative paths,
   * names which contain a ":" or "//", or other non-canonical paths.
   */
  public static boolean isValidName(String src) {
    // Path must be absolute.
    if (!src.startsWith(Path.SEPARATOR)) {
      return false;
    }

    // Check for ".." "." ":" "/"
    String[] components = StringUtils.split(src, '/');
    for (int i = 0; i < components.length; i++) {
      String element = components[i];
      if (element.equals(".") ||
          (element.indexOf(":") >= 0) ||
          (element.indexOf("/") >= 0)) {
        return false;
      }
      // ".." is allowed in paths starting with /.reserved/.inodes
      if (element.equals("..")) {
        if (components.length > 4
            && components[1].equals(FSDirectory.DOT_RESERVED_STRING)
            && components[2].equals(FSDirectory.DOT_INODES_STRING)) {
          continue;
        }
        return false;
      }
      // The string may start or end with a /, but not have
      // "//" in the middle.
      if (element.isEmpty() && i != components.length - 1 &&
          i != 0) {
        return false;
      }
    }
    return true;
  }
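
  // Illustrative examples (a sketch):
  //   isValidName("/foo/bar")   -> true
  //   isValidName("foo/bar")    -> false  (relative path)
  //   isValidName("/foo:bar")   -> false  (":" is prohibited)
  //   isValidName("/foo//bar")  -> false  ("//" in the middle)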

  /**
   * Checks if a string is a valid path component. For instance, components
   * cannot contain a ":" or "/", and cannot be equal to a reserved component
   * like ".snapshot".
   * <p>
   * The primary use of this method is for validating paths when loading the
   * FSImage. During normal NN operation, paths are sometimes allowed to
   * contain reserved components.
   *
   * @return If component is valid
   */
  public static boolean isValidNameForComponent(String component) {
    if (component.equals(".") ||
        component.equals("..") ||
        component.indexOf(":") >= 0 ||
        component.indexOf("/") >= 0) {
      return false;
    }
    return !isReservedPathComponent(component);
  }

  /**
   * Returns if the component is reserved.
   *
   * <p>
   * Note that some components are only reserved under certain directories, e.g.
   * "/.reserved" is reserved, while "/hadoop/.reserved" is not.
   *
   * @param component the path component to check
   * @return if the component is reserved
   */
  public static boolean isReservedPathComponent(String component) {
    for (String reserved : HdfsConstants.RESERVED_PATH_COMPONENTS) {
      if (component.equals(reserved)) {
        return true;
      }
    }
    return false;
  }

  /**
   * Converts a byte array to a string using UTF-8 encoding.
   */
  public static String bytes2String(byte[] bytes) {
    return bytes2String(bytes, 0, bytes.length);
  }

  /**
   * Decode a specific range of bytes of the given byte array to a string
   * using UTF-8.
   *
   * @param bytes The bytes to be decoded into characters
   * @param offset The index of the first byte to decode
   * @param length The number of bytes to decode
   * @return The decoded string
   */
  public static String bytes2String(byte[] bytes, int offset, int length) {
    try {
      return new String(bytes, offset, length, "UTF8");
    } catch (UnsupportedEncodingException e) {
      assert false : "UTF8 encoding is not supported";
    }
    return null;
  }
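
  // Round-trip example with string2Bytes below (a sketch):
  //   byte[] b = DFSUtil.string2Bytes("foo");
  //   String s = DFSUtil.bytes2String(b);   // "foo"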

  /**
   * Converts a string to a byte array using UTF-8 encoding.
   */
  public static byte[] string2Bytes(String str) {
    return str.getBytes(Charsets.UTF_8);
  }

  /**
   * Given a list of path components, returns a path as a UTF-8 string.
   */
  public static String byteArray2PathString(byte[][] pathComponents) {
    if (pathComponents.length == 0) {
      return "";
    } else if (pathComponents.length == 1
        && (pathComponents[0] == null || pathComponents[0].length == 0)) {
      return Path.SEPARATOR;
    }
    StringBuilder result = new StringBuilder();
    for (int i = 0; i < pathComponents.length; i++) {
      result.append(new String(pathComponents[i], Charsets.UTF_8));
      if (i < pathComponents.length - 1) {
        result.append(Path.SEPARATOR_CHAR);
      }
    }
    return result.toString();
  }
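
  // Illustrative example (a sketch; the components are in the form produced
  // by bytes2byteArray below, with an empty leading component):
  //   byte[][] parts = { {}, "foo".getBytes(), "bar".getBytes() };
  //   byteArray2PathString(parts)  -> "/foo/bar"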

  /**
   * Converts a list of path components into a path using Path.SEPARATOR.
   *
   * @param components Path components
   * @return Combined path as a UTF-8 string
   */
  public static String strings2PathString(String[] components) {
    if (components.length == 0) {
      return "";
    }
    if (components.length == 1) {
      if (components[0] == null || components[0].isEmpty()) {
        return Path.SEPARATOR;
      }
    }
    return Joiner.on(Path.SEPARATOR).join(components);
  }

  /**
   * Given a list of path components, returns a byte array.
   */
  public static byte[] byteArray2bytes(byte[][] pathComponents) {
    if (pathComponents.length == 0) {
      return EMPTY_BYTES;
    } else if (pathComponents.length == 1
        && (pathComponents[0] == null || pathComponents[0].length == 0)) {
      return new byte[]{(byte) Path.SEPARATOR_CHAR};
    }
    int length = 0;
    for (int i = 0; i < pathComponents.length; i++) {
      length += pathComponents[i].length;
      if (i < pathComponents.length - 1) {
        length++; // for SEPARATOR
      }
    }
    byte[] path = new byte[length];
    int index = 0;
    for (int i = 0; i < pathComponents.length; i++) {
      System.arraycopy(pathComponents[i], 0, path, index,
          pathComponents[i].length);
      index += pathComponents[i].length;
      if (i < pathComponents.length - 1) {
        path[index] = (byte) Path.SEPARATOR_CHAR;
        index++;
      }
    }
    return path;
  }

  /** Convert an object representing a path to a string. */
  public static String path2String(final Object path) {
    return path == null? null
        : path instanceof String? (String)path
        : path instanceof byte[][]? byteArray2PathString((byte[][])path)
        : path.toString();
  }

  /**
   * Splits the array of bytes into an array of arrays of bytes
   * on the given byte separator.
   * @param bytes the array of bytes to split
   * @param separator the delimiting byte
   */
  public static byte[][] bytes2byteArray(byte[] bytes, byte separator) {
    return bytes2byteArray(bytes, bytes.length, separator);
  }

  /**
   * Splits the first len bytes of bytes into an array of arrays of bytes
   * on the given byte separator.
   * @param bytes the byte array to split
   * @param len the number of bytes to split
   * @param separator the delimiting byte
   */
  public static byte[][] bytes2byteArray(byte[] bytes,
                                         int len,
                                         byte separator) {
    assert len <= bytes.length;
    int splits = 0;
    if (len == 0) {
      return new byte[][]{null};
    }
    // Count the splits. Omit multiple separators and the last one.
    for (int i = 0; i < len; i++) {
      if (bytes[i] == separator) {
        splits++;
      }
    }
    int last = len - 1;
    while (last > -1 && bytes[last--] == separator) {
      splits--;
    }
    if (splits == 0 && bytes[0] == separator) {
      return new byte[][]{null};
    }
    splits++;
    byte[][] result = new byte[splits][];
    int startIndex = 0;
    int nextIndex = 0;
    int index = 0;
    // Build the splits
    while (index < splits) {
      while (nextIndex < len && bytes[nextIndex] != separator) {
        nextIndex++;
      }
      result[index] = new byte[nextIndex - startIndex];
      System.arraycopy(bytes, startIndex, result[index], 0, nextIndex
          - startIndex);
      index++;
      startIndex = nextIndex + 1;
      nextIndex = startIndex;
    }
    return result;
  }
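
  // Illustrative example (a sketch): splitting "/foo/bar" on '/' yields
  // three components, the first of which is empty (before the leading '/'):
  //   byte[][] parts = bytes2byteArray("/foo/bar".getBytes(), (byte) '/');
  //   // parts = { "", "foo", "bar" } (as UTF-8 bytes)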

  /**
   * Convert a LocatedBlocks to BlockLocation[]
   * @param blocks a LocatedBlocks
   * @return an array of BlockLocations
   */
  public static BlockLocation[] locatedBlocks2Locations(LocatedBlocks blocks) {
    if (blocks == null) {
      return new BlockLocation[0];
    }
    return locatedBlocks2Locations(blocks.getLocatedBlocks());
  }

  /**
   * Convert a List<LocatedBlock> to BlockLocation[]
   * @param blocks A List<LocatedBlock> to be converted
   * @return converted array of BlockLocation
   */
  public static BlockLocation[] locatedBlocks2Locations(List<LocatedBlock> blocks) {
    if (blocks == null) {
      return new BlockLocation[0];
    }
    int nrBlocks = blocks.size();
    BlockLocation[] blkLocations = new BlockLocation[nrBlocks];
    if (nrBlocks == 0) {
      return blkLocations;
    }
    int idx = 0;
    for (LocatedBlock blk : blocks) {
      assert idx < nrBlocks : "Incorrect index";
      DatanodeInfo[] locations = blk.getLocations();
      String[] hosts = new String[locations.length];
      String[] xferAddrs = new String[locations.length];
      String[] racks = new String[locations.length];
      for (int hCnt = 0; hCnt < locations.length; hCnt++) {
        hosts[hCnt] = locations[hCnt].getHostName();
        xferAddrs[hCnt] = locations[hCnt].getXferAddr();
        NodeBase node = new NodeBase(xferAddrs[hCnt],
            locations[hCnt].getNetworkLocation());
        racks[hCnt] = node.toString();
      }
      DatanodeInfo[] cachedLocations = blk.getCachedLocations();
      String[] cachedHosts = new String[cachedLocations.length];
      for (int i = 0; i < cachedLocations.length; i++) {
        cachedHosts[i] = cachedLocations[i].getHostName();
      }
      blkLocations[idx] = new BlockLocation(xferAddrs, hosts, cachedHosts,
          racks,
          blk.getStartOffset(),
          blk.getBlockSize(),
          blk.isCorrupt());
      idx++;
    }
    return blkLocations;
  }

  /**
   * Returns the collection of nameservice Ids from the configuration.
   * @param conf configuration
   * @return collection of nameservice Ids, or null if not specified
   */
  public static Collection<String> getNameServiceIds(Configuration conf) {
    return conf.getTrimmedStringCollection(DFS_NAMESERVICES);
  }

  /**
   * @return <code>coll</code> if it is non-null and non-empty. Otherwise,
   * returns a list with a single null value.
   */
  private static Collection<String> emptyAsSingletonNull(Collection<String> coll) {
    if (coll == null || coll.isEmpty()) {
      return Collections.singletonList(null);
    } else {
      return coll;
    }
  }

  /**
   * Namenode HighAvailability related configuration.
   * Returns the collection of namenode Ids from the configuration, one logical
   * Id for each namenode in the HA setup.
   *
   * @param conf configuration
   * @param nsId the nameservice ID to look at, or null for non-federated
   * @return collection of namenode Ids
   */
  public static Collection<String> getNameNodeIds(Configuration conf, String nsId) {
    String key = addSuffix(DFS_HA_NAMENODES_KEY_PREFIX, nsId);
    return conf.getTrimmedStringCollection(key);
  }

  /**
   * Given a list of keys in the order of preference, returns a value
   * for the key in the given order from the configuration.
   * @param defaultValue default value to return, when key was not found
   * @param keySuffix suffix to add to the key, if it is not null
   * @param conf Configuration
   * @param keys list of keys in the order of preference
   * @return value of the key or default if a key was not found in configuration
   */
  private static String getConfValue(String defaultValue, String keySuffix,
      Configuration conf, String... keys) {
    String value = null;
    for (String key : keys) {
      key = addSuffix(key, keySuffix);
      value = conf.get(key);
      if (value != null) {
        break;
      }
    }
    if (value == null) {
      value = defaultValue;
    }
    return value;
  }

  /** Add a non-empty and non-null suffix to a key */
  private static String addSuffix(String key, String suffix) {
    if (suffix == null || suffix.isEmpty()) {
      return key;
    }
    assert !suffix.startsWith(".") :
        "suffix '" + suffix + "' should not already have '.' prepended.";
    return key + "." + suffix;
  }

  /** Concatenate a list of suffix strings, '.' separated */
  private static String concatSuffixes(String... suffixes) {
    if (suffixes == null) {
      return null;
    }
    return Joiner.on(".").skipNulls().join(suffixes);
  }

  /**
   * Return a configuration key of the format key.suffix1.suffix2...suffixN
   */
  public static String addKeySuffixes(String key, String... suffixes) {
    String keySuffix = concatSuffixes(suffixes);
    return addSuffix(key, keySuffix);
  }
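
  // Illustrative example (a sketch; "ns1" and "nn1" are hypothetical Ids):
  //   addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1")
  //     -> "dfs.namenode.rpc-address.ns1.nn1"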

  /**
   * Returns the configured addresses for all NameNodes in the cluster.
   * @param conf configuration
   * @param defaultAddress default address to return in case key is not found.
   * @param keys Set of keys to look for in the order of preference
   * @return a map(nameserviceId to map(namenodeId to InetSocketAddress))
   */
  private static Map<String, Map<String, InetSocketAddress>>
      getAddresses(Configuration conf,
          String defaultAddress, String... keys) {
    Collection<String> nameserviceIds = getNameServiceIds(conf);

    // Look for configurations of the form <key>[.<nameserviceId>][.<namenodeId>]
    // across all of the configured nameservices and namenodes.
    Map<String, Map<String, InetSocketAddress>> ret = Maps.newLinkedHashMap();
    for (String nsId : emptyAsSingletonNull(nameserviceIds)) {
      Map<String, InetSocketAddress> isas =
          getAddressesForNameserviceId(conf, nsId, defaultAddress, keys);
      if (!isas.isEmpty()) {
        ret.put(nsId, isas);
      }
    }
    return ret;
  }

  /**
   * Get all of the RPC addresses of the individual NNs in a given nameservice.
   *
   * @param conf Configuration
   * @param nsId the nameservice whose NNs addresses we want.
   * @param defaultValue default address to return in case key is not found.
   * @return A map from nnId -> RPC address of each NN in the nameservice.
   */
  public static Map<String, InetSocketAddress> getRpcAddressesForNameserviceId(
      Configuration conf, String nsId, String defaultValue) {
    return getAddressesForNameserviceId(conf, nsId, defaultValue,
        DFS_NAMENODE_RPC_ADDRESS_KEY);
  }

  private static Map<String, InetSocketAddress> getAddressesForNameserviceId(
      Configuration conf, String nsId, String defaultValue,
      String... keys) {
    Collection<String> nnIds = getNameNodeIds(conf, nsId);
    Map<String, InetSocketAddress> ret = Maps.newHashMap();
    for (String nnId : emptyAsSingletonNull(nnIds)) {
      String suffix = concatSuffixes(nsId, nnId);
      String address = getConfValue(defaultValue, suffix, conf, keys);
      if (address != null) {
        InetSocketAddress isa = NetUtils.createSocketAddr(address);
        if (isa.isUnresolved()) {
          LOG.warn("Namenode for " + nsId +
              " remains unresolved for ID " + nnId +
              ". Check your hdfs-site.xml file to " +
              "ensure namenodes are configured properly.");
        }
        ret.put(nnId, isa);
      }
    }
    return ret;
  }

  /**
   * @return a collection of all configured NN Kerberos principals.
   */
  public static Set<String> getAllNnPrincipals(Configuration conf) throws IOException {
    Set<String> principals = new HashSet<String>();
    for (String nsId : DFSUtil.getNameServiceIds(conf)) {
      if (HAUtil.isHAEnabled(conf, nsId)) {
        for (String nnId : DFSUtil.getNameNodeIds(conf, nsId)) {
          Configuration confForNn = new Configuration(conf);
          NameNode.initializeGenericKeys(confForNn, nsId, nnId);
          String principal = SecurityUtil.getServerPrincipal(confForNn
              .get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY),
              NameNode.getAddress(confForNn).getHostName());
          principals.add(principal);
        }
      } else {
        Configuration confForNn = new Configuration(conf);
        NameNode.initializeGenericKeys(confForNn, nsId, null);
        String principal = SecurityUtil.getServerPrincipal(confForNn
            .get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY),
            NameNode.getAddress(confForNn).getHostName());
        principals.add(principal);
      }
    }

    return principals;
  }

  /**
   * Returns the list of InetSocketAddresses corresponding to HA NN RPC
   * addresses from the configuration.
   *
   * @param conf configuration
   * @return list of InetSocketAddresses
   */
  public static Map<String, Map<String, InetSocketAddress>> getHaNnRpcAddresses(
      Configuration conf) {
    return getAddresses(conf, null, DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
  }

  /**
   * Returns the list of InetSocketAddresses corresponding to HA NN HTTP
   * addresses from the configuration.
   *
   * @return list of InetSocketAddresses
   */
  public static Map<String, Map<String, InetSocketAddress>> getHaNnWebHdfsAddresses(
      Configuration conf, String scheme) {
    if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
      return getAddresses(conf, null,
          DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
    } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
      return getAddresses(conf, null,
          DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY);
    } else {
      throw new IllegalArgumentException("Unsupported scheme: " + scheme);
    }
  }

  /**
   * Resolve an HDFS URL into a real InetSocketAddress. It works like a DNS
   * resolver when the URL points to a non-HA cluster. When the URL points to
   * an HA cluster, the resolver further resolves the logical name (i.e., the
   * authority in the URL) into real namenode addresses.
   */
  public static InetSocketAddress[] resolveWebHdfsUri(URI uri, Configuration conf)
      throws IOException {
    int defaultPort;
    String scheme = uri.getScheme();
    if (WebHdfsFileSystem.SCHEME.equals(scheme)) {
      defaultPort = DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT;
    } else if (SWebHdfsFileSystem.SCHEME.equals(scheme)) {
      defaultPort = DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT;
    } else {
      throw new IllegalArgumentException("Unsupported scheme: " + scheme);
    }

    ArrayList<InetSocketAddress> ret = new ArrayList<InetSocketAddress>();

    if (!HAUtil.isLogicalUri(conf, uri)) {
      InetSocketAddress addr = NetUtils.createSocketAddr(uri.getAuthority(),
          defaultPort);
      ret.add(addr);

    } else {
      Map<String, Map<String, InetSocketAddress>> addresses = DFSUtil
          .getHaNnWebHdfsAddresses(conf, scheme);

      for (Map<String, InetSocketAddress> addrs : addresses.values()) {
        for (InetSocketAddress addr : addrs.values()) {
          ret.add(addr);
        }
      }
    }

    InetSocketAddress[] r = new InetSocketAddress[ret.size()];
    return ret.toArray(r);
  }

  /**
   * Returns the list of InetSocketAddresses corresponding to backup node rpc
   * addresses from the configuration.
   *
   * @param conf configuration
   * @return list of InetSocketAddresses
   * @throws IOException on error
   */
  public static Map<String, Map<String, InetSocketAddress>> getBackupNodeAddresses(
      Configuration conf) throws IOException {
    Map<String, Map<String, InetSocketAddress>> addressList = getAddresses(conf,
        null, DFS_NAMENODE_BACKUP_ADDRESS_KEY);
    if (addressList.isEmpty()) {
      throw new IOException("Incorrect configuration: backup node address "
          + DFS_NAMENODE_BACKUP_ADDRESS_KEY + " is not configured.");
    }
    return addressList;
  }

  /**
   * Returns the list of InetSocketAddresses corresponding to secondary
   * namenode http addresses from the configuration.
   *
   * @param conf configuration
   * @return list of InetSocketAddresses
   * @throws IOException on error
   */
  public static Map<String, Map<String, InetSocketAddress>> getSecondaryNameNodeAddresses(
      Configuration conf) throws IOException {
    Map<String, Map<String, InetSocketAddress>> addressList = getAddresses(conf, null,
        DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
    if (addressList.isEmpty()) {
      throw new IOException("Incorrect configuration: secondary namenode address "
          + DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY + " is not configured.");
    }
    return addressList;
  }

  /**
   * Returns the list of InetSocketAddresses corresponding to namenodes from
   * the configuration. Note this is to be used by datanodes to get the list
   * of namenode addresses to talk to.
   *
   * Returns the namenode address specifically configured for datanodes (using
   * service ports), if found. If not, the regular RPC address configured for
   * other clients is returned.
   *
   * @param conf configuration
   * @return list of InetSocketAddress
   * @throws IOException on error
   */
  public static Map<String, Map<String, InetSocketAddress>> getNNServiceRpcAddresses(
      Configuration conf) throws IOException {
    // Use default address as fall back
    String defaultAddress;
    try {
      defaultAddress = NetUtils.getHostPortString(NameNode.getAddress(conf));
    } catch (IllegalArgumentException e) {
      defaultAddress = null;
    }

    Map<String, Map<String, InetSocketAddress>> addressList =
        getAddresses(conf, defaultAddress,
            DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
    if (addressList.isEmpty()) {
      throw new IOException("Incorrect configuration: namenode address "
          + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "
          + DFS_NAMENODE_RPC_ADDRESS_KEY
          + " is not configured.");
    }
    return addressList;
  }

  /**
   * Flatten the given map, as returned by other functions in this class,
   * into a flat list of {@link ConfiguredNNAddress} instances.
   */
  public static List<ConfiguredNNAddress> flattenAddressMap(
      Map<String, Map<String, InetSocketAddress>> map) {
    List<ConfiguredNNAddress> ret = Lists.newArrayList();

    for (Map.Entry<String, Map<String, InetSocketAddress>> entry :
        map.entrySet()) {
      String nsId = entry.getKey();
      Map<String, InetSocketAddress> nnMap = entry.getValue();
      for (Map.Entry<String, InetSocketAddress> e2 : nnMap.entrySet()) {
        String nnId = e2.getKey();
        InetSocketAddress addr = e2.getValue();

        ret.add(new ConfiguredNNAddress(nsId, nnId, addr));
      }
    }
    return ret;
  }

  /**
   * Format the given map, as returned by other functions in this class,
   * into a string suitable for debugging display. The format of this string
   * should not be considered an interface, and is liable to change.
   */
  public static String addressMapToString(
      Map<String, Map<String, InetSocketAddress>> map) {
    StringBuilder b = new StringBuilder();
    for (Map.Entry<String, Map<String, InetSocketAddress>> entry :
        map.entrySet()) {
      String nsId = entry.getKey();
      Map<String, InetSocketAddress> nnMap = entry.getValue();
      b.append("Nameservice <").append(nsId).append(">:").append("\n");
      for (Map.Entry<String, InetSocketAddress> e2 : nnMap.entrySet()) {
        b.append(" NN ID ").append(e2.getKey())
            .append(" => ").append(e2.getValue()).append("\n");
      }
    }
    return b.toString();
  }

  public static String nnAddressesAsString(Configuration conf) {
    Map<String, Map<String, InetSocketAddress>> addresses =
        getHaNnRpcAddresses(conf);
    return addressMapToString(addresses);
  }

  /**
   * Represent one of the NameNodes configured in the cluster.
   */
  public static class ConfiguredNNAddress {
    private final String nameserviceId;
    private final String namenodeId;
    private final InetSocketAddress addr;

    private ConfiguredNNAddress(String nameserviceId, String namenodeId,
        InetSocketAddress addr) {
      this.nameserviceId = nameserviceId;
      this.namenodeId = namenodeId;
      this.addr = addr;
    }

    public String getNameserviceId() {
      return nameserviceId;
    }

    public String getNamenodeId() {
      return namenodeId;
    }

    public InetSocketAddress getAddress() {
      return addr;
    }

    @Override
    public String toString() {
      return "ConfiguredNNAddress[nsId=" + nameserviceId + ";" +
          "nnId=" + namenodeId + ";addr=" + addr + "]";
    }
  }

  /**
   * Get a URI for each configured nameservice. If a nameservice is
   * HA-enabled, then the logical URI of the nameservice is returned. If the
   * nameservice is not HA-enabled, then a URI corresponding to an RPC address
   * of the single NN for that nameservice is returned, preferring the service
   * RPC address over the client RPC address.
   *
   * @param conf configuration
   * @return a collection of all configured NN URIs, preferring service
   *         addresses
   */
  public static Collection<URI> getNsServiceRpcUris(Configuration conf) {
    return getNameServiceUris(conf,
        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
  }

  /**
   * Get a URI for each configured nameservice. If a nameservice is
   * HA-enabled, then the logical URI of the nameservice is returned. If the
   * nameservice is not HA-enabled, then a URI corresponding to the address of
   * the single NN for that nameservice is returned.
   *
   * @param conf configuration
   * @param keys configuration keys to try in order to get the URI for non-HA
   *        nameservices
   * @return a collection of all configured NN URIs
   */
  public static Collection<URI> getNameServiceUris(Configuration conf,
      String... keys) {
    Set<URI> ret = new HashSet<URI>();

    // We're passed multiple possible configuration keys for any given NN or HA
    // nameservice, and search the config in order of these keys. In order to
    // make sure that a later config lookup (e.g. fs.defaultFS) doesn't add a
    // URI for a config key for which we've already found a preferred entry, we
    // keep track of non-preferred keys here.
    Set<URI> nonPreferredUris = new HashSet<URI>();

    for (String nsId : getNameServiceIds(conf)) {
      if (HAUtil.isHAEnabled(conf, nsId)) {
        // Add the logical URI of the nameservice.
        try {
          ret.add(new URI(HdfsConstants.HDFS_URI_SCHEME + "://" + nsId));
        } catch (URISyntaxException ue) {
          throw new IllegalArgumentException(ue);
        }
      } else {
        // Add the URI corresponding to the address of the NN.
        boolean uriFound = false;
        for (String key : keys) {
          String addr = conf.get(concatSuffixes(key, nsId));
          if (addr != null) {
            URI uri = createUri(HdfsConstants.HDFS_URI_SCHEME,
                NetUtils.createSocketAddr(addr));
            if (!uriFound) {
              uriFound = true;
              ret.add(uri);
            } else {
              nonPreferredUris.add(uri);
            }
          }
        }
      }
    }

    // Add the generic configuration keys.
    boolean uriFound = false;
    for (String key : keys) {
      String addr = conf.get(key);
      if (addr != null) {
        URI uri = createUri(HdfsConstants.HDFS_URI_SCHEME,
            NetUtils.createSocketAddr(addr));
        if (!uriFound) {
          uriFound = true;
          ret.add(uri);
        } else {
          nonPreferredUris.add(uri);
        }
      }
    }

    // Add the default URI if it is an HDFS URI.
    URI defaultUri = FileSystem.getDefaultUri(conf);
    // Check whether defaultUri is in ip:port format and, if so, convert it
    // to hostname:port format so duplicates compare equal.
    if (defaultUri != null && (defaultUri.getPort() != -1)) {
      defaultUri = createUri(defaultUri.getScheme(),
          NetUtils.createSocketAddr(defaultUri.getHost(),
              defaultUri.getPort()));
    }
    if (defaultUri != null &&
        HdfsConstants.HDFS_URI_SCHEME.equals(defaultUri.getScheme()) &&
        !nonPreferredUris.contains(defaultUri)) {
      ret.add(defaultUri);
    }

    return ret;
  }

  /**
   * Given the InetSocketAddress, this method returns the nameservice Id
   * corresponding to the key with matching address, by doing a reverse
   * lookup on the list of nameservices until it finds a match.
   *
   * Since the process of resolving URIs to Addresses is slightly expensive,
   * this utility method should not be used in performance-critical routines.
   *
   * @param conf - configuration
   * @param address - InetSocketAddress for configured communication with NN.
   *     Configured addresses are typically given as URIs, but we may have to
   *     compare against a URI typed in by a human, or the server name may be
   *     aliased, so we compare unambiguous InetSocketAddresses instead of just
   *     comparing URI substrings.
   * @param keys - list of configured communication parameters that should
   *     be checked for matches. For example, to compare against RPC addresses,
   *     provide the list DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
   *     DFS_NAMENODE_RPC_ADDRESS_KEY. Use the generic parameter keys,
   *     not the NameServiceId-suffixed keys.
   * @return nameserviceId, or null if no match found
   */
  public static String getNameServiceIdFromAddress(final Configuration conf,
      final InetSocketAddress address, String... keys) {
    // Configuration with a single namenode and no nameserviceId
    String[] ids = getSuffixIDs(conf, address, keys);
    return (ids != null) ? ids[0] : null;
  }

  /**
   * Return the server http or https address from the configuration for a
   * given namenode rpc address.
   * @param conf configuration
   * @param namenodeAddr - namenode RPC address
   * @param scheme - the scheme (http / https)
   * @return server http or https address
   * @throws IOException
   */
  public static URI getInfoServer(InetSocketAddress namenodeAddr,
      Configuration conf, String scheme) throws IOException {
    String[] suffixes = null;
    if (namenodeAddr != null) {
      // if non-default namenode, try reverse look up
      // the nameServiceID if it is available
      suffixes = getSuffixIDs(conf, namenodeAddr,
          DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
          DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
    }

    String authority;
    if ("http".equals(scheme)) {
      authority = getSuffixedConf(conf, DFS_NAMENODE_HTTP_ADDRESS_KEY,
          DFS_NAMENODE_HTTP_ADDRESS_DEFAULT, suffixes);
    } else if ("https".equals(scheme)) {
      authority = getSuffixedConf(conf, DFS_NAMENODE_HTTPS_ADDRESS_KEY,
          DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT, suffixes);
    } else {
      throw new IllegalArgumentException("Invalid scheme: " + scheme);
    }

    if (namenodeAddr != null) {
      authority = substituteForWildcardAddress(authority,
          namenodeAddr.getHostName());
    }
    return URI.create(scheme + "://" + authority);
  }

  /**
   * Look up the HTTP / HTTPS address of the namenode, and replace its hostname
   * with defaultHost when the configured address turns out to be a wildcard /
   * local address.
   *
   * @param defaultHost
   *          The default host name of the namenode.
   * @param conf
   *          The configuration
   * @param scheme
   *          HTTP or HTTPS
   * @throws IOException
   */
  public static URI getInfoServerWithDefaultHost(String defaultHost,
      Configuration conf, final String scheme) throws IOException {
    URI configuredAddr = getInfoServer(null, conf, scheme);
    String authority = substituteForWildcardAddress(
        configuredAddr.getAuthority(), defaultHost);
    return URI.create(scheme + "://" + authority);
  }

  /**
   * Determine whether HTTP or HTTPS should be used to connect to the remote
   * server. Currently the client only connects to the server via HTTPS if the
   * policy is set to HTTPS_ONLY.
   *
   * @return the scheme (HTTP / HTTPS)
   */
  public static String getHttpClientScheme(Configuration conf) {
    HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf);
    return policy == HttpConfig.Policy.HTTPS_ONLY ? "https" : "http";
  }

  /**
   * Substitute a default host in the case that an address has been configured
   * with a wildcard. This is used, for example, when determining the HTTP
   * address of the NN -- if it's configured to bind to 0.0.0.0, we want to
   * substitute the hostname from the filesystem URI rather than trying to
   * connect to 0.0.0.0.
   * @param configuredAddress the address found in the configuration
   * @param defaultHost the host to substitute with, if configuredAddress
   *        is a local/wildcard address.
   * @return the substituted address
   * @throws IOException if it is a wildcard address and security is enabled
   */
  @VisibleForTesting
  static String substituteForWildcardAddress(String configuredAddress,
      String defaultHost) throws IOException {
    InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress);
    InetSocketAddress defaultSockAddr = NetUtils.createSocketAddr(defaultHost
        + ":0");
    if (sockAddr.getAddress().isAnyLocalAddress()) {
      if (UserGroupInformation.isSecurityEnabled() &&
          defaultSockAddr.getAddress().isAnyLocalAddress()) {
        throw new IOException("Cannot use a wildcard address with security. " +
            "Must explicitly set bind address for Kerberos");
      }
      return defaultHost + ":" + sockAddr.getPort();
    } else {
      return configuredAddress;
    }
  }
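
  // Illustrative examples (a sketch; hosts and ports are hypothetical):
  //   substituteForWildcardAddress("0.0.0.0:50070", "nn1.example.com")
  //     -> "nn1.example.com:50070"
  //   substituteForWildcardAddress("nn1.example.com:50070", "other.host")
  //     -> "nn1.example.com:50070"  (non-wildcard addresses pass through)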

  private static String getSuffixedConf(Configuration conf,
      String key, String defaultVal, String[] suffixes) {
    String ret = conf.get(DFSUtil.addKeySuffixes(key, suffixes));
    if (ret != null) {
      return ret;
    }
    return conf.get(key, defaultVal);
  }

  /**
   * Sets the node specific setting into generic configuration key. Looks up
   * value of "key.nameserviceId.namenodeId" and if found sets that value into
   * generic key in the conf. If this is not found, falls back to
   * "key.nameserviceId" and then the unmodified key.
   *
   * Note that this only modifies the runtime conf.
   *
   * @param conf
   *          Configuration object to lookup specific key and to set the value
   *          to the key passed. Note the conf object is modified.
   * @param nameserviceId
   *          nameservice Id to construct the node specific key. Pass null if
   *          federation is not configured.
   * @param nnId
   *          namenode Id to construct the node specific key. Pass null if
   *          HA is not configured.
   * @param keys
   *          The keys for which node specific values are looked up
   */
  public static void setGenericConf(Configuration conf,
      String nameserviceId, String nnId, String... keys) {
    for (String key : keys) {
      String value = conf.get(addKeySuffixes(key, nameserviceId, nnId));
      if (value != null) {
        conf.set(key, value);
        continue;
      }
      value = conf.get(addKeySuffixes(key, nameserviceId));
      if (value != null) {
        conf.set(key, value);
      }
    }
  }

  /** Return used space as a percentage of capacity */
  public static float getPercentUsed(long used, long capacity) {
    return capacity <= 0 ? 100 : (used * 100.0f) / capacity;
  }

  /** Return remaining space as a percentage of capacity */
  public static float getPercentRemaining(long remaining, long capacity) {
    return capacity <= 0 ? 0 : (remaining * 100.0f) / capacity;
  }

  /** Convert a percentage to a string. */
  public static String percent2String(double percentage) {
    return StringUtils.format("%.2f%%", percentage);
  }

  /**
   * Round bytes to GiB (gibibyte)
   * @param bytes number of bytes
   * @return number of GiB
   */
  public static int roundBytesToGB(long bytes) {
    return Math.round((float) bytes / 1024 / 1024 / 1024);
  }
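
  // Worked example (a sketch): 1.5 GiB rounds to 2, and the percentage
  // helpers format as expected:
  //   roundBytesToGB(1610612736L)              -> 2        (1.5 * 2^30 bytes)
  //   percent2String(getPercentUsed(25, 200))  -> "12.50%"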

  /** Create a {@link ClientDatanodeProtocol} proxy */
  public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
      DatanodeID datanodeid, Configuration conf, int socketTimeout,
      boolean connectToDnViaHostname, LocatedBlock locatedBlock) throws IOException {
    return new ClientDatanodeProtocolTranslatorPB(datanodeid, conf, socketTimeout,
        connectToDnViaHostname, locatedBlock);
  }

  /** Create a {@link ClientDatanodeProtocol} proxy using a kerberos ticket */
  static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
      DatanodeID datanodeid, Configuration conf, int socketTimeout,
      boolean connectToDnViaHostname) throws IOException {
    return new ClientDatanodeProtocolTranslatorPB(
        datanodeid, conf, socketTimeout, connectToDnViaHostname);
  }

  /** Create a {@link ClientDatanodeProtocol} proxy */
  public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
      InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
      SocketFactory factory) throws IOException {
    return new ClientDatanodeProtocolTranslatorPB(addr, ticket, conf, factory);
  }

  /**
   * Get the nameservice Id for the {@link NameNode} based on the namenode RPC
   * address matching the local node address.
   */
  public static String getNamenodeNameServiceId(Configuration conf) {
    return getNameServiceId(conf, DFS_NAMENODE_RPC_ADDRESS_KEY);
  }

  /**
   * Get the nameservice Id for the BackupNode based on the backup node RPC
   * address matching the local node address.
   */
  public static String getBackupNameServiceId(Configuration conf) {
    return getNameServiceId(conf, DFS_NAMENODE_BACKUP_ADDRESS_KEY);
  }

  /**
   * Get the nameservice Id for the secondary node based on the secondary http
   * address matching the local node address.
   */
  public static String getSecondaryNameServiceId(Configuration conf) {
    return getNameServiceId(conf, DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
  }

  /**
   * Get the nameservice Id by matching the {@code addressKey} with the
   * address of the local node.
   *
   * If {@link DFSConfigKeys#DFS_NAMESERVICE_ID} is not specifically
   * configured, and more than one nameservice Id is configured, this method
   * determines the nameservice Id by matching the local node's address with the
   * configured addresses. When a match is found, it returns the nameservice Id
   * from the corresponding configuration key.
   *
   * @param conf Configuration
   * @param addressKey configuration key to get the address.
   * @return nameservice Id on success, null if federation is not configured.
   * @throws HadoopIllegalArgumentException on error
   */
  private static String getNameServiceId(Configuration conf, String addressKey) {
    String nameserviceId = conf.get(DFS_NAMESERVICE_ID);
    if (nameserviceId != null) {
      return nameserviceId;
    }
    Collection<String> nsIds = getNameServiceIds(conf);
    if (1 == nsIds.size()) {
      return nsIds.toArray(new String[1])[0];
    }
    String nnId = conf.get(DFS_HA_NAMENODE_ID_KEY);

    return getSuffixIDs(conf, addressKey, null, nnId, LOCAL_ADDRESS_MATCHER)[0];
  }

  /**
   * Returns the nameservice Id and namenode Id when the local host matches the
   * configuration parameter {@code addressKey}.<nameservice Id>.<namenode Id>
   *
   * @param conf Configuration
   * @param addressKey configuration key corresponding to the address.
   * @param knownNsId only look at configs for the given nameservice, if not-null
   * @param knownNNId only look at configs for the given namenode, if not null
   * @param matcher matching criteria for matching the address
   * @return Array with nameservice Id and namenode Id on success. First element
   *         in the array is nameservice Id and second element is namenode Id.
   *         Null value indicates that the configuration does not have the
   *         corresponding Id.
   * @throws HadoopIllegalArgumentException on error
   */
  static String[] getSuffixIDs(final Configuration conf, final String addressKey,
      String knownNsId, String knownNNId,
      final AddressMatcher matcher) {
    String nameserviceId = null;
    String namenodeId = null;
    int found = 0;

    Collection<String> nsIds = getNameServiceIds(conf);
    for (String nsId : emptyAsSingletonNull(nsIds)) {
      if (knownNsId != null && !knownNsId.equals(nsId)) {
        continue;
      }

      Collection<String> nnIds = getNameNodeIds(conf, nsId);
      for (String nnId : emptyAsSingletonNull(nnIds)) {
        if (LOG.isTraceEnabled()) {
          LOG.trace(String.format("addressKey: %s nsId: %s nnId: %s",
              addressKey, nsId, nnId));
        }
        if (knownNNId != null && !knownNNId.equals(nnId)) {
          continue;
        }
        String key = addKeySuffixes(addressKey, nsId, nnId);
        String addr = conf.get(key);
        if (addr == null) {
          continue;
        }
        InetSocketAddress s = null;
        try {
          s = NetUtils.createSocketAddr(addr);
        } catch (Exception e) {
          LOG.warn("Exception in creating socket address " + addr, e);
          continue;
        }
        if (!s.isUnresolved() && matcher.match(s)) {
          nameserviceId = nsId;
          namenodeId = nnId;
          found++;
        }
      }
    }
    if (found > 1) { // Only one address must match the local address
      String msg = "Configuration has multiple addresses that match "
          + "local node's address. Please configure the system with "
          + DFS_NAMESERVICE_ID + " and "
          + DFS_HA_NAMENODE_ID_KEY;
      throw new HadoopIllegalArgumentException(msg);
    }
    return new String[] { nameserviceId, namenodeId };
  }

  /**
   * For the given set of {@code keys}, adds the nameservice Id and/or namenode
   * Id suffixes and returns {nameserviceId, namenodeId} when an address match
   * is found.
   * @see #getSuffixIDs(Configuration, String, String, String, AddressMatcher)
   */
  static String[] getSuffixIDs(final Configuration conf,
      final InetSocketAddress address, final String... keys) {
    AddressMatcher matcher = new AddressMatcher() {
      @Override
      public boolean match(InetSocketAddress s) {
        return address.equals(s);
      }
    };

    for (String key : keys) {
      String[] ids = getSuffixIDs(conf, key, null, null, matcher);
      if (ids != null && (ids[0] != null || ids[1] != null)) {
        return ids;
      }
    }
    return null;
  }

  private interface AddressMatcher {
    public boolean match(InetSocketAddress s);
  }

  /** Create a URI from the scheme and address */
  public static URI createUri(String scheme, InetSocketAddress address) {
    try {
      return new URI(scheme, null, address.getHostName(), address.getPort(),
          null, null, null);
    } catch (URISyntaxException ue) {
      throw new IllegalArgumentException(ue);
    }
  }
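
  // Illustrative usage (a sketch; host and port are hypothetical):
  //   createUri("hdfs", new InetSocketAddress("nn1.example.com", 8020))
  //     -> hdfs://nn1.example.com:8020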

  /**
   * Add a protobuf-based protocol to the {@link org.apache.hadoop.ipc.RPC.Server}
   * @param conf configuration
   * @param protocol Protocol interface
   * @param service service that implements the protocol
   * @param server RPC server to which the protocol and implementation are added
   * @throws IOException
   */
  public static void addPBProtocol(Configuration conf, Class<?> protocol,
      BlockingService service, RPC.Server server) throws IOException {
    RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine.class);
    server.addProtocol(RPC.RpcKind.RPC_PROTOCOL_BUFFER, protocol, service);
  }

  /**
   * Map a logical namenode ID to its service address. Use the given
   * nameservice if specified, or the configured one if none is given.
   *
   * @param conf Configuration
   * @param nsId which nameservice nnId is a part of, optional
   * @param nnId the namenode ID to get the service addr for
   * @return the service addr, null if it could not be determined
   */
  public static String getNamenodeServiceAddr(final Configuration conf,
      String nsId, String nnId) {

    if (nsId == null) {
      nsId = getOnlyNameServiceIdOrNull(conf);
    }

    String serviceAddrKey = concatSuffixes(
        DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nsId, nnId);

    String addrKey = concatSuffixes(
        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, nsId, nnId);

    String serviceRpcAddr = conf.get(serviceAddrKey);
    if (serviceRpcAddr == null) {
      serviceRpcAddr = conf.get(addrKey);
    }
    return serviceRpcAddr;
  }

  /**
   * If the configuration refers to only a single nameservice, return the
   * name of that nameservice. If it refers to 0 or more than 1, return null.
   */
  public static String getOnlyNameServiceIdOrNull(Configuration conf) {
    Collection<String> nsIds = getNameServiceIds(conf);
    if (1 == nsIds.size()) {
      return nsIds.toArray(new String[1])[0];
    } else {
      // No nameservice ID was given and more than one is configured
      return null;
    }
  }

  public static final Options helpOptions = new Options();
  public static final Option helpOpt = new Option("h", "help", false,
      "get help information");

  static {
    helpOptions.addOption(helpOpt);
  }

  /**
   * Parse the help argument for commands.
   *
   * @param args the arguments to be parsed
   * @param helpDescription help information to be printed out
   * @param out the PrintStream to print help to
   * @param printGenericCommandUsage whether to print the
   *          generic command usage defined in ToolRunner
   * @return true when the argument matches the help option, false if not
   */
  public static boolean parseHelpArgument(String[] args,
      String helpDescription, PrintStream out, boolean printGenericCommandUsage) {
    if (args.length == 1) {
      try {
        CommandLineParser parser = new PosixParser();
        CommandLine cmdLine = parser.parse(helpOptions, args);
        if (cmdLine.hasOption(helpOpt.getOpt())
            || cmdLine.hasOption(helpOpt.getLongOpt())) {
          // should print out the help information
          out.println(helpDescription + "\n");
          if (printGenericCommandUsage) {
            ToolRunner.printGenericCommandUsage(out);
          }
          return true;
        }
      } catch (ParseException pe) {
        return false;
      }
    }
    return false;
  }
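
  // Illustrative usage from a tool's main() (a sketch; the usage string is
  // hypothetical):
  //   if (DFSUtil.parseHelpArgument(args, "Usage: mytool [-h]", System.out, true)) {
  //     System.exit(0);
  //   }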

  /**
   * Get DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION from configuration.
   *
   * @param conf Configuration
   * @return Value of DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION
   */
  public static float getInvalidateWorkPctPerIteration(Configuration conf) {
    float blocksInvalidateWorkPct = conf.getFloat(
        DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION,
        DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION_DEFAULT);
    Preconditions.checkArgument(
        (blocksInvalidateWorkPct > 0 && blocksInvalidateWorkPct <= 1.0f),
        DFSConfigKeys.DFS_NAMENODE_INVALIDATE_WORK_PCT_PER_ITERATION +
        " = '" + blocksInvalidateWorkPct + "' is invalid. " +
        "It should be a positive, non-zero float value, not greater than 1.0f, " +
        "to indicate a percentage.");
    return blocksInvalidateWorkPct;
  }

  /**
   * Get DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION from
   * configuration.
   *
   * @param conf Configuration
   * @return Value of DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION
   */
  public static int getReplWorkMultiplier(Configuration conf) {
    int blocksReplWorkMultiplier = conf.getInt(
        DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION,
        DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION_DEFAULT);
    Preconditions.checkArgument(
        (blocksReplWorkMultiplier > 0),
        DFSConfigKeys.DFS_NAMENODE_REPLICATION_WORK_MULTIPLIER_PER_ITERATION +
        " = '" + blocksReplWorkMultiplier + "' is invalid. " +
        "It should be a positive, non-zero integer value.");
    return blocksReplWorkMultiplier;
  }

  /**
   * Get the SPNEGO keytab key from the configuration.
   *
   * @param conf Configuration
   * @param defaultKey the key to fall back on
   * @return DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY if that key's value is
   *         non-empty, else defaultKey
   */
  public static String getSpnegoKeytabKey(Configuration conf, String defaultKey) {
    String value =
        conf.get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
    return (value == null || value.isEmpty()) ?
        defaultKey : DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY;
  }
1554
  /**
   * Get the HTTP policy. The policy is chosen as follows:
   * <ol>
   * <li>If hadoop.ssl.enabled is set, HTTP endpoints are not started; only
   * HTTPS endpoints are started on the configured HTTPS ports.</li>
   * <li>This configuration is overridden by the dfs.https.enable
   * configuration if it is set to true. In that case, both HTTP and HTTPS
   * endpoints are started.</li>
   * <li>All of the above configurations are overridden by the
   * dfs.http.policy configuration, which can select HTTP-only, HTTPS-only,
   * or HTTP-and-HTTPS endpoints.</li>
   * </ol>
   * See the hdfs-default.xml documentation for more details on each of these
   * configuration settings.
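   *
   * <p>A minimal sketch of the precedence; the values set here are
   * illustrative:
   * <pre>{@code
   * conf.setBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, true); // deprecated
   * conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, "HTTPS_ONLY");
   * // dfs.http.policy takes precedence over the deprecated keys:
   * HttpConfig.Policy policy = DFSUtil.getHttpPolicy(conf); // HTTPS_ONLY
   * }</pre>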
   */
  public static HttpConfig.Policy getHttpPolicy(Configuration conf) {
    String policyStr = conf.get(DFSConfigKeys.DFS_HTTP_POLICY_KEY);
    if (policyStr == null) {
      boolean https = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY,
          DFSConfigKeys.DFS_HTTPS_ENABLE_DEFAULT);

      boolean hadoopSsl = conf.getBoolean(
          CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY,
          CommonConfigurationKeys.HADOOP_SSL_ENABLED_DEFAULT);

      if (hadoopSsl) {
        LOG.warn(CommonConfigurationKeys.HADOOP_SSL_ENABLED_KEY
            + " is deprecated. Please use " + DFSConfigKeys.DFS_HTTP_POLICY_KEY
            + ".");
      }
      if (https) {
        LOG.warn(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY
            + " is deprecated. Please use " + DFSConfigKeys.DFS_HTTP_POLICY_KEY
            + ".");
      }

      return (hadoopSsl || https) ? HttpConfig.Policy.HTTP_AND_HTTPS
          : HttpConfig.Policy.HTTP_ONLY;
    }

    HttpConfig.Policy policy = HttpConfig.Policy.fromString(policyStr);
    if (policy == null) {
      throw new HadoopIllegalArgumentException("Unrecognized value '"
          + policyStr + "' for " + DFSConfigKeys.DFS_HTTP_POLICY_KEY);
    }

    conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, policy.name());
    return policy;
  }

  /**
   * Configure an HttpServer2.Builder with the SSL settings (client auth,
   * keystore, and truststore) taken from the given SSL configuration.
   */
  public static HttpServer2.Builder loadSslConfToHttpServerBuilder(HttpServer2.Builder builder,
      Configuration sslConf) {
    return builder
        .needsClientAuth(
            sslConf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
                DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT))
        .keyPassword(sslConf.get("ssl.server.keystore.keypassword"))
        .keyStore(sslConf.get("ssl.server.keystore.location"),
            sslConf.get("ssl.server.keystore.password"),
            sslConf.get("ssl.server.keystore.type", "jks"))
        .trustStore(sslConf.get("ssl.server.truststore.location"),
            sslConf.get("ssl.server.truststore.password"),
            sslConf.get("ssl.server.truststore.type", "jks"));
  }

  /**
   * Load the HTTPS-related configuration from the configured ssl-server
   * resource file into a fresh Configuration object.
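   *
   * <p>A minimal sketch; the property read back here is one of the keys
   * consumed by {@link #loadSslConfToHttpServerBuilder}:
   * <pre>{@code
   * Configuration sslConf = DFSUtil.loadSslConfiguration(conf);
   * String keystore = sslConf.get("ssl.server.keystore.location");
   * }</pre>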
   */
  public static Configuration loadSslConfiguration(Configuration conf) {
    Configuration sslConf = new Configuration(false);

    sslConf.addResource(conf.get(
        DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
        DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));

    boolean requireClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
        DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
    sslConf.setBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY, requireClientAuth);
    return sslConf;
  }

  /**
   * Return an HttpServer2.Builder that the journalnode / namenode / secondary
   * namenode can use to initialize their HTTP / HTTPS server.
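   *
   * <p>A minimal sketch; the addresses, server name, and config keys below
   * are illustrative:
   * <pre>{@code
   * InetSocketAddress httpAddr = new InetSocketAddress("0.0.0.0", 50070);
   * InetSocketAddress httpsAddr = new InetSocketAddress("0.0.0.0", 50470);
   * HttpServer2 server = DFSUtil.httpServerTemplateForNNAndJN(conf,
   *     httpAddr, httpsAddr, "hdfs",
   *     DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY,
   *     DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY).build();
   * server.start();
   * }</pre>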
   */
  public static HttpServer2.Builder httpServerTemplateForNNAndJN(
      Configuration conf, final InetSocketAddress httpAddr,
      final InetSocketAddress httpsAddr, String name, String spnegoUserNameKey,
      String spnegoKeytabFileKey) throws IOException {
    HttpConfig.Policy policy = getHttpPolicy(conf);

    HttpServer2.Builder builder = new HttpServer2.Builder().setName(name)
        .setConf(conf).setACL(new AccessControlList(conf.get(DFS_ADMIN, " ")))
        .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
        .setUsernameConfKey(spnegoUserNameKey)
        .setKeytabConfKey(getSpnegoKeytabKey(conf, spnegoKeytabFileKey));

    // initialize the web server for uploading/downloading files.
    LOG.info("Starting web server as: "
        + SecurityUtil.getServerPrincipal(conf.get(spnegoUserNameKey),
            httpAddr.getHostName()));

    if (policy.isHttpEnabled()) {
      if (httpAddr.getPort() == 0) {
        builder.setFindPort(true);
      }

      URI uri = URI.create("http://" + NetUtils.getHostPortString(httpAddr));
      builder.addEndpoint(uri);
      LOG.info("Starting Web-server for " + name + " at: " + uri);
    }

    if (policy.isHttpsEnabled() && httpsAddr != null) {
      Configuration sslConf = loadSslConfiguration(conf);
      loadSslConfToHttpServerBuilder(builder, sslConf);

      if (httpsAddr.getPort() == 0) {
        builder.setFindPort(true);
      }

      URI uri = URI.create("https://" + NetUtils.getHostPortString(httpsAddr));
      builder.addEndpoint(uri);
      LOG.info("Starting Web-server for " + name + " at: " + uri);
    }
    return builder;
  }

  /**
   * Converts a Date into an ISO-8601 formatted datetime string.
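   *
   * <p>For example (the rendered offset depends on the JVM's default
   * timezone, so the output shown is illustrative):
   * <pre>{@code
   * String s = DFSUtil.dateToIso8601String(new Date(0L));
   * // "1970-01-01T00:00:00+0000" when the default timezone is UTC
   * }</pre>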
   */
  public static String dateToIso8601String(Date date) {
    SimpleDateFormat df =
        new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ", Locale.ENGLISH);
    return df.format(date);
  }

  /**
   * Converts a time duration in milliseconds into DDD:HH:MM:SS.SSS format.
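   *
   * <p>For example:
   * <pre>{@code
   * DFSUtil.durationToString(90061001L); // "001:01:01:01.001"
   * DFSUtil.durationToString(-1500L);    // "-000:00:00:01.500"
   * }</pre>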
   */
  public static String durationToString(long durationMs) {
    boolean negative = false;
    if (durationMs < 0) {
      negative = true;
      durationMs = -durationMs;
    }
    // Chop off the milliseconds
    long durationSec = durationMs / 1000;
    final int secondsPerMinute = 60;
    final int secondsPerHour = 60 * 60;
    final int secondsPerDay = 60 * 60 * 24;
    final long days = durationSec / secondsPerDay;
    durationSec -= days * secondsPerDay;
    final long hours = durationSec / secondsPerHour;
    durationSec -= hours * secondsPerHour;
    final long minutes = durationSec / secondsPerMinute;
    durationSec -= minutes * secondsPerMinute;
    final long seconds = durationSec;
    final long milliseconds = durationMs % 1000;
    String format = "%03d:%02d:%02d:%02d.%03d";
    if (negative) {
      format = "-" + format;
    }
    return String.format(format, days, hours, minutes, seconds, milliseconds);
  }

  /**
   * Converts a relative time string into a duration in milliseconds. The
   * string must be a number followed by a single-character unit suffix:
   * s (seconds), m (minutes), h (hours), or d (days).
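   *
   * <p>For example:
   * <pre>{@code
   * DFSUtil.parseRelativeTime("10m"); // 600000L
   * DFSUtil.parseRelativeTime("2d");  // 172800000L
   * DFSUtil.parseRelativeTime("5w");  // throws IOException: unknown time unit
   * }</pre>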
   */
  public static long parseRelativeTime(String relTime) throws IOException {
    if (relTime.length() < 2) {
      throw new IOException("Unable to parse relative time value of " + relTime
          + ": too short");
    }
    String ttlString = relTime.substring(0, relTime.length() - 1);
    long ttl;
    try {
      ttl = Long.parseLong(ttlString);
    } catch (NumberFormatException e) {
      throw new IOException("Unable to parse relative time value of " + relTime
          + ": " + ttlString + " is not a number");
    }
    if (relTime.endsWith("s")) {
      // pass
    } else if (relTime.endsWith("m")) {
      ttl *= 60;
    } else if (relTime.endsWith("h")) {
      ttl *= 60 * 60;
    } else if (relTime.endsWith("d")) {
      ttl *= 60 * 60 * 24;
    } else {
      throw new IOException("Unable to parse relative time value of " + relTime
          + ": unknown time unit " + relTime.charAt(relTime.length() - 1));
    }
    return ttl * 1000;
  }

  /**
   * Assert that all objects in the collection are equal. Returns silently if
   * they are; throws an AssertionError if any object is not equal to the
   * others. All null values are considered equal to each other.
   *
   * @param objects the collection of objects to check for equality.
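   *
   * <p>For example:
   * <pre>{@code
   * DFSUtil.assertAllResultsEqual(Arrays.asList("a", "a", "a")); // passes
   * DFSUtil.assertAllResultsEqual(Arrays.asList("a", null));     // throws
   * }</pre>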
   */
  public static void assertAllResultsEqual(Collection<?> objects)
      throws AssertionError {
    if (objects.size() <= 1) {
      return;
    }

    Object[] resultsArray = objects.toArray();
    for (int i = 1; i < resultsArray.length; i++) {
      Object currElement = resultsArray[i];
      Object lastElement = resultsArray[i - 1];
      if ((currElement == null && currElement != lastElement) ||
          (currElement != null && !currElement.equals(lastElement))) {
        throw new AssertionError("Not all elements match in results: " +
            Arrays.toString(resultsArray));
      }
    }
  }
}