diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
index 9e20f809ccf4d..3d779757a98cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
@@ -408,8 +408,7 @@ else if(!nnReg.isRole(NamenodeRole.NAMENODE)) {
}
// TODO: move to a common with DataNode util class
- private static NamespaceInfo handshake(NamenodeProtocol namenode)
- throws IOException, SocketTimeoutException {
+ private static NamespaceInfo handshake(NamenodeProtocol namenode) throws IOException {
NamespaceInfo nsInfo;
nsInfo = namenode.versionRequest(); // throws SocketTimeoutException
String errorMsg = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
index e71b057595952..81ce90a2ae3ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CacheManager.java
@@ -531,8 +531,7 @@ private void addInternal(CacheDirective directive, CachePool pool) {
* Adds a directive, skipping most error checking. This should only be called
* internally in special scenarios like edit log replay.
*/
- CacheDirectiveInfo addDirectiveFromEditLog(CacheDirectiveInfo directive)
- throws InvalidRequestException {
+ CacheDirectiveInfo addDirectiveFromEditLog(CacheDirectiveInfo directive) {
long id = directive.getId();
CacheDirective entry = new CacheDirective(directive);
CachePool pool = cachePools.get(directive.getPool());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java
index 5c753070a459e..84417caaf2eb8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java
@@ -114,7 +114,7 @@ public String toString() {
+ blockpoolID ;
}
- boolean storageVersionMatches(StorageInfo si) throws IOException {
+ boolean storageVersionMatches(StorageInfo si) {
return (layoutVersion == si.layoutVersion) && (cTime == si.cTime);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java
index a77075fa4f2e8..02cb4375bf58d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupInputStream.java
@@ -63,7 +63,7 @@ int length() {
}
}
- EditLogBackupInputStream(String name) throws IOException {
+ EditLogBackupInputStream(String name) {
address = name;
inner = new ByteBufferInputStream();
in = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
index ba4f32fd2154d..63cbf4425d53e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileInputStream.java
@@ -148,8 +148,7 @@ private EditLogFileInputStream(LogSource log,
this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
}
- private void init(boolean verifyLayoutVersion)
- throws LogHeaderCorruptException, IOException {
+ private void init(boolean verifyLayoutVersion) throws IOException {
Preconditions.checkState(state == State.UNINIT);
BufferedInputStream bin = null;
InputStream fStream = null;
@@ -374,7 +373,7 @@ static FSEditLogLoader.EditLogValidation scanEditLog(File file,
*/
@VisibleForTesting
static int readLogVersion(DataInputStream in, boolean verifyLayoutVersion)
- throws IOException, LogHeaderCorruptException {
+ throws IOException {
int logVersion;
try {
logVersion = in.readInt();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
index 7bf5879971615..93f3157b5f87e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EncryptionZoneManager.java
@@ -43,7 +43,6 @@
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ReencryptionStatus;
-import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
@@ -348,8 +347,7 @@ void removeEncryptionZone(Long inodeId) {
*
* Called while holding the FSDirectory lock.
*/
- boolean isInAnEZ(INodesInPath iip) throws UnresolvedLinkException,
- SnapshotAccessControlException, IOException {
+ boolean isInAnEZ(INodesInPath iip) throws IOException {
assert dir.hasReadLock();
return (getEncryptionZoneForPath(iip) != null);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 04913d1a7cee0..25b6243a8f902 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -272,8 +272,7 @@ static void setQuota(FSDirectory fsd, FSPermissionChecker pc, String src,
static boolean unprotectedSetPermission(
FSDirectory fsd, INodesInPath iip, FsPermission permissions)
- throws FileNotFoundException, UnresolvedLinkException,
- QuotaExceededException, SnapshotAccessControlException {
+ throws FileNotFoundException {
assert fsd.hasWriteLock();
final INode inode = FSDirectory.resolveLastINode(iip);
int snapshotId = iip.getLatestSnapshotId();
@@ -284,8 +283,7 @@ static boolean unprotectedSetPermission(
static boolean unprotectedSetOwner(
FSDirectory fsd, INodesInPath iip, String username, String groupname)
- throws FileNotFoundException, UnresolvedLinkException,
- QuotaExceededException, SnapshotAccessControlException {
+ throws FileNotFoundException {
assert fsd.hasWriteLock();
final INode inode = FSDirectory.resolveLastINode(iip);
long oldPerm = inode.getPermissionLong();
@@ -383,8 +381,7 @@ static INodeDirectory unprotectedSetQuota(
static BlockInfo[] unprotectedSetReplication(
FSDirectory fsd, INodesInPath iip, short replication)
- throws QuotaExceededException, UnresolvedLinkException,
- SnapshotAccessControlException, UnsupportedActionException {
+ throws QuotaExceededException {
assert fsd.hasWriteLock();
final BlockManager bm = fsd.getBlockManager();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
index 2110a408b0877..d021588f07bac 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirEncryptionZoneOp.java
@@ -35,7 +35,6 @@
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension.EncryptedKeyVersion;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
@@ -43,7 +42,6 @@
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.hdfs.protocol.ZoneReencryptionStatus;
-import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ReencryptionInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.ZoneEncryptionInfoProto;
@@ -495,8 +493,7 @@ static FileEncryptionInfo getFileEncryptionInfo(final FSDirectory fsd,
* @throws RetryStartFileException if key is inconsistent with current zone
*/
static FileEncryptionInfo getFileEncryptionInfo(FSDirectory dir,
- INodesInPath iip, EncryptionKeyInfo ezInfo)
- throws RetryStartFileException, IOException {
+ INodesInPath iip, EncryptionKeyInfo ezInfo) throws IOException {
FileEncryptionInfo feInfo = null;
final EncryptionZone zone = getEZForPath(dir, iip);
if (zone != null) {
@@ -519,8 +516,7 @@ static FileEncryptionInfo getFileEncryptionInfo(FSDirectory dir,
}
static boolean isInAnEZ(final FSDirectory fsd, final INodesInPath iip)
- throws UnresolvedLinkException, SnapshotAccessControlException,
- IOException {
+ throws IOException {
if (!fsd.ezManager.hasCreatedEncryptionZone()) {
return false;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
index 6628b56a132e0..191639121a123 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirErasureCodingOp.java
@@ -67,10 +67,9 @@ private FSDirErasureCodingOp() {}
* @param fsn namespace
* @param ecPolicyName name of EC policy to be checked
* @return an erasure coding policy if ecPolicyName is valid and enabled
- * @throws IOException
*/
static ErasureCodingPolicy getEnabledErasureCodingPolicyByName(
- final FSNamesystem fsn, final String ecPolicyName) throws IOException {
+ final FSNamesystem fsn, final String ecPolicyName) {
assert fsn.hasReadLock();
ErasureCodingPolicy ecPolicy = fsn.getErasureCodingPolicyManager()
.getEnabledPolicyByName(ecPolicyName);
@@ -99,10 +98,9 @@ static ErasureCodingPolicy getEnabledErasureCodingPolicyByName(
* @param fsn namespace
* @param ecPolicyName name of EC policy to be checked
* @return an erasure coding policy if ecPolicyName is valid
- * @throws IOException
*/
static ErasureCodingPolicy getErasureCodingPolicyByName(
- final FSNamesystem fsn, final String ecPolicyName) throws IOException {
+ final FSNamesystem fsn, final String ecPolicyName) {
assert fsn.hasReadLock();
ErasureCodingPolicy ecPolicy = fsn.getErasureCodingPolicyManager()
.getErasureCodingPolicyByName(ecPolicyName);
@@ -126,12 +124,11 @@ static ErasureCodingPolicy getErasureCodingPolicyByName(
* @return {@link FileStatus}
* @throws IOException
* @throws HadoopIllegalArgumentException if the policy is not enabled
- * @throws AccessControlException if the user does not have write access
*/
static FileStatus setErasureCodingPolicy(final FSNamesystem fsn,
final String srcArg, final String ecPolicyName,
final FSPermissionChecker pc, final boolean logRetryCache)
- throws IOException, AccessControlException {
+ throws IOException {
assert fsn.hasWriteLock();
String src = srcArg;
@@ -348,11 +345,9 @@ private static List<XAttr> removeErasureCodingPolicyXAttr(
* been set or the policy is REPLICATION
* @throws IOException
* @throws FileNotFoundException if the path does not exist.
- * @throws AccessControlException if no read access
*/
static ErasureCodingPolicy getErasureCodingPolicy(final FSNamesystem fsn,
- final String src, FSPermissionChecker pc)
- throws IOException, AccessControlException {
+ final String src, FSPermissionChecker pc) throws IOException {
assert fsn.hasReadLock();
if (FSDirectory.isExactReservedName(src)) {
@@ -439,8 +434,7 @@ static ErasureCodingPolicyInfo[] getErasureCodingPolicies(
* @param fsn namespace
* @return {@link java.util.HashMap} array
*/
- static Map<String, String> getErasureCodingCodecs(final FSNamesystem fsn)
- throws IOException {
+ static Map<String, String> getErasureCodingCodecs(final FSNamesystem fsn) {
assert fsn.hasReadLock();
return CodecRegistry.getInstance().getCodec2CoderCompactMap();
}
@@ -485,8 +479,7 @@ private static ErasureCodingPolicy getErasureCodingPolicyForPath(
return null;
}
- private static XAttr getErasureCodingPolicyXAttrForINode(
- FSNamesystem fsn, INode inode) throws IOException {
+ private static XAttr getErasureCodingPolicyXAttrForINode(FSNamesystem fsn, INode inode) {
// INode can be null
if (inode == null) {
return null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
index c129d1928abdd..dfd43093a4198 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -132,8 +132,7 @@ static INodesInPath renameForEditLog(FSDirectory fsd, String src, String dst,
// if destination is a directory, append source child's name, else return
// iip as-is.
- private static INodesInPath dstForRenameTo(
- INodesInPath srcIIP, INodesInPath dstIIP) throws IOException {
+ private static INodesInPath dstForRenameTo(INodesInPath srcIIP, INodesInPath dstIIP) {
INode dstINode = dstIIP.getLastINode();
if (dstINode != null && dstINode.isDirectory()) {
byte[] childName = srcIIP.getLastLocalName();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
index 337062ec02af7..4a2c28248151d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirTruncateOp.java
@@ -22,13 +22,11 @@
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -70,7 +68,7 @@ static TruncateResult truncate(final FSNamesystem fsn, final String srcArg,
final long newLength, final String clientName,
final String clientMachine, final long mtime,
final BlocksMapUpdateInfo toRemoveBlocks, final FSPermissionChecker pc)
- throws IOException, UnresolvedLinkException {
+ throws IOException {
assert fsn.hasWriteLock();
FSDirectory fsd = fsn.getFSDirectory();
@@ -173,8 +171,7 @@ static void unprotectedTruncate(final FSNamesystem fsn,
final INodesInPath iip,
final String clientName, final String clientMachine,
final long newLength, final long mtime, final Block truncateBlock)
- throws UnresolvedLinkException, QuotaExceededException,
- SnapshotAccessControlException, IOException {
+ throws IOException {
assert fsn.hasWriteLock();
FSDirectory fsd = fsn.getFSDirectory();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 6e6ade291ce27..5e2b732fda3bc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -410,7 +410,7 @@ static List<XAttr> setINodeXAttrs(
}
static XAttr getXAttrByPrefixedName(FSDirectory fsd, INodesInPath iip,
- String prefixedName) throws IOException {
+ String prefixedName) {
fsd.readLock();
try {
return XAttrStorage.readINodeXAttrByPrefixedName(iip.getLastINode(),
@@ -421,8 +421,7 @@ static XAttr getXAttrByPrefixedName(FSDirectory fsd, INodesInPath iip,
}
static XAttr unprotectedGetXAttrByPrefixedName(
- INode inode, int snapshotId, String prefixedName)
- throws IOException {
+ INode inode, int snapshotId, String prefixedName) {
return XAttrStorage.readINodeXAttrByPrefixedName(
inode, snapshotId, prefixedName);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index d4fed21e98e17..eb1c7c32894a8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -811,10 +811,8 @@ public boolean isNonEmptyDirectory(INodesInPath inodesInPath) {
/**
* Check whether the filepath could be created
- * @throws SnapshotAccessControlException if path is in RO snapshot
*/
- boolean isValidToCreate(String src, INodesInPath iip)
- throws SnapshotAccessControlException {
+ boolean isValidToCreate(String src, INodesInPath iip) {
String srcs = normalizePath(src);
return srcs.startsWith("/") && !srcs.endsWith("/") &&
iip.getLastINode() == null;
@@ -1788,7 +1786,7 @@ private static byte[][] constructRemainingPath(byte[][] components,
return components;
}
- INode getINode4DotSnapshot(INodesInPath iip) throws UnresolvedLinkException {
+ INode getINode4DotSnapshot(INodesInPath iip) {
Preconditions.checkArgument(
iip.isDotSnapshotDir(), "%s does not end with %s",
iip.getPath(), HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index c149d6021bbb8..fd5ba560e286c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -1285,7 +1285,7 @@ void purgeOldStorage(NameNodeFile nnf) {
* Rename FSImage with the specific txid
*/
private void renameCheckpoint(long txid, NameNodeFile fromNnf,
- NameNodeFile toNnf, boolean renameMD5) throws IOException {
+ NameNodeFile toNnf, boolean renameMD5) {
ArrayList<StorageDirectory> al = null;
for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.IMAGE)) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java
index 4a40471971462..5d7814d181cfa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java
@@ -73,7 +73,7 @@ private Matcher matchPattern(String name) {
}
@Override
- public void inspectDirectory(StorageDirectory sd) throws IOException {
+ public void inspectDirectory(StorageDirectory sd) {
// Was the directory just formatted?
if (!sd.getVersionFile().exists()) {
LOG.info("No version file in " + sd.getRoot());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 99f2089fe8d52..334afd1052a99 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -252,7 +252,6 @@
import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeException;
import org.apache.hadoop.hdfs.protocol.RollingUpgradeInfo;
-import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.datatransfer.ReplaceDatanodeOnFailure;
@@ -2344,8 +2343,7 @@ void setTimes(String src, long mtime, long atime) throws IOException {
* false if client needs to wait for block recovery.
*/
boolean truncate(String src, long newLength, String clientName,
- String clientMachine, long mtime) throws IOException,
- UnresolvedLinkException {
+ String clientMachine, long mtime) throws IOException {
final String operationName = "truncate";
requireEffectiveLayoutVersionForFeature(Feature.TRUNCATE);
@@ -2553,8 +2551,7 @@ void satisfyStoragePolicy(String src, boolean logRetryCache)
logAuditEvent(true, operationName, src, null, auditStat);
}
- private void validateStoragePolicySatisfy()
- throws UnsupportedActionException, IOException {
+ private void validateStoragePolicySatisfy() throws IOException {
// checks sps status
boolean disabled = (blockManager.getSPSManager() == null);
if (disabled) {
@@ -2660,8 +2657,7 @@ long getPreferredBlockSize(String src) throws IOException {
*/
CryptoProtocolVersion chooseProtocolVersion(
EncryptionZone zone, CryptoProtocolVersion[] supportedVersions)
- throws UnknownCryptoProtocolVersionException, UnresolvedLinkException,
- SnapshotAccessControlException {
+ throws UnknownCryptoProtocolVersionException {
Preconditions.checkNotNull(zone);
Preconditions.checkNotNull(supportedVersions);
// Right now, we only support a single protocol version,
@@ -8049,8 +8045,7 @@ void createEncryptionZone(final String src, final String keyName,
* @throws AccessControlException if the caller is not the superuser.
* @throws UnresolvedLinkException if the path can't be resolved.
*/
- EncryptionZone getEZForPath(final String srcArg)
- throws AccessControlException, UnresolvedLinkException, IOException {
+ EncryptionZone getEZForPath(final String srcArg) throws IOException {
final String operationName = "getEZForPath";
FileStatus resultingStat = null;
EncryptionZone encryptionZone;
@@ -8367,9 +8362,8 @@ boolean disableErasureCodingPolicy(String ecPolicyName,
* @throws UnresolvedLinkException if the path can't be resolved.
* @throws SafeModeException if the Namenode is in safe mode.
*/
- void unsetErasureCodingPolicy(final String srcArg,
- final boolean logRetryCache) throws IOException,
- UnresolvedLinkException, SafeModeException, AccessControlException {
+ void unsetErasureCodingPolicy(final String srcArg, final boolean logRetryCache)
+ throws IOException, SafeModeException, AccessControlException {
final String operationName = "unsetErasureCodingPolicy";
checkOperation(OperationCategory.WRITE);
checkErasureCodingSupported(operationName);
@@ -8435,8 +8429,7 @@ public ECTopologyVerifierResult getECTopologyResultForPolicies(
/**
* Get the erasure coding policy information for specified path.
*/
- ErasureCodingPolicy getErasureCodingPolicy(String src)
- throws AccessControlException, UnresolvedLinkException, IOException {
+ ErasureCodingPolicy getErasureCodingPolicy(String src) throws IOException {
final String operationName = "getErasureCodingPolicy";
boolean success = false;
checkOperation(OperationCategory.READ);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java
index 81ec255e70c66..4f0d62e0519c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileUnderConstructionFeature.java
@@ -17,8 +17,6 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
-import java.io.IOException;
-
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
@@ -53,10 +51,8 @@ public String getClientMachine() {
*
* @param lastBlockLength
* The length of the last block reported from client
- * @throws IOException
*/
- void updateLengthOfLastBlock(INodeFile f, long lastBlockLength)
- throws IOException {
+ void updateLengthOfLastBlock(INodeFile f, long lastBlockLength) {
BlockInfo lastBlock = f.getLastBlock();
assert (lastBlock != null) : "The last block for path "
+ f.getFullPathName() + " is null when updating its length";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java
index ead56a9f61aa9..f5b00301ef9f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/MetaRecoveryContext.java
@@ -89,8 +89,7 @@ public String ask(String prompt, String firstChoice, String... choices)
public static void editLogLoaderPrompt(String prompt,
MetaRecoveryContext recovery, String contStr)
- throws IOException, RequestStopException
- {
+ throws IOException {
if (recovery == null) {
throw new IOException(prompt);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
index e265742978d85..d7585c6f63c0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
@@ -78,8 +78,7 @@ public class NameNodeHttpServer {
public static void initWebHdfs(Configuration conf, String hostname,
String httpKeytab,
- HttpServer2 httpServer2, String jerseyResourcePackage)
- throws IOException {
+ HttpServer2 httpServer2, String jerseyResourcePackage) {
// set user pattern based on configuration file
UserParam.setUserPattern(conf.get(
HdfsClientConfigKeys.DFS_WEBHDFS_USER_PATTERN_KEY,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java
index a1f7c55612343..3317bde678fa0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java
@@ -184,7 +184,7 @@ public boolean hasAvailableDiskSpace() {
* @return the set of directories whose free space is below the threshold.
*/
@VisibleForTesting
- Collection<URI> getVolumesLowOnSpace() throws IOException {
+ Collection<URI> getVolumesLowOnSpace() {
if (LOG.isDebugEnabled()) {
LOG.debug("Going to check the following volumes disk space: " + volumes);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index b19bfc13acf65..1520293221607 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -76,8 +76,6 @@
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.ha.HAServiceStatus;
-import org.apache.hadoop.ha.HealthCheckFailedException;
-import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceProtocolService;
import org.apache.hadoop.ha.protocolPB.HAServiceProtocolPB;
import org.apache.hadoop.ha.protocolPB.HAServiceProtocolServerSideTranslatorPB;
@@ -771,7 +769,7 @@ public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
@Override // ClientProtocol
public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
- throws InvalidToken, IOException {
+ throws IOException {
checkNNStartup();
return namesystem.renewDelegationToken(token);
}
@@ -1824,15 +1822,14 @@ public String[] getGroupsForUser(String user) throws IOException {
}
@Override // HAServiceProtocol
- public synchronized void monitorHealth() throws HealthCheckFailedException,
- AccessControlException, IOException {
+ public synchronized void monitorHealth() throws IOException {
checkNNStartup();
nn.monitorHealth();
}
@Override // HAServiceProtocol
public synchronized void transitionToActive(StateChangeRequestInfo req)
- throws ServiceFailedException, AccessControlException, IOException {
+ throws IOException {
checkNNStartup();
nn.checkHaStateChange(req);
nn.transitionToActive();
@@ -1840,7 +1837,7 @@ public synchronized void transitionToActive(StateChangeRequestInfo req)
@Override // HAServiceProtocol
public synchronized void transitionToStandby(StateChangeRequestInfo req)
- throws ServiceFailedException, AccessControlException, IOException {
+ throws IOException {
checkNNStartup();
// This is to eliminate any race condition between manual transition of
// namenode into Observer, and ZKFC auto failover election, when the
@@ -1859,15 +1856,14 @@ public synchronized void transitionToStandby(StateChangeRequestInfo req)
@Override // HAServiceProtocol
public synchronized void transitionToObserver(StateChangeRequestInfo req)
- throws ServiceFailedException, AccessControlException, IOException {
+ throws IOException {
checkNNStartup();
nn.checkHaStateChange(req);
nn.transitionToObserver();
}
@Override // HAServiceProtocol
- public synchronized HAServiceStatus getServiceStatus()
- throws AccessControlException, ServiceFailedException, IOException {
+ public synchronized HAServiceStatus getServiceStatus() throws IOException {
checkNNStartup();
return nn.getServiceStatus();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
index 2a7002b5cd2f5..d37cb354eb067 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionHandler.java
@@ -696,14 +696,10 @@ protected void checkINodeReady(long zoneId) throws IOException {
/**
* Submit the current batch to the thread pool.
*
- * @param zoneId
- * Id of the EZ INode
- * @throws IOException
- * @throws InterruptedException
+ * @param zoneId Id of the EZ INode
*/
@Override
- protected void submitCurrentBatch(final Long zoneId) throws IOException,
- InterruptedException {
+ protected void submitCurrentBatch(final Long zoneId) {
if (currentBatch.isEmpty()) {
return;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
index 43684a7accccd..35d92f7776d76 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ReencryptionUpdater.java
@@ -240,11 +240,8 @@ boolean isRunning() {
* considered complete.
*
* @param zoneId Id of the zone inode.
- * @throws IOException
- * @throws InterruptedException
*/
- void markZoneSubmissionDone(final long zoneId)
- throws IOException, InterruptedException {
+ void markZoneSubmissionDone(final long zoneId) {
final ZoneSubmissionTracker tracker = handler.getTracker(zoneId);
if (tracker != null && !tracker.getTasks().isEmpty()) {
tracker.submissionDone = true;
@@ -288,10 +285,9 @@ public void run() {
* @param zoneNodePath full path of the EZ inode.
* @param task the completed task.
* @throws IOException
- * @throws InterruptedException
*/
private void processTaskEntries(final String zoneNodePath,
- final ReencryptionTask task) throws IOException, InterruptedException {
+ final ReencryptionTask task) throws IOException {
assert dir.hasWriteLock();
if (!task.batch.isEmpty() && task.numFailures == 0) {
LOG.debug(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index e95200b35aaa5..6573bd6c43d0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -601,7 +601,7 @@ public boolean doCheckpoint() throws IOException {
* @exception Exception if the filesystem does not exist.
* @return 0 on success, non zero on error.
*/
- private int processStartupCommand(CommandLineOpts opts) throws Exception {
+ private int processStartupCommand(CommandLineOpts opts) {
if (opts.getCommand() == null) {
return 0;
}
@@ -1053,7 +1053,7 @@ void ensureCurrentDirExists() throws IOException {
}
}
- void deleteTempEdits() throws IOException {
+ void deleteTempEdits() {
FilenameFilter filter = new FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
index 7a9ce46b1159f..3c352b7f97037 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
@@ -405,7 +405,7 @@ private boolean checkLogsAvailableForRead(FSImage image, long imageTxId,
}
}
- private boolean checkLayoutVersion(NamespaceInfo nsInfo) throws IOException {
+ private boolean checkLayoutVersion(NamespaceInfo nsInfo) {
return (nsInfo.getLayoutVersion() == HdfsServerConstants.NAMENODE_LAYOUT_VERSION);
}
@@ -453,8 +453,7 @@ private void parseConfAndFindOtherNN() throws IOException {
parseProvidedConfigurations(conf);
}
- private void parseProvidedConfigurations(Configuration configuration)
- throws IOException {
+ private void parseProvidedConfigurations(Configuration configuration) {
// if provided and in-memory aliasmap are enabled,
// get the aliasmap location.
boolean providedEnabled = configuration.getBoolean(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
index f72ec7c9177df..5082bb0154f57 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/EditLogTailer.java
@@ -604,7 +604,7 @@ public T call() throws IOException {
throw new IOException("Cannot find any valid remote NN to service request!");
}
- private NamenodeProtocol getActiveNodeProxy() throws IOException {
+ private NamenodeProtocol getActiveNodeProxy() {
if (cachedActiveProxy == null) {
while (true) {
// if we have reached the max loop count, quit by returning null
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
index f1a21cce45a4b..213677052c832 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FSImageFormatPBSnapshot.java
@@ -303,8 +303,7 @@ private void addToDeletedList(INode dnode, INodeDirectory parent) {
*/
private List<INode> loadDeletedList(final List<INodeReference> refList,
InputStream in, INodeDirectory dir, List<Long> deletedNodes,
- List<Integer> deletedRefNodes)
- throws IOException {
+ List<Integer> deletedRefNodes) {
List<INode> dlist = new ArrayList<>(deletedRefNodes.size()
+ deletedNodes.size());
// load non-reference inodes
@@ -485,7 +484,7 @@ public void serializeINodeReferenceSection(OutputStream out)
}
private INodeReferenceSection.INodeReference.Builder buildINodeReference(
- final INodeReference ref, final long refIndex) throws IOException {
+ final INodeReference ref, final long refIndex) {
INodeReferenceSection.INodeReference.Builder rb =
INodeReferenceSection.INodeReference.newBuilder().
setReferredId(ref.getId());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfyManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfyManager.java
index 2c7f36a690bd1..668791b242505 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfyManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfyManager.java
@@ -240,8 +240,6 @@ public void verifyOutstandingPathQLimit() throws IOException {
/**
* Removes the SPS path id from the list of sps paths.
- *
- * @throws IOException
*/
private void clearPathIds(){
synchronized (pathsToBeTraversed) {