diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
index 956a0caac7c7..0f2d1c858614 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
@@ -573,8 +573,7 @@ byte[] addToCompactionLogTable(CompactionLogEntry compactionLogEntry) {
     try {
       activeRocksDB.get().put(compactionLogTableCFHandle, key, value);
     } catch (RocksDBException exception) {
-      // TODO: Revisit exception handling before merging the PR.
-      throw new RuntimeException(exception);
+      throw new RocksDBCheckpointDifferException("Failed to persist compaction log entry", exception);
     }
     return key;
   }
@@ -968,8 +967,7 @@ synchronized void internalGetSSTDiffList(DifferSnapshotVersion src, DifferSnapsh
         // Clear output in case of error. Expect fall back to full diff
         sameFiles.clear();
         differentFiles.clear();
-        // TODO: Revisit error handling here. Use custom exception?
-        throw new RuntimeException(errorMsg);
+        throw new RocksDBCheckpointDifferException(errorMsg);
       }
 
       final Set nextLevel = new HashSet<>();
@@ -1143,8 +1141,7 @@ private synchronized Pair, List> getOlderFileNodes() {
       }
     }
     catch (InvalidProtocolBufferException exception) {
-      // TODO: Handle this properly before merging the PR.
-      throw new RuntimeException(exception);
+      throw new RocksDBCheckpointDifferException("Failed to parse compaction log entry", exception);
     }
     return Pair.of(compactionNodes, keysToRemove);
   }
@@ -1156,8 +1153,7 @@ private synchronized void removeKeyFromCompactionLogTable(
         activeRocksDB.get().delete(compactionLogTableCFHandle, key);
       }
     } catch (RocksDBException exception) {
-      // TODO Handle exception properly before merging the PR.
-      throw new RuntimeException(exception);
+      throw new RocksDBCheckpointDifferException("Failed to delete compaction log entries", exception);
     }
   }
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/RocksDbPersistentList.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/RocksDbPersistentList.java
index af31a7955681..f433989c2c7e 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/RocksDbPersistentList.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/RocksDbPersistentList.java
@@ -55,8 +55,7 @@ public boolean add(E entry) {
       db.get().put(columnFamilyHandle, rawKey, rawValue);
       return true;
     } catch (IOException | RocksDBException exception) {
-      // TODO: [SNAPSHOT] Fail gracefully.
-      throw new RuntimeException(exception);
+      throw new SnapshotStorageException("Failed to append list entry", exception);
     }
   }
 
@@ -75,8 +74,7 @@ public E get(int index) {
       byte[] rawValue = db.get().get(columnFamilyHandle, rawKey);
       return codecRegistry.asObject(rawValue, entryType);
     } catch (IOException | RocksDBException exception) {
-      // TODO: [SNAPSHOT] Fail gracefully.
-      throw new RuntimeException(exception);
+      throw new SnapshotStorageException("Failed to read list entry", exception);
     }
   }
 
@@ -99,8 +97,7 @@ public E next() {
         try {
           return codecRegistry.asObject(rawKey, entryType);
         } catch (IOException exception) {
-          // TODO: [SNAPSHOT] Fail gracefully.
-          throw new RuntimeException(exception);
+          throw new SnapshotStorageException("Failed to deserialize list entry", exception);
         }
       }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/RocksDbPersistentMap.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/RocksDbPersistentMap.java
index 9b80b75eb643..55352a162ac3 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/RocksDbPersistentMap.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/RocksDbPersistentMap.java
@@ -60,8 +60,7 @@ public V get(K key) {
       byte[] rawValue = db.get().get(columnFamilyHandle, rawKey);
       return codecRegistry.asObject(rawValue, valueType);
     } catch (IOException | RocksDBException exception) {
-      // TODO: [SNAPSHOT] Fail gracefully.
-      throw new RuntimeException(exception);
+      throw new SnapshotStorageException("Failed to read map entry", exception);
     }
   }
 
@@ -72,8 +71,7 @@ public void put(K key, V value) {
       byte[] rawValue = codecRegistry.asRawData(value);
       db.get().put(columnFamilyHandle, rawKey, rawValue);
     } catch (IOException | RocksDBException exception) {
-      // TODO: [SNAPSHOT] Fail gracefully.
-      throw new RuntimeException(exception);
+      throw new SnapshotStorageException("Failed to write map entry", exception);
     }
   }
 
@@ -83,8 +81,7 @@ public void remove(K key) {
       byte[] rawKey = codecRegistry.asRawData(key);
       db.get().delete(columnFamilyHandle, rawKey);
     } catch (IOException | RocksDBException exception) {
-      // TODO: [SNAPSHOT] Fail gracefully.
-      throw new RuntimeException(exception);
+      throw new SnapshotStorageException("Failed to delete map entry", exception);
     }
   }
 
@@ -111,10 +108,9 @@ public ClosableIterator> iterator(Optional lowerBound,
       } else {
         upperBoundSlice = null;
       }
     } catch (IOException exception) {
-      // TODO: [SNAPSHOT] Fail gracefully.
-      throw new RuntimeException(exception);
+      throw new SnapshotStorageException("Failed to deserialize map iterator bound", exception);
     }
 
     iterator = ManagedRocksIterator.managed(
         db.get().newIterator(columnFamilyHandle, readOptions));
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/RocksDbPersistentSet.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/RocksDbPersistentSet.java
index 53d15b9f88d1..01fef800df4f 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/RocksDbPersistentSet.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/RocksDbPersistentSet.java
@@ -52,8 +52,7 @@ public void add(E entry) {
       byte[] rawValue = codecRegistry.asRawData(emptyByteArray);
       db.get().put(columnFamilyHandle, rawKey, rawValue);
     } catch (IOException | RocksDBException exception) {
-      // TODO: [SNAPSHOT] Fail gracefully.
-      throw new RuntimeException(exception);
+      throw new SnapshotStorageException("Failed to add set entry", exception);
     }
   }
 
@@ -76,8 +75,7 @@ public E next() {
         try {
           return codecRegistry.asObject(rawKey, entryType);
         } catch (IOException exception) {
-          // TODO: [SNAPSHOT] Fail gracefully.
-          throw new RuntimeException(exception);
+          throw new SnapshotStorageException("Failed to deserialize set entry", exception);
         }
       }
 
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotStorageException.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotStorageException.java
new file mode 100644
index 000000000000..115c254a24f1
--- /dev/null
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/snapshot/SnapshotStorageException.java
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om.snapshot;
+
+/**
+ * Unchecked exception thrown when an operation on a RocksDB-backed
+ * snapshot metadata store (persistent list/map/set) fails. The original
+ * {@code IOException} or {@code org.rocksdb.RocksDBException} is always
+ * preserved as the cause.
+ */
+public class SnapshotStorageException extends RuntimeException {
+
+  public SnapshotStorageException(String message, Throwable cause) {
+    super(message, cause);
+  }
+}