From c4e293833edbea559d865841725de3a9b507aa05 Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Wed, 24 Dec 2025 16:12:43 -0500
Subject: [PATCH 1/7] HDDS-14241. Refactor RDBBatchOperation to support various
 Operations by abstracting out the implementation of each operation

Change-Id: Idf5b9a4b9f66eae41a9d832ddbf22e43d1027344
---
 .../hdds/utils/db/RDBBatchOperation.java      | 236 +++++++++++++-----
 1 file changed, 174 insertions(+), 62 deletions(-)

diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
index 87d9ffc625c..1d2f9ef456d 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
@@ -20,12 +20,15 @@
 import static org.apache.hadoop.hdds.StringUtils.bytes2String;
 
 import com.google.common.base.Preconditions;
+import java.io.Closeable;
 import java.nio.ByteBuffer;
 import java.util.HashMap;
 import java.util.Map;
 import java.util.Objects;
+import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.Supplier;
+import org.apache.hadoop.hdds.utils.IOUtils;
 import org.apache.hadoop.hdds.utils.db.RocksDatabase.ColumnFamily;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteBatch;
 import org.apache.hadoop.hdds.utils.db.managed.ManagedWriteOptions;
@@ -42,6 +45,9 @@ public final class RDBBatchOperation implements BatchOperation {
 
   static final Logger LOG = LoggerFactory.getLogger(RDBBatchOperation.class);
 
+  private static final String PUT_OP = "PUT";
+  private static final String DELETE_OP = "DELETE";
+
   private static final AtomicInteger BATCH_COUNT = new AtomicInteger();
 
   private final String name = "Batch-" + BATCH_COUNT.getAndIncrement();
@@ -50,8 +56,6 @@ public final class RDBBatchOperation implements BatchOperation {
 
   private final OpCache opCache = new OpCache();
 
-  private enum Op { DELETE }
-
   public static RDBBatchOperation newAtomicOperation() {
     return newAtomicOperation(new ManagedWriteBatch());
   }
@@ -75,7 +79,7 @@ private static String countSize2String(int count, long size) {
   }
 
   /**
-   * The key type of {@link RDBBatchOperation.OpCache.FamilyCache#ops}.
+   * The key type of {@link RDBBatchOperation.OpCache.FamilyCache#batchOps}.
    * To implement {@link #equals(Object)} and {@link #hashCode()}
    * based on the contents of the bytes.
    */
@@ -97,10 +101,6 @@ static final class Bytes {
       this.hash = ByteBuffer.wrap(array).hashCode();
     }
 
-    byte[] array() {
-      return array;
-    }
-
     ByteBuffer asReadOnlyByteBuffer() {
       return buffer.asReadOnlyByteBuffer();
     }
@@ -135,6 +135,138 @@ public String toString() {
     }
   }
 
+  private abstract class Operation implements Closeable {
+
+    private Operation() {
+    }
+
+    abstract void apply(ColumnFamily family, ManagedWriteBatch batch) throws RocksDatabaseException;
+
+    abstract int keyLen();
+
+    abstract int valLen();
+
+    int totalLength() {
+      return keyLen() + valLen();
+    }
+
+    abstract String getOpType();
+
+    @Override
+    public void close() {
+    }
+  }
+
+  /**
+   * Delete operation to be applied to a {@link ColumnFamily} batch.
+ */ + private final class DeleteOperation extends Operation { + private final byte[] key; + + private DeleteOperation(byte[] key) { + super(); + this.key = Objects.requireNonNull(key, "key == null"); + } + + @Override + public void apply(ColumnFamily family, ManagedWriteBatch batch) throws RocksDatabaseException { + family.batchDelete(batch, this.key); + } + + @Override + public int keyLen() { + return key.length; + } + + @Override + public int valLen() { + return 0; + } + + @Override + public String getOpType() { + return DELETE_OP; + } + } + + /** + * Put operation to be applied to a {@link ColumnFamily} batch using the CodecBuffer api. + */ + private final class CodecBufferPutOperation extends Operation { + private final CodecBuffer key; + private final CodecBuffer value; + private final AtomicBoolean closed = new AtomicBoolean(false); + + private CodecBufferPutOperation(CodecBuffer key, CodecBuffer value) { + super(); + this.key = key; + this.value = value; + } + + @Override + public void apply(ColumnFamily family, ManagedWriteBatch batch) throws RocksDatabaseException { + family.batchPut(batch, key.asReadOnlyByteBuffer(), value.asReadOnlyByteBuffer()); + } + + @Override + public int keyLen() { + return key.readableBytes(); + } + + @Override + public int valLen() { + return value.readableBytes(); + } + + @Override + public String getOpType() { + return PUT_OP; + } + + @Override + public void close() { + if (closed.compareAndSet(false, true)) { + key.release(); + value.release(); + } + super.close(); + } + } + + /** + * Put operation to be applied to a {@link ColumnFamily} batch using the byte array api. + */ + private final class ByteArrayPutOperation extends Operation { + private final byte[] key; + private final byte[] value; + + private ByteArrayPutOperation(byte[] key, byte[] value) { + super(); + this.key = Objects.requireNonNull(key, "key == null"); + this.value = Objects.requireNonNull(value, "value == null"); + } + + @Override + public void apply(ColumnFamily family, ManagedWriteBatch batch) throws RocksDatabaseException { + family.batchPut(batch, key, value); + } + + @Override + public int keyLen() { + return key.length; + } + + @Override + public int valLen() { + return value.length; + } + + @Override + public String getOpType() { + return PUT_OP; + } + } + /** Cache and deduplicate db ops (put/delete). */ private class OpCache { /** A (family name -> {@link FamilyCache}) map. */ @@ -143,13 +275,18 @@ private class OpCache { /** A cache for a {@link ColumnFamily}. */ private class FamilyCache { private final ColumnFamily family; + /** - * A (dbKey -> dbValue) map, where the dbKey type is {@link Bytes} - * and the dbValue type is {@link Object}. - * When dbValue is a byte[]/{@link ByteBuffer}, it represents a put-op. - * Otherwise, it represents a delete-op (dbValue is {@link Op#DELETE}). + * A mapping of keys to operations for batch processing in the {@link FamilyCache}. + * The keys are represented as {@link Bytes} objects, encapsulating the byte array or buffer + * for efficient equality and hashing. The values are instances of {@link Operation}, representing + * different types of operations that can be applied to a {@link ColumnFamily}. + * + * This field is intended to store pending batch updates before they are written to the database. + * It supports operations such as additions and deletions while maintaining the ability to overwrite + * existing entries when necessary. 
        */
-      private final Map<Bytes, Object> ops = new HashMap<>();
+      private final Map<Bytes, Operation> batchOps = new HashMap<>();
       private boolean isCommit;
 
       private long batchSize;
@@ -166,22 +303,9 @@ private class FamilyCache {
       void prepareBatchWrite() throws RocksDatabaseException {
         Preconditions.checkState(!isCommit, "%s is already committed.", this);
         isCommit = true;
-        for (Map.Entry<Bytes, Object> op : ops.entrySet()) {
-          final Bytes key = op.getKey();
-          final Object value = op.getValue();
-          if (value instanceof byte[]) {
-            family.batchPut(writeBatch, key.array(), (byte[]) value);
-          } else if (value instanceof CodecBuffer) {
-            family.batchPut(writeBatch, key.asReadOnlyByteBuffer(),
-                ((CodecBuffer) value).asReadOnlyByteBuffer());
-          } else if (value == Op.DELETE) {
-            family.batchDelete(writeBatch, key.array());
-          } else {
-            throw new IllegalStateException("Unexpected value: " + value
-                + ", class=" + value.getClass().getSimpleName());
-          }
+        for (Operation op : batchOps.values()) {
+          op.apply(family, writeBatch);
         }
-
         debug(this::summary);
       }
@@ -194,48 +318,35 @@ void clear() {
         final boolean warn = !isCommit && batchSize > 0;
         String details = warn ? summary() : null;
 
-        for (Object value : ops.values()) {
-          if (value instanceof CodecBuffer) {
-            ((CodecBuffer) value).release(); // the key will also be released
-          }
-        }
-        ops.clear();
+        IOUtils.close(LOG, batchOps.values());
+        batchOps.clear();
 
         if (warn) {
          LOG.warn("discarding changes {}", details);
        }
      }
 
-      void putOrDelete(Bytes key, int keyLen, Object val, int valLen) {
-        Preconditions.checkState(!isCommit, "%s is already committed.", this);
-        batchSize += keyLen + valLen;
-        // remove previous first in order to call release()
-        final Object previous = ops.remove(key);
+      private void deleteIfExist(Bytes key) {
+        final Operation previous = batchOps.remove(key);
         if (previous != null) {
-          final boolean isPut = previous != Op.DELETE;
-          final int preLen;
-          if (!isPut) {
-            preLen = 0;
-          } else if (previous instanceof CodecBuffer) {
-            final CodecBuffer previousValue = (CodecBuffer) previous;
-            preLen = previousValue.readableBytes();
-            previousValue.release(); // key will also be released
-          } else if (previous instanceof byte[]) {
-            preLen = ((byte[]) previous).length;
-          } else {
-            throw new IllegalStateException("Unexpected previous: " + previous
-                + ", class=" + previous.getClass().getSimpleName());
-          }
-          discardedSize += keyLen + preLen;
+          previous.close();
+          discardedSize += previous.totalLength();
           discardedCount++;
-          debug(() -> String.format("%s overwriting a previous %s", this,
-              isPut ? "put (value: " + byteSize2String(preLen) + ")" : "del"));
+          debug(() -> String.format("%s overwriting a previous %s[valLen => %s]", this, previous.getOpType(),
+              previous.valLen()));
         }
-        final Object overwritten = ops.put(key, val);
+      }
+
+      void overWriteOpIfExist(Bytes key, Operation operation) {
+        Preconditions.checkState(!isCommit, "%s is already committed.", this);
+        deleteIfExist(key);
+        batchSize += operation.totalLength();
+        Operation overwritten = batchOps.put(key, operation);
         Preconditions.checkState(overwritten == null);
 
         debug(() -> String.format("%s %s, %s; key=%s", this,
-            valLen == 0 ? delString(keyLen) : putString(keyLen, valLen),
+            DELETE_OP.equals(operation.getOpType()) ? delString(operation.totalLength()) : putString(operation.keyLen(),
                operation.valLen()),
            batchSizeDiscardedString(), key));
      }
@@ -243,19 +354,20 @@ void put(CodecBuffer key, CodecBuffer value) {
       putCount++;
 
       // always release the key with the value
-      value.getReleaseFuture().thenAccept(v -> key.release());
-      putOrDelete(new Bytes(key), key.readableBytes(),
-          value, value.readableBytes());
+      Bytes keyBytes = new Bytes(key);
+      overWriteOpIfExist(keyBytes, new CodecBufferPutOperation(key, value));
     }
 
     void put(byte[] key, byte[] value) {
       putCount++;
-      putOrDelete(new Bytes(key), key.length, value, value.length);
+      Bytes keyBytes = new Bytes(key);
+      overWriteOpIfExist(keyBytes, new ByteArrayPutOperation(key, value));
     }
 
     void delete(byte[] key) {
       delCount++;
-      putOrDelete(new Bytes(key), key.length, Op.DELETE, 0);
+      Bytes keyBytes = new Bytes(key);
+      overWriteOpIfExist(keyBytes, new DeleteOperation(key));
     }
 
     String putString(int keySize, int valueSize) {
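Note on patch 1: the untyped (Bytes -> Object) map, which needed instanceof chains at commit and cleanup time, becomes a map of self-describing operations that know how to apply, size, and close themselves. The program below is a standalone sketch of that dedup pattern; every type in it is a simplified stand-in, not the actual Ozone API.

    import java.io.Closeable;
    import java.util.HashMap;
    import java.util.Map;

    // Standalone sketch of the (key -> op) dedup pattern introduced by patch 1.
    public class BatchDedupSketch {
      interface Op extends Closeable {
        void apply(StringBuilder batch); // stand-in for writing into a RocksDB WriteBatch
        @Override
        default void close() { } // release any buffers the op holds
      }

      static final class Put implements Op {
        private final String key;
        private final String value;
        Put(String key, String value) {
          this.key = key;
          this.value = value;
        }
        @Override
        public void apply(StringBuilder batch) {
          batch.append("PUT ").append(key).append('=').append(value).append('\n');
        }
      }

      static final class Delete implements Op {
        private final String key;
        Delete(String key) {
          this.key = key;
        }
        @Override
        public void apply(StringBuilder batch) {
          batch.append("DEL ").append(key).append('\n');
        }
      }

      private final Map<String, Op> ops = new HashMap<>();

      // Mirrors deleteIfExist + overWriteOpIfExist: close and replace any earlier
      // op on the same key, so only the latest op per key reaches the write batch.
      void overwrite(String key, Op op) {
        Op previous = ops.remove(key);
        if (previous != null) {
          previous.close();
        }
        ops.put(key, op);
      }

      public static void main(String[] args) {
        BatchDedupSketch cache = new BatchDedupSketch();
        cache.overwrite("k1", new Put("k1", "v1"));
        cache.overwrite("k1", new Put("k1", "v2")); // first put is closed and discarded
        cache.overwrite("k2", new Delete("k2"));

        StringBuilder batch = new StringBuilder();
        cache.ops.values().forEach(op -> op.apply(batch));
        System.out.print(batch); // one PUT for k1=v2 and one DEL for k2 (map order)
      }
    }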
From 4e271fe3c6a75164312f3e644ad716c4e675ebc6 Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Fri, 26 Dec 2025 14:55:30 -0500
Subject: [PATCH 2/7] HDDS-14241. Rename Operation class and make static

Change-Id: I3a36aa7e4b421ec40c7a5dcb309c9a3a9e111e0a
---
 .../hdds/utils/db/RDBBatchOperation.java      | 34 +++++++++----------
 1 file changed, 17 insertions(+), 17 deletions(-)

diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
index 1d2f9ef456d..9c4655ef163 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
@@ -135,9 +135,9 @@ public String toString() {
     }
   }
 
-  private abstract class Operation implements Closeable {
+  private abstract static class Op implements Closeable {
 
-    private Operation() {
+    private Op() {
     }
 
     abstract void apply(ColumnFamily family, ManagedWriteBatch batch) throws RocksDatabaseException;
@@ -160,10 +160,10 @@ public void close() {
   /**
    * Delete operation to be applied to a {@link ColumnFamily} batch.
    */
-  private final class DeleteOperation extends Operation {
+  private static final class DeleteOp extends Op {
     private final byte[] key;
 
-    private DeleteOperation(byte[] key) {
+    private DeleteOp(byte[] key) {
       super();
       this.key = Objects.requireNonNull(key, "key == null");
     }
@@ -192,12 +192,12 @@ public String getOpType() {
   /**
    * Put operation to be applied to a {@link ColumnFamily} batch using the CodecBuffer api.
    */
-  private final class CodecBufferPutOperation extends Operation {
+  private final class CodecBufferPutOp extends Op {
     private final CodecBuffer key;
     private final CodecBuffer value;
     private final AtomicBoolean closed = new AtomicBoolean(false);
 
-    private CodecBufferPutOperation(CodecBuffer key, CodecBuffer value) {
+    private CodecBufferPutOp(CodecBuffer key, CodecBuffer value) {
       super();
       this.key = key;
       this.value = value;
@@ -236,11 +236,11 @@ public void close() {
   /**
    * Put operation to be applied to a {@link ColumnFamily} batch using the byte array api.
    */
-  private final class ByteArrayPutOperation extends Operation {
+  private static final class ByteArrayPutOp extends Op {
     private final byte[] key;
     private final byte[] value;
 
-    private ByteArrayPutOperation(byte[] key, byte[] value) {
+    private ByteArrayPutOp(byte[] key, byte[] value) {
       super();
       this.key = Objects.requireNonNull(key, "key == null");
       this.value = Objects.requireNonNull(value, "value == null");
@@ -279,14 +279,14 @@ private class FamilyCache {
       /**
        * A mapping of keys to operations for batch processing in the {@link FamilyCache}.
        * The keys are represented as {@link Bytes} objects, encapsulating the byte array or buffer
-       * for efficient equality and hashing. The values are instances of {@link Operation}, representing
+       * for efficient equality and hashing. The values are instances of {@link Op}, representing
        * different types of operations that can be applied to a {@link ColumnFamily}.
       *
       * This field is intended to store pending batch updates before they are written to the database.
       * It supports operations such as additions and deletions while maintaining the ability to overwrite
       * existing entries when necessary.
       */
-      private final Map<Bytes, Operation> batchOps = new HashMap<>();
+      private final Map<Bytes, Op> batchOps = new HashMap<>();
       private boolean isCommit;
 
       private long batchSize;
@@ -303,7 +303,7 @@ private class FamilyCache {
       void prepareBatchWrite() throws RocksDatabaseException {
         Preconditions.checkState(!isCommit, "%s is already committed.", this);
         isCommit = true;
-        for (Operation op : batchOps.values()) {
+        for (Op op : batchOps.values()) {
           op.apply(family, writeBatch);
         }
         debug(this::summary);
@@ -327,7 +327,7 @@ void clear() {
       }
 
       private void deleteIfExist(Bytes key) {
-        final Operation previous = batchOps.remove(key);
+        final Op previous = batchOps.remove(key);
         if (previous != null) {
           previous.close();
           discardedSize += previous.totalLength();
@@ -337,11 +337,11 @@ private void deleteIfExist(Bytes key) {
         }
       }
 
-      void overWriteOpIfExist(Bytes key, Operation operation) {
+      void overWriteOpIfExist(Bytes key, Op operation) {
         Preconditions.checkState(!isCommit, "%s is already committed.", this);
         deleteIfExist(key);
         batchSize += operation.totalLength();
-        Operation overwritten = batchOps.put(key, operation);
+        Op overwritten = batchOps.put(key, operation);
         Preconditions.checkState(overwritten == null);
 
         debug(() -> String.format("%s %s, %s; key=%s", this,
@@ -355,19 +355,19 @@ void put(CodecBuffer key, CodecBuffer value) {
 
       // always release the key with the value
       Bytes keyBytes = new Bytes(key);
-      overWriteOpIfExist(keyBytes, new CodecBufferPutOperation(key, value));
+      overWriteOpIfExist(keyBytes, new CodecBufferPutOp(key, value));
     }
 
     void put(byte[] key, byte[] value) {
       putCount++;
       Bytes keyBytes = new Bytes(key);
-      overWriteOpIfExist(keyBytes, new ByteArrayPutOperation(key, value));
+      overWriteOpIfExist(keyBytes, new ByteArrayPutOp(key, value));
     }
 
     void delete(byte[] key) {
       delCount++;
       Bytes keyBytes = new Bytes(key);
-      overWriteOpIfExist(keyBytes, new DeleteOperation(key));
+      overWriteOpIfExist(keyBytes, new DeleteOp(key));
     }
 
     String putString(int keySize, int valueSize) {
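Note on patch 2: besides the shorter names, Op, DeleteOp, and ByteArrayPutOp become static nested classes (CodecBufferPutOp stays an inner class). A non-static inner class carries a hidden reference to its enclosing RDBBatchOperation, so a retained op could keep the whole batch object reachable; a static nested class cannot. A quick illustration with hypothetical classes, not code from this change:

    import java.lang.reflect.Field;

    public class NestingSketch {
      class Inner { }         // compiled with a synthetic field that pins the outer instance
      static class Nested { } // no hidden field; cannot keep the outer object reachable

      public static void main(String[] args) {
        for (Field f : Inner.class.getDeclaredFields()) {
          System.out.println(f); // prints the synthetic outer reference (typically named this$0)
        }
        System.out.println(Nested.class.getDeclaredFields().length); // 0
      }
    }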
From 7b762ae9b1552df2e42359fada921be55cab49b5 Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Fri, 26 Dec 2025 21:59:54 -0500
Subject: [PATCH 3/7] HDDS-14241. Remove opType

Change-Id: I05e959b3a502156a783910ca24b679acc8025995
---
 .../hdds/utils/db/RDBBatchOperation.java      | 31 ++++----------
 1 file changed, 6 insertions(+), 25 deletions(-)

diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
index 9c4655ef163..0716b7910b6 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
@@ -144,14 +144,14 @@ private Op() {
 
     abstract void apply(ColumnFamily family, ManagedWriteBatch batch) throws RocksDatabaseException;
 
     abstract int keyLen();
 
-    abstract int valLen();
+    int valLen() {
+      return 0;
+    }
 
     int totalLength() {
       return keyLen() + valLen();
     }
 
-    abstract String getOpType();
-
     @Override
     public void close() {
     }
@@ -177,16 +177,6 @@ public void apply(ColumnFamily family, ManagedWriteBatch batch) throws RocksData
     public int keyLen() {
       return key.length;
     }
-
-    @Override
-    public int valLen() {
-      return 0;
-    }
-
-    @Override
-    public String getOpType() {
-      return DELETE_OP;
-    }
   }
 
   /**
@@ -218,11 +208,6 @@ public int valLen() {
       return value.readableBytes();
     }
 
-    @Override
-    public String getOpType() {
-      return PUT_OP;
-    }
-
     @Override
     public void close() {
       if (closed.compareAndSet(false, true)) {
@@ -260,11 +245,6 @@ public int keyLen() {
     public int valLen() {
       return value.length;
     }
-
-    @Override
-    public String getOpType() {
-      return PUT_OP;
-    }
   }
 
   /** Cache and deduplicate db ops (put/delete). */
@@ -332,7 +312,8 @@ private void deleteIfExist(Bytes key) {
          previous.close();
          discardedSize += previous.totalLength();
          discardedCount++;
-          debug(() -> String.format("%s overwriting a previous %s[valLen => %s]", this, previous.getOpType(),
+          debug(() -> String.format("%s overwriting a previous %s[valLen => %s]", this,
+              previous instanceof DeleteOp ? DELETE_OP : PUT_OP,
              previous.valLen()));
        }
      }
@@ -345,7 +326,7 @@ void overWriteOpIfExist(Bytes key, Op operation) {
        Preconditions.checkState(overwritten == null);
 
        debug(() -> String.format("%s %s, %s; key=%s", this,
-            DELETE_OP.equals(operation.getOpType()) ? delString(operation.totalLength()) : putString(operation.keyLen(),
+            operation instanceof DeleteOp ? delString(operation.totalLength()) : putString(operation.keyLen(),
                operation.valLen()),
            batchSizeDiscardedString(), key));
      }
From 20e24f16c8f1d3461a905d5b77a0152f8974651c Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Sat, 27 Dec 2025 17:09:03 -0500
Subject: [PATCH 4/7] HDDS-14241. Use classname for debug logs

Change-Id: I961641514c699d610719c1c0b2c6d7064ff4d0dd
---
 .../org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
index 0716b7910b6..e1861f810dd 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
@@ -45,9 +45,6 @@ public final class RDBBatchOperation implements BatchOperation {
 
   static final Logger LOG = LoggerFactory.getLogger(RDBBatchOperation.class);
 
-  private static final String PUT_OP = "PUT";
-  private static final String DELETE_OP = "DELETE";
-
   private static final AtomicInteger BATCH_COUNT = new AtomicInteger();
 
   private final String name = "Batch-" + BATCH_COUNT.getAndIncrement();
@@ -313,8 +310,7 @@ private void deleteIfExist(Bytes key) {
          previous.close();
          discardedSize += previous.totalLength();
          discardedCount++;
          debug(() -> String.format("%s overwriting a previous %s[valLen => %s]", this,
-              previous instanceof DeleteOp ? DELETE_OP : PUT_OP,
-              previous.valLen()));
+              previous.getClass().getName(), previous.valLen()));
        }
      }
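Note on patch 4: with the PUT/DELETE constants gone, the overwrite log labels the previous op by its class. For a nested class, getClass().getName() yields the binary name with a '$' separator, so the op type stays recognizable in logs; getSimpleName() would give only the short name. A minimal check of the two formats, using a hypothetical stand-in class:

    public class ClassNameSketch {
      private static final class DeleteOp { }

      public static void main(String[] args) {
        System.out.println(DeleteOp.class.getName());       // prints ClassNameSketch$DeleteOp
        System.out.println(DeleteOp.class.getSimpleName()); // prints DeleteOp
      }
    }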
From 042d6e0ba208b259d9773ae5f1854a700e33dd64 Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Sat, 27 Dec 2025 17:12:02 -0500
Subject: [PATCH 5/7] HDDS-14241. Rename map

Change-Id: I7f35eaa783ad38f296541448aa9ebddf80f456fe
---
 .../hadoop/hdds/utils/db/RDBBatchOperation.java | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
index e1861f810dd..0f87350945c 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
@@ -76,7 +76,7 @@ private static String countSize2String(int count, long size) {
   }
 
   /**
-   * The key type of {@link RDBBatchOperation.OpCache.FamilyCache#batchOps}.
+   * The key type of {@link RDBBatchOperation.OpCache.FamilyCache#ops}.
    * To implement {@link #equals(Object)} and {@link #hashCode()}
    * based on the contents of the bytes.
    */
@@ -263,7 +263,7 @@ private class FamilyCache {
       * It supports operations such as additions and deletions while maintaining the ability to overwrite
       * existing entries when necessary.
       */
-      private final Map<Bytes, Op> batchOps = new HashMap<>();
+      private final Map<Bytes, Op> ops = new HashMap<>();
       private boolean isCommit;
 
       private long batchSize;
@@ -280,7 +280,7 @@ private class FamilyCache {
       void prepareBatchWrite() throws RocksDatabaseException {
         Preconditions.checkState(!isCommit, "%s is already committed.", this);
         isCommit = true;
-        for (Op op : batchOps.values()) {
+        for (Op op : ops.values()) {
           op.apply(family, writeBatch);
         }
         debug(this::summary);
@@ -295,8 +295,8 @@ void clear() {
        final boolean warn = !isCommit && batchSize > 0;
        String details = warn ? summary() : null;
 
-        IOUtils.close(LOG, batchOps.values());
-        batchOps.clear();
+        IOUtils.close(LOG, ops.values());
+        ops.clear();
 
        if (warn) {
          LOG.warn("discarding changes {}", details);
@@ -304,7 +304,7 @@ }
 
      private void deleteIfExist(Bytes key) {
-        final Op previous = batchOps.remove(key);
+        final Op previous = ops.remove(key);
        if (previous != null) {
          previous.close();
          discardedSize += previous.totalLength();
@@ -318,7 +318,7 @@ void overWriteOpIfExist(Bytes key, Op operation) {
        Preconditions.checkState(!isCommit, "%s is already committed.", this);
        deleteIfExist(key);
        batchSize += operation.totalLength();
-        Op overwritten = batchOps.put(key, operation);
+        Op overwritten = ops.put(key, operation);
        Preconditions.checkState(overwritten == null);
 
        debug(() -> String.format("%s %s, %s; key=%s", this,
From 4f9977c8ecf4854eb1b201a5007ad5d1920c532e Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Sat, 27 Dec 2025 23:33:52 -0500
Subject: [PATCH 6/7] HDDS-14241. Address review comments

Change-Id: I0aff2ee9911a56c4265c72819f75da268f325aaa
---
 .../hdds/utils/db/RDBBatchOperation.java      | 34 +++++++------------
 1 file changed, 12 insertions(+), 22 deletions(-)

diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
index 0f87350945c..64c6825cd24 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
@@ -134,9 +134,6 @@ public String toString() {
 
   private abstract static class Op implements Closeable {
 
-    private Op() {
-    }
-
     abstract void apply(ColumnFamily family, ManagedWriteBatch batch) throws RocksDatabaseException;
 
     abstract int keyLen();
@@ -161,7 +158,6 @@ private static final class DeleteOp extends Op {
     private final byte[] key;
 
     private DeleteOp(byte[] key) {
-      super();
       this.key = Objects.requireNonNull(key, "key == null");
     }
 
@@ -179,13 +175,12 @@ public int keyLen() {
   /**
    * Put operation to be applied to a {@link ColumnFamily} batch using the CodecBuffer api.
    */
-  private final class CodecBufferPutOp extends Op {
+  private final class PutOp extends Op {
     private final CodecBuffer key;
     private final CodecBuffer value;
     private final AtomicBoolean closed = new AtomicBoolean(false);
 
-    private CodecBufferPutOp(CodecBuffer key, CodecBuffer value) {
-      super();
+    private PutOp(CodecBuffer key, CodecBuffer value) {
       this.key = key;
       this.value = value;
     }
@@ -223,7 +218,6 @@ private static final class ByteArrayPutOp extends Op {
     private final byte[] value;
 
     private ByteArrayPutOp(byte[] key, byte[] value) {
-      super();
       this.key = Objects.requireNonNull(key, "key == null");
       this.value = Objects.requireNonNull(value, "value == null");
     }
@@ -314,37 +308,35 @@ private void deleteIfExist(Bytes key) {
        }
      }
 
-      void overWriteOpIfExist(Bytes key, Op operation) {
+      void overWriteIfExist(Bytes key, Op op) {
        Preconditions.checkState(!isCommit, "%s is already committed.", this);
        deleteIfExist(key);
-        batchSize += operation.totalLength();
-        Op overwritten = ops.put(key, operation);
+        batchSize += op.totalLength();
+        Op overwritten = ops.put(key, op);
        Preconditions.checkState(overwritten == null);
 
        debug(() -> String.format("%s %s, %s; key=%s", this,
-            operation instanceof DeleteOp ? delString(operation.totalLength()) : putString(operation.keyLen(),
-                operation.valLen()),
+            op instanceof DeleteOp ? delString(op.totalLength()) : putString(op.keyLen(), op.valLen()),
            batchSizeDiscardedString(), key));
      }
 
      void put(CodecBuffer key, CodecBuffer value) {
        putCount++;
-
        // always release the key with the value
        Bytes keyBytes = new Bytes(key);
-        overWriteOpIfExist(keyBytes, new CodecBufferPutOp(key, value));
+        overWriteIfExist(keyBytes, new PutOp(key, value));
      }
 
      void put(byte[] key, byte[] value) {
        putCount++;
        Bytes keyBytes = new Bytes(key);
-        overWriteOpIfExist(keyBytes, new ByteArrayPutOp(key, value));
+        overWriteIfExist(keyBytes, new ByteArrayPutOp(key, value));
      }
 
      void delete(byte[] key) {
        delCount++;
        Bytes keyBytes = new Bytes(key);
-        overWriteOpIfExist(keyBytes, new DeleteOp(key));
+        overWriteIfExist(keyBytes, new DeleteOp(key));
      }
 
      String putString(int keySize, int valueSize) {
@@ -353,14 +345,12 @@ String putString(int keySize, int valueSize) {
     }
 
     String delString(int keySize) {
-      return String.format("del(key: %s), #del=%s",
-          byteSize2String(keySize), delCount);
+      return String.format("del(key: %s), #del=%s", byteSize2String(keySize), delCount);
     }
 
     String batchSizeDiscardedString() {
       return String.format("batchSize=%s, discarded: %s",
-          byteSize2String(batchSize),
-          countSize2String(discardedCount, discardedSize));
+          byteSize2String(batchSize), countSize2String(discardedCount, discardedSize));
     }
 
     @Override
@@ -392,7 +382,7 @@ UncheckedAutoCloseable prepareBatchWrite() throws RocksDatabaseException {
       return this::clear;
     }
 
-    void clear() {
+    private void clear() {
       for (Map.Entry<String, FamilyCache> e : name2cache.entrySet()) {
         e.getValue().clear();
       }
From 3e321b953784e4ca7ef83ed3ac3ca908b515f518 Mon Sep 17 00:00:00 2001
From: Swaminathan Balachandran
Date: Sun, 28 Dec 2025 10:25:58 -0500
Subject: [PATCH 7/7] HDDS-14241. Fix typo

Change-Id: I152be40a8376823f501df6594621512368b0e7cc
---
 .../apache/hadoop/hdds/utils/db/RDBBatchOperation.java | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
index 64c6825cd24..3681959b4b4 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/RDBBatchOperation.java
@@ -308,7 +308,7 @@ private void deleteIfExist(Bytes key) {
       }
     }
 
-    void overWriteIfExist(Bytes key, Op op) {
+    void overwriteIfExists(Bytes key, Op op) {
       Preconditions.checkState(!isCommit, "%s is already committed.", this);
       deleteIfExist(key);
       batchSize += op.totalLength();
@@ -324,19 +324,19 @@ void put(CodecBuffer key, CodecBuffer value) {
       putCount++;
       // always release the key with the value
       Bytes keyBytes = new Bytes(key);
-      overWriteIfExist(keyBytes, new PutOp(key, value));
+      overwriteIfExists(keyBytes, new PutOp(key, value));
     }
 
     void put(byte[] key, byte[] value) {
       putCount++;
       Bytes keyBytes = new Bytes(key);
-      overWriteIfExist(keyBytes, new ByteArrayPutOp(key, value));
+      overwriteIfExists(keyBytes, new ByteArrayPutOp(key, value));
     }
 
     void delete(byte[] key) {
       delCount++;
       Bytes keyBytes = new Bytes(key);
-      overWriteIfExist(keyBytes, new DeleteOp(key));
+      overwriteIfExists(keyBytes, new DeleteOp(key));
     }
 
     String putString(int keySize, int valueSize) {
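Note on the series as a whole: after patch 7, every pending op owns its own cleanup. clear() closes all remaining ops through IOUtils.close(LOG, ops.values()), and an op that gets overwritten is closed immediately in deleteIfExist(), which releases CodecBuffers eagerly instead of waiting for commit. The helper below is a hypothetical stand-in for that close-everything-and-keep-going contract, not the Ozone IOUtils implementation:

    import java.util.Arrays;
    import java.util.Collection;

    public final class CloseAllSketch {
      // Close every resource, logging failures instead of letting one bad
      // close() abort the rest -- the behavior clear() relies on.
      static void closeAll(Collection<? extends AutoCloseable> resources) {
        for (AutoCloseable r : resources) {
          try {
            r.close();
          } catch (Exception e) {
            System.err.println("failed to close " + r + ": " + e);
          }
        }
      }

      public static void main(String[] args) {
        AutoCloseable ok = () -> System.out.println("released");
        AutoCloseable bad = () -> { throw new IllegalStateException("boom"); };
        closeAll(Arrays.asList(ok, bad, ok)); // prints "released" twice despite the failure in between
      }
    }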