From 1231cb7b032e07c3f26b7ca694d56c4be1b17ba8 Mon Sep 17 00:00:00 2001 From: Kodai Doki Date: Thu, 6 Nov 2025 19:07:06 +0900 Subject: [PATCH 1/5] Add DML support for Blob Storage --- ...AdminIntegrationTestWithObjectStorage.java | 4 - ...nScanIntegrationTestWithObjectStorage.java | 23 + ...ommitIntegrationTestWithObjectStorage.java | 31 + ...adataIntegrationTestWithObjectStorage.java | 13 + .../ConsensusCommitObjectStorageEnv.java | 22 + ...cificIntegrationTestWithObjectStorage.java | 130 +++ ...abledIntegrationTestWithObjectStorage.java | 13 + ...geAdminCaseSensitivityIntegrationTest.java | 4 - .../ObjectStorageAdminIntegrationTest.java | 4 - ...StorageCaseSensitivityIntegrationTest.java | 45 + ...jectStorageColumnValueIntegrationTest.java | 12 + ...ageConditionalMutationIntegrationTest.java | 30 + ...rageCrossPartitionScanIntegrationTest.java | 30 + .../ObjectStorageIntegrationTest.java | 43 + .../ObjectStorageJapaneseIntegrationTest.java | 13 + ...tipleClusteringKeyScanIntegrationTest.java | 51 + ...geMultiplePartitionKeyIntegrationTest.java | 40 + ...eMutationAtomicityUnitIntegrationTest.java | 19 + ...ingleClusteringKeyScanIntegrationTest.java | 45 + ...rageSinglePartitionKeyIntegrationTest.java | 30 + .../objectstorage/ObjectStorageTestUtils.java | 19 + ...ageWithReservedKeywordIntegrationTest.java | 45 + .../ObjectStorageWrapperIntegrationTest.java | 58 +- ...rapperLargeObjectWriteIntegrationTest.java | 160 +++ ...AdminIntegrationTestWithObjectStorage.java | 4 - ...ctionIntegrationTestWithObjectStorage.java | 13 + ...nScanIntegrationTestWithObjectStorage.java | 23 + ...ommitIntegrationTestWithObjectStorage.java | 22 + ...cificIntegrationTestWithObjectStorage.java | 13 + ...abledIntegrationTestWithObjectStorage.java | 13 + .../java/com/scalar/db/common/CoreError.java | 28 + .../ClusteringKeyComparator.java | 39 + .../objectstorage/ColumnValueMapper.java | 79 ++ .../db/storage/objectstorage/MapVisitor.java | 92 ++ .../objectstorage/MutateStatementHandler.java | 308 ++++++ .../storage/objectstorage/ObjectStorage.java | 157 +++ .../objectstorage/ObjectStorageMutation.java | 63 ++ .../objectstorage/ObjectStorageOperation.java | 77 ++ .../ObjectStorageOperationChecker.java | 154 +++ .../objectstorage/ObjectStorageProvider.java | 2 +- .../objectstorage/ObjectStorageRecord.java | 116 +++ .../ObjectStorageTableMetadata.java | 14 +- .../objectstorage/PartitionIdentifier.java | 45 + .../objectstorage/ResultInterpreter.java | 53 + .../db/storage/objectstorage/ScannerImpl.java | 59 ++ .../objectstorage/SelectStatementHandler.java | 296 ++++++ .../objectstorage/StatementHandler.java | 135 +++ .../storage/objectstorage/MapVisitorTest.java | 277 ++++++ .../MutateStatementHandlerTest.java | 908 ++++++++++++++++++ .../ObjectStorageMutationTest.java | 114 +++ .../ObjectStorageOperationCheckerTest.java | 830 ++++++++++++++++ .../ObjectStorageOperationTest.java | 109 +++ .../objectstorage/ObjectStorageTest.java | 319 ++++++ .../objectstorage/ResultInterpreterTest.java | 312 ++++++ .../objectstorage/ScannerImplTest.java | 210 ++++ .../SelectStatementHandlerTest.java | 443 +++++++++ .../objectstorage/StatementHandlerTest.java | 339 +++++++ 57 files changed, 6525 insertions(+), 25 deletions(-) create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitIntegrationTestWithObjectStorage.java create mode 100644 
core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitNullMetadataIntegrationTestWithObjectStorage.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitObjectStorageEnv.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitSpecificIntegrationTestWithObjectStorage.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCaseSensitivityIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageColumnValueIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageConditionalMutationIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCrossPartitionScanIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageJapaneseIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultipleClusteringKeyScanIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultiplePartitionKeyIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationAtomicityUnitIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSingleClusteringKeyScanIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSinglePartitionKeyIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageTestUtils.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWithReservedKeywordIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperLargeObjectWriteIntegrationTest.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionIntegrationTestWithObjectStorage.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitIntegrationTestWithObjectStorage.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitSpecificIntegrationTestWithObjectStorage.java create mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ClusteringKeyComparator.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ColumnValueMapper.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/MapVisitor.java create 
mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/MutateStatementHandler.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorage.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageMutation.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperation.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationChecker.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageRecord.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/PartitionIdentifier.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ResultInterpreter.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/ScannerImpl.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/SelectStatementHandler.java create mode 100644 core/src/main/java/com/scalar/db/storage/objectstorage/StatementHandler.java create mode 100644 core/src/test/java/com/scalar/db/storage/objectstorage/MapVisitorTest.java create mode 100644 core/src/test/java/com/scalar/db/storage/objectstorage/MutateStatementHandlerTest.java create mode 100644 core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationTest.java create mode 100644 core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationCheckerTest.java create mode 100644 core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationTest.java create mode 100644 core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageTest.java create mode 100644 core/src/test/java/com/scalar/db/storage/objectstorage/ResultInterpreterTest.java create mode 100644 core/src/test/java/com/scalar/db/storage/objectstorage/ScannerImplTest.java create mode 100644 core/src/test/java/com/scalar/db/storage/objectstorage/SelectStatementHandlerTest.java create mode 100644 core/src/test/java/com/scalar/db/storage/objectstorage/StatementHandlerTest.java diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java index 5c25c0c1fe..44d7af5a4c 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java @@ -18,10 +18,6 @@ protected AdminTestUtils getAdminTestUtils(String testName) { return new ObjectStorageAdminTestUtils(getProperties(testName)); } - @Override - @Disabled("Temporarily disabled because it includes DML operations") - public void truncateTable_ShouldTruncateProperly() {} - @Override @Disabled("Object Storage does not support index-related operations") public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorrectly() {} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..54d5c7d019 --- /dev/null +++ 
b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java @@ -0,0 +1,23 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.ConsensusCommitConfig; +import com.scalar.db.transaction.consensuscommit.ConsensusCommitCrossPartitionScanIntegrationTestBase; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +public class ConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage + extends ConsensusCommitCrossPartitionScanIntegrationTestBase { + + @Override + protected Properties getProps(String testName) { + Properties properties = ConsensusCommitObjectStorageEnv.getProperties(testName); + properties.setProperty(ConsensusCommitConfig.ISOLATION_LEVEL, "SERIALIZABLE"); + return properties; + } + + @Test + @Override + @Disabled("Cross partition scan with ordering is not supported in Object Storages") + public void scan_CrossPartitionScanWithOrderingGivenForCommittedRecord_ShouldReturnRecords() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..abb84e4a5d --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitIntegrationTestWithObjectStorage.java @@ -0,0 +1,31 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.ConsensusCommitIntegrationTestBase; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; + +public class ConsensusCommitIntegrationTestWithObjectStorage + extends ConsensusCommitIntegrationTestBase { + @Override + protected Properties getProps(String testName) { + return ConsensusCommitObjectStorageEnv.getProperties(testName); + } + + @Override + protected boolean isTimestampTypeSupported() { + return false; + } + + @Override + @Disabled("Object Storage does not support index-related operations") + public void get_GetGivenForIndexColumn_ShouldReturnRecords() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scanOrGetScanner_ScanGivenForIndexColumn_ShouldReturnRecords(ScanType scanType) {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scanOrGetScanner_ScanGivenForIndexColumnWithConjunctions_ShouldReturnRecords( + ScanType scanType) {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitNullMetadataIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitNullMetadataIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..440e753212 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitNullMetadataIntegrationTestWithObjectStorage.java @@ -0,0 +1,13 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.ConsensusCommitNullMetadataIntegrationTestBase; +import java.util.Properties; + +public class ConsensusCommitNullMetadataIntegrationTestWithObjectStorage + extends ConsensusCommitNullMetadataIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return 
ConsensusCommitObjectStorageEnv.getProperties(testName); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitObjectStorageEnv.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitObjectStorageEnv.java new file mode 100644 index 0000000000..25d5c9a174 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitObjectStorageEnv.java @@ -0,0 +1,22 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.ConsensusCommitTestUtils; +import java.util.Map; +import java.util.Properties; + +public class ConsensusCommitObjectStorageEnv { + private ConsensusCommitObjectStorageEnv() {} + + public static Properties getProperties(String testName) { + Properties properties = ObjectStorageEnv.getProperties(testName); + + // Add testName as a coordinator schema suffix + ConsensusCommitTestUtils.addSuffixToCoordinatorNamespace(properties, testName); + + return ConsensusCommitTestUtils.loadConsensusCommitProperties(properties); + } + + public static Map getCreationOptions() { + return ObjectStorageEnv.getCreationOptions(); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitSpecificIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitSpecificIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..20f961cd6a --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitSpecificIntegrationTestWithObjectStorage.java @@ -0,0 +1,130 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.ConsensusCommitSpecificIntegrationTestBase; +import com.scalar.db.transaction.consensuscommit.Isolation; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; + +public class ConsensusCommitSpecificIntegrationTestWithObjectStorage + extends ConsensusCommitSpecificIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return ConsensusCommitObjectStorageEnv.getProperties(testName); + } + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + scanWithIndex_PutWithOverlappedIndexKeyAndNonOverlappedConjunctionsGivenBefore_ShouldScan( + Isolation isolation) {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + scanWithIndex_OverlappingPutWithNonIndexedColumnGivenBefore_ShouldThrowIllegalArgumentException( + Isolation isolation) {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + scanWithIndex_NonOverlappingPutWithIndexedColumnGivenBefore_ShouldThrowIllegalArgumentException( + Isolation isolation) {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + scanWithIndex_OverlappingPutWithIndexedColumnGivenBefore_ShouldThrowIllegalArgumentException( + Isolation isolation) {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + scanWithIndex_OverlappingPutWithIndexedColumnAndConjunctionsGivenBefore_ShouldThrowIllegalArgumentException( + Isolation isolation) {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scan_ScanWithIndexGiven_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + 
@Disabled("Object Storage does not support index-related operations") + public void + scan_ScanWithIndexGiven_RecordUpdatedByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + scan_ScanWithIndexGiven_RecordUpdatedByMyself_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + scan_ScanWithIndexGiven_RecordDeletedByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + scan_ScanWithIndexGiven_RecordDeletedByMyself_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scan_ScanWithIndexWithLimitGiven_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void get_GetWithIndexGiven_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetWithIndexGiven_RecordUpdatedByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetWithIndexGiven_RecordUpdatedByMyself_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetWithIndexGiven_RecordDeletedByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetWithIndexGiven_RecordDeletedByMyself_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetWithIndexGiven_NoRecordsInIndexRange_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetWithIndexGiven_RecordInsertedIntoIndexRangeByMyself_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetWithIndexGiven_RecordInsertedIntoIndexRangeByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetWithIndexGiven_NoRecordsInIndexRange_RecordInsertedIntoIndexRangeByMyself_WithSerializable_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetWithIndexGiven_NoRecordsInIndexRange_RecordInsertedIntoIndexRangeByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void getAndUpdate_GetWithIndexGiven_ShouldUpdate(Isolation isolation) {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scanAndUpdate_ScanWithIndexGiven_ShouldUpdate(Isolation isolation) {} +} diff --git 
a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..884e464008 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java @@ -0,0 +1,13 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.ConsensusCommitWithIncludeMetadataEnabledIntegrationTestBase; +import java.util.Properties; + +public class ConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage + extends ConsensusCommitWithIncludeMetadataEnabledIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return ConsensusCommitObjectStorageEnv.getProperties(testName); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java index b8710a054d..ca4f5d5eef 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java @@ -24,10 +24,6 @@ protected AdminTestUtils getAdminTestUtils(String testName) { return new ObjectStorageAdminTestUtils(getProperties(testName)); } - @Override - @Disabled("Temporarily disabled because it includes DML operations") - public void truncateTable_ShouldTruncateProperly() {} - @Override @Disabled("Object Storage does not have a concept of namespaces") public void diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java index 9c10eb9fbf..21c1f41c9f 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java @@ -22,10 +22,6 @@ protected AdminTestUtils getAdminTestUtils(String testName) { return new ObjectStorageAdminTestUtils(getProperties(testName)); } - @Override - @Disabled("Temporarily disabled because it includes DML operations") - public void truncateTable_ShouldTruncateProperly() {} - @Override @Disabled("Object Storage does not have a concept of namespaces") public void diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCaseSensitivityIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCaseSensitivityIntegrationTest.java new file mode 100644 index 0000000000..8515cbe204 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCaseSensitivityIntegrationTest.java @@ -0,0 +1,45 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageCaseSensitivityIntegrationTestBase; +import java.util.Map; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; + +public class ObjectStorageCaseSensitivityIntegrationTest + extends 
DistributedStorageCaseSensitivityIntegrationTestBase {
+
+  @Override
+  protected Properties getProperties(String testName) {
+    return ObjectStorageEnv.getProperties(testName);
+  }
+
+  @Override
+  protected Map<String, String> getCreationOptions() {
+    return ObjectStorageEnv.getCreationOptions();
+  }
+
+  @Override
+  @Disabled("Object Storage does not support index-related operations")
+  public void get_GetGivenForIndexedColumn_ShouldGet() {}
+
+  @Override
+  @Disabled("Object Storage does not support index-related operations")
+  public void get_GetGivenForIndexedColumnWithMatchedConjunctions_ShouldGet() {}
+
+  @Override
+  @Disabled("Object Storage does not support index-related operations")
+  public void get_GetGivenForIndexedColumnWithUnmatchedConjunctions_ShouldReturnEmpty() {}
+
+  @Override
+  @Disabled("Object Storage does not support index-related operations")
+  public void
+      get_GetGivenForIndexedColumnMatchingMultipleRecords_ShouldThrowIllegalArgumentException() {}
+
+  @Override
+  @Disabled("Object Storage does not support index-related operations")
+  public void scan_ScanGivenForIndexedColumn_ShouldScan() {}
+
+  @Override
+  @Disabled("Object Storage does not support index-related operations")
+  public void scan_ScanGivenForNonIndexedColumn_ShouldThrowIllegalArgumentException() {}
+}
diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageColumnValueIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageColumnValueIntegrationTest.java
new file mode 100644
index 0000000000..1514c98f76
--- /dev/null
+++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageColumnValueIntegrationTest.java
@@ -0,0 +1,12 @@
+package com.scalar.db.storage.objectstorage;
+
+import com.scalar.db.api.DistributedStorageColumnValueIntegrationTestBase;
+import java.util.Properties;
+
+public class ObjectStorageColumnValueIntegrationTest
+    extends DistributedStorageColumnValueIntegrationTestBase {
+  @Override
+  protected Properties getProperties(String testName) {
+    return ObjectStorageEnv.getProperties(testName);
+  }
+}
diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageConditionalMutationIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageConditionalMutationIntegrationTest.java
new file mode 100644
index 0000000000..759dd22507
--- /dev/null
+++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageConditionalMutationIntegrationTest.java
@@ -0,0 +1,30 @@
+package com.scalar.db.storage.objectstorage;
+
+import com.scalar.db.api.ConditionalExpression;
+import com.scalar.db.api.DistributedStorageConditionalMutationIntegrationTestBase;
+import java.util.List;
+import java.util.Properties;
+import java.util.stream.Collectors;
+
+public class ObjectStorageConditionalMutationIntegrationTest
+    extends DistributedStorageConditionalMutationIntegrationTestBase {
+  @Override
+  protected Properties getProperties(String testName) {
+    return ObjectStorageEnv.getProperties(testName);
+  }
+
+  @Override
+  protected int getThreadNum() {
+    return 3;
+  }
+
+  @Override
+  protected List<OperatorAndDataType> getOperatorAndDataTypeListForTest() {
+    return super.getOperatorAndDataTypeListForTest().stream()
+        .filter(
+            operatorAndDataType ->
+                operatorAndDataType.getOperator() == ConditionalExpression.Operator.EQ
+                    || operatorAndDataType.getOperator() == ConditionalExpression.Operator.NE)
+        .collect(Collectors.toList());
+  }
+}
diff --git
a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCrossPartitionScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCrossPartitionScanIntegrationTest.java new file mode 100644 index 0000000000..e3761048db --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCrossPartitionScanIntegrationTest.java @@ -0,0 +1,30 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageCrossPartitionScanIntegrationTestBase; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +public class ObjectStorageCrossPartitionScanIntegrationTest + extends DistributedStorageCrossPartitionScanIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected int getThreadNum() { + return 3; + } + + @Override + protected boolean isParallelDdlSupported() { + return false; + } + + @Test + @Override + @Disabled("Cross partition scan with ordering is not supported in Object Storages") + public void scan_WithOrderingForNonPrimaryColumns_ShouldReturnProperResult() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageIntegrationTest.java new file mode 100644 index 0000000000..ced27160f3 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageIntegrationTest.java @@ -0,0 +1,43 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageIntegrationTestBase; +import java.util.Map; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; + +public class ObjectStorageIntegrationTest extends DistributedStorageIntegrationTestBase { + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected Map getCreationOptions() { + return ObjectStorageEnv.getCreationOptions(); + } + + @Override + @Disabled("Object Storage does not support index-related operations") + public void get_GetGivenForIndexedColumn_ShouldGet() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void get_GetGivenForIndexedColumnWithMatchedConjunctions_ShouldGet() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void get_GetGivenForIndexedColumnWithUnmatchedConjunctions_ShouldReturnEmpty() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetGivenForIndexedColumnMatchingMultipleRecords_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scan_ScanGivenForIndexedColumn_ShouldScan() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scan_ScanGivenForNonIndexedColumn_ShouldThrowIllegalArgumentException() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageJapaneseIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageJapaneseIntegrationTest.java new file mode 100644 index 0000000000..4610d84aed --- /dev/null +++ 
b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageJapaneseIntegrationTest.java
@@ -0,0 +1,13 @@
+package com.scalar.db.storage.objectstorage;
+
+import com.scalar.db.api.DistributedStorageJapaneseIntegrationTestBase;
+import java.util.Properties;
+
+public class ObjectStorageJapaneseIntegrationTest
+    extends DistributedStorageJapaneseIntegrationTestBase {
+
+  @Override
+  protected Properties getProperties(String testName) {
+    return ObjectStorageEnv.getProperties(testName);
+  }
+}
diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultipleClusteringKeyScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultipleClusteringKeyScanIntegrationTest.java
new file mode 100644
index 0000000000..e3a93e8ff6
--- /dev/null
+++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultipleClusteringKeyScanIntegrationTest.java
@@ -0,0 +1,51 @@
+package com.scalar.db.storage.objectstorage;
+
+import com.scalar.db.api.DistributedStorageMultipleClusteringKeyScanIntegrationTestBase;
+import com.scalar.db.io.Column;
+import com.scalar.db.io.DataType;
+import java.util.List;
+import java.util.Properties;
+import java.util.stream.Collectors;
+
+public class ObjectStorageMultipleClusteringKeyScanIntegrationTest
+    extends DistributedStorageMultipleClusteringKeyScanIntegrationTestBase {
+
+  @Override
+  protected Properties getProperties(String testName) {
+    return ObjectStorageEnv.getProperties(testName);
+  }
+
+  @Override
+  protected List<DataType> getDataTypes() {
+    // Return types without BLOB because blob is not supported for clustering key for now
+    return super.getDataTypes().stream()
+        .filter(type -> type != DataType.BLOB)
+        .collect(Collectors.toList());
+  }
+
+  @Override
+  protected boolean isParallelDdlSupported() {
+    return false;
+  }
+
+  @Override
+  protected int getThreadNum() {
+    return 3;
+  }
+
+  @Override
+  protected Column<?> getColumnWithMinValue(String columnName, DataType dataType) {
+    if (dataType == DataType.TEXT) {
+      return ObjectStorageTestUtils.getMinTextValue(columnName);
+    }
+    return super.getColumnWithMinValue(columnName, dataType);
+  }
+
+  @Override
+  protected Column<?> getColumnWithMaxValue(String columnName, DataType dataType) {
+    if (dataType == DataType.TEXT) {
+      return ObjectStorageTestUtils.getMaxTextValue(columnName);
+    }
+    return super.getColumnWithMaxValue(columnName, dataType);
+  }
+}
diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultiplePartitionKeyIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultiplePartitionKeyIntegrationTest.java
new file mode 100644
index 0000000000..d3b077df18
--- /dev/null
+++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultiplePartitionKeyIntegrationTest.java
@@ -0,0 +1,40 @@
+package com.scalar.db.storage.objectstorage;
+
+import com.scalar.db.api.DistributedStorageMultiplePartitionKeyIntegrationTestBase;
+import com.scalar.db.io.Column;
+import com.scalar.db.io.DataType;
+import java.util.Properties;
+
+public class ObjectStorageMultiplePartitionKeyIntegrationTest
+    extends DistributedStorageMultiplePartitionKeyIntegrationTestBase {
+  @Override
+  protected Properties getProperties(String testName) {
+    return ObjectStorageEnv.getProperties(testName);
+  }
+
+  @Override
+  protected int getThreadNum() {
+    return 3;
+  }
+
+  @Override
+  protected boolean isParallelDdlSupported() {
+    return false;
+  }
+
+  @Override
+  protected Column<?> getColumnWithMinValue(String columnName, DataType dataType) {
+    if (dataType == DataType.TEXT) {
+      return ObjectStorageTestUtils.getMinTextValue(columnName);
+    }
+    return super.getColumnWithMinValue(columnName, dataType);
+  }
+
+  @Override
+  protected Column<?> getColumnWithMaxValue(String columnName, DataType dataType) {
+    if (dataType == DataType.TEXT) {
+      return ObjectStorageTestUtils.getMaxTextValue(columnName);
+    }
+    return super.getColumnWithMaxValue(columnName, dataType);
+  }
+}
diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationAtomicityUnitIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationAtomicityUnitIntegrationTest.java
new file mode 100644
index 0000000000..98c4ea857f
--- /dev/null
+++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationAtomicityUnitIntegrationTest.java
@@ -0,0 +1,19 @@
+package com.scalar.db.storage.objectstorage;
+
+import com.scalar.db.api.DistributedStorageMutationAtomicityUnitIntegrationTestBase;
+import java.util.Map;
+import java.util.Properties;
+
+public class ObjectStorageMutationAtomicityUnitIntegrationTest
+    extends DistributedStorageMutationAtomicityUnitIntegrationTestBase {
+
+  @Override
+  protected Properties getProperties(String testName) {
+    return ObjectStorageEnv.getProperties(testName);
+  }
+
+  @Override
+  protected Map<String, String> getCreationOptions() {
+    return ObjectStorageEnv.getCreationOptions();
+  }
+}
diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSingleClusteringKeyScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSingleClusteringKeyScanIntegrationTest.java
new file mode 100644
index 0000000000..955b94330b
--- /dev/null
+++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSingleClusteringKeyScanIntegrationTest.java
@@ -0,0 +1,45 @@
+package com.scalar.db.storage.objectstorage;
+
+import com.scalar.db.api.DistributedStorageSingleClusteringKeyScanIntegrationTestBase;
+import com.scalar.db.io.Column;
+import com.scalar.db.io.DataType;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Properties;
+
+public class ObjectStorageSingleClusteringKeyScanIntegrationTest
+    extends DistributedStorageSingleClusteringKeyScanIntegrationTestBase {
+  @Override
+  protected Properties getProperties(String testName) {
+    return ObjectStorageEnv.getProperties(testName);
+  }
+
+  @Override
+  protected List<DataType> getClusteringKeyTypes() {
+    // Return types without BLOB because blob is not supported for clustering key for now
+    List<DataType> clusteringKeyTypes = new ArrayList<>();
+    for (DataType dataType : DataType.values()) {
+      if (dataType == DataType.BLOB) {
+        continue;
+      }
+      clusteringKeyTypes.add(dataType);
+    }
+    return clusteringKeyTypes;
+  }
+
+  @Override
+  protected Column<?> getColumnWithMinValue(String columnName, DataType dataType) {
+    if (dataType == DataType.TEXT) {
+      return ObjectStorageTestUtils.getMinTextValue(columnName);
+    }
+    return super.getColumnWithMinValue(columnName, dataType);
+  }
+
+  @Override
+  protected Column<?> getColumnWithMaxValue(String columnName, DataType dataType) {
+    if (dataType == DataType.TEXT) {
+      return ObjectStorageTestUtils.getMaxTextValue(columnName);
+    }
+    return super.getColumnWithMaxValue(columnName, dataType);
+  }
+}
diff --git
a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSinglePartitionKeyIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSinglePartitionKeyIntegrationTest.java new file mode 100644 index 0000000000..215993d078 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSinglePartitionKeyIntegrationTest.java @@ -0,0 +1,30 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageSinglePartitionKeyIntegrationTestBase; +import com.scalar.db.io.Column; +import com.scalar.db.io.DataType; +import java.util.Properties; + +public class ObjectStorageSinglePartitionKeyIntegrationTest + extends DistributedStorageSinglePartitionKeyIntegrationTestBase { + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected Column getColumnWithMinValue(String columnName, DataType dataType) { + if (dataType == DataType.TEXT) { + return ObjectStorageTestUtils.getMinTextValue(columnName); + } + return super.getColumnWithMinValue(columnName, dataType); + } + + @Override + protected Column getColumnWithMaxValue(String columnName, DataType dataType) { + if (dataType == DataType.TEXT) { + return ObjectStorageTestUtils.getMaxTextValue(columnName); + } + return super.getColumnWithMaxValue(columnName, dataType); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageTestUtils.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageTestUtils.java new file mode 100644 index 0000000000..0263043fed --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageTestUtils.java @@ -0,0 +1,19 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.io.TextColumn; +import com.scalar.db.util.TestUtils; +import java.util.stream.IntStream; + +public class ObjectStorageTestUtils { + public static TextColumn getMinTextValue(String columnName) { + // Since ObjectStorage can't handle an empty string correctly, we use "0" as the min value + return TextColumn.of(columnName, "0"); + } + + public static TextColumn getMaxTextValue(String columnName) { + // Since ObjectStorage can't handle 0xFF character correctly, we use "ZZZ..." 
as the max value + StringBuilder builder = new StringBuilder(); + IntStream.range(0, TestUtils.MAX_TEXT_COUNT).forEach(i -> builder.append('Z')); + return TextColumn.of(columnName, builder.toString()); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWithReservedKeywordIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWithReservedKeywordIntegrationTest.java new file mode 100644 index 0000000000..ce6a1ffc2e --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWithReservedKeywordIntegrationTest.java @@ -0,0 +1,45 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageWithReservedKeywordIntegrationTestBase; +import java.util.Map; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; + +public class ObjectStorageWithReservedKeywordIntegrationTest + extends DistributedStorageWithReservedKeywordIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected Map getCreationOptions() { + return ObjectStorageEnv.getCreationOptions(); + } + + @Override + @Disabled("Object Storage does not support index-related operations") + public void get_GetGivenForIndexedColumn_ShouldGet() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void get_GetGivenForIndexedColumnWithMatchedConjunctions_ShouldGet() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void get_GetGivenForIndexedColumnWithUnmatchedConjunctions_ShouldReturnEmpty() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void + get_GetGivenForIndexedColumnMatchingMultipleRecords_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scan_ScanGivenForIndexedColumn_ShouldScan() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scan_ScanGivenForNonIndexedColumn_ShouldThrowIllegalArgumentException() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperIntegrationTest.java index e32ab2ebac..aed79a73b9 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperIntegrationTest.java @@ -27,8 +27,9 @@ public class ObjectStorageWrapperIntegrationTest { private static final String TEST_OBJECT1 = "test-object1"; private static final String TEST_OBJECT2 = "test-object2"; private static final String TEST_OBJECT3 = "test-object3"; + private static final int BLOB_STORAGE_LIST_MAX_KEYS = 5000; - protected ObjectStorageWrapper wrapper; + private ObjectStorageWrapper wrapper; @BeforeAll public void beforeAll() throws ObjectStorageWrapperException { @@ -248,6 +249,32 @@ public void getKeys_WithNonExistingPrefix_ShouldReturnEmptySet() throws Exceptio assertThat(keys).isEmpty(); } + @Test + public void getKeys_WithPrefixForTheNumberOfObjectsExceedingTheListLimit_ShouldReturnAllKeys() + throws Exception { + String prefix = "prefix-"; + int numberOfObjects = 
BLOB_STORAGE_LIST_MAX_KEYS + 1; + try { + // Arrange + for (int i = 0; i < numberOfObjects; i++) { + wrapper.insert(prefix + i, "object-" + i); + } + + // Act + Set keys = wrapper.getKeys(prefix); + + // Assert + assertThat(keys.size()).isEqualTo(numberOfObjects); + for (int i = 0; i < numberOfObjects; i++) { + assertThat(keys).contains(prefix + i); + } + } finally { + for (int i = 0; i < numberOfObjects; i++) { + wrapper.delete(prefix + i); + } + } + } + @Test public void deleteByPrefix_WithExistingPrefix_ShouldDeleteObjectsSuccessfully() throws Exception { // Arrange @@ -277,6 +304,35 @@ public void deleteByPrefix_WithNonExistingPrefix_ShouldDoNothing() throws Except assertThat(keys).containsExactlyInAnyOrder(TEST_KEY1, TEST_KEY2, TEST_KEY3); } + @Test + public void + deleteByPrefix_WithPrefixForTheNumberOfObjectsExceedingTheListLimit_ShouldDeleteAllObjects() + throws Exception { + String prefix = "prefix-"; + int numberOfObjects = BLOB_STORAGE_LIST_MAX_KEYS + 1; + try { + // Arrange + for (int i = 0; i < numberOfObjects; i++) { + wrapper.insert(prefix + i, "object-" + i); + } + + // Act + wrapper.deleteByPrefix(prefix); + + // Assert + Set keys = wrapper.getKeys(prefix); + assertThat(keys).isEmpty(); + } finally { + for (int i = 0; i < numberOfObjects; i++) { + try { + wrapper.delete(prefix + i); + } catch (PreconditionFailedException e) { + // The object may have already been deleted, so do nothing + } + } + } + } + @Test public void close_ShouldNotThrowException() { // Arrange diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperLargeObjectWriteIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperLargeObjectWriteIntegrationTest.java new file mode 100644 index 0000000000..8d63ec1890 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperLargeObjectWriteIntegrationTest.java @@ -0,0 +1,160 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatCode; + +import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.storage.objectstorage.blobstorage.BlobStorageConfig; +import java.util.Arrays; +import java.util.Optional; +import java.util.Properties; +import java.util.stream.LongStream; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +public class ObjectStorageWrapperLargeObjectWriteIntegrationTest { + private static final Logger logger = + LoggerFactory.getLogger(ObjectStorageWrapperLargeObjectWriteIntegrationTest.class); + + private static final String TEST_NAME = "object_storage_wrapper_integration_test"; + private static final String TEST_KEY1 = "test-key1"; + private static final String TEST_KEY2 = "test-key2"; + private static final String TEST_KEY3 = "test-key3"; + + private String testObject1; + private String testObject2; + private String testObject3; + + private ObjectStorageWrapper wrapper; + + @BeforeAll + public void beforeAll() throws ObjectStorageWrapperException { + Properties properties = getProperties(TEST_NAME); + ObjectStorageConfig objectStorageConfig = + ObjectStorageUtils.getObjectStorageConfig(new DatabaseConfig(properties)); + wrapper = 
ObjectStorageWrapperFactory.create(objectStorageConfig); + long objectSizeInBytes = + LongStream.of(BlobStorageConfig.DEFAULT_PARALLEL_UPLOAD_THRESHOLD_IN_BYTES) + .max() + .getAsLong() + + 1; + + char[] charArray = new char[(int) objectSizeInBytes]; + Arrays.fill(charArray, 'a'); + testObject1 = new String(charArray); + Arrays.fill(charArray, 'b'); + testObject2 = new String(charArray); + Arrays.fill(charArray, 'c'); + testObject3 = new String(charArray); + + createObjects(); + } + + @AfterAll + public void afterAll() { + try { + deleteObjects(); + } catch (Exception e) { + logger.warn("Failed to delete objects", e); + } + + try { + if (wrapper != null) { + wrapper.close(); + } + } catch (Exception e) { + logger.warn("Failed to close wrapper", e); + } + } + + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + private void createObjects() throws ObjectStorageWrapperException { + wrapper.insert(TEST_KEY1, testObject1); + wrapper.insert(TEST_KEY2, testObject2); + wrapper.insert(TEST_KEY3, testObject3); + } + + protected void deleteObjects() throws ObjectStorageWrapperException { + wrapper.delete(TEST_KEY1); + wrapper.delete(TEST_KEY2); + wrapper.delete(TEST_KEY3); + } + + @Test + public void insert_NewObjectKeyGiven_ShouldInsertObjectSuccessfully() throws Exception { + // Arrange + String objectKey = "new-object-key"; + String object = "new-object"; + + try { + // Act + wrapper.insert(objectKey, object); + + // Assert + Optional response = wrapper.get(objectKey); + assertThat(response.isPresent()).isTrue(); + assertThat(response.get().getPayload()).isEqualTo(object); + } finally { + wrapper.delete(objectKey); + } + } + + @Test + public void insert_ExistingObjectKeyGiven_ShouldThrowPreconditionFailedException() { + // Arrange + + // Act Assert + assertThatCode(() -> wrapper.insert(TEST_KEY2, "another-object")) + .isInstanceOf(PreconditionFailedException.class); + } + + @Test + public void update_ExistingObjectKeyGiven_ShouldUpdateObjectSuccessfully() throws Exception { + // Arrange + String updatedObject = "updated-object2"; + Optional response1 = wrapper.get(TEST_KEY2); + assertThat(response1.isPresent()).isTrue(); + String version = response1.get().getVersion(); + + try { + // Act + wrapper.update(TEST_KEY2, updatedObject, version); + + // Assert + Optional response2 = wrapper.get(TEST_KEY2); + assertThat(response2.isPresent()).isTrue(); + assertThat(response2.get().getPayload()).isEqualTo(updatedObject); + } finally { + wrapper.delete(TEST_KEY2); + wrapper.insert(TEST_KEY2, testObject2); + } + } + + @Test + public void update_NonExistingObjectKeyGiven_ShouldThrowPreconditionFailedException() { + // Arrange + String objectKey = "non-existing-key"; + + // Act Assert + assertThatCode(() -> wrapper.update(objectKey, "some-object", "some-version")) + .isInstanceOf(PreconditionFailedException.class); + } + + @Test + public void update_WrongVersionGiven_ShouldThrowPreconditionFailedException() { + // Arrange + String wrongVersion = "wrong-version"; + + // Act Assert + assertThatCode(() -> wrapper.update(TEST_KEY2, "another-object", wrongVersion)) + .isInstanceOf(PreconditionFailedException.class); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java index 9d7c946b5e..ea50183cad 100644 --- 
a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java @@ -12,10 +12,6 @@ protected Properties getProps(String testName) { return ObjectStorageEnv.getProperties(testName); } - @Override - @Disabled("Temporarily disabled because it includes DML operations") - public void truncateTable_ShouldTruncateProperly() {} - @Override @Disabled("Object Storage does not support index-related operations") public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorrectly() {} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..7405f7e829 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionIntegrationTestWithObjectStorage.java @@ -0,0 +1,13 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.singlecrudoperation.SingleCrudOperationTransactionIntegrationTestBase; +import java.util.Properties; + +public class SingleCrudOperationTransactionIntegrationTestWithObjectStorage + extends SingleCrudOperationTransactionIntegrationTestBase { + + @Override + protected Properties getProps(String testName) { + return ObjectStorageEnv.getProperties(testName); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..d5ebdb1a82 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage.java @@ -0,0 +1,23 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.ConsensusCommitConfig; +import com.scalar.db.transaction.consensuscommit.TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestBase; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; + +public class TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestWithObjectStorage + extends TwoPhaseConsensusCommitCrossPartitionScanIntegrationTestBase { + + @Override + protected Properties getProps1(String testName) { + Properties properties = ConsensusCommitObjectStorageEnv.getProperties(testName); + properties.setProperty(ConsensusCommitConfig.ISOLATION_LEVEL, "SERIALIZABLE"); + return properties; + } + + @Test + @Override + @Disabled("Cross partition scan with ordering is not supported in Object Storages") + public void scan_ScanWithOrderingGivenForCommittedRecord_ShouldReturnRecords() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..1d278f8f25 --- /dev/null +++ 
b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitIntegrationTestWithObjectStorage.java @@ -0,0 +1,22 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.TwoPhaseConsensusCommitIntegrationTestBase; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; + +public class TwoPhaseConsensusCommitIntegrationTestWithObjectStorage + extends TwoPhaseConsensusCommitIntegrationTestBase { + + @Override + protected Properties getProps1(String testName) { + return ConsensusCommitObjectStorageEnv.getProperties(testName); + } + + @Override + @Disabled("Object Storage does not support index-related operations") + public void get_GetGivenForIndexColumn_ShouldReturnRecords() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void scanOrGetScanner_ScanGivenForIndexColumn_ShouldReturnRecords(ScanType scanType) {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitSpecificIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitSpecificIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..1e4b66e32b --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitSpecificIntegrationTestWithObjectStorage.java @@ -0,0 +1,13 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.TwoPhaseConsensusCommitSpecificIntegrationTestBase; +import java.util.Properties; + +public class TwoPhaseConsensusCommitSpecificIntegrationTestWithObjectStorage + extends TwoPhaseConsensusCommitSpecificIntegrationTestBase { + + @Override + protected Properties getProperties1(String testName) { + return ConsensusCommitObjectStorageEnv.getProperties(testName); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..38a95fd99e --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage.java @@ -0,0 +1,13 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestBase; +import java.util.Properties; + +public class TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestWithObjectStorage + extends TwoPhaseConsensusCommitWithIncludeMetadataEnabledIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return ConsensusCommitObjectStorageEnv.getProperties(testName); + } +} diff --git a/core/src/main/java/com/scalar/db/common/CoreError.java b/core/src/main/java/com/scalar/db/common/CoreError.java index fa097bad68..45ab74504a 100644 --- a/core/src/main/java/com/scalar/db/common/CoreError.java +++ b/core/src/main/java/com/scalar/db/common/CoreError.java @@ -889,6 +889,24 @@ public enum CoreError implements ScalarDbError { "Object Storage does not support the feature for altering column types", "", ""), + OBJECT_STORAGE_CROSS_PARTITION_SCAN_WITH_ORDERING_NOT_SUPPORTED( + Category.USER_ERROR, + 
"0256", + "Cross-partition scan with ordering is not supported in Object Storage", + "", + ""), + OBJECT_STORAGE_PRIMARY_KEY_CONTAINS_ILLEGAL_CHARACTER( + Category.USER_ERROR, + "0257", + "The value of the column %s in the primary key contains an illegal character. ", + "", + ""), + OBJECT_STORAGE_CONDITION_OPERATION_NOT_SUPPORTED_FOR_BLOB_TYPE( + Category.USER_ERROR, + "0258", + "Object Storage supports only EQ, NE, IS_NULL, and IS_NOT_NULL operations for the BLOB type in conditions. Mutation: %s", + "", + ""), // // Errors for the concurrency error category @@ -1016,6 +1034,12 @@ public enum CoreError implements ScalarDbError { "A conflict occurred when committing records. Details: %s", "", ""), + OBJECT_STORAGE_CONFLICT_OCCURRED_IN_MUTATION( + Category.CONCURRENCY_ERROR, + "0027", + "A transaction conflict occurred in the mutation. Details: %s", + "", + ""), // // Errors for the internal error category @@ -1197,6 +1221,10 @@ public enum CoreError implements ScalarDbError { ""), JDBC_MYSQL_GETTING_CONNECTION_METADATA_FAILED( Category.INTERNAL_ERROR, "0063", "Getting the MySQL JDBC connection metadata failed", "", ""), + OBJECT_STORAGE_ERROR_OCCURRED_IN_SELECTION( + Category.INTERNAL_ERROR, "0064", "An error occurred in the selection. Details: %s", "", ""), + OBJECT_STORAGE_ERROR_OCCURRED_IN_MUTATION( + Category.INTERNAL_ERROR, "0065", "An error occurred in the mutation. Details: %s", "", ""), // // Errors for the unknown transaction status error category diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ClusteringKeyComparator.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ClusteringKeyComparator.java new file mode 100644 index 0000000000..7e5188c63a --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ClusteringKeyComparator.java @@ -0,0 +1,39 @@ +package com.scalar.db.storage.objectstorage; + +import com.google.common.collect.Ordering; +import com.scalar.db.api.Scan; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.Column; +import java.util.Comparator; +import java.util.Map; + +public class ClusteringKeyComparator implements Comparator> { + private final TableMetadata metadata; + + public ClusteringKeyComparator(TableMetadata metadata) { + this.metadata = metadata; + } + + @Override + public int compare(Map clusteringKey1, Map clusteringKey2) { + for (String columnName : metadata.getClusteringKeyNames()) { + Scan.Ordering.Order order = metadata.getClusteringOrder(columnName); + + Column column1 = + ColumnValueMapper.convert( + clusteringKey1.get(columnName), columnName, metadata.getColumnDataType(columnName)); + Column column2 = + ColumnValueMapper.convert( + clusteringKey2.get(columnName), columnName, metadata.getColumnDataType(columnName)); + + int cmp = + order == Scan.Ordering.Order.ASC + ? 
Ordering.natural().compare(column1, column2) + : Ordering.natural().compare(column2, column1); + if (cmp != 0) { + return cmp; + } + } + return 0; + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ColumnValueMapper.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ColumnValueMapper.java new file mode 100644 index 0000000000..34e2b2d780 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ColumnValueMapper.java @@ -0,0 +1,79 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.io.BigIntColumn; +import com.scalar.db.io.BlobColumn; +import com.scalar.db.io.BooleanColumn; +import com.scalar.db.io.Column; +import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.DoubleColumn; +import com.scalar.db.io.FloatColumn; +import com.scalar.db.io.IntColumn; +import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; +import java.util.Base64; +import javax.annotation.Nullable; + +public class ColumnValueMapper { + public static Column convert(@Nullable Object recordValue, String name, DataType dataType) { + switch (dataType) { + case BOOLEAN: + return recordValue == null + ? BooleanColumn.ofNull(name) + : BooleanColumn.of(name, (boolean) recordValue); + case INT: + return recordValue == null + ? IntColumn.ofNull(name) + : IntColumn.of(name, ((Number) recordValue).intValue()); + case BIGINT: + return recordValue == null + ? BigIntColumn.ofNull(name) + : BigIntColumn.of(name, ((Number) recordValue).longValue()); + case FLOAT: + return recordValue == null + ? FloatColumn.ofNull(name) + : FloatColumn.of(name, ((Number) recordValue).floatValue()); + case DOUBLE: + return recordValue == null + ? DoubleColumn.ofNull(name) + : DoubleColumn.of(name, ((Number) recordValue).doubleValue()); + case TEXT: + return recordValue == null + ? TextColumn.ofNull(name) + : TextColumn.of(name, (String) recordValue); + case BLOB: + return recordValue == null + ? BlobColumn.ofNull(name) + : BlobColumn.of(name, Base64.getDecoder().decode((String) recordValue)); + case DATE: + return recordValue == null + ? DateColumn.ofNull(name) + : DateColumn.of( + name, TimeRelatedColumnEncodingUtils.decodeDate(((Number) recordValue).intValue())); + case TIME: + return recordValue == null + ? TimeColumn.ofNull(name) + : TimeColumn.of( + name, + TimeRelatedColumnEncodingUtils.decodeTime(((Number) recordValue).longValue())); + case TIMESTAMP: + return recordValue == null + ? TimestampColumn.ofNull(name) + : TimestampColumn.of( + name, + TimeRelatedColumnEncodingUtils.decodeTimestamp(((Number) recordValue).longValue())); + case TIMESTAMPTZ: + return recordValue == null + ? 
TimestampTZColumn.ofNull(name) + : TimestampTZColumn.of( + name, + TimeRelatedColumnEncodingUtils.decodeTimestampTZ( + ((Number) recordValue).longValue())); + default: + throw new AssertionError(); + } + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/MapVisitor.java b/core/src/main/java/com/scalar/db/storage/objectstorage/MapVisitor.java new file mode 100644 index 0000000000..6d9e2b4167 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/MapVisitor.java @@ -0,0 +1,92 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.io.BigIntColumn; +import com.scalar.db.io.BlobColumn; +import com.scalar.db.io.BooleanColumn; +import com.scalar.db.io.ColumnVisitor; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.DoubleColumn; +import com.scalar.db.io.FloatColumn; +import com.scalar.db.io.IntColumn; +import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.util.HashMap; +import java.util.Map; +import javax.annotation.concurrent.NotThreadSafe; + +@NotThreadSafe +public class MapVisitor implements ColumnVisitor { + private final Map<String, Object> values = new HashMap<>(); + + @SuppressFBWarnings("EI_EXPOSE_REP") + public Map<String, Object> get() { + return values; + } + + @Override + public void visit(BooleanColumn column) { + values.put(column.getName(), column.hasNullValue() ? null : column.getBooleanValue()); + } + + @Override + public void visit(IntColumn column) { + values.put(column.getName(), column.hasNullValue() ? null : column.getIntValue()); + } + + @Override + public void visit(BigIntColumn column) { + values.put(column.getName(), column.hasNullValue() ? null : column.getBigIntValue()); + } + + @Override + public void visit(FloatColumn column) { + values.put(column.getName(), column.hasNullValue() ? null : column.getFloatValue()); + } + + @Override + public void visit(DoubleColumn column) { + values.put(column.getName(), column.hasNullValue() ? null : column.getDoubleValue()); + } + + @Override + public void visit(TextColumn column) { + values.put(column.getName(), column.hasNullValue() ? null : column.getTextValue()); + } + + @Override + public void visit(BlobColumn column) { + values.put(column.getName(), column.hasNullValue() ? null : column.getBlobValue()); + } + + @Override + public void visit(DateColumn column) { + values.put( + column.getName(), + column.hasNullValue() ? null : TimeRelatedColumnEncodingUtils.encode(column)); + } + + @Override + public void visit(TimeColumn column) { + values.put( + column.getName(), + column.hasNullValue() ? null : TimeRelatedColumnEncodingUtils.encode(column)); + } + + @Override + public void visit(TimestampColumn column) { + values.put( + column.getName(), + column.hasNullValue() ? null : TimeRelatedColumnEncodingUtils.encode(column)); + } + + @Override + public void visit(TimestampTZColumn column) { + values.put( + column.getName(), + column.hasNullValue() ?
null : TimeRelatedColumnEncodingUtils.encode(column)); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/MutateStatementHandler.java b/core/src/main/java/com/scalar/db/storage/objectstorage/MutateStatementHandler.java new file mode 100644 index 0000000000..e1497c6617 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/MutateStatementHandler.java @@ -0,0 +1,308 @@ +package com.scalar.db.storage.objectstorage; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.scalar.db.api.Delete; +import com.scalar.db.api.DeleteIf; +import com.scalar.db.api.DeleteIfExists; +import com.scalar.db.api.Mutation; +import com.scalar.db.api.Put; +import com.scalar.db.api.PutIf; +import com.scalar.db.api.PutIfExists; +import com.scalar.db.api.PutIfNotExists; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.common.CoreError; +import com.scalar.db.common.TableMetadataManager; +import com.scalar.db.exception.storage.ExecutionException; +import com.scalar.db.exception.storage.NoMutationException; +import com.scalar.db.exception.storage.RetriableExecutionException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; + +public class MutateStatementHandler extends StatementHandler { + public MutateStatementHandler( + ObjectStorageWrapper wrapper, TableMetadataManager metadataManager) { + super(wrapper, metadataManager); + } + + public void handle(Mutation mutation) throws ExecutionException { + TableMetadata tableMetadata = metadataManager.getTableMetadata(mutation); + ObjectStorageMutation objectStorageMutation = + new ObjectStorageMutation(mutation, tableMetadata); + mutate( + getNamespace(mutation), + getTable(mutation), + objectStorageMutation.getConcatenatedPartitionKey(), + Collections.singletonList(mutation)); + } + + public void handle(List mutations) throws ExecutionException { + Map> mutationPerPartition = new HashMap<>(); + for (Mutation mutation : mutations) { + TableMetadata tableMetadata = metadataManager.getTableMetadata(mutation); + ObjectStorageMutation objectStorageMutation = + new ObjectStorageMutation(mutation, tableMetadata); + String partitionKey = objectStorageMutation.getConcatenatedPartitionKey(); + PartitionIdentifier partitionIdentifier = + PartitionIdentifier.of(getNamespace(mutation), getTable(mutation), partitionKey); + mutationPerPartition + .computeIfAbsent(partitionIdentifier, k -> new ArrayList<>()) + .add(mutation); + } + for (Map.Entry> entry : mutationPerPartition.entrySet()) { + PartitionIdentifier partitionIdentifier = entry.getKey(); + mutate( + partitionIdentifier.getNamespaceName(), + partitionIdentifier.getTableName(), + partitionIdentifier.getPartitionName(), + entry.getValue()); + } + } + + private void mutate( + String namespaceName, String tableName, String partitionKey, List mutations) + throws ExecutionException { + Map readVersionMap = new HashMap<>(); + Map partition = + getPartition(namespaceName, tableName, partitionKey, readVersionMap); + for (Mutation mutation : mutations) { + if (mutation instanceof Put) { + putInternal(partition, (Put) mutation); + } else { + assert mutation instanceof Delete; + deleteInternal(partition, (Delete) mutation); + } + } + applyPartitionWrite(namespaceName, tableName, partitionKey, partition, readVersionMap); + } + + private void putInternal(Map partition, Put put) + throws ExecutionException { + TableMetadata tableMetadata = 
metadataManager.getTableMetadata(put); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, tableMetadata); + if (!put.getCondition().isPresent()) { + ObjectStorageRecord existingRecord = partition.get(mutation.getRecordId()); + if (existingRecord == null) { + partition.put(mutation.getRecordId(), mutation.makeRecord()); + } else { + partition.put(mutation.getRecordId(), mutation.makeRecord(existingRecord)); + } + } else if (put.getCondition().get() instanceof PutIfNotExists) { + if (partition.containsKey(mutation.getRecordId())) { + throw new NoMutationException( + CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(put)); + } + partition.put(mutation.getRecordId(), mutation.makeRecord()); + } else if (put.getCondition().get() instanceof PutIfExists) { + ObjectStorageRecord existingRecord = partition.get(mutation.getRecordId()); + if (existingRecord == null) { + throw new NoMutationException( + CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(put)); + } + partition.put(mutation.getRecordId(), mutation.makeRecord(existingRecord)); + } else { + assert put.getCondition().get() instanceof PutIf; + ObjectStorageRecord existingRecord = partition.get(mutation.getRecordId()); + if (existingRecord == null) { + throw new NoMutationException( + CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(put)); + } + try { + validateConditions( + partition.get(mutation.getRecordId()), + put.getCondition().get().getExpressions(), + metadataManager.getTableMetadata(mutation.getOperation())); + } catch (ExecutionException e) { + throw new NoMutationException( + CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(put), e); + } + partition.put(mutation.getRecordId(), mutation.makeRecord(existingRecord)); + } + } + + private void deleteInternal(Map partition, Delete delete) + throws ExecutionException { + TableMetadata tableMetadata = metadataManager.getTableMetadata(delete); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, tableMetadata); + if (!delete.getCondition().isPresent()) { + partition.remove(mutation.getRecordId()); + } else if (delete.getCondition().get() instanceof DeleteIfExists) { + if (!partition.containsKey(mutation.getRecordId())) { + throw new NoMutationException( + CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(delete)); + } + partition.remove(mutation.getRecordId()); + } else { + assert delete.getCondition().get() instanceof DeleteIf; + if (!partition.containsKey(mutation.getRecordId())) { + throw new NoMutationException( + CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(delete)); + } + try { + validateConditions( + partition.get(mutation.getRecordId()), + delete.getCondition().get().getExpressions(), + metadataManager.getTableMetadata(mutation.getOperation())); + } catch (ExecutionException e) { + throw new NoMutationException( + CoreError.NO_MUTATION_APPLIED.buildMessage(), Collections.singletonList(delete), e); + } + partition.remove(mutation.getRecordId()); + } + } + + /** + * Applies the partition write. 
+ * + * @param namespaceName the namespace name + * @param tableName the table name + * @param partitionKey the partition key + * @param partition the partition to be written + * @param readVersionMap the map of read versions + * @throws ExecutionException if a failure occurs during the operation + */ + private void applyPartitionWrite( + String namespaceName, + String tableName, + String partitionKey, + Map partition, + Map readVersionMap) + throws ExecutionException { + if (readVersionMap.containsKey( + PartitionIdentifier.of(namespaceName, tableName, partitionKey))) { + String readVersion = + readVersionMap.get(PartitionIdentifier.of(namespaceName, tableName, partitionKey)); + if (!partition.isEmpty()) { + updatePartition(namespaceName, tableName, partitionKey, partition, readVersion); + } else { + deletePartition(namespaceName, tableName, partitionKey, readVersion); + } + } else { + if (!partition.isEmpty()) { + insertPartition(namespaceName, tableName, partitionKey, partition); + } + } + } + + /** + * Gets a partition from the object storage. + * + * @param namespaceName the namespace name + * @param tableName the table name + * @param partitionKey the partition key + * @param readVersionMap the map to store the read version + * @return the partition + * @throws ExecutionException if a failure occurs during the operation + */ + private Map getPartition( + String namespaceName, + String tableName, + String partitionKey, + Map readVersionMap) + throws ExecutionException { + String objectKey = ObjectStorageUtils.getObjectKey(namespaceName, tableName, partitionKey); + try { + Optional response = wrapper.get(objectKey); + if (!response.isPresent()) { + return new HashMap<>(); + } + readVersionMap.put( + PartitionIdentifier.of(namespaceName, tableName, partitionKey), + response.get().getVersion()); + return Serializer.deserialize( + response.get().getPayload(), new TypeReference>() {}); + } catch (ObjectStorageWrapperException e) { + throw new ExecutionException( + CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_MUTATION.buildMessage(e.getMessage()), e); + } + } + + /** + * Inserts a partition into the object storage. This method is called after confirming that the + * partition does not exist. + * + * @param namespaceName the namespace name + * @param tableName the table name + * @param partitionKey the partition key + * @param partition the partition to be inserted + * @throws ExecutionException if a failure occurs during the operation + */ + private void insertPartition( + String namespaceName, + String tableName, + String partitionKey, + Map partition) + throws ExecutionException { + try { + wrapper.insert( + ObjectStorageUtils.getObjectKey(namespaceName, tableName, partitionKey), + Serializer.serialize(partition)); + } catch (PreconditionFailedException e) { + throw new RetriableExecutionException( + CoreError.OBJECT_STORAGE_CONFLICT_OCCURRED_IN_MUTATION.buildMessage(e.getMessage()), e); + } catch (ObjectStorageWrapperException e) { + throw new ExecutionException( + CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_MUTATION.buildMessage(e.getMessage()), e); + } + } + + /** + * Updates a partition in the object storage. This method is called after confirming that the + * partition exists. 
+ * + * @param namespaceName the namespace name + * @param tableName the table name + * @param partitionKey the partition key + * @param partition the partition to be updated + * @param readVersion the read version + * @throws ExecutionException if a failure occurs during the operation + */ + private void updatePartition( + String namespaceName, + String tableName, + String partitionKey, + Map partition, + String readVersion) + throws ExecutionException { + try { + wrapper.update( + ObjectStorageUtils.getObjectKey(namespaceName, tableName, partitionKey), + Serializer.serialize(partition), + readVersion); + } catch (PreconditionFailedException e) { + throw new RetriableExecutionException( + CoreError.OBJECT_STORAGE_CONFLICT_OCCURRED_IN_MUTATION.buildMessage(e.getMessage()), e); + } catch (ObjectStorageWrapperException e) { + throw new ExecutionException( + CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_MUTATION.buildMessage(e.getMessage()), e); + } + } + + /** + * Deletes a partition from the object storage. This method is called after confirming that the + * partition exists. + * + * @param namespaceName the namespace name + * @param tableName the table name + * @param partitionKey the partition key + * @param readVersion the read version + * @throws ExecutionException if a failure occurs during the operation + */ + private void deletePartition( + String namespaceName, String tableName, String partitionKey, String readVersion) + throws ExecutionException { + try { + wrapper.delete( + ObjectStorageUtils.getObjectKey(namespaceName, tableName, partitionKey), readVersion); + } catch (PreconditionFailedException e) { + throw new RetriableExecutionException( + CoreError.OBJECT_STORAGE_CONFLICT_OCCURRED_IN_MUTATION.buildMessage(e.getMessage()), e); + } catch (ObjectStorageWrapperException e) { + throw new ExecutionException( + CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_MUTATION.buildMessage(e.getMessage()), e); + } + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorage.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorage.java new file mode 100644 index 0000000000..a3c5a3856b --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorage.java @@ -0,0 +1,157 @@ +package com.scalar.db.storage.objectstorage; + +import static com.scalar.db.util.ScalarDbUtils.copyAndPrepareForDynamicFiltering; + +import com.google.common.annotations.VisibleForTesting; +import com.scalar.db.api.Delete; +import com.scalar.db.api.Get; +import com.scalar.db.api.Mutation; +import com.scalar.db.api.Put; +import com.scalar.db.api.Result; +import com.scalar.db.api.Scan; +import com.scalar.db.api.Scanner; +import com.scalar.db.common.AbstractDistributedStorage; +import com.scalar.db.common.CoreError; +import com.scalar.db.common.FilterableScanner; +import com.scalar.db.common.StorageInfoProvider; +import com.scalar.db.common.TableMetadataManager; +import com.scalar.db.common.checker.OperationChecker; +import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.exception.storage.ExecutionException; +import java.io.IOException; +import java.util.List; +import java.util.Optional; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class ObjectStorage extends AbstractDistributedStorage { + private static final Logger logger = LoggerFactory.getLogger(ObjectStorage.class); + + private final ObjectStorageWrapper wrapper; + private final SelectStatementHandler selectStatementHandler; + private final MutateStatementHandler 
mutateStatementHandler; + private final OperationChecker operationChecker; + + public ObjectStorage(DatabaseConfig databaseConfig) { + super(databaseConfig); + if (databaseConfig.isCrossPartitionScanOrderingEnabled()) { + throw new IllegalArgumentException( + CoreError.OBJECT_STORAGE_CROSS_PARTITION_SCAN_WITH_ORDERING_NOT_SUPPORTED.buildMessage()); + } + ObjectStorageConfig objectStorageConfig = + ObjectStorageUtils.getObjectStorageConfig(databaseConfig); + wrapper = ObjectStorageWrapperFactory.create(objectStorageConfig); + ObjectStorageAdmin admin = new ObjectStorageAdmin(wrapper, objectStorageConfig); + TableMetadataManager metadataManager = + new TableMetadataManager(admin, databaseConfig.getMetadataCacheExpirationTimeSecs()); + operationChecker = + new ObjectStorageOperationChecker( + databaseConfig, metadataManager, new StorageInfoProvider(admin)); + selectStatementHandler = new SelectStatementHandler(wrapper, metadataManager); + mutateStatementHandler = new MutateStatementHandler(wrapper, metadataManager); + logger.info("ObjectStorage object is created properly"); + } + + @VisibleForTesting + ObjectStorage( + DatabaseConfig databaseConfig, + ObjectStorageWrapper wrapper, + SelectStatementHandler selectStatementHandler, + MutateStatementHandler mutateStatementHandler, + OperationChecker operationChecker) { + super(databaseConfig); + this.wrapper = wrapper; + this.selectStatementHandler = selectStatementHandler; + this.mutateStatementHandler = mutateStatementHandler; + this.operationChecker = operationChecker; + } + + @Override + public Optional get(Get get) throws ExecutionException { + get = copyAndSetTargetToIfNot(get); + operationChecker.check(get); + Scanner scanner = null; + try { + if (get.getConjunctions().isEmpty()) { + scanner = selectStatementHandler.handle(get); + } else { + scanner = + new FilterableScanner( + get, selectStatementHandler.handle(copyAndPrepareForDynamicFiltering(get))); + } + Optional ret = scanner.one(); + if (!scanner.one().isPresent()) { + return ret; + } else { + throw new IllegalArgumentException( + CoreError.GET_OPERATION_USED_FOR_NON_EXACT_MATCH_SELECTION.buildMessage(get)); + } + } finally { + if (scanner != null) { + try { + scanner.close(); + } catch (IOException e) { + logger.warn("Failed to close the scanner", e); + } + } + } + } + + @Override + public Scanner scan(Scan scan) throws ExecutionException { + scan = copyAndSetTargetToIfNot(scan); + operationChecker.check(scan); + if (scan.getConjunctions().isEmpty()) { + return selectStatementHandler.handle(scan); + } else { + return new FilterableScanner( + scan, selectStatementHandler.handle(copyAndPrepareForDynamicFiltering(scan))); + } + } + + @Override + public void put(Put put) throws ExecutionException { + put = copyAndSetTargetToIfNot(put); + operationChecker.check(put); + mutateStatementHandler.handle(put); + } + + @Override + public void put(List puts) throws ExecutionException { + mutate(puts); + } + + @Override + public void delete(Delete delete) throws ExecutionException { + delete = copyAndSetTargetToIfNot(delete); + operationChecker.check(delete); + mutateStatementHandler.handle(delete); + } + + @Override + public void delete(List deletes) throws ExecutionException { + mutate(deletes); + } + + @Override + public void mutate(List mutations) throws ExecutionException { + if (mutations.size() == 1) { + Mutation mutation = mutations.get(0); + if (mutation instanceof Put) { + put((Put) mutation); + return; + } else if (mutation instanceof Delete) { + delete((Delete) mutation); + return; + 
} + } + mutations = copyAndSetTargetToIfNot(mutations); + operationChecker.check(mutations); + mutateStatementHandler.handle(mutations); + } + + @Override + public void close() { + wrapper.close(); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageMutation.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageMutation.java new file mode 100644 index 0000000000..3556189489 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageMutation.java @@ -0,0 +1,63 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.Delete; +import com.scalar.db.api.Mutation; +import com.scalar.db.api.Put; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.Column; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import javax.annotation.Nonnull; +import javax.annotation.concurrent.Immutable; + +@Immutable +public class ObjectStorageMutation extends ObjectStorageOperation { + ObjectStorageMutation(Mutation mutation, TableMetadata metadata) { + super(mutation, metadata); + } + + @Nonnull + public ObjectStorageRecord makeRecord() { + Mutation mutation = (Mutation) getOperation(); + + if (mutation instanceof Delete) { + return new ObjectStorageRecord(); + } + Put put = (Put) mutation; + + return ObjectStorageRecord.newBuilder() + .id(getRecordId()) + .partitionKey(toMap(put.getPartitionKey().getColumns())) + .clusteringKey( + put.getClusteringKey().map(k -> toMap(k.getColumns())).orElse(Collections.emptyMap())) + .values(toMapForPut(put)) + .build(); + } + + @Nonnull + public ObjectStorageRecord makeRecord(ObjectStorageRecord existingRecord) { + Mutation mutation = (Mutation) getOperation(); + + if (mutation instanceof Delete) { + return new ObjectStorageRecord(); + } + Put put = (Put) mutation; + + ObjectStorageRecord newRecord = new ObjectStorageRecord(existingRecord); + toMapForPut(put).forEach((k, v) -> newRecord.getValues().put(k, v)); + return newRecord; + } + + private Map toMap(Collection> columns) { + MapVisitor visitor = new MapVisitor(); + columns.forEach(c -> c.accept(visitor)); + return visitor.get(); + } + + private Map toMapForPut(Put put) { + MapVisitor visitor = new MapVisitor(); + put.getColumns().values().forEach(c -> c.accept(visitor)); + return visitor.get(); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperation.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperation.java new file mode 100644 index 0000000000..d632009ae0 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperation.java @@ -0,0 +1,77 @@ +package com.scalar.db.storage.objectstorage; + +import com.google.common.base.Joiner; +import com.scalar.db.api.Operation; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.Column; +import java.util.HashMap; +import java.util.Map; +import javax.annotation.Nonnull; +import javax.annotation.concurrent.Immutable; + +@Immutable +public class ObjectStorageOperation { + private final Operation operation; + private final TableMetadata metadata; + + public ObjectStorageOperation(Operation operation, TableMetadata metadata) { + this.operation = operation; + this.metadata = metadata; + } + + @Nonnull + public Operation getOperation() { + return operation; + } + + @Nonnull + public String getConcatenatedPartitionKey() { + Map> keyMap = new HashMap<>(); + operation.getPartitionKey().getColumns().forEach(c -> 
keyMap.put(c.getName(), c)); + + ConcatenationVisitor visitor = new ConcatenationVisitor(); + metadata.getPartitionKeyNames().forEach(name -> keyMap.get(name).accept(visitor)); + + return visitor.build(); + } + + @Nonnull + public String getConcatenatedClusteringKey() { + Map> keyMap = new HashMap<>(); + operation + .getClusteringKey() + .ifPresent(k -> k.getColumns().forEach(c -> keyMap.put(c.getName(), c))); + + ConcatenationVisitor visitor = new ConcatenationVisitor(); + metadata.getClusteringKeyNames().forEach(name -> keyMap.get(name).accept(visitor)); + + return visitor.build(); + } + + @Nonnull + public String getRecordId() { + if (operation.getClusteringKey().isPresent()) { + return String.join( + String.valueOf(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER), + getConcatenatedPartitionKey(), + getConcatenatedClusteringKey()); + } + return getConcatenatedPartitionKey(); + } + + @SafeVarargs + public final void checkArgument(Class... expected) { + for (Class e : expected) { + if (e.isInstance(operation)) { + return; + } + } + throw new IllegalArgumentException( + Joiner.on(" ") + .join( + new String[] { + operation.getClass().toString(), "is passed where something like", + expected[0].toString(), "is expected" + })); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationChecker.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationChecker.java new file mode 100644 index 0000000000..e5700df190 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationChecker.java @@ -0,0 +1,154 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.ConditionalExpression; +import com.scalar.db.api.Delete; +import com.scalar.db.api.Get; +import com.scalar.db.api.Mutation; +import com.scalar.db.api.Operation; +import com.scalar.db.api.Put; +import com.scalar.db.api.Scan; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.common.CoreError; +import com.scalar.db.common.StorageInfoProvider; +import com.scalar.db.common.TableMetadataManager; +import com.scalar.db.common.checker.OperationChecker; +import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.exception.storage.ExecutionException; +import com.scalar.db.io.BigIntColumn; +import com.scalar.db.io.BlobColumn; +import com.scalar.db.io.BooleanColumn; +import com.scalar.db.io.ColumnVisitor; +import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.DoubleColumn; +import com.scalar.db.io.FloatColumn; +import com.scalar.db.io.IntColumn; +import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; + +public class ObjectStorageOperationChecker extends OperationChecker { + private static final char[] ILLEGAL_CHARACTERS_IN_PRIMARY_KEY = { + ObjectStorageUtils.OBJECT_KEY_DELIMITER, ObjectStorageUtils.CONCATENATED_KEY_DELIMITER, + }; + + private static final ColumnVisitor PRIMARY_KEY_COLUMN_CHECKER = + new ColumnVisitor() { + @Override + public void visit(BooleanColumn column) {} + + @Override + public void visit(IntColumn column) {} + + @Override + public void visit(BigIntColumn column) {} + + @Override + public void visit(FloatColumn column) {} + + @Override + public void visit(DoubleColumn column) {} + + @Override + public void visit(TextColumn column) { + String value = column.getTextValue(); + assert value != null; + + for (char illegalCharacter : 
ILLEGAL_CHARACTERS_IN_PRIMARY_KEY) { + if (value.indexOf(illegalCharacter) != -1) { + throw new IllegalArgumentException( + CoreError.OBJECT_STORAGE_PRIMARY_KEY_CONTAINS_ILLEGAL_CHARACTER.buildMessage( + column.getName(), value)); + } + } + } + + @Override + public void visit(BlobColumn column) {} + + @Override + public void visit(DateColumn column) {} + + @Override + public void visit(TimeColumn column) {} + + @Override + public void visit(TimestampColumn column) {} + + @Override + public void visit(TimestampTZColumn column) {} + }; + + public ObjectStorageOperationChecker( + DatabaseConfig databaseConfig, + TableMetadataManager metadataManager, + StorageInfoProvider storageInfoProvider) { + super(databaseConfig, metadataManager, storageInfoProvider); + } + + @Override + public void check(Get get) throws ExecutionException { + super.check(get); + checkPrimaryKey(get); + } + + @Override + public void check(Scan scan) throws ExecutionException { + super.check(scan); + checkPrimaryKey(scan); + scan.getStartClusteringKey() + .ifPresent( + c -> c.getColumns().forEach(column -> column.accept(PRIMARY_KEY_COLUMN_CHECKER))); + scan.getEndClusteringKey() + .ifPresent( + c -> c.getColumns().forEach(column -> column.accept(PRIMARY_KEY_COLUMN_CHECKER))); + } + + @Override + public void check(Put put) throws ExecutionException { + super.check(put); + checkPrimaryKey(put); + + TableMetadata metadata = getTableMetadata(put); + checkCondition(put, metadata); + } + + @Override + public void check(Delete delete) throws ExecutionException { + super.check(delete); + checkPrimaryKey(delete); + + TableMetadata metadata = getTableMetadata(delete); + checkCondition(delete, metadata); + } + + private void checkPrimaryKey(Operation operation) { + operation + .getPartitionKey() + .getColumns() + .forEach(column -> column.accept(PRIMARY_KEY_COLUMN_CHECKER)); + operation + .getClusteringKey() + .ifPresent( + c -> c.getColumns().forEach(column -> column.accept(PRIMARY_KEY_COLUMN_CHECKER))); + } + + private void checkCondition(Mutation mutation, TableMetadata metadata) { + if (!mutation.getCondition().isPresent()) { + return; + } + for (ConditionalExpression expression : mutation.getCondition().get().getExpressions()) { + if (metadata.getColumnDataType(expression.getColumn().getName()) == DataType.BLOB) { + if (expression.getOperator() != ConditionalExpression.Operator.EQ + && expression.getOperator() != ConditionalExpression.Operator.NE + && expression.getOperator() != ConditionalExpression.Operator.IS_NULL + && expression.getOperator() != ConditionalExpression.Operator.IS_NOT_NULL) { + throw new IllegalArgumentException( + CoreError.OBJECT_STORAGE_CONDITION_OPERATION_NOT_SUPPORTED_FOR_BLOB_TYPE.buildMessage( + mutation)); + } + } + } + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageProvider.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageProvider.java index 1f7b94275b..0be04f632e 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageProvider.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageProvider.java @@ -10,7 +10,7 @@ public interface ObjectStorageProvider extends DistributedStorageProvider { @Override default DistributedStorage createDistributedStorage(DatabaseConfig config) { - return null; + return new ObjectStorage(config); } @Override diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageRecord.java 
b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageRecord.java new file mode 100644 index 0000000000..5f6ff6a035 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageRecord.java @@ -0,0 +1,116 @@ +package com.scalar.db.storage.objectstorage; + +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; +import javax.annotation.Nullable; +import javax.annotation.concurrent.Immutable; + +@SuppressFBWarnings({"EI_EXPOSE_REP", "EI_EXPOSE_REP2"}) +@Immutable +public class ObjectStorageRecord { + private final String id; + private final Map partitionKey; + private final Map clusteringKey; + private final Map values; + + // The default constructor is required by Jackson to deserialize JSON object + public ObjectStorageRecord() { + this(null, null, null, null); + } + + public ObjectStorageRecord( + @Nullable String id, + @Nullable Map partitionKey, + @Nullable Map clusteringKey, + @Nullable Map values) { + this.id = id != null ? id : ""; + this.partitionKey = partitionKey != null ? new HashMap<>(partitionKey) : Collections.emptyMap(); + this.clusteringKey = + clusteringKey != null ? new HashMap<>(clusteringKey) : Collections.emptyMap(); + this.values = values != null ? new HashMap<>(values) : Collections.emptyMap(); + } + + public ObjectStorageRecord(ObjectStorageRecord record) { + this(record.getId(), record.getPartitionKey(), record.getClusteringKey(), record.getValues()); + } + + public String getId() { + return id; + } + + public Map getPartitionKey() { + return partitionKey; + } + + public Map getClusteringKey() { + return clusteringKey; + } + + public Map getValues() { + return values; + } + + @Override + public boolean equals(Object o) { + if (o == this) { + return true; + } + if (!(o instanceof ObjectStorageRecord)) { + return false; + } + ObjectStorageRecord other = (ObjectStorageRecord) o; + if (!other.getId().equals(id)) { + return false; + } + if (!other.getPartitionKey().equals(partitionKey)) { + return false; + } + if (!other.getClusteringKey().equals(clusteringKey)) { + return false; + } + return other.getValues().equals(values); + } + + @Override + public int hashCode() { + return Objects.hash(id, partitionKey, clusteringKey, values); + } + + public static Builder newBuilder() { + return new Builder(); + } + + public static final class Builder { + private String id; + private Map partitionKey = new HashMap<>(); + private Map clusteringKey = new HashMap<>(); + private Map values = new HashMap<>(); + + public Builder id(String id) { + this.id = id; + return this; + } + + public Builder partitionKey(Map partitionKey) { + this.partitionKey = partitionKey; + return this; + } + + public Builder clusteringKey(Map clusteringKey) { + this.clusteringKey = clusteringKey; + return this; + } + + public Builder values(Map values) { + this.values = values; + return this; + } + + public ObjectStorageRecord build() { + return new ObjectStorageRecord(id, partitionKey, clusteringKey, values); + } + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java index 38cbfcfad0..88e75a755f 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java @@ -157,8 +157,8 @@ private 
DataType convertDataType(String columnType) { } } - public static ObjectStorageTableMetadata.Builder newBuilder() { - return new ObjectStorageTableMetadata.Builder(); + public static Builder newBuilder() { + return new Builder(); } public static final class Builder { @@ -170,27 +170,27 @@ public static final class Builder { private Builder() {} - public ObjectStorageTableMetadata.Builder partitionKeyNames(LinkedHashSet val) { + public Builder partitionKeyNames(LinkedHashSet val) { partitionKeyNames = val; return this; } - public ObjectStorageTableMetadata.Builder clusteringKeyNames(LinkedHashSet val) { + public Builder clusteringKeyNames(LinkedHashSet val) { clusteringKeyNames = val; return this; } - public ObjectStorageTableMetadata.Builder clusteringOrders(Map val) { + public Builder clusteringOrders(Map val) { clusteringOrders = val; return this; } - public ObjectStorageTableMetadata.Builder secondaryIndexNames(Set val) { + public Builder secondaryIndexNames(Set val) { secondaryIndexNames = val; return this; } - public ObjectStorageTableMetadata.Builder columns(Map val) { + public Builder columns(Map val) { columns = val; return this; } diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/PartitionIdentifier.java b/core/src/main/java/com/scalar/db/storage/objectstorage/PartitionIdentifier.java new file mode 100644 index 0000000000..41d65deb90 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/PartitionIdentifier.java @@ -0,0 +1,45 @@ +package com.scalar.db.storage.objectstorage; + +public class PartitionIdentifier { + private final String namespaceName; + private final String tableName; + private final String partitionName; + + public PartitionIdentifier(String namespaceName, String tableName, String partitionName) { + this.namespaceName = namespaceName; + this.tableName = tableName; + this.partitionName = partitionName; + } + + public static PartitionIdentifier of( + String namespaceName, String tableName, String partitionName) { + return new PartitionIdentifier(namespaceName, tableName, partitionName); + } + + public String getNamespaceName() { + return namespaceName; + } + + public String getTableName() { + return tableName; + } + + public String getPartitionName() { + return partitionName; + } + + @Override + public int hashCode() { + return (namespaceName + tableName + partitionName).hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (!(obj instanceof PartitionIdentifier)) return false; + PartitionIdentifier other = (PartitionIdentifier) obj; + return namespaceName.equals(other.namespaceName) + && tableName.equals(other.tableName) + && partitionName.equals(other.partitionName); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ResultInterpreter.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ResultInterpreter.java new file mode 100644 index 0000000000..19246231c0 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ResultInterpreter.java @@ -0,0 +1,53 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.Result; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.common.ResultImpl; +import com.scalar.db.io.Column; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import javax.annotation.concurrent.ThreadSafe; + +@ThreadSafe +public class ResultInterpreter { + private final List 
projections; + private final TableMetadata metadata; + + @SuppressFBWarnings("EI_EXPOSE_REP2") + public ResultInterpreter(List projections, TableMetadata metadata) { + this.projections = Objects.requireNonNull(projections); + this.metadata = Objects.requireNonNull(metadata); + } + + public Result interpret(ObjectStorageRecord record) { + Map> ret = new HashMap<>(); + + if (projections.isEmpty()) { + metadata.getColumnNames().forEach(name -> add(ret, name, record, metadata)); + } else { + projections.forEach(name -> add(ret, name, record, metadata)); + } + + return new ResultImpl(ret, metadata); + } + + private void add( + Map> columns, + String name, + ObjectStorageRecord record, + TableMetadata metadata) { + Object value; + if (record.getPartitionKey().containsKey(name)) { + value = record.getPartitionKey().get(name); + } else if (record.getClusteringKey().containsKey(name)) { + value = record.getClusteringKey().get(name); + } else { + value = record.getValues().get(name); + } + + columns.put(name, ColumnValueMapper.convert(value, name, metadata.getColumnDataType(name))); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ScannerImpl.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ScannerImpl.java new file mode 100644 index 0000000000..d7d14a39f0 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ScannerImpl.java @@ -0,0 +1,59 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.Result; +import com.scalar.db.common.AbstractScanner; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Optional; +import javax.annotation.Nonnull; +import javax.annotation.concurrent.NotThreadSafe; + +@NotThreadSafe +public class ScannerImpl extends AbstractScanner { + private final Iterator recordIterator; + private final ResultInterpreter resultInterpreter; + private final int recordCountLimit; + + private int recordCount; + + @SuppressFBWarnings("EI_EXPOSE_REP2") + public ScannerImpl( + Iterator recordIterator, + ResultInterpreter resultInterpreter, + int recordCountLimit) { + this.recordIterator = recordIterator; + this.resultInterpreter = resultInterpreter; + this.recordCountLimit = recordCountLimit; + this.recordCount = 0; + } + + @Override + @Nonnull + public Optional one() { + if (!recordIterator.hasNext()) { + return Optional.empty(); + } + if (recordCountLimit != 0 && recordCount >= recordCountLimit) { + return Optional.empty(); + } + recordCount++; + return Optional.of(resultInterpreter.interpret(recordIterator.next())); + } + + @Override + @Nonnull + public List all() { + List results = new ArrayList<>(); + Optional result; + while ((result = one()).isPresent()) { + results.add(result.get()); + } + return results; + } + + @Override + public void close() throws IOException {} +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/SelectStatementHandler.java b/core/src/main/java/com/scalar/db/storage/objectstorage/SelectStatementHandler.java new file mode 100644 index 0000000000..abc190b170 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/SelectStatementHandler.java @@ -0,0 +1,296 @@ +package com.scalar.db.storage.objectstorage; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.google.common.collect.Ordering; +import com.scalar.db.api.Get; +import com.scalar.db.api.Scan; +import com.scalar.db.api.ScanAll; +import 
com.scalar.db.api.Scanner; +import com.scalar.db.api.Selection; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.common.CoreError; +import com.scalar.db.common.EmptyScanner; +import com.scalar.db.common.TableMetadataManager; +import com.scalar.db.exception.storage.ExecutionException; +import com.scalar.db.io.Column; +import com.scalar.db.io.Key; +import com.scalar.db.util.ScalarDbUtils; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; +import javax.annotation.Nonnull; +import javax.annotation.concurrent.ThreadSafe; + +@ThreadSafe +public class SelectStatementHandler extends StatementHandler { + public SelectStatementHandler( + ObjectStorageWrapper wrapper, TableMetadataManager metadataManager) { + super(wrapper, metadataManager); + } + + @Nonnull + public Scanner handle(Selection selection) throws ExecutionException { + TableMetadata tableMetadata = metadataManager.getTableMetadata(selection); + if (selection instanceof Get) { + if (ScalarDbUtils.isSecondaryIndexSpecified(selection, tableMetadata)) { + throw new UnsupportedOperationException( + CoreError.OBJECT_STORAGE_INDEX_NOT_SUPPORTED.buildMessage()); + } else { + return executeGet((Get) selection, tableMetadata); + } + } else { + if (selection instanceof ScanAll) { + return executeScanAll((ScanAll) selection, tableMetadata); + } else if (ScalarDbUtils.isSecondaryIndexSpecified(selection, tableMetadata)) { + throw new UnsupportedOperationException( + CoreError.OBJECT_STORAGE_INDEX_NOT_SUPPORTED.buildMessage()); + } else { + return executeScan((Scan) selection, tableMetadata); + } + } + } + + private Scanner executeGet(Get get, TableMetadata metadata) throws ExecutionException { + ObjectStorageOperation operation = new ObjectStorageOperation(get, metadata); + operation.checkArgument(Get.class); + Optional record = + getRecord( + getNamespace(get), + getTable(get), + operation.getConcatenatedPartitionKey(), + operation.getRecordId()); + if (!record.isPresent()) { + return new EmptyScanner(); + } + return new ScannerImpl( + Collections.singletonList(record.get()).iterator(), + new ResultInterpreter(get.getProjections(), metadata), + 1); + } + + private Scanner executeScan(Scan scan, TableMetadata metadata) throws ExecutionException { + ObjectStorageOperation operation = new ObjectStorageOperation(scan, metadata); + operation.checkArgument(Scan.class); + List records = + new ArrayList<>( + getRecordsInPartition( + getNamespace(scan), getTable(scan), operation.getConcatenatedPartitionKey())); + + records.sort( + (o1, o2) -> + new ClusteringKeyComparator(metadata) + .compare(o1.getClusteringKey(), o2.getClusteringKey())); + if (isReverseOrder(scan, metadata)) { + Collections.reverse(records); + } + + // If the scan is for DESC clustering order, use the end clustering key as a start key and the + // start clustering key as an end key + boolean scanForDescClusteringOrder = isScanForDescClusteringOrder(scan, metadata); + Optional startKey = + scanForDescClusteringOrder ? scan.getEndClusteringKey() : scan.getStartClusteringKey(); + boolean startInclusive = + scanForDescClusteringOrder ? scan.getEndInclusive() : scan.getStartInclusive(); + Optional endKey = + scanForDescClusteringOrder ? scan.getStartClusteringKey() : scan.getEndClusteringKey(); + boolean endInclusive = + scanForDescClusteringOrder ? 
scan.getStartInclusive() : scan.getEndInclusive(); + + if (startKey.isPresent()) { + records = + filterRecordsByClusteringKeyBoundary( + records, startKey.get(), true, startInclusive, metadata); + } + if (endKey.isPresent()) { + records = + filterRecordsByClusteringKeyBoundary( + records, endKey.get(), false, endInclusive, metadata); + } + + if (scan.getLimit() > 0) { + records = records.subList(0, Math.min(scan.getLimit(), records.size())); + } + + return new ScannerImpl( + records.iterator(), + new ResultInterpreter(scan.getProjections(), metadata), + scan.getLimit()); + } + + private Scanner executeScanAll(ScanAll scan, TableMetadata metadata) throws ExecutionException { + ObjectStorageOperation operation = new ObjectStorageOperation(scan, metadata); + operation.checkArgument(ScanAll.class); + Set records = getRecordsInTable(getNamespace(scan), getTable(scan)); + if (scan.getLimit() > 0) { + records = records.stream().limit(scan.getLimit()).collect(Collectors.toSet()); + } + return new ScannerImpl( + records.iterator(), + new ResultInterpreter(scan.getProjections(), metadata), + scan.getLimit()); + } + + private Map getPartition( + String namespace, String table, String partition) throws ObjectStorageWrapperException { + Optional response = + wrapper.get(ObjectStorageUtils.getObjectKey(namespace, table, partition)); + if (!response.isPresent()) { + return Collections.emptyMap(); + } + return Serializer.deserialize( + response.get().getPayload(), new TypeReference>() {}); + } + + private Optional getRecord( + String namespace, String table, String partition, String recordId) throws ExecutionException { + try { + Map recordsInPartition = + getPartition(namespace, table, partition); + if (recordsInPartition.containsKey(recordId)) { + return Optional.of(recordsInPartition.get(recordId)); + } else { + return Optional.empty(); + } + } catch (Exception e) { + throw new ExecutionException( + CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_SELECTION.buildMessage(e.getMessage()), e); + } + } + + private Set getRecordsInPartition( + String namespace, String table, String partition) throws ExecutionException { + try { + Map recordsInPartition = + getPartition(namespace, table, partition); + return new HashSet<>(recordsInPartition.values()); + } catch (Exception e) { + throw new ExecutionException( + CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_SELECTION.buildMessage(e.getMessage()), e); + } + } + + private Set getRecordsInTable(String namespace, String table) + throws ExecutionException { + try { + Set partitionNames = + wrapper.getKeys(ObjectStorageUtils.getObjectKey(namespace, table, "")).stream() + .map( + key -> + key.substring(key.lastIndexOf(ObjectStorageUtils.OBJECT_KEY_DELIMITER) + 1)) + .filter(partition -> !partition.isEmpty()) + .collect(Collectors.toSet()); + Set records = new HashSet<>(); + for (String key : partitionNames) { + records.addAll(getRecordsInPartition(namespace, table, key)); + } + return records; + } catch (Exception e) { + throw new ExecutionException( + CoreError.OBJECT_STORAGE_ERROR_OCCURRED_IN_SELECTION.buildMessage(e.getMessage()), e); + } + } + + private boolean isReverseOrder(Scan scan, TableMetadata metadata) { + Boolean reverse = null; + Iterator iterator = metadata.getClusteringKeyNames().iterator(); + for (Scan.Ordering ordering : scan.getOrderings()) { + String clusteringKeyName = iterator.next(); + if (!ordering.getColumnName().equals(clusteringKeyName)) { + throw new IllegalArgumentException( + 
CoreError.OPERATION_CHECK_ERROR_ORDERING_NOT_PROPERLY_SPECIFIED.buildMessage(scan)); + } + boolean rightOrder = + ordering.getOrder() != metadata.getClusteringOrder(ordering.getColumnName()); + if (reverse == null) { + reverse = rightOrder; + } else { + if (reverse != rightOrder) { + throw new IllegalArgumentException( + CoreError.OPERATION_CHECK_ERROR_ORDERING_NOT_PROPERLY_SPECIFIED.buildMessage(scan)); + } + } + } + return reverse != null && reverse; + } + + private boolean isScanForDescClusteringOrder(Scan scan, TableMetadata tableMetadata) { + if (scan.getStartClusteringKey().isPresent()) { + Key startClusteringKey = scan.getStartClusteringKey().get(); + String lastValueName = + startClusteringKey.getColumns().get(startClusteringKey.size() - 1).getName(); + return tableMetadata.getClusteringOrder(lastValueName) == Scan.Ordering.Order.DESC; + } + if (scan.getEndClusteringKey().isPresent()) { + Key endClusteringKey = scan.getEndClusteringKey().get(); + String lastValueName = + endClusteringKey.getColumns().get(endClusteringKey.size() - 1).getName(); + return tableMetadata.getClusteringOrder(lastValueName) == Scan.Ordering.Order.DESC; + } + return false; + } + + private List filterRecordsByClusteringKeyBoundary( + List records, + Key clusteringKey, + boolean isStart, + boolean isInclusive, + TableMetadata metadata) { + for (Column column : clusteringKey.getColumns()) { + Scan.Ordering.Order order = metadata.getClusteringOrder(column.getName()); + if (clusteringKey.getColumns().indexOf(column) == clusteringKey.size() - 1) { + return records.stream() + .filter( + record -> { + Column recordColumn = + ColumnValueMapper.convert( + record.getClusteringKey().get(column.getName()), + column.getName(), + column.getDataType()); + int cmp = + order == Scan.Ordering.Order.ASC + ? 
Ordering.natural().compare(recordColumn, column) + : Ordering.natural().compare(column, recordColumn); + if (isStart) { + if (isInclusive) { + return cmp >= 0; + } else { + return cmp > 0; + } + } else { + if (isInclusive) { + return cmp <= 0; + } else { + return cmp < 0; + } + } + }) + .collect(Collectors.toList()); + } else { + List tmpRecords = new ArrayList<>(); + records.forEach( + record -> { + Column recordColumn = + ColumnValueMapper.convert( + record.getClusteringKey().get(column.getName()), + column.getName(), + column.getDataType()); + int cmp = Ordering.natural().compare(recordColumn, column); + if (cmp == 0) { + tmpRecords.add(record); + } + }); + if (tmpRecords.isEmpty()) { + return Collections.emptyList(); + } + records = tmpRecords; + } + } + return records; + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/StatementHandler.java b/core/src/main/java/com/scalar/db/storage/objectstorage/StatementHandler.java new file mode 100644 index 0000000000..e86445e796 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/StatementHandler.java @@ -0,0 +1,135 @@ +package com.scalar.db.storage.objectstorage; + +import static com.google.common.base.Preconditions.checkNotNull; + +import com.google.common.collect.Ordering; +import com.scalar.db.api.ConditionalExpression; +import com.scalar.db.api.Operation; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.common.TableMetadataManager; +import com.scalar.db.exception.storage.ExecutionException; +import com.scalar.db.io.Column; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.util.List; +import javax.annotation.Nonnull; + +public class StatementHandler { + protected final ObjectStorageWrapper wrapper; + protected final TableMetadataManager metadataManager; + + @SuppressFBWarnings("EI_EXPOSE_REP2") + public StatementHandler(ObjectStorageWrapper wrapper, TableMetadataManager metadataManager) { + this.wrapper = checkNotNull(wrapper); + this.metadataManager = checkNotNull(metadataManager); + } + + @Nonnull + protected String getNamespace(Operation operation) { + assert operation.forNamespace().isPresent(); + return operation.forNamespace().get(); + } + + @Nonnull + protected String getTable(Operation operation) { + assert operation.forTable().isPresent(); + return operation.forTable().get(); + } + + protected void validateConditions( + ObjectStorageRecord record, List expressions, TableMetadata metadata) + throws ExecutionException { + for (ConditionalExpression expression : expressions) { + Column expectedColumn = expression.getColumn(); + Column actualColumn = + ColumnValueMapper.convert( + record.getValues().get(expectedColumn.getName()), + expectedColumn.getName(), + metadata.getColumnDataType(expectedColumn.getName())); + boolean validationFailed = false; + switch (expression.getOperator()) { + case EQ: + if (actualColumn.hasNullValue()) { + validationFailed = true; + break; + } + if (Ordering.natural().compare(actualColumn, expectedColumn) != 0) { + validationFailed = true; + break; + } + break; + case NE: + if (actualColumn.hasNullValue()) { + validationFailed = true; + break; + } + if (Ordering.natural().compare(actualColumn, expectedColumn) == 0) { + validationFailed = true; + break; + } + break; + case GT: + if (actualColumn.hasNullValue()) { + validationFailed = true; + break; + } + if (Ordering.natural().compare(actualColumn, expectedColumn) <= 0) { + validationFailed = true; + break; + } + break; + case GTE: + if (actualColumn.hasNullValue()) { + 
validationFailed = true; + break; + } + if (Ordering.natural().compare(actualColumn, expectedColumn) < 0) { + validationFailed = true; + break; + } + break; + case LT: + if (actualColumn.hasNullValue()) { + validationFailed = true; + break; + } + if (Ordering.natural().compare(actualColumn, expectedColumn) >= 0) { + validationFailed = true; + break; + } + break; + case LTE: + if (actualColumn.hasNullValue()) { + validationFailed = true; + break; + } + if (Ordering.natural().compare(actualColumn, expectedColumn) > 0) { + validationFailed = true; + break; + } + break; + case IS_NULL: + if (!actualColumn.hasNullValue()) { + validationFailed = true; + break; + } + break; + case IS_NOT_NULL: + if (actualColumn.hasNullValue()) { + validationFailed = true; + break; + } + break; + case LIKE: + case NOT_LIKE: + default: + throw new AssertionError("Unsupported operator"); + } + if (validationFailed) { + throw new ExecutionException( + String.format( + "A condition failed. ConditionalExpression: %s, Column: %s", + expectedColumn, actualColumn)); + } + } + } +} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/MapVisitorTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/MapVisitorTest.java new file mode 100644 index 0000000000..9f4768bd79 --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/MapVisitorTest.java @@ -0,0 +1,277 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.scalar.db.io.BigIntColumn; +import com.scalar.db.io.BlobColumn; +import com.scalar.db.io.BooleanColumn; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.DoubleColumn; +import com.scalar.db.io.FloatColumn; +import com.scalar.db.io.IntColumn; +import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public class MapVisitorTest { + private static final boolean ANY_BOOLEAN = false; + private static final BooleanColumn ANY_BOOLEAN_COLUMN = + BooleanColumn.of("any_boolean", ANY_BOOLEAN); + private static final int ANY_INT = Integer.MIN_VALUE; + private static final IntColumn ANY_INT_COLUMN = IntColumn.of("any_int", ANY_INT); + private static final long ANY_BIGINT = BigIntColumn.MAX_VALUE; + private static final BigIntColumn ANY_BIGINT_COLUMN = BigIntColumn.of("any_bigint", ANY_BIGINT); + private static final float ANY_FLOAT = Float.MIN_NORMAL; + private static final FloatColumn ANY_FLOAT_COLUMN = FloatColumn.of("any_float", ANY_FLOAT); + private static final double ANY_DOUBLE = Double.MIN_NORMAL; + private static final DoubleColumn ANY_DOUBLE_COLUMN = DoubleColumn.of("any_double", ANY_DOUBLE); + private static final String ANY_TEXT = "test"; + private static final TextColumn ANY_TEXT_COLUMN = TextColumn.of("any_text", ANY_TEXT); + private static final byte[] ANY_BLOB = ANY_TEXT.getBytes(StandardCharsets.UTF_8); + private static final BlobColumn ANY_BLOB_COLUMN = BlobColumn.of("any_blob", ANY_BLOB); + private static final LocalDate ANY_DATE = DateColumn.MAX_VALUE; + private static final DateColumn ANY_DATE_COLUMN = DateColumn.of("any_date", ANY_DATE); + private static final 
LocalTime ANY_TIME = TimeColumn.MAX_VALUE; + private static final TimeColumn ANY_TIME_COLUMN = TimeColumn.of("any_time", ANY_TIME); + private static final LocalDateTime ANY_TIMESTAMP = TimestampColumn.MAX_VALUE; + private static final TimestampColumn ANY_TIMESTAMP_COLUMN = + TimestampColumn.of("any_timestamp", ANY_TIMESTAMP); + private static final Instant ANY_TIMESTAMPTZ = TimestampTZColumn.MAX_VALUE; + private static final TimestampTZColumn ANY_TIMESTAMPTZ_COLUMN = + TimestampTZColumn.of("any_timestamptz", ANY_TIMESTAMPTZ); + + private MapVisitor visitor; + + @BeforeEach + public void setUp() { + visitor = new MapVisitor(); + } + + @Test + public void visit_BooleanColumnAcceptCalled_ShouldGetMap() { + // Act + ANY_BOOLEAN_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.get().get(ANY_BOOLEAN_COLUMN.getName())).isEqualTo(ANY_BOOLEAN); + } + + @Test + public void visit_BooleanColumnWithNullValueAcceptCalled_ShouldGetMap() { + // Act + BooleanColumn.ofNull("any_boolean").accept(visitor); + + // Assert + assertThat(visitor.get().containsKey("any_boolean")).isTrue(); + assertThat(visitor.get().get("any_boolean")).isNull(); + } + + @Test + public void visit_IntColumnAcceptCalled_ShouldGetMap() { + // Act + ANY_INT_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.get().get(ANY_INT_COLUMN.getName())).isEqualTo(ANY_INT); + } + + @Test + public void visit_IntColumnWithNullValueAcceptCalled_ShouldGetMap() { + // Act + IntColumn.ofNull("any_int").accept(visitor); + + // Assert + assertThat(visitor.get().containsKey("any_int")).isTrue(); + assertThat(visitor.get().get("any_int")).isNull(); + } + + @Test + public void visit_BigIntColumnAcceptCalled_ShouldGetMap() { + // Act + ANY_BIGINT_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.get().get(ANY_BIGINT_COLUMN.getName())).isEqualTo(ANY_BIGINT); + } + + @Test + public void visit_BigIntColumnWithNullValueAcceptCalled_ShouldGetMap() { + // Act + BigIntColumn.ofNull("any_bigint").accept(visitor); + + // Assert + assertThat(visitor.get().containsKey("any_bigint")).isTrue(); + assertThat(visitor.get().get("any_bigint")).isNull(); + } + + @Test + public void visit_FloatColumnAcceptCalled_ShouldGetMap() { + // Act + ANY_FLOAT_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.get().get(ANY_FLOAT_COLUMN.getName())).isEqualTo(ANY_FLOAT); + } + + @Test + public void visit_FloatColumnWithNullValueAcceptCalled_ShouldGetMap() { + // Act + FloatColumn.ofNull("any_float").accept(visitor); + + // Assert + assertThat(visitor.get().containsKey("any_float")).isTrue(); + assertThat(visitor.get().get("any_float")).isNull(); + } + + @Test + public void visit_DoubleColumnAcceptCalled_ShouldGetMap() { + // Act + ANY_DOUBLE_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.get().get(ANY_DOUBLE_COLUMN.getName())).isEqualTo(ANY_DOUBLE); + } + + @Test + public void visit_DoubleColumnWithNullValueAcceptCalled_ShouldGetMap() { + // Act + DoubleColumn.ofNull("any_double").accept(visitor); + + // Assert + assertThat(visitor.get().containsKey("any_double")).isTrue(); + assertThat(visitor.get().get("any_double")).isNull(); + } + + @Test + public void visit_TextColumnAcceptCalled_ShouldGetMap() { + // Act + ANY_TEXT_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.get().get(ANY_TEXT_COLUMN.getName())).isEqualTo(ANY_TEXT); + } + + @Test + public void visit_TextColumnWithNullValueAcceptCalled_ShouldGetMap() { + // Act + TextColumn.ofNull("any_text").accept(visitor); + + // Assert +
assertThat(visitor.get().containsKey("any_text")).isTrue(); + assertThat(visitor.get().get("any_text")).isNull(); + } + + @Test + public void visit_BlobColumnAcceptCalled_ShouldGetMap() { + // Act + ANY_BLOB_COLUMN.accept(visitor); + + // Assert + ByteBuffer expected = + (ByteBuffer) + ByteBuffer.allocate(ANY_TEXT.length()) + .put(ANY_TEXT.getBytes(StandardCharsets.UTF_8)) + .flip(); + assertThat(visitor.get().get(ANY_BLOB_COLUMN.getName())).isEqualTo(expected); + } + + @Test + public void visit_BlobColumnWithNullValueAcceptCalled_ShouldGetMap() { + // Act + BlobColumn.ofNull("any_blob").accept(visitor); + + // Assert + assertThat(visitor.get().containsKey("any_blob")).isTrue(); + assertThat(visitor.get().get("any_blob")).isNull(); + } + + @Test + public void visit_DateColumnAcceptCalled_ShouldGetMap() { + // Act + ANY_DATE_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.get().get(ANY_DATE_COLUMN.getName())) + .isEqualTo(TimeRelatedColumnEncodingUtils.encode(ANY_DATE_COLUMN)); + } + + @Test + public void visit_DateColumnWithNullValueAcceptCalled_ShouldGetMap() { + // Act + DateColumn.ofNull("any_date").accept(visitor); + + // Assert + assertThat(visitor.get().containsKey("any_date")).isTrue(); + assertThat(visitor.get().get("any_date")).isNull(); + } + + @Test + public void visit_TimeColumnAcceptCalled_ShouldGetMap() { + // Act + ANY_TIME_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.get().get(ANY_TIME_COLUMN.getName())) + .isEqualTo(TimeRelatedColumnEncodingUtils.encode(ANY_TIME_COLUMN)); + } + + @Test + public void visit_TimeColumnWithNullValueAcceptCalled_ShouldGetMap() { + // Act + TimeColumn.ofNull("any_time").accept(visitor); + + // Assert + assertThat(visitor.get().containsKey("any_time")).isTrue(); + assertThat(visitor.get().get("any_time")).isNull(); + } + + @Test + public void visit_TimestampColumnAcceptCalled_ShouldGetMap() { + // Act + ANY_TIMESTAMP_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.get().get(ANY_TIMESTAMP_COLUMN.getName())) + .isEqualTo(TimeRelatedColumnEncodingUtils.encode(ANY_TIMESTAMP_COLUMN)); + } + + @Test + public void visit_TimestampColumnWithNullValueAcceptCalled_ShouldGetMap() { + // Act + TimestampColumn.ofNull("any_timestamp").accept(visitor); + + // Assert + assertThat(visitor.get().containsKey("any_timestamp")).isTrue(); + assertThat(visitor.get().get("any_timestamp")).isNull(); + } + + @Test + public void visit_TimestampTZColumnAcceptCalled_ShouldGetMap() { + // Act + ANY_TIMESTAMPTZ_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.get().get(ANY_TIMESTAMPTZ_COLUMN.getName())) + .isEqualTo(TimeRelatedColumnEncodingUtils.encode(ANY_TIMESTAMPTZ_COLUMN)); + } + + @Test + public void visit_TimestampTZColumnWithNullValueAcceptCalled_ShouldGetMap() { + // Act + TimestampTZColumn.ofNull("any_timestamptz").accept(visitor); + + // Assert + assertThat(visitor.get().containsKey("any_timestamptz")).isTrue(); + assertThat(visitor.get().get("any_timestamptz")).isNull(); + } +} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/MutateStatementHandlerTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/MutateStatementHandlerTest.java new file mode 100644 index 0000000000..e7b5a38af0 --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/MutateStatementHandlerTest.java @@ -0,0 +1,908 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static 
org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.scalar.db.api.ConditionBuilder; +import com.scalar.db.api.Delete; +import com.scalar.db.api.Operation; +import com.scalar.db.api.Put; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.common.TableMetadataManager; +import com.scalar.db.exception.storage.ExecutionException; +import com.scalar.db.exception.storage.NoMutationException; +import com.scalar.db.io.DataType; +import com.scalar.db.io.Key; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Captor; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +public class MutateStatementHandlerTest { + private static final String ANY_NAMESPACE_NAME = "namespace"; + private static final String ANY_TABLE_NAME = "table"; + private static final String ANY_NAME_1 = "name1"; + private static final String ANY_NAME_2 = "name2"; + private static final String ANY_NAME_3 = "name3"; + private static final String ANY_NAME_4 = "name4"; + private static final String ANY_TEXT_1 = "text1"; + private static final String ANY_TEXT_2 = "text2"; + private static final int ANY_INT_1 = 1; + private static final int ANY_INT_2 = 2; + private static final String VERSION = "version1"; + + private MutateStatementHandler handler; + @Mock private ObjectStorageWrapper wrapper; + @Mock private TableMetadataManager metadataManager; + @Mock private TableMetadata metadata; + + @Captor private ArgumentCaptor objectKeyCaptor; + @Captor private ArgumentCaptor payloadCaptor; + @Captor private ArgumentCaptor versionCaptor; + + @BeforeEach + public void setUp() throws Exception { + MockitoAnnotations.openMocks(this).close(); + + handler = new MutateStatementHandler(wrapper, metadataManager); + + when(metadataManager.getTableMetadata(any(Operation.class))).thenReturn(metadata); + when(metadata.getPartitionKeyNames()) + .thenReturn(new LinkedHashSet<>(Collections.singletonList(ANY_NAME_1))); + when(metadata.getClusteringKeyNames()) + .thenReturn(new LinkedHashSet<>(Collections.singletonList(ANY_NAME_2))); + when(metadata.getColumnDataType(ANY_NAME_3)).thenReturn(DataType.INT); + when(metadata.getColumnDataType(ANY_NAME_4)).thenReturn(DataType.INT); + } + + private Put preparePut() { + Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); + Key clusteringKey = Key.ofText(ANY_NAME_2, ANY_TEXT_2); + return Put.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + .intValue(ANY_NAME_3, ANY_INT_1) + .intValue(ANY_NAME_4, ANY_INT_2) + .build(); + } + + private Put preparePutWithoutClusteringKey() { + Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); + return Put.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .intValue(ANY_NAME_3, ANY_INT_1) + .intValue(ANY_NAME_4, ANY_INT_2) + .build(); + } + + private Delete prepareDelete() { + Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); + Key clusteringKey = Key.ofText(ANY_NAME_2, ANY_TEXT_2); + return Delete.newBuilder() + 
.namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + .build(); + } + + private Delete prepareDeleteWithoutClusteringKey() { + Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); + return Delete.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .build(); + } + + private ObjectStorageRecord prepareExistingRecord() { + Map values = new HashMap<>(); + values.put(ANY_NAME_3, ANY_INT_1); + values.put(ANY_NAME_4, ANY_INT_2); + return ObjectStorageRecord.newBuilder().id("concat_key").values(values).build(); + } + + private void setupNonExistentPartition() throws ObjectStorageWrapperException { + when(wrapper.get(anyString())).thenReturn(Optional.empty()); + } + + private void setupPartitionWithRecord(String recordId) throws ObjectStorageWrapperException { + Map partition = new HashMap<>(); + partition.put(recordId, prepareExistingRecord()); + String serializedPartition = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedPartition, VERSION); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + } + + private void setupPartitionWithRecords(String recordId, String... additionalRecordIds) + throws ObjectStorageWrapperException { + Map partition = new HashMap<>(); + partition.put(recordId, prepareExistingRecord()); + for (String additionalRecordId : additionalRecordIds) { + partition.put(additionalRecordId, prepareExistingRecord()); + } + String serializedPartition = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedPartition, VERSION); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + } + + @Test + public void handle_PutWithoutConditionsGiven_WhenPartitionDoesNotExist_ShouldCallWrapperInsert() + throws Exception { + // Arrange + Put put = preparePut(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); + setupNonExistentPartition(); + + // Act + handler.handle(put); + + // Assert + assert_Put_WhenPartitionDoesNotExist_ShouldCallWrapperInsert( + expectedObjectKey, mutation.getRecordId()); + } + + @Test + public void handle_PutWithoutConditionsGiven_WhenPartitionExists_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Put put = preparePut(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); + setupPartitionWithRecord(mutation.getRecordId()); + + // Act + handler.handle(put); + + // Assert + assert_Put_WhenPartitionExists_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getRecordId()); + } + + @Test + public void + handle_PutWithoutClusteringKeyGiven_WhenPartitionDoesNotExist_ShouldCallWrapperInsert() + throws Exception { + // Arrange + Put put = preparePutWithoutClusteringKey(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); + setupNonExistentPartition(); + + // Act + handler.handle(put); + + // Assert + assert_Put_WhenPartitionDoesNotExist_ShouldCallWrapperInsert( + expectedObjectKey, 
mutation.getRecordId()); + } + + @Test + public void handle_PutWithoutClusteringKeyGiven_WhenPartitionExists_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Put put = preparePutWithoutClusteringKey(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); + setupPartitionWithRecord(mutation.getRecordId()); + + // Act + handler.handle(put); + + // Assert + verify(wrapper).get(objectKeyCaptor.capture()); + assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + + assert_Put_WhenPartitionExists_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getRecordId()); + } + + @Test + public void handle_PutWithoutConditionsWrapperExceptionThrown_ShouldThrowExecutionException() + throws Exception { + // Arrange + Put put = preparePut(); + ObjectStorageWrapperException exception = new ObjectStorageWrapperException("Test error"); + when(wrapper.get(anyString())).thenThrow(exception); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(put)) + .isInstanceOf(ExecutionException.class) + .hasCause(exception); + } + + @Test + public void handle_PutIfNotExistsGiven_WhenPartitionDoesNotExist_ShouldCallWrapperInsert() + throws Exception { + // Arrange + Put put = Put.newBuilder(preparePut()).condition(ConditionBuilder.putIfNotExists()).build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); + setupNonExistentPartition(); + + // Act + handler.handle(put); + + // Assert + assert_Put_WhenPartitionDoesNotExist_ShouldCallWrapperInsert( + expectedObjectKey, mutation.getRecordId()); + } + + @Test + public void + handle_PutIfNotExistsGiven_WhenPartitionExistsButRecordDoesNotExist_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Put put = Put.newBuilder(preparePut()).condition(ConditionBuilder.putIfNotExists()).build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); + setupPartitionWithRecord("another_record_key"); + + // Act + handler.handle(put); + + // Assert + assert_Put_WhenPartitionExists_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getRecordId()); + } + + @Test + public void + handle_PutIfNotExistsGiven_WhenPartitionAndRecordExist_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Put put = Put.newBuilder(preparePut()).condition(ConditionBuilder.putIfNotExists()).build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + setupPartitionWithRecord(mutation.getRecordId()); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(put)).isInstanceOf(NoMutationException.class); + } + + @Test + public void handle_PutIfExistsGiven_WhenPartitionDoesNotExist_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Put put = Put.newBuilder(preparePut()).condition(ConditionBuilder.putIfExists()).build(); + setupNonExistentPartition(); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(put)).isInstanceOf(NoMutationException.class); + } + + @Test + public void + handle_PutIfExistsGiven_WhenPartitionExistsButRecordDoesNotExist_ShouldThrowNoMutationException() + throws Exception { + // 
Arrange + Put put = Put.newBuilder(preparePut()).condition(ConditionBuilder.putIfExists()).build(); + setupPartitionWithRecord("another_record_key"); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(put)).isInstanceOf(NoMutationException.class); + } + + @Test + public void handle_PutIfExistsGiven_WhenPartitionAndRecordExist_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Put put = Put.newBuilder(preparePut()).condition(ConditionBuilder.putIfExists()).build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); + setupPartitionWithRecord(mutation.getRecordId()); + + // Act + handler.handle(put); + + // Assert + assert_Put_WhenPartitionExists_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getRecordId()); + } + + @Test + public void + handle_PutIfGiven_WhenConditionMatchesAndPartitionDoesNotExist_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Put put = + Put.newBuilder(preparePut()) + .condition( + ConditionBuilder.putIf(ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) + .build()) + .build(); + setupNonExistentPartition(); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(put)).isInstanceOf(NoMutationException.class); + } + + @Test + public void + handle_PutIfGiven_WhenConditionMatchesAndPartitionExistsButRecordDoesNotExist_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Put put = + Put.newBuilder(preparePut()) + .condition( + ConditionBuilder.putIf(ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) + .build()) + .build(); + setupPartitionWithRecord("another_record_key"); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(put)).isInstanceOf(NoMutationException.class); + } + + @Test + public void + handle_PutIfGiven_WhenConditionMatchesAndPartitionAndRecordExist_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Put put = + Put.newBuilder(preparePut()) + .condition( + ConditionBuilder.putIf(ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) + .build()) + .build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); + setupPartitionWithRecord(mutation.getRecordId()); + + // Act + handler.handle(put); + + // Assert + assert_Put_WhenPartitionExists_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getRecordId()); + } + + @Test + public void handle_PutIfGiven_WhenConditionDoesNotMatch_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Put put = + Put.newBuilder(preparePut()) + .condition( + ConditionBuilder.putIf(ConditionBuilder.column(ANY_NAME_3).isEqualToInt(999)) + .build()) + .build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(put, metadata); + setupPartitionWithRecord(mutation.getRecordId()); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(put)).isInstanceOf(NoMutationException.class); + } + + private void assert_Put_WhenPartitionDoesNotExist_ShouldCallWrapperInsert( + String expectedObjectKey, String expectedConcatenatedKey) + throws ObjectStorageWrapperException { + verify(wrapper).get(objectKeyCaptor.capture()); + assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + + verify(wrapper).insert(objectKeyCaptor.capture(), payloadCaptor.capture()); + 
assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + + Map<String, ObjectStorageRecord> insertedPartition = + Serializer.deserialize( + payloadCaptor.getValue(), new TypeReference<Map<String, ObjectStorageRecord>>() {}); + assertThat(insertedPartition).containsKey(expectedConcatenatedKey); + assertThat(insertedPartition.get(expectedConcatenatedKey).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + } + + private void assert_Put_WhenPartitionExists_ShouldCallWrapperUpdate( + String expectedObjectKey, String expectedConcatenatedKey) + throws ObjectStorageWrapperException { + verify(wrapper) + .update(objectKeyCaptor.capture(), payloadCaptor.capture(), versionCaptor.capture()); + assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + + Map<String, ObjectStorageRecord> updatedPartition = + Serializer.deserialize( + payloadCaptor.getValue(), new TypeReference<Map<String, ObjectStorageRecord>>() {}); + assertThat(updatedPartition).containsKey(expectedConcatenatedKey); + assertThat(updatedPartition.get(expectedConcatenatedKey).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + assertThat(versionCaptor.getValue()).isEqualTo(VERSION); + } + + @Test + public void + handle_DeleteWithoutConditionsGiven_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Delete delete = prepareDelete(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation.getConcatenatedPartitionKey()); + String expectedExistingRecordKey = "existing_record_key"; + setupPartitionWithRecords(mutation.getRecordId(), expectedExistingRecordKey); + + // Act + handler.handle(delete); + + // Assert + assert_Delete_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getConcatenatedPartitionKey(), expectedExistingRecordKey); + } + + @Test + public void handle_DeleteWithoutConditionsGiven_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete() + throws Exception { + // Arrange + Delete delete = prepareDelete(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + String concatenatedPartitionKey = mutation.getConcatenatedPartitionKey(); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); + setupPartitionWithRecord(mutation.getRecordId()); + + // Act + handler.handle(delete); + + // Assert + assert_Delete_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete(expectedObjectKey); + } + + @Test + public void + handle_DeleteWithoutClusteringKeyGiven_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Delete delete = prepareDeleteWithoutClusteringKey(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + String concatenatedPartitionKey = mutation.getConcatenatedPartitionKey(); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); + String expectedExistingRecordKey = "existing_record_key"; + setupPartitionWithRecords(mutation.getRecordId(), expectedExistingRecordKey); + + // Act + handler.handle(delete); + + // Assert + assert_Delete_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getRecordId(), expectedExistingRecordKey); + } + + @Test + public void + handle_DeleteWithoutClusteringKeyGiven_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete() + throws Exception { + // Arrange + Delete delete
= prepareDeleteWithoutClusteringKey(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + String concatenatedPartitionKey = mutation.getConcatenatedPartitionKey(); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); + setupPartitionWithRecord(mutation.getRecordId()); + + // Act + handler.handle(delete); + + // Assert + assert_Delete_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete(expectedObjectKey); + } + + @Test + public void handle_DeleteWithoutConditionsWrapperExceptionThrown_ShouldThrowExecutionException() + throws Exception { + // Arrange + Delete delete = prepareDelete(); + ObjectStorageWrapperException exception = new ObjectStorageWrapperException("Test error"); + when(wrapper.get(anyString())).thenThrow(exception); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(delete)) + .isInstanceOf(ExecutionException.class) + .hasCause(exception); + } + + @Test + public void handle_DeleteIfExistsGiven_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Delete delete = + Delete.newBuilder(prepareDelete()).condition(ConditionBuilder.deleteIfExists()).build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + String concatenatedPartitionKey = mutation.getConcatenatedPartitionKey(); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); + String expectedExistingRecordKey = "existing_record_key"; + setupPartitionWithRecords(mutation.getRecordId(), expectedExistingRecordKey); + + // Act + handler.handle(delete); + + // Assert + assert_Delete_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getRecordId(), expectedExistingRecordKey); + } + + @Test + public void handle_DeleteIfExistsGiven_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete() + throws Exception { + // Arrange + Delete delete = + Delete.newBuilder(prepareDelete()).condition(ConditionBuilder.deleteIfExists()).build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + String concatenatedPartitionKey = mutation.getConcatenatedPartitionKey(); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); + setupPartitionWithRecord(mutation.getRecordId()); + + // Act + handler.handle(delete); + + // Assert + assert_Delete_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete(expectedObjectKey); + } + + @Test + public void handle_DeleteIfExistsGiven_WhenPartitionDoesNotExist_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Delete delete = + Delete.newBuilder(prepareDelete()).condition(ConditionBuilder.deleteIfExists()).build(); + setupNonExistentPartition(); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(delete)).isInstanceOf(NoMutationException.class); + } + + @Test + public void + handle_DeleteIfExistsGiven_WhenPartitionExistsButRecordDoesNotExist_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Delete delete = + Delete.newBuilder(prepareDelete()).condition(ConditionBuilder.deleteIfExists()).build(); + setupPartitionWithRecord("another_record_key"); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(delete)).isInstanceOf(NoMutationException.class); + } + + @Test + public void + handle_DeleteIfGiven_WhenConditionMatchesAndPartitionAndRecordExistAndNewPartitionIsNotEmpty_ShouldCallWrapperUpdate() + 
throws Exception { + // Arrange + Delete delete = + Delete.newBuilder(prepareDelete()) + .condition( + ConditionBuilder.deleteIf( + ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) + .build()) + .build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + String concatenatedPartitionKey = mutation.getConcatenatedPartitionKey(); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); + String expectedExistingRecordKey = "existing_record_key"; + setupPartitionWithRecords(mutation.getRecordId(), expectedExistingRecordKey); + + // Act + handler.handle(delete); + + // Assert + assert_Delete_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate( + expectedObjectKey, mutation.getRecordId(), expectedExistingRecordKey); + } + + @Test + public void + handle_DeleteIfGiven_WhenConditionMatchesAndPartitionAndRecordExistAndPartitionIsEmpty_ShouldCallWrapperDelete() + throws Exception { + // Arrange + Delete delete = + Delete.newBuilder(prepareDelete()) + .condition( + ConditionBuilder.deleteIf( + ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) + .build()) + .build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + String concatenatedPartitionKey = mutation.getConcatenatedPartitionKey(); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, concatenatedPartitionKey); + setupPartitionWithRecord(mutation.getRecordId()); + + // Act + handler.handle(delete); + + // Assert + assert_Delete_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete(expectedObjectKey); + } + + @Test + public void + handle_DeleteIfGiven_WhenConditionMatchesAndPartitionExistsButRecordDoesNotExist_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Delete delete = + Delete.newBuilder(prepareDelete()) + .condition( + ConditionBuilder.deleteIf( + ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) + .build()) + .build(); + setupPartitionWithRecord("another_record_key"); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(delete)).isInstanceOf(NoMutationException.class); + } + + @Test + public void + handle_DeleteIfGiven_WhenConditionMatchesAndPartitionDoesNotExist_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Delete delete = + Delete.newBuilder(prepareDelete()) + .condition( + ConditionBuilder.deleteIf( + ConditionBuilder.column(ANY_NAME_3).isEqualToInt(ANY_INT_1)) + .build()) + .build(); + setupNonExistentPartition(); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(delete)).isInstanceOf(NoMutationException.class); + } + + @Test + public void handle_DeleteIfGiven_WhenConditionDoesNotMatch_ShouldThrowNoMutationException() + throws Exception { + // Arrange + Delete delete = + Delete.newBuilder(prepareDelete()) + .condition( + ConditionBuilder.deleteIf(ConditionBuilder.column(ANY_NAME_3).isEqualToInt(999)) + .build()) + .build(); + ObjectStorageMutation mutation = new ObjectStorageMutation(delete, metadata); + setupPartitionWithRecord(mutation.getRecordId()); + + // Act & Assert + assertThatThrownBy(() -> handler.handle(delete)).isInstanceOf(NoMutationException.class); + } + + private void assert_Delete_WhenNewPartitionIsNotEmpty_ShouldCallWrapperUpdate( + String expectedObjectKey, String expectedConcatenatedKey, String expectedExistingRecordKey) + throws ObjectStorageWrapperException { + verify(wrapper) + .update(objectKeyCaptor.capture(), payloadCaptor.capture(), 
versionCaptor.capture()); + assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + + Map<String, ObjectStorageRecord> updatedPartition = + Serializer.deserialize( + payloadCaptor.getValue(), new TypeReference<Map<String, ObjectStorageRecord>>() {}); + assertThat(updatedPartition).doesNotContainKey(expectedConcatenatedKey); + assertThat(updatedPartition).containsKey(expectedExistingRecordKey); + } + + private void assert_Delete_WhenNewPartitionIsEmpty_ShouldCallWrapperDelete( + String expectedObjectKey) throws ObjectStorageWrapperException { + verify(wrapper).delete(objectKeyCaptor.capture(), versionCaptor.capture()); + assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + assertThat(versionCaptor.getValue()).isEqualTo(VERSION); + } + + @Test + public void + handle_MultipleMutationsForSinglePartitionGiven_WhenPartitionDoesNotExist_ShouldCallWrapperInsert() + throws Exception { + // Arrange + Put put1 = preparePut(); + Put put2 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put2")).build(); + Put put3 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put3")).build(); + Put put4 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put4")).build(); + ObjectStorageMutation mutation1 = new ObjectStorageMutation(put1, metadata); + ObjectStorageMutation mutation2 = new ObjectStorageMutation(put2, metadata); + ObjectStorageMutation mutation3 = new ObjectStorageMutation(put3, metadata); + ObjectStorageMutation mutation4 = new ObjectStorageMutation(put4, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation1.getConcatenatedPartitionKey()); + setupNonExistentPartition(); + + // Act + handler.handle(Arrays.asList(put1, put2, put3, put4)); + + // Assert + verify(wrapper).get(objectKeyCaptor.capture()); + assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + + verify(wrapper).insert(objectKeyCaptor.capture(), payloadCaptor.capture()); + assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + + Map<String, ObjectStorageRecord> insertedPartition = + Serializer.deserialize( + payloadCaptor.getValue(), new TypeReference<Map<String, ObjectStorageRecord>>() {}); + assertThat(insertedPartition).containsKey(mutation1.getRecordId()); + assertThat(insertedPartition.get(mutation1.getRecordId()).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + assertThat(insertedPartition).containsKey(mutation2.getRecordId()); + assertThat(insertedPartition.get(mutation2.getRecordId()).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + assertThat(insertedPartition).containsKey(mutation3.getRecordId()); + assertThat(insertedPartition.get(mutation3.getRecordId()).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + assertThat(insertedPartition).containsKey(mutation4.getRecordId()); + assertThat(insertedPartition.get(mutation4.getRecordId()).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + } + + @Test + public void + handle_MultipleMutationsForSinglePartitionGiven_WhenPartitionExists_ShouldCallWrapperUpdate() + throws Exception { + // Arrange + Put put1 = preparePut(); + Put put2 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put2")).build(); + Put put3 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put3")).build(); + Put put4 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put4")).build(); + ObjectStorageMutation mutation1 =
new ObjectStorageMutation(put1, metadata); + ObjectStorageMutation mutation2 = new ObjectStorageMutation(put2, metadata); + ObjectStorageMutation mutation3 = new ObjectStorageMutation(put3, metadata); + ObjectStorageMutation mutation4 = new ObjectStorageMutation(put4, metadata); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation1.getConcatenatedPartitionKey()); + setupPartitionWithRecords( + mutation1.getRecordId(), + mutation2.getRecordId(), + mutation3.getRecordId(), + mutation4.getRecordId()); + + // Act + handler.handle(Arrays.asList(put1, put2, put3, put4)); + + // Assert + verify(wrapper) + .update(objectKeyCaptor.capture(), payloadCaptor.capture(), versionCaptor.capture()); + assertThat(objectKeyCaptor.getValue()).isEqualTo(expectedObjectKey); + Map<String, ObjectStorageRecord> updatedPartition = + Serializer.deserialize( + payloadCaptor.getValue(), new TypeReference<Map<String, ObjectStorageRecord>>() {}); + assertThat(updatedPartition).containsKey(mutation1.getRecordId()); + assertThat(updatedPartition.get(mutation1.getRecordId()).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + assertThat(updatedPartition).containsKey(mutation2.getRecordId()); + assertThat(updatedPartition.get(mutation2.getRecordId()).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + assertThat(updatedPartition).containsKey(mutation3.getRecordId()); + assertThat(updatedPartition.get(mutation3.getRecordId()).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + assertThat(updatedPartition).containsKey(mutation4.getRecordId()); + assertThat(updatedPartition.get(mutation4.getRecordId()).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + assertThat(versionCaptor.getValue()).isEqualTo(VERSION); + } + + @Test + public void + handle_MultipleMutationsForDifferentPartitionGiven_WhenPartitionDoesNotExist_ShouldCallWrapperInsert() + throws Exception { + // Arrange + Put put1 = preparePut(); + Put put2 = Put.newBuilder(preparePut()).clusteringKey(Key.ofText(ANY_NAME_2, "put2")).build(); + Put put3 = + Put.newBuilder(preparePut()) + .partitionKey(Key.ofText(ANY_NAME_1, ANY_TEXT_2)) + .clusteringKey(Key.ofText(ANY_NAME_2, "put3")) + .build(); + Put put4 = + Put.newBuilder(preparePut()) + .partitionKey(Key.ofText(ANY_NAME_1, ANY_TEXT_2)) + .clusteringKey(Key.ofText(ANY_NAME_2, "put4")) + .build(); + ObjectStorageMutation mutation1 = new ObjectStorageMutation(put1, metadata); + ObjectStorageMutation mutation2 = new ObjectStorageMutation(put2, metadata); + ObjectStorageMutation mutation3 = new ObjectStorageMutation(put3, metadata); + ObjectStorageMutation mutation4 = new ObjectStorageMutation(put4, metadata); + String expectedObjectKey1 = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation1.getConcatenatedPartitionKey()); + String expectedObjectKey2 = + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, mutation3.getConcatenatedPartitionKey()); + setupNonExistentPartition(); + + // Act + handler.handle(Arrays.asList(put1, put2, put3, put4)); + + // Assert + verify(wrapper, times(2)).get(objectKeyCaptor.capture()); + List capturedObjectKeys = objectKeyCaptor.getAllValues(); + assertThat(capturedObjectKeys) + .containsExactlyInAnyOrder(expectedObjectKey1, expectedObjectKey2); + verify(wrapper, times(2)).insert(objectKeyCaptor.capture(), payloadCaptor.capture()); + List insertedObjectKeys =
objectKeyCaptor.getAllValues().subList(2, 4); + assertThat(insertedObjectKeys) + .containsExactlyInAnyOrder(expectedObjectKey1, expectedObjectKey2); + + List insertedPayloads = payloadCaptor.getAllValues(); + for (int i = 0; i < insertedPayloads.size(); i++) { + Map<String, ObjectStorageRecord> insertedPartition = + Serializer.deserialize( + insertedPayloads.get(i), new TypeReference<Map<String, ObjectStorageRecord>>() {}); + if (insertedObjectKeys.get(i).equals(expectedObjectKey1)) { + assertThat(insertedPartition).containsKey(mutation1.getRecordId()); + assertThat(insertedPartition.get(mutation1.getRecordId()).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + assertThat(insertedPartition).containsKey(mutation2.getRecordId()); + assertThat(insertedPartition.get(mutation2.getRecordId()).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + } else if (insertedObjectKeys.get(i).equals(expectedObjectKey2)) { + assertThat(insertedPartition).containsKey(mutation3.getRecordId()); + assertThat(insertedPartition.get(mutation3.getRecordId()).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + assertThat(insertedPartition).containsKey(mutation4.getRecordId()); + assertThat(insertedPartition.get(mutation4.getRecordId()).getValues()) + .containsEntry(ANY_NAME_3, ANY_INT_1) + .containsEntry(ANY_NAME_4, ANY_INT_2); + } + } + } +} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationTest.java new file mode 100644 index 0000000000..4f8cd0a2c4 --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationTest.java @@ -0,0 +1,114 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.AssertionsForClassTypes.assertThat; +import static org.mockito.Mockito.when; + +import com.scalar.db.api.Delete; +import com.scalar.db.api.Put; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.Key; +import java.util.Collections; +import java.util.LinkedHashSet; +import org.assertj.core.api.Assertions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +public class ObjectStorageMutationTest { + private static final String ANY_NAMESPACE_NAME = "namespace"; + private static final String ANY_TABLE_NAME = "table"; + private static final String ANY_NAME_1 = "name1"; + private static final String ANY_NAME_2 = "name2"; + private static final String ANY_NAME_3 = "name3"; + private static final String ANY_NAME_4 = "name4"; + private static final String ANY_TEXT_1 = "text1"; + private static final String ANY_TEXT_2 = "text2"; + private static final int ANY_INT_1 = 1; + private static final int ANY_INT_2 = 2; + + @Mock private TableMetadata metadata; + + @BeforeEach + public void setUp() throws Exception { + MockitoAnnotations.openMocks(this).close(); + + when(metadata.getPartitionKeyNames()) + .thenReturn(new LinkedHashSet<>(Collections.singletonList(ANY_NAME_1))); + } + + private Put preparePut() { + Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); + Key clusteringKey = Key.ofText(ANY_NAME_2, ANY_TEXT_2); + return Put.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + .intValue(ANY_NAME_3, ANY_INT_1) + .intValue(ANY_NAME_4, ANY_INT_2) + .build(); + } + + private Delete
prepareDelete() { + Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); + Key clusteringKey = Key.ofText(ANY_NAME_2, ANY_TEXT_2); + return Delete.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + .build(); + } + + @Test + public void makeRecord_PutGiven_ShouldReturnWithValues() { + // Arrange + Put put = preparePut(); + ObjectStorageMutation objectStorageMutation = new ObjectStorageMutation(put, metadata); + String concatenatedKey = objectStorageMutation.getRecordId(); + + // Act + ObjectStorageRecord actual = objectStorageMutation.makeRecord(); + + // Assert + assertThat(actual.getId()).isEqualTo(concatenatedKey); + Assertions.assertThat(actual.getPartitionKey().get(ANY_NAME_1)).isEqualTo(ANY_TEXT_1); + Assertions.assertThat(actual.getClusteringKey().get(ANY_NAME_2)).isEqualTo(ANY_TEXT_2); + Assertions.assertThat(actual.getValues().get(ANY_NAME_3)).isEqualTo(ANY_INT_1); + Assertions.assertThat(actual.getValues().get(ANY_NAME_4)).isEqualTo(ANY_INT_2); + } + + @Test + public void makeRecord_PutWithNullValueGiven_ShouldReturnWithValues() { + // Arrange + Put put = preparePut(); + put = Put.newBuilder(put).intValue(ANY_NAME_3, null).build(); + ObjectStorageMutation objectStorageMutation = new ObjectStorageMutation(put, metadata); + String concatenatedKey = objectStorageMutation.getRecordId(); + + // Act + ObjectStorageRecord actual = objectStorageMutation.makeRecord(); + + // Assert + assertThat(actual.getId()).isEqualTo(concatenatedKey); + Assertions.assertThat(actual.getPartitionKey().get(ANY_NAME_1)).isEqualTo(ANY_TEXT_1); + Assertions.assertThat(actual.getClusteringKey().get(ANY_NAME_2)).isEqualTo(ANY_TEXT_2); + Assertions.assertThat(actual.getValues().containsKey(ANY_NAME_3)).isTrue(); + Assertions.assertThat(actual.getValues().get(ANY_NAME_3)).isNull(); + Assertions.assertThat(actual.getValues().get(ANY_NAME_4)).isEqualTo(ANY_INT_2); + } + + @Test + public void makeRecord_DeleteGiven_ShouldReturnEmpty() { + // Arrange + Delete delete = prepareDelete(); + ObjectStorageMutation objectStorageMutation = new ObjectStorageMutation(delete, metadata); + + // Act + ObjectStorageRecord actual = objectStorageMutation.makeRecord(); + + // Assert + assertThat(actual.getId()).isEqualTo(""); + } +} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationCheckerTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationCheckerTest.java new file mode 100644 index 0000000000..bf80c632f6 --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationCheckerTest.java @@ -0,0 +1,830 @@ +package com.scalar.db.storage.objectstorage; + +import static com.scalar.db.api.ConditionBuilder.column; +import static com.scalar.db.api.ConditionBuilder.deleteIf; +import static com.scalar.db.api.ConditionBuilder.deleteIfExists; +import static com.scalar.db.api.ConditionBuilder.putIf; +import static com.scalar.db.api.ConditionBuilder.putIfExists; +import static com.scalar.db.api.ConditionBuilder.putIfNotExists; +import static org.assertj.core.api.Assertions.assertThatCode; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; +import static org.mockito.MockitoAnnotations.openMocks; + +import com.scalar.db.api.Delete; +import com.scalar.db.api.Get; +import com.scalar.db.api.MutationCondition; +import com.scalar.db.api.Put; +import 
com.scalar.db.api.Scan; +import com.scalar.db.api.StorageInfo; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.common.StorageInfoImpl; +import com.scalar.db.common.StorageInfoProvider; +import com.scalar.db.common.TableMetadataManager; +import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.exception.storage.ExecutionException; +import com.scalar.db.io.DataType; +import com.scalar.db.io.Key; +import java.util.Arrays; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; + +public class ObjectStorageOperationCheckerTest { + private static final String NAMESPACE_NAME = "n1"; + private static final String TABLE_NAME = "t1"; + private static final String PKEY1 = "p1"; + private static final String CKEY1 = "c1"; + private static final String COL1 = "v1"; + private static final String COL2 = "v2"; + private static final String COL3 = "v3"; + private static final String COL4 = "v4"; + private static final StorageInfo STORAGE_INFO = + new StorageInfoImpl("ObjectStorage", StorageInfo.MutationAtomicityUnit.STORAGE, 100); + + private static final TableMetadata TABLE_METADATA1 = + TableMetadata.newBuilder() + .addColumn(PKEY1, DataType.INT) + .addColumn(CKEY1, DataType.INT) + .addColumn(COL1, DataType.INT) + .addColumn(COL2, DataType.BOOLEAN) + .addColumn(COL3, DataType.TEXT) + .addColumn(COL4, DataType.BLOB) + .addPartitionKey(PKEY1) + .addClusteringKey(CKEY1) + .build(); + + private static final TableMetadata TABLE_METADATA2 = + TableMetadata.newBuilder() + .addColumn(PKEY1, DataType.TEXT) + .addColumn(CKEY1, DataType.TEXT) + .addPartitionKey(PKEY1) + .addClusteringKey(CKEY1) + .build(); + + @Mock private DatabaseConfig databaseConfig; + @Mock private TableMetadataManager metadataManager; + @Mock private StorageInfoProvider storageInfoProvider; + private ObjectStorageOperationChecker operationChecker; + + @BeforeEach + public void setUp() throws Exception { + openMocks(this).close(); + when(storageInfoProvider.getStorageInfo(any())).thenReturn(STORAGE_INFO); + operationChecker = + new ObjectStorageOperationChecker(databaseConfig, metadataManager, storageInfoProvider); + } + + @Test + public void check_ForMutationsWithPut_ShouldDoNothing() throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA1); + Put putWithoutSettingIndex = + Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofInt(PKEY1, 0)) + .clusteringKey(Key.ofInt(CKEY1, 0)) + .build(); + Put put = + Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofInt(PKEY1, 0)) + .clusteringKey(Key.ofInt(CKEY1, 1)) + .intValue(COL1, 1) + .build(); + + // Act Assert + assertThatCode(() -> operationChecker.check(Arrays.asList(putWithoutSettingIndex, put))) + .doesNotThrowAnyException(); + } + + @Test + public void check_ForMutationsWithDelete_ShouldDoNothing() throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA1); + Delete deleteWithoutSettingIndex = + Delete.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofInt(PKEY1, 0)) + .clusteringKey(Key.ofInt(CKEY1, 0)) + .build(); + Delete delete = + Delete.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofInt(PKEY1, 0)) + .clusteringKey(Key.ofInt(CKEY1, 1)) + .build(); + + // Act Assert + assertThatCode(() -> operationChecker.check(Arrays.asList(deleteWithoutSettingIndex, 
delete))) + .doesNotThrowAnyException(); + } + + @Test + public void + check_GetGiven_WhenIllegalCharacterInPrimaryKeyColumn_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA2); + + Get get1 = + Get.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Get get2 = + Get.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Get get3 = + Get.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) + .build(); + Get get4 = + Get.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER)) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Get get5 = + Get.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER)) + .build(); + + // Act Assert + assertThatCode(() -> operationChecker.check(get1)).doesNotThrowAnyException(); + assertThatThrownBy(() -> operationChecker.check(get2)) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> operationChecker.check(get3)) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> operationChecker.check(get4)) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> operationChecker.check(get5)) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void + check_ScanGiven_WhenIllegalCharacterInPrimaryKeyColumn_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA2); + + Scan scan1 = + Scan.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .start(Key.ofText(CKEY1, "ab")) + .end(Key.ofText(CKEY1, "ab")) + .build(); + Scan scan2 = + Scan.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) + .start(Key.ofText(CKEY1, "ab")) + .end(Key.ofText(CKEY1, "ab")) + .build(); + Scan scan3 = + Scan.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .start(Key.ofText(CKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) + .end(Key.ofText(CKEY1, "ab")) + .build(); + Scan scan4 = + Scan.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .start(Key.ofText(CKEY1, "ab")) + .end(Key.ofText(CKEY1, "ab" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER)) + .build(); + + // Act Assert + assertThatCode(() -> operationChecker.check(scan1)).doesNotThrowAnyException(); + assertThatThrownBy(() -> operationChecker.check(scan2)) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> operationChecker.check(scan3)) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> operationChecker.check(scan4)) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void + 
check_PutGiven_WhenIllegalCharacterInPrimaryKeyColumn_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA2); + + Put put1 = + Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Put put2 = + Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Put put3 = + Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER)) + .build(); + + // Act Assert + assertThatCode(() -> operationChecker.check(put1)).doesNotThrowAnyException(); + assertThatThrownBy(() -> operationChecker.check(put2)) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> operationChecker.check(put3)) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void + check_DeleteGiven_WhenIllegalCharacterInPrimaryKeyColumn_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA2); + + Delete delete1 = + Delete.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Delete delete2 = + Delete.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Delete delete3 = + Delete.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER)) + .build(); + + // Act Assert + assertThatCode(() -> operationChecker.check(delete1)).doesNotThrowAnyException(); + assertThatThrownBy(() -> operationChecker.check(delete2)) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> operationChecker.check(delete3)) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void + check_MutationsGiven_WhenIllegalCharacterInPrimaryKeyColumn_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA2); + when(storageInfoProvider.getStorageInfo(any())).thenReturn(STORAGE_INFO); + + Put put1 = + Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Put put2 = + Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.OBJECT_KEY_DELIMITER)) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Delete delete1 = + Delete.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab")) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + Delete delete2 = + Delete.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofText(PKEY1, "ab" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER)) + .clusteringKey(Key.ofText(CKEY1, "ab")) + .build(); + + // Act Assert + assertThatCode(() -> 
operationChecker.check(Arrays.asList(put1, delete1))) + .doesNotThrowAnyException(); + assertThatThrownBy(() -> operationChecker.check(Arrays.asList(put2, delete1))) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy(() -> operationChecker.check(Arrays.asList(put1, delete2))) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void check_ForPutWithCondition_ShouldBehaveProperly() throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA1); + + // Act Assert + assertThatCode(() -> operationChecker.check(buildPutWithCondition(putIfExists()))) + .doesNotThrowAnyException(); + assertThatCode(() -> operationChecker.check(buildPutWithCondition(putIfNotExists()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition(putIf(column(COL1).isEqualToInt(1)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition( + putIf(column(COL1).isGreaterThanOrEqualToInt(1)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition(putIf(column(COL1).isNullInt()).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition(putIf(column(COL2).isEqualToBoolean(true)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition(putIf(column(COL2).isNotEqualToBoolean(true)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition(putIf(column(COL2).isNullBoolean()).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition(putIf(column(COL2).isNotNullBoolean()).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition(putIf(column(COL2).isGreaterThanBoolean(false)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition( + putIf(column(COL2).isLessThanOrEqualToBoolean(true)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition( + putIf(column(COL4).isEqualToBlob(new byte[] {1, 2, 3})).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition( + putIf(column(COL4).isNotEqualToBlob(new byte[] {1, 2, 3})).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition(putIf(column(COL4).isNullBlob()).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildPutWithCondition(putIf(column(COL4).isNotNullBlob()).build()))) + .doesNotThrowAnyException(); + assertThatThrownBy( + () -> + operationChecker.check( + buildPutWithCondition( + putIf(column(COL4).isGreaterThanBlob(new byte[] {1, 2, 3})).build()))) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy( + () -> + operationChecker.check( + buildPutWithCondition( + putIf(column(COL4).isLessThanOrEqualToBlob(new byte[] {1, 2, 3})).build()))) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void check_ForDeleteWithCondition_ShouldBehaveProperly() throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA1); + + // Act Assert + assertThatCode(() 
-> operationChecker.check(buildDeleteWithCondition(deleteIfExists()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition(deleteIf(column(COL1).isEqualToInt(1)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition( + deleteIf(column(COL1).isGreaterThanOrEqualToInt(1)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition(deleteIf(column(COL1).isNullInt()).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition( + deleteIf(column(COL2).isEqualToBoolean(true)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition( + deleteIf(column(COL2).isNotEqualToBoolean(true)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition(deleteIf(column(COL2).isNullBoolean()).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition(deleteIf(column(COL2).isNotNullBoolean()).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition( + deleteIf(column(COL2).isGreaterThanBoolean(false)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition( + deleteIf(column(COL2).isLessThanOrEqualToBoolean(true)).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition( + deleteIf(column(COL4).isEqualToBlob(new byte[] {1, 2, 3})).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition( + deleteIf(column(COL4).isNotEqualToBlob(new byte[] {1, 2, 3})).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition(deleteIf(column(COL4).isNullBlob()).build()))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + buildDeleteWithCondition(deleteIf(column(COL4).isNotNullBlob()).build()))) + .doesNotThrowAnyException(); + assertThatThrownBy( + () -> + operationChecker.check( + buildDeleteWithCondition( + deleteIf(column(COL4).isGreaterThanBlob(new byte[] {1, 2, 3})).build()))) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy( + () -> + operationChecker.check( + buildDeleteWithCondition( + deleteIf(column(COL4).isLessThanOrEqualToBlob(new byte[] {1, 2, 3})) + .build()))) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void check_ForMutationsWithPutWithCondition_ShouldBehaveProperly() + throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA1); + Put put = + Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofInt(PKEY1, 0)) + .clusteringKey(Key.ofInt(CKEY1, 1)) + .intValue(COL1, 1) + .build(); + + // Act Assert + assertThatCode( + () -> operationChecker.check(Arrays.asList(buildPutWithCondition(putIfExists()), put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check(Arrays.asList(buildPutWithCondition(putIfNotExists()), put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + 
buildPutWithCondition(putIf(column(COL1).isEqualToInt(1)).build()), put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition( + putIf(column(COL1).isGreaterThanOrEqualToInt(1)).build()), + put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition(putIf(column(COL1).isNullInt()).build()), put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition(putIf(column(COL2).isEqualToBoolean(true)).build()), + put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition( + putIf(column(COL2).isNotEqualToBoolean(true)).build()), + put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition(putIf(column(COL2).isNullBoolean()).build()), put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition(putIf(column(COL2).isNotNullBoolean()).build()), + put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition( + putIf(column(COL2).isGreaterThanBoolean(false)).build()), + put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition( + putIf(column(COL2).isLessThanOrEqualToBoolean(true)).build()), + put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition( + putIf(column(COL4).isEqualToBlob(new byte[] {1, 2, 3})).build()), + put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition( + putIf(column(COL4).isNotEqualToBlob(new byte[] {1, 2, 3})).build()), + put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition(putIf(column(COL4).isNullBlob()).build()), put))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition(putIf(column(COL4).isNotNullBlob()).build()), put))) + .doesNotThrowAnyException(); + assertThatThrownBy( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition( + putIf(column(COL4).isGreaterThanBlob(new byte[] {1, 2, 3})).build()), + put))) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy( + () -> + operationChecker.check( + Arrays.asList( + buildPutWithCondition( + putIf(column(COL4).isLessThanOrEqualToBlob(new byte[] {1, 2, 3})) + .build()), + put))) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void check_ForMutationsWithDeleteWithCondition_ShouldBehaveProperly() + throws ExecutionException { + // Arrange + when(metadataManager.getTableMetadata(any())).thenReturn(TABLE_METADATA1); + Delete delete = + Delete.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofInt(PKEY1, 0)) + .clusteringKey(Key.ofInt(CKEY1, 1)) + .build(); + + // Act Assert + assertThatCode( + () -> + operationChecker.check( + Arrays.asList(buildDeleteWithCondition(deleteIfExists()), delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition(deleteIf(column(COL1).isEqualToInt(1)).build()), + delete))) + 
.doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition( + deleteIf(column(COL1).isGreaterThanOrEqualToInt(1)).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition(deleteIf(column(COL1).isNullInt()).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition( + deleteIf(column(COL2).isEqualToBoolean(true)).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition( + deleteIf(column(COL2).isNotEqualToBoolean(true)).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition(deleteIf(column(COL2).isNullBoolean()).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition(deleteIf(column(COL2).isNotNullBoolean()).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition( + deleteIf(column(COL2).isGreaterThanBoolean(false)).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition( + deleteIf(column(COL2).isLessThanOrEqualToBoolean(true)).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition( + deleteIf(column(COL4).isEqualToBlob(new byte[] {1, 2, 3})).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition( + deleteIf(column(COL4).isNotEqualToBlob(new byte[] {1, 2, 3})).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition(deleteIf(column(COL4).isNullBlob()).build()), + delete))) + .doesNotThrowAnyException(); + assertThatCode( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition(deleteIf(column(COL4).isNotNullBlob()).build()), + delete))) + .doesNotThrowAnyException(); + assertThatThrownBy( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition( + deleteIf(column(COL4).isGreaterThanBlob(new byte[] {1, 2, 3})).build()), + delete))) + .isInstanceOf(IllegalArgumentException.class); + assertThatThrownBy( + () -> + operationChecker.check( + Arrays.asList( + buildDeleteWithCondition( + deleteIf(column(COL4).isLessThanOrEqualToBlob(new byte[] {1, 2, 3})) + .build()), + delete))) + .isInstanceOf(IllegalArgumentException.class); + } + + private Put buildPutWithCondition(MutationCondition condition) { + return Put.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofInt(PKEY1, 0)) + .clusteringKey(Key.ofInt(CKEY1, 1)) + .intValue(COL1, 1) + .condition(condition) + .build(); + } + + private Delete buildDeleteWithCondition(MutationCondition condition) { + return Delete.newBuilder() + .namespace(NAMESPACE_NAME) + .table(TABLE_NAME) + .partitionKey(Key.ofInt(PKEY1, 0)) + .clusteringKey(Key.ofInt(CKEY1, 1)) + .condition(condition) + .build(); + } +} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationTest.java 
b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationTest.java new file mode 100644 index 0000000000..6bdb1347d4 --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationTest.java @@ -0,0 +1,109 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import com.scalar.db.api.Get; +import com.scalar.db.api.Operation; +import com.scalar.db.api.Put; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.Key; +import java.util.Arrays; +import java.util.Collections; +import java.util.LinkedHashSet; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +public class ObjectStorageOperationTest { + private static final String ANY_NAMESPACE_NAME = "namespace"; + private static final String ANY_TABLE_NAME = "table"; + private static final String ANY_NAME_1 = "name1"; + private static final String ANY_NAME_2 = "name2"; + private static final String ANY_NAME_3 = "name3"; + private static final String ANY_TEXT_1 = "text1"; + private static final String ANY_TEXT_2 = "text2"; + private static final int ANY_INT_1 = 1; + + @Mock private TableMetadata metadata; + + @BeforeEach + public void setUp() throws Exception { + MockitoAnnotations.openMocks(this).close(); + } + + @Test + public void checkArgument_WrongOperationGiven_ShouldThrowIllegalArgumentException() { + // Arrange + Operation operation = mock(Put.class); + ObjectStorageOperation objectStorageOperation = new ObjectStorageOperation(operation, metadata); + + // Act Assert + assertThatThrownBy(() -> objectStorageOperation.checkArgument(Get.class)) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void getConcatenatedPartitionKey_MultipleKeysGiven_ShouldReturnConcatenatedPartitionKey() { + // Arrange + when(metadata.getPartitionKeyNames()) + .thenReturn(new LinkedHashSet<>(Arrays.asList(ANY_NAME_1, ANY_NAME_2, ANY_NAME_3))); + + Key partitionKey = + Key.of(ANY_NAME_1, ANY_TEXT_1, ANY_NAME_2, ANY_TEXT_2, ANY_NAME_3, ANY_INT_1); + Get get = + Get.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .build(); + ObjectStorageOperation objectStorageOperation = new ObjectStorageOperation(get, metadata); + + // Act + String actual = objectStorageOperation.getConcatenatedPartitionKey(); + + // Assert + assertThat(actual) + .isEqualTo( + String.join( + String.valueOf(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER), + ANY_TEXT_1, + ANY_TEXT_2, + String.valueOf(ANY_INT_1))); + } + + @Test + public void getId_MultipleKeysGiven_ShouldReturnConcatenatedPartitionKey() { + // Arrange + when(metadata.getPartitionKeyNames()) + .thenReturn(new LinkedHashSet<>(Arrays.asList(ANY_NAME_1, ANY_NAME_3))); + when(metadata.getClusteringKeyNames()) + .thenReturn(new LinkedHashSet<>(Collections.singletonList(ANY_NAME_2))); + + Key partitionKey = Key.of(ANY_NAME_1, ANY_TEXT_1, ANY_NAME_3, ANY_INT_1); + Key clusteringKey = Key.ofText(ANY_NAME_2, ANY_TEXT_2); + Get get = + Get.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + .build(); + ObjectStorageOperation objectStorageOperation = new ObjectStorageOperation(get, metadata); + + // Act + String actual = 
objectStorageOperation.getRecordId(); + + // Assert + assertThat(actual) + .isEqualTo( + String.join( + String.valueOf(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER), + ANY_TEXT_1, + String.valueOf(ANY_INT_1), + ANY_TEXT_2)); + } +} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageTest.java new file mode 100644 index 0000000000..7d0ed486fb --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageTest.java @@ -0,0 +1,319 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.scalar.db.api.ConditionBuilder; +import com.scalar.db.api.ConditionalExpression; +import com.scalar.db.api.Delete; +import com.scalar.db.api.Get; +import com.scalar.db.api.Put; +import com.scalar.db.api.Result; +import com.scalar.db.api.Scan; +import com.scalar.db.api.Scanner; +import com.scalar.db.common.FilterableScanner; +import com.scalar.db.common.checker.OperationChecker; +import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.exception.storage.ExecutionException; +import com.scalar.db.io.Key; +import java.util.Arrays; +import java.util.Optional; +import java.util.Properties; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +public class ObjectStorageTest { + private static final int ANY_LIMIT = 100; + private ObjectStorage objectStorage; + @Mock private ObjectStorageWrapper wrapper; + @Mock private SelectStatementHandler selectStatementHandler; + @Mock private MutateStatementHandler mutateStatementHandler; + @Mock private OperationChecker operationChecker; + @Mock private ScannerImpl scanner; + @Mock private Key partitionKey; + + @BeforeEach + public void setUp() throws Exception { + MockitoAnnotations.openMocks(this).close(); + Properties objectStorageConfigProperties = new Properties(); + objectStorage = + new ObjectStorage( + new DatabaseConfig(objectStorageConfigProperties), + wrapper, + selectStatementHandler, + mutateStatementHandler, + operationChecker); + } + + @Test + public void get_WithoutConjunction_ShouldHandledWithOriginalGet() throws ExecutionException { + // Arrange + Get get = + Get.newBuilder() + .namespace("ns") + .table("tbl") + .partitionKey(partitionKey) + .projection("col1") + .build(); + when(selectStatementHandler.handle(any(Get.class))).thenReturn(scanner); + + // Act + Optional actual = objectStorage.get(get); + + // Assert + assertThat(actual.isPresent()).isFalse(); + ArgumentCaptor captor = ArgumentCaptor.forClass(Get.class); + verify(selectStatementHandler).handle(captor.capture()); + Get actualGet = captor.getValue(); + assertThat(actualGet).isEqualTo(get); + } + + @Test + public void get_WithConjunctionWithoutProjections_ShouldHandledWithoutProjections() + throws ExecutionException { + // Arrange + Get get = + Get.newBuilder() + .namespace("ns") + .table("tbl") + .partitionKey(partitionKey) + .where(ConditionBuilder.column("col2").isLessThanInt(0)) + .build(); + when(selectStatementHandler.handle(any(Get.class))).thenReturn(scanner); + + // Act + 
Optional actual = objectStorage.get(get); + + // Assert + assertThat(actual.isPresent()).isFalse(); + ArgumentCaptor captor = ArgumentCaptor.forClass(Get.class); + verify(selectStatementHandler).handle(captor.capture()); + Get actualGet = captor.getValue(); + assertThat(actualGet.getProjections()).isEmpty(); + } + + @Test + public void get_WithConjunctionAndProjections_ShouldHandledWithExtendedProjections() + throws ExecutionException { + // Arrange + Get get = + Get.newBuilder() + .namespace("ns") + .table("tbl") + .partitionKey(partitionKey) + .projections("col1") + .where(ConditionBuilder.column("col2").isLessThanInt(0)) + .build(); + when(selectStatementHandler.handle(any(Get.class))).thenReturn(scanner); + + // Act + Optional actual = objectStorage.get(get); + + // Assert + assertThat(actual.isPresent()).isFalse(); + ArgumentCaptor captor = ArgumentCaptor.forClass(Get.class); + verify(selectStatementHandler).handle(captor.capture()); + Get actualGet = captor.getValue(); + assertThat(actualGet.getProjections()).containsExactlyInAnyOrder("col1", "col2"); + } + + @Test + public void scan_WithLimitWithoutConjunction_ShouldHandledWithLimit() throws ExecutionException { + // Arrange + Scan scan = Scan.newBuilder().namespace("ns").table("tbl").all().limit(ANY_LIMIT).build(); + when(selectStatementHandler.handle(scan)).thenReturn(scanner); + + // Act + Scanner actual = objectStorage.scan(scan); + + // Assert + assertThat(actual).isInstanceOf(ScannerImpl.class); + ArgumentCaptor captor = ArgumentCaptor.forClass(Scan.class); + verify(selectStatementHandler).handle(captor.capture()); + Scan actualScan = captor.getValue(); + assertThat(actualScan.getLimit()).isEqualTo(ANY_LIMIT); + } + + @Test + public void scan_WithLimitAndConjunction_ShouldHandledWithoutLimit() throws ExecutionException { + // Arrange + Scan scan = + Scan.newBuilder() + .namespace("ns") + .table("tbl") + .all() + .where(mock(ConditionalExpression.class)) + .limit(ANY_LIMIT) + .build(); + when(selectStatementHandler.handle(scan)).thenReturn(scanner); + + // Act + Scanner actual = objectStorage.scan(scan); + + // Assert + assertThat(actual).isInstanceOf(FilterableScanner.class); + ArgumentCaptor captor = ArgumentCaptor.forClass(Scan.class); + verify(selectStatementHandler).handle(captor.capture()); + Scan actualScan = captor.getValue(); + assertThat(actualScan.getLimit()).isEqualTo(0); + } + + @Test + public void scan_WithConjunctionWithoutProjections_ShouldHandledWithoutProjections() + throws ExecutionException { + // Arrange + Scan scan = + Scan.newBuilder() + .namespace("ns") + .table("tbl") + .all() + .where(ConditionBuilder.column("col2").isLessThanInt(0)) + .build(); + when(selectStatementHandler.handle(scan)).thenReturn(scanner); + + // Act + Scanner actual = objectStorage.scan(scan); + + // Assert + assertThat(actual).isInstanceOf(FilterableScanner.class); + ArgumentCaptor captor = ArgumentCaptor.forClass(Scan.class); + verify(selectStatementHandler).handle(captor.capture()); + Scan actualScan = captor.getValue(); + assertThat(actualScan.getProjections()).isEmpty(); + } + + @Test + public void scan_WithConjunctionAndProjections_ShouldHandledWithExtendedProjections() + throws ExecutionException { + // Arrange + Scan scan = + Scan.newBuilder() + .namespace("ns") + .table("tbl") + .all() + .projections("col1") + .where(ConditionBuilder.column("col2").isLessThanInt(0)) + .build(); + when(selectStatementHandler.handle(scan)).thenReturn(scanner); + + // Act + Scanner actual = objectStorage.scan(scan); + + // Assert + 
assertThat(actual).isInstanceOf(FilterableScanner.class); + ArgumentCaptor captor = ArgumentCaptor.forClass(Scan.class); + verify(selectStatementHandler).handle(captor.capture()); + Scan actualScan = captor.getValue(); + assertThat(actualScan.getProjections()).containsExactlyInAnyOrder("col1", "col2"); + } + + @Test + public void + get_IllegalArgumentExceptionThrownByOperationChecker_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + Get get = Get.newBuilder().namespace("ns").table("tbl").partitionKey(partitionKey).build(); + doThrow(IllegalArgumentException.class).when(operationChecker).check(get); + + // Act Assert + assertThatThrownBy(() -> objectStorage.get(get)).isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void + scan_IllegalArgumentExceptionThrownByOperationChecker_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + Scan scan = Scan.newBuilder().namespace("ns").table("tbl").partitionKey(partitionKey).build(); + doThrow(IllegalArgumentException.class).when(operationChecker).check(scan); + + // Act Assert + assertThatThrownBy(() -> objectStorage.scan(scan)).isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void + put_IllegalArgumentExceptionThrownByOperationChecker_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + Put put = Put.newBuilder().namespace("ns").table("tbl").partitionKey(partitionKey).build(); + doThrow(IllegalArgumentException.class).when(operationChecker).check(put); + + // Act Assert + assertThatThrownBy(() -> objectStorage.put(put)).isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void + put_MultiplePutsGiven_IllegalArgumentExceptionThrownByOperationChecker_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + Put put1 = Put.newBuilder().namespace("ns").table("tbl").partitionKey(partitionKey).build(); + Put put2 = Put.newBuilder().namespace("ns").table("tbl").partitionKey(partitionKey).build(); + + doThrow(IllegalArgumentException.class).when(operationChecker).check(Arrays.asList(put1, put2)); + + // Act Assert + assertThatThrownBy(() -> objectStorage.put(Arrays.asList(put1, put2))) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void + delete_IllegalArgumentExceptionThrownByOperationChecker_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + Delete delete = + Delete.newBuilder().namespace("ns").table("tbl").partitionKey(partitionKey).build(); + doThrow(IllegalArgumentException.class).when(operationChecker).check(delete); + + // Act Assert + assertThatThrownBy(() -> objectStorage.delete(delete)) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void + delete_MultipleDeletesGiven_IllegalArgumentExceptionThrownByOperationChecker_ShouldThrowIllegalArgumentException() + throws ExecutionException { + // Arrange + Delete delete1 = + Delete.newBuilder().namespace("ns").table("tbl").partitionKey(partitionKey).build(); + Delete delete2 = + Delete.newBuilder().namespace("ns").table("tbl").partitionKey(partitionKey).build(); + + doThrow(IllegalArgumentException.class) + .when(operationChecker) + .check(Arrays.asList(delete1, delete2)); + + // Act Assert + assertThatThrownBy(() -> objectStorage.delete(Arrays.asList(delete1, delete2))) + .isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void + mutate_IllegalArgumentExceptionThrownByOperationChecker_ShouldThrowIllegalArgumentException() + throws 
ExecutionException { + // Arrange + Put put = Put.newBuilder().namespace("ns").table("tbl").partitionKey(partitionKey).build(); + Delete delete = + Delete.newBuilder().namespace("ns").table("tbl").partitionKey(partitionKey).build(); + + doThrow(IllegalArgumentException.class) + .when(operationChecker) + .check(Arrays.asList(put, delete)); + + // Act Assert + assertThatThrownBy(() -> objectStorage.mutate(Arrays.asList(put, delete))) + .isInstanceOf(IllegalArgumentException.class); + } +} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ResultInterpreterTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ResultInterpreterTest.java new file mode 100644 index 0000000000..ce72eb9b4c --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ResultInterpreterTest.java @@ -0,0 +1,312 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.google.common.collect.ImmutableMap; +import com.scalar.db.api.Result; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.BigIntColumn; +import com.scalar.db.io.Column; +import com.scalar.db.io.DataType; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.time.Instant; +import java.time.LocalDate; +import java.time.LocalDateTime; +import java.time.LocalTime; +import java.util.Base64; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import org.junit.jupiter.api.Test; + +public class ResultInterpreterTest { + private static final String ANY_NAME_1 = "name1"; + private static final String ANY_NAME_2 = "name2"; + private static final String ANY_TEXT_1 = "text1"; + private static final String ANY_TEXT_2 = "text2"; + private static final String ANY_COLUMN_NAME_1 = "col1"; + private static final String ANY_COLUMN_NAME_2 = "col2"; + private static final String ANY_COLUMN_NAME_3 = "col3"; + private static final String ANY_COLUMN_NAME_4 = "col4"; + private static final String ANY_COLUMN_NAME_5 = "col5"; + private static final String ANY_COLUMN_NAME_6 = "col6"; + private static final String ANY_COLUMN_NAME_7 = "col7"; + private static final String ANY_COLUMN_NAME_8 = "col8"; + private static final String ANY_COLUMN_NAME_9 = "col9"; + private static final String ANY_COLUMN_NAME_10 = "col10"; + private static final String ANY_COLUMN_NAME_11 = "col11"; + private static final String ANY_ID_1 = "id"; + + private static final TableMetadata TABLE_METADATA = + TableMetadata.newBuilder() + .addColumn(ANY_NAME_1, DataType.TEXT) + .addColumn(ANY_NAME_2, DataType.TEXT) + .addColumn(ANY_COLUMN_NAME_1, DataType.BOOLEAN) + .addColumn(ANY_COLUMN_NAME_2, DataType.INT) + .addColumn(ANY_COLUMN_NAME_3, DataType.BIGINT) + .addColumn(ANY_COLUMN_NAME_4, DataType.FLOAT) + .addColumn(ANY_COLUMN_NAME_5, DataType.DOUBLE) + .addColumn(ANY_COLUMN_NAME_6, DataType.TEXT) + .addColumn(ANY_COLUMN_NAME_7, DataType.BLOB) + .addColumn(ANY_COLUMN_NAME_8, DataType.DATE) + .addColumn(ANY_COLUMN_NAME_9, DataType.TIME) + .addColumn(ANY_COLUMN_NAME_10, DataType.TIMESTAMP) + .addColumn(ANY_COLUMN_NAME_11, DataType.TIMESTAMPTZ) + .addPartitionKey(ANY_NAME_1) + .addClusteringKey(ANY_NAME_2) + .build(); + + private static final LocalDate ANY_DATE = DateColumn.MAX_VALUE; + private 
static final LocalTime ANY_TIME = TimeColumn.MAX_VALUE; + private static final LocalDateTime ANY_TIMESTAMP = TimestampColumn.MAX_VALUE; + private static final Instant ANY_TIMESTAMPTZ = TimestampTZColumn.MAX_VALUE; + + @Test + public void interpret_ShouldReturnWhatsSet() { + // Arrange + Map<String, Object> values = + ImmutableMap.<String, Object>builder() + .put(ANY_COLUMN_NAME_1, true) + .put(ANY_COLUMN_NAME_2, Integer.MAX_VALUE) + .put(ANY_COLUMN_NAME_3, BigIntColumn.MAX_VALUE) + .put(ANY_COLUMN_NAME_4, Float.MAX_VALUE) + .put(ANY_COLUMN_NAME_5, Double.MAX_VALUE) + .put(ANY_COLUMN_NAME_6, "string") + .put( + ANY_COLUMN_NAME_7, + Base64.getEncoder().encodeToString("bytes".getBytes(StandardCharsets.UTF_8))) + .put( + ANY_COLUMN_NAME_8, + TimeRelatedColumnEncodingUtils.encode(DateColumn.of(ANY_COLUMN_NAME_8, ANY_DATE))) + .put( + ANY_COLUMN_NAME_9, + TimeRelatedColumnEncodingUtils.encode(TimeColumn.of(ANY_COLUMN_NAME_9, ANY_TIME))) + .put( + ANY_COLUMN_NAME_10, + TimeRelatedColumnEncodingUtils.encode( + TimestampColumn.of(ANY_COLUMN_NAME_10, ANY_TIMESTAMP))) + .put( + ANY_COLUMN_NAME_11, + TimeRelatedColumnEncodingUtils.encode( + TimestampTZColumn.of(ANY_COLUMN_NAME_11, ANY_TIMESTAMPTZ))) + .build(); + ObjectStorageRecord record = + new ObjectStorageRecord( + ANY_ID_1, + ImmutableMap.of(ANY_NAME_1, ANY_TEXT_1), + ImmutableMap.of(ANY_NAME_2, ANY_TEXT_2), + values); + List<String> projections = Collections.emptyList(); + ResultInterpreter interpreter = new ResultInterpreter(projections, TABLE_METADATA); + + // Act + Result result = interpreter.interpret(record); + + // Assert + assertThat(result.contains(ANY_NAME_1)).isTrue(); + assertThat(result.isNull(ANY_NAME_1)).isFalse(); + assertThat(result.getText(ANY_NAME_1)).isEqualTo(ANY_TEXT_1); + assertThat(result.contains(ANY_NAME_2)).isTrue(); + assertThat(result.isNull(ANY_NAME_2)).isFalse(); + assertThat(result.getText(ANY_NAME_2)).isEqualTo(ANY_TEXT_2); + assertThat(result.contains(ANY_COLUMN_NAME_1)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_1)).isFalse(); + assertThat(result.getBoolean(ANY_COLUMN_NAME_1)).isTrue(); + assertThat(result.contains(ANY_COLUMN_NAME_2)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_2)).isFalse(); + assertThat(result.getInt(ANY_COLUMN_NAME_2)).isEqualTo(Integer.MAX_VALUE); + assertThat(result.contains(ANY_COLUMN_NAME_3)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_3)).isFalse(); + assertThat(result.getBigInt(ANY_COLUMN_NAME_3)).isEqualTo(BigIntColumn.MAX_VALUE); + assertThat(result.contains(ANY_COLUMN_NAME_4)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_4)).isFalse(); + assertThat(result.getFloat(ANY_COLUMN_NAME_4)).isEqualTo(Float.MAX_VALUE); + assertThat(result.contains(ANY_COLUMN_NAME_5)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_5)).isFalse(); + assertThat(result.getDouble(ANY_COLUMN_NAME_5)).isEqualTo(Double.MAX_VALUE); + assertThat(result.contains(ANY_COLUMN_NAME_6)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_6)).isFalse(); + assertThat(result.getText(ANY_COLUMN_NAME_6)).isEqualTo("string"); + assertThat(result.contains(ANY_COLUMN_NAME_7)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_7)).isFalse(); + assertThat(result.getBlob(ANY_COLUMN_NAME_7)) + .isEqualTo(ByteBuffer.wrap("bytes".getBytes(StandardCharsets.UTF_8))); + assertThat(result.getBlobAsBytes(ANY_COLUMN_NAME_7)) + .isEqualTo("bytes".getBytes(StandardCharsets.UTF_8)); + assertThat(result.contains(ANY_COLUMN_NAME_8)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_8)).isFalse(); + 
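For reference, the blob assertions in this test rely on a convention that is only implicit in the Arrange block above: binary values are stored in the partition object as Base64 text and are decoded back to bytes when the Result is built. A minimal, self-contained sketch of that round trip, assuming the interpreter simply Base64-decodes the stored string (the class and helper below are illustrative only, not part of this patch):

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

class Base64BlobSketch {
  // Mirrors the presumed decode step: stored Base64 text -> blob bytes.
  static ByteBuffer toBlob(String storedValue) {
    return ByteBuffer.wrap(Base64.getDecoder().decode(storedValue));
  }

  public static void main(String[] args) {
    // Same encoding the test uses for col7.
    String stored = Base64.getEncoder().encodeToString("bytes".getBytes(StandardCharsets.UTF_8));
    // Prints true: decoding recovers exactly the payload that was stored.
    System.out.println(
        toBlob(stored).equals(ByteBuffer.wrap("bytes".getBytes(StandardCharsets.UTF_8))));
  }
}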
assertThat(result.getDate(ANY_COLUMN_NAME_8)).isEqualTo(ANY_DATE); + assertThat(result.contains(ANY_COLUMN_NAME_9)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_9)).isFalse(); + assertThat(result.getTime(ANY_COLUMN_NAME_9)).isEqualTo(ANY_TIME); + assertThat(result.contains(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_10)).isFalse(); + assertThat(result.getTimestamp(ANY_COLUMN_NAME_10)).isEqualTo(ANY_TIMESTAMP); + assertThat(result.contains(ANY_COLUMN_NAME_11)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_11)).isFalse(); + assertThat(result.getTimestampTZ(ANY_COLUMN_NAME_11)).isEqualTo(ANY_TIMESTAMPTZ); + + Map> columns = result.getColumns(); + assertThat(columns.containsKey(ANY_NAME_1)).isTrue(); + assertThat(columns.get(ANY_NAME_1).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_NAME_1).getTextValue()).isEqualTo(ANY_TEXT_1); + assertThat(columns.containsKey(ANY_NAME_2)).isTrue(); + assertThat(columns.get(ANY_NAME_2).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_NAME_2).getTextValue()).isEqualTo(ANY_TEXT_2); + assertThat(columns.containsKey(ANY_COLUMN_NAME_1)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_1).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_1).getBooleanValue()).isTrue(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_2)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_2).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_2).getIntValue()).isEqualTo(Integer.MAX_VALUE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_3)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_3).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_3).getBigIntValue()).isEqualTo(BigIntColumn.MAX_VALUE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_4)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_4).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_4).getFloatValue()).isEqualTo(Float.MAX_VALUE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_5)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_5).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_5).getDoubleValue()).isEqualTo(Double.MAX_VALUE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_6)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_6).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_6).getTextValue()).isEqualTo("string"); + assertThat(columns.containsKey(ANY_COLUMN_NAME_7)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_7).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_7).getBlobValueAsBytes()) + .isEqualTo("bytes".getBytes(StandardCharsets.UTF_8)); + assertThat(columns.get(ANY_COLUMN_NAME_7).getBlobValueAsByteBuffer()) + .isEqualTo(ByteBuffer.wrap("bytes".getBytes(StandardCharsets.UTF_8))); + assertThat(columns.containsKey(ANY_COLUMN_NAME_8)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_8).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_8).getDateValue()).isEqualTo(ANY_DATE); + assertThat(columns.containsKey(ANY_COLUMN_NAME_9)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_9).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_9).getTimeValue()).isEqualTo(ANY_TIME); + assertThat(columns.containsKey(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_10).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_10).getTimestampValue()).isEqualTo(ANY_TIMESTAMP); + assertThat(columns.containsKey(ANY_COLUMN_NAME_11)).isTrue(); + 
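The DATE/TIME/TIMESTAMP/TIMESTAMPTZ assertions around this point expect the original java.time values back even though the Arrange block stores them through TimeRelatedColumnEncodingUtils.encode rather than as ISO-8601 strings; the interpreter is presumed to reverse that encoding. A small sketch of the storage side only, reusing the encode calls that appear in this test (the decode path is left out because its API is not shown in this patch):

import com.scalar.db.io.DateColumn;
import com.scalar.db.io.TimeColumn;
import com.scalar.db.util.TimeRelatedColumnEncodingUtils;
import java.time.LocalDate;
import java.time.LocalTime;

class TimeEncodingSketch {
  public static void main(String[] args) {
    // Prints the encoded values that would be written into the partition object.
    System.out.println(
        TimeRelatedColumnEncodingUtils.encode(DateColumn.of("col8", LocalDate.of(2024, 1, 2))));
    System.out.println(
        TimeRelatedColumnEncodingUtils.encode(TimeColumn.of("col9", LocalTime.NOON)));
  }
}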
assertThat(columns.get(ANY_COLUMN_NAME_11).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_COLUMN_NAME_11).getTimestampTZValue()).isEqualTo(ANY_TIMESTAMPTZ); + } + + @Test + public void interpret_ShouldReturnWhatsSetWithNullValues() { + // Arrange + Map values = new HashMap<>(); + values.put(ANY_COLUMN_NAME_1, null); + values.put(ANY_COLUMN_NAME_2, null); + values.put(ANY_COLUMN_NAME_3, null); + values.put(ANY_COLUMN_NAME_4, null); + values.put(ANY_COLUMN_NAME_5, null); + values.put(ANY_COLUMN_NAME_6, null); + values.put(ANY_COLUMN_NAME_7, null); + values.put(ANY_COLUMN_NAME_8, null); + values.put(ANY_COLUMN_NAME_9, null); + values.put(ANY_COLUMN_NAME_10, null); + values.put(ANY_COLUMN_NAME_11, null); + ObjectStorageRecord record = + new ObjectStorageRecord( + ANY_ID_1, + ImmutableMap.of(ANY_NAME_1, ANY_TEXT_1), + ImmutableMap.of(ANY_NAME_2, ANY_TEXT_2), + values); + + List projections = Collections.emptyList(); + + ResultInterpreter interpreter = new ResultInterpreter(projections, TABLE_METADATA); + + // Act + Result result = interpreter.interpret(record); + + // Assert + assertThat(result.contains(ANY_NAME_1)).isTrue(); + assertThat(result.isNull(ANY_NAME_1)).isFalse(); + assertThat(result.getText(ANY_NAME_1)).isEqualTo(ANY_TEXT_1); + assertThat(result.contains(ANY_NAME_2)).isTrue(); + assertThat(result.isNull(ANY_NAME_2)).isFalse(); + assertThat(result.getText(ANY_NAME_2)).isEqualTo(ANY_TEXT_2); + assertThat(result.contains(ANY_COLUMN_NAME_1)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_1)).isTrue(); + assertThat(result.getBoolean(ANY_COLUMN_NAME_1)).isFalse(); + assertThat(result.contains(ANY_COLUMN_NAME_2)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_2)).isTrue(); + assertThat(result.getInt(ANY_COLUMN_NAME_2)).isEqualTo(0); + assertThat(result.contains(ANY_COLUMN_NAME_3)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_3)).isTrue(); + assertThat(result.getBigInt(ANY_COLUMN_NAME_3)).isEqualTo(0L); + assertThat(result.contains(ANY_COLUMN_NAME_4)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_4)).isTrue(); + assertThat(result.getFloat(ANY_COLUMN_NAME_4)).isEqualTo(0.0F); + assertThat(result.contains(ANY_COLUMN_NAME_5)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_5)).isTrue(); + assertThat(result.getDouble(ANY_COLUMN_NAME_5)).isEqualTo(0D); + assertThat(result.contains(ANY_COLUMN_NAME_6)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_6)).isTrue(); + assertThat(result.getText(ANY_COLUMN_NAME_6)).isNull(); + assertThat(result.contains(ANY_COLUMN_NAME_7)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_7)).isTrue(); + assertThat(result.getBlob(ANY_COLUMN_NAME_7)).isNull(); + assertThat(result.contains(ANY_COLUMN_NAME_8)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_8)).isTrue(); + assertThat(result.getDate(ANY_COLUMN_NAME_8)).isNull(); + assertThat(result.contains(ANY_COLUMN_NAME_9)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_9)).isTrue(); + assertThat(result.getTime(ANY_COLUMN_NAME_9)).isNull(); + assertThat(result.contains(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(result.getTimestamp(ANY_COLUMN_NAME_10)).isNull(); + assertThat(result.contains(ANY_COLUMN_NAME_11)).isTrue(); + assertThat(result.isNull(ANY_COLUMN_NAME_11)).isTrue(); + assertThat(result.getTimestampTZ(ANY_COLUMN_NAME_11)).isNull(); + + Map> columns = result.getColumns(); + assertThat(columns.containsKey(ANY_NAME_1)).isTrue(); + assertThat(columns.get(ANY_NAME_1).hasNullValue()).isFalse(); + 
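The null-value expectations in this test, where hasNullValue() returns true while the primitive getters fall back to 0/false and the object getters to null, are the behavior the interpreter is presumed to delegate to ScalarDB's typed columns. A small sketch under the assumption that the ofNull factories (not shown in this patch) produce exactly such columns:

import com.scalar.db.io.IntColumn;
import com.scalar.db.io.TextColumn;

class NullColumnSketch {
  public static void main(String[] args) {
    IntColumn nullInt = IntColumn.ofNull("col2");
    TextColumn nullText = TextColumn.ofNull("col6");
    System.out.println(nullInt.hasNullValue());  // true
    System.out.println(nullInt.getIntValue());   // 0, the type default asserted above
    System.out.println(nullText.getTextValue()); // null
  }
}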
assertThat(columns.get(ANY_NAME_1).getTextValue()).isEqualTo(ANY_TEXT_1); + assertThat(columns.containsKey(ANY_NAME_2)).isTrue(); + assertThat(columns.get(ANY_NAME_2).hasNullValue()).isFalse(); + assertThat(columns.get(ANY_NAME_2).getTextValue()).isEqualTo(ANY_TEXT_2); + assertThat(columns.containsKey(ANY_COLUMN_NAME_1)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_1).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_1).getBooleanValue()).isFalse(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_2)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_2).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_2).getIntValue()).isEqualTo(0); + assertThat(columns.containsKey(ANY_COLUMN_NAME_3)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_3).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_3).getBigIntValue()).isEqualTo(0L); + assertThat(columns.containsKey(ANY_COLUMN_NAME_4)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_4).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_4).getFloatValue()).isEqualTo(0.0F); + assertThat(columns.containsKey(ANY_COLUMN_NAME_5)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_5).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_5).getDoubleValue()).isEqualTo(0D); + assertThat(columns.containsKey(ANY_COLUMN_NAME_6)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_6).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_6).getTextValue()).isNull(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_7)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_7).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_7).getBlobValueAsBytes()).isNull(); + assertThat(columns.get(ANY_COLUMN_NAME_7).getBlobValueAsByteBuffer()).isNull(); + assertThat(columns.get(ANY_COLUMN_NAME_8).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_8).getDateValue()).isNull(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_9)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_9).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_9).getTimeValue()).isNull(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_10)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_10).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_10).getTimestampValue()).isNull(); + assertThat(columns.containsKey(ANY_COLUMN_NAME_11)).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_11).hasNullValue()).isTrue(); + assertThat(columns.get(ANY_COLUMN_NAME_11).getTimestampTZValue()).isNull(); + } +} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ScannerImplTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ScannerImplTest.java new file mode 100644 index 0000000000..53b8b42769 --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ScannerImplTest.java @@ -0,0 +1,210 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.mockito.Mockito.when; + +import com.scalar.db.api.Result; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; +import java.util.Optional; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +public class ScannerImplTest { + + @Mock 
ResultInterpreter resultInterpreter; + @Mock ObjectStorageRecord record1; + @Mock ObjectStorageRecord record2; + @Mock ObjectStorageRecord record3; + @Mock ObjectStorageRecord record4; + @Mock Result result1; + @Mock Result result2; + @Mock Result result3; + @Mock Result result4; + + @BeforeEach + public void setUp() throws Exception { + MockitoAnnotations.openMocks(this).close(); + + when(resultInterpreter.interpret(record1)).thenReturn(result1); + when(resultInterpreter.interpret(record2)).thenReturn(result2); + when(resultInterpreter.interpret(record3)).thenReturn(result3); + when(resultInterpreter.interpret(record4)).thenReturn(result4); + } + + @Test + public void one_WithSingleRecord_ShouldContainOnlyOneResult() { + // Arrange + ScannerImpl scanner = buildScanner(Collections.singletonList(record1)); + + // Act + Optional actualResult1 = scanner.one(); + Optional emptyResult = scanner.one(); + + // Assert + assertThat(actualResult1).contains(result1); + assertThat(emptyResult).isEmpty(); + } + + @Test + public void all_WithSingleRecord_ShouldContainOnlyOneResult() { + // Arrange + ScannerImpl scanner = buildScanner(Collections.singletonList(record1)); + + // Act + List actualResults = scanner.all(); + List emptyResults = scanner.all(); + + // Assert + assertThat(actualResults).containsExactly(result1); + assertThat(emptyResults).isEmpty(); + } + + @Test + public void all_WithMultipleRecords_ShouldReturnAllResults() { + // Arrange + ScannerImpl scanner = buildScanner(Arrays.asList(record1, record2, record3, record4)); + + // Act + List actualResults = scanner.all(); + + // Assert + assertThat(actualResults).containsExactly(result1, result2, result3, result4); + } + + @Test + public void one_WithMultipleRecords_ShouldReturnAllResults() { + // Arrange + ScannerImpl scanner = buildScanner(Arrays.asList(record1, record2, record3, record4)); + + // Act + Optional actualResult1 = scanner.one(); + Optional actualResult2 = scanner.one(); + Optional actualResult3 = scanner.one(); + Optional actualResult4 = scanner.one(); + Optional actualResult5 = scanner.one(); + + // Assert + assertThat(actualResult1).contains(result1); + assertThat(actualResult2).contains(result2); + assertThat(actualResult3).contains(result3); + assertThat(actualResult4).contains(result4); + assertThat(actualResult5).isEmpty(); + } + + @Test + public void oneAndAll_WithMultipleRecords_ShouldReturnAllResults() { + // Arrange + ScannerImpl scanner = buildScanner(Arrays.asList(record1, record2, record3, record4)); + + // Act + Optional oneResult = scanner.one(); + List remainingResults = scanner.all(); + Optional emptyResultForOne = scanner.one(); + List emptyResultForAll = scanner.all(); + + // Assert + assertThat(oneResult).contains(result1); + assertThat(remainingResults).containsExactly(result2, result3, result4); + assertThat(emptyResultForOne).isEmpty(); + assertThat(emptyResultForAll).isEmpty(); + } + + @Test + public void one_WithNoRecord_ShouldReturnEmpty() { + // Arrange + ScannerImpl scanner = buildScanner(Collections.emptyList()); + + // Act + Optional oneResult = scanner.one(); + + // Assert + assertThat(oneResult).isEmpty(); + } + + @Test + public void all_WithNoRecord_ShouldReturnEmpty() { + // Arrange + ScannerImpl scanner = buildScanner(Collections.emptyList()); + + // Act + List allResults = scanner.all(); + + // Assert + assertThat(allResults).isEmpty(); + } + + @Test + public void one_WithRecordCountLimit_ShouldReturnLimitedResults() { + // Arrange + ScannerImpl scanner = + 
buildScannerWithLimit(Arrays.asList(record1, record2, record3, record4), 2); + + // Act + Optional actualResult1 = scanner.one(); + Optional actualResult2 = scanner.one(); + Optional actualResult3 = scanner.one(); + + // Assert + assertThat(actualResult1).contains(result1); + assertThat(actualResult2).contains(result2); + assertThat(actualResult3).isEmpty(); + } + + @Test + public void all_WithRecordCountLimit_ShouldReturnLimitedResults() { + // Arrange + ScannerImpl scanner = + buildScannerWithLimit(Arrays.asList(record1, record2, record3, record4), 2); + + // Act + List actualResults = scanner.all(); + + // Assert + assertThat(actualResults).containsExactly(result1, result2); + } + + @Test + public void oneAndAll_WithRecordCountLimit_ShouldReturnLimitedResults() { + // Arrange + ScannerImpl scanner = + buildScannerWithLimit(Arrays.asList(record1, record2, record3, record4), 3); + + // Act + Optional oneResult = scanner.one(); + List remainingResults = scanner.all(); + + // Assert + assertThat(oneResult).contains(result1); + assertThat(remainingResults).containsExactly(result2, result3); + } + + @Test + public void all_WithZeroRecordCountLimit_ShouldReturnAllResults() { + // Arrange + ScannerImpl scanner = buildScannerWithLimit(Arrays.asList(record1, record2, record3), 0); + + // Act + List actualResults = scanner.all(); + + // Assert + assertThat(actualResults).containsExactly(result1, result2, result3); + } + + private ScannerImpl buildScanner(List records) { + return buildScannerWithLimit(records, 0); + } + + private ScannerImpl buildScannerWithLimit(List records, int limit) { + List recordList = new ArrayList<>(records); + Iterator iterator = recordList.iterator(); + return new ScannerImpl(iterator, resultInterpreter, limit); + } +} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/SelectStatementHandlerTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/SelectStatementHandlerTest.java new file mode 100644 index 0000000000..fe227f5445 --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/SelectStatementHandlerTest.java @@ -0,0 +1,443 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.scalar.db.api.Get; +import com.scalar.db.api.Operation; +import com.scalar.db.api.Scan; +import com.scalar.db.api.Scanner; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.common.TableMetadataManager; +import com.scalar.db.exception.storage.ExecutionException; +import com.scalar.db.io.DataType; +import com.scalar.db.io.Key; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashSet; +import java.util.Map; +import java.util.Optional; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +public class SelectStatementHandlerTest { + private static final String ANY_NAMESPACE_NAME = "namespace"; + private static final String ANY_TABLE_NAME = "table"; + private static final String ANY_NAME_1 = "name1"; + private static final String ANY_NAME_2 = "name2"; + private static final String ANY_NAME_3 = "name3"; + private static final String ANY_TEXT_1 = 
"text1"; + private static final String ANY_TEXT_2 = "text2"; + private static final String ANY_TEXT_3 = "text3"; + + private SelectStatementHandler handler; + @Mock private ObjectStorageWrapper wrapper; + @Mock private TableMetadataManager metadataManager; + @Mock private TableMetadata metadata; + + @BeforeEach + public void setUp() throws Exception { + MockitoAnnotations.openMocks(this).close(); + + handler = new SelectStatementHandler(wrapper, metadataManager); + + when(metadataManager.getTableMetadata(any(Operation.class))).thenReturn(metadata); + when(metadata.getPartitionKeyNames()) + .thenReturn(new LinkedHashSet<>(Collections.singletonList(ANY_NAME_1))); + when(metadata.getClusteringKeyNames()) + .thenReturn(new LinkedHashSet<>(Collections.singletonList(ANY_NAME_2))); + when(metadata.getSecondaryIndexNames()) + .thenReturn(new LinkedHashSet<>(Collections.singletonList(ANY_NAME_3))); + when(metadata.getClusteringOrder(ANY_NAME_2)).thenReturn(Scan.Ordering.Order.ASC); + when(metadata.getColumnDataType(anyString())).thenReturn(DataType.TEXT); + } + + private Get prepareGet() { + Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); + Key clusteringKey = Key.ofText(ANY_NAME_2, ANY_TEXT_2); + return Get.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + .build(); + } + + private Scan prepareScan() { + Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); + return Scan.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(partitionKey) + .build(); + } + + private Scan prepareScanAll() { + return Scan.newBuilder().namespace(ANY_NAMESPACE_NAME).table(ANY_TABLE_NAME).all().build(); + } + + private Map createPartitionWithRecord() { + Map partitionKey = Collections.singletonMap(ANY_NAME_1, ANY_TEXT_1); + Map clusteringKey = Collections.singletonMap(ANY_NAME_2, ANY_TEXT_2); + Map values = Collections.singletonMap(ANY_NAME_3, ANY_TEXT_3); + Map partition = new HashMap<>(); + addRecordToPartition(partition, partitionKey, clusteringKey, values); + return partition; + } + + private ObjectStorageRecord createRecord( + Map partitionKey, + Map clusteringKey, + Map values) { + String recordId = buildRecordId(partitionKey, clusteringKey); + return ObjectStorageRecord.newBuilder() + .id(recordId) + .partitionKey(partitionKey) + .clusteringKey(clusteringKey) + .values(values) + .build(); + } + + private void addRecordToPartition( + Map partition, + Map partitionKey, + Map clusteringKey, + Map values) { + ObjectStorageRecord record = createRecord(partitionKey, clusteringKey, values); + String recordId = buildRecordId(partitionKey, clusteringKey); + partition.put(recordId, record); + } + + private String buildRecordId( + Map partitionKey, Map clusteringKey) { + String partitionKeyValue = (String) partitionKey.get(ANY_NAME_1); + String clusteringKeyValue = (String) clusteringKey.get(ANY_NAME_2); + return partitionKeyValue + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + clusteringKeyValue; + } + + @Test + public void handle_GetOperationGiven_ShouldReturnScanner() throws Exception { + // Arrange + Get get = prepareGet(); + Map partition = createPartitionWithRecord(); + String serialized = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serialized, "version1"); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + Scanner scanner = handler.handle(get); + + // Assert + assertThat(scanner).isNotNull(); + 
verify(wrapper) + .get(ObjectStorageUtils.getObjectKey(ANY_NAMESPACE_NAME, ANY_TABLE_NAME, ANY_TEXT_1)); + } + + @Test + public void handle_GetOperationWhenRecordNotFound_ShouldReturnEmptyScanner() throws Exception { + // Arrange + Get get = prepareGet(); + Map partition = new HashMap<>(); + String serialized = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serialized, "version1"); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + Scanner scanner = handler.handle(get); + + // Assert + assertThat(scanner).isNotNull(); + assertThat(scanner.all()).isEmpty(); + } + + @Test + public void handle_GetOperationWhenPartitionNotFound_ShouldReturnEmptyScanner() throws Exception { + // Arrange + Get get = prepareGet(); + when(wrapper.get(anyString())).thenReturn(Optional.empty()); + + // Act + Scanner scanner = handler.handle(get); + + // Assert + assertThat(scanner).isNotNull(); + assertThat(scanner.all()).isEmpty(); + } + + @Test + public void handle_GetOperationWithSecondaryIndex_ShouldThrowUnsupportedOperationException() { + // Arrange + Key indexKey = Key.ofText(ANY_NAME_3, ANY_TEXT_3); + Get get = + Get.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(indexKey) + .build(); + + // Act Assert + assertThatThrownBy(() -> handler.handle(get)).isInstanceOf(UnsupportedOperationException.class); + } + + @Test + public void handle_GetOperationWhenExceptionThrown_ShouldThrowExecutionException() + throws Exception { + // Arrange + Get get = prepareGet(); + when(wrapper.get(anyString())).thenThrow(new ObjectStorageWrapperException("error")); + + // Act Assert + assertThatThrownBy(() -> handler.handle(get)).isInstanceOf(ExecutionException.class); + } + + @Test + public void handle_ScanOperationGiven_ShouldReturnScanner() throws Exception { + // Arrange + Scan scan = prepareScan(); + Map partition = createPartitionWithRecord(); + String serialized = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serialized, "version1"); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + Scanner scanner = handler.handle(scan); + + // Assert + assertThat(scanner).isNotNull(); + verify(wrapper) + .get(ObjectStorageUtils.getObjectKey(ANY_NAMESPACE_NAME, ANY_TABLE_NAME, ANY_TEXT_1)); + } + + @Test + public void handle_ScanOperationWithSecondaryIndex_ShouldThrowUnsupportedOperationException() { + // Arrange + Key indexKey = Key.ofText(ANY_NAME_3, ANY_TEXT_3); + Scan scan = + Scan.newBuilder() + .namespace(ANY_NAMESPACE_NAME) + .table(ANY_TABLE_NAME) + .partitionKey(indexKey) + .build(); + + // Act Assert + assertThatThrownBy(() -> handler.handle(scan)) + .isInstanceOf(UnsupportedOperationException.class); + } + + @Test + public void handle_ScanOperationWhenExceptionThrown_ShouldThrowExecutionException() + throws Exception { + // Arrange + Scan scan = prepareScan(); + when(wrapper.get(anyString())).thenThrow(new ObjectStorageWrapperException("error")); + + // Act Assert + assertThatThrownBy(() -> handler.handle(scan)).isInstanceOf(ExecutionException.class); + } + + @Test + public void handle_ScanOperationWithLimit_ShouldReturnLimitedResults() throws Exception { + // Arrange + Scan scan = Scan.newBuilder(prepareScan()).limit(1).build(); + Map partition = new HashMap<>(); + + // Create multiple records + for (int i = 0; i < 5; i++) { + Map partitionKey = Collections.singletonMap(ANY_NAME_1, ANY_TEXT_1); + Map 
clusteringKey = Collections.singletonMap(ANY_NAME_2, ANY_TEXT_2 + i); + addRecordToPartition(partition, partitionKey, clusteringKey, Collections.emptyMap()); + } + + String serialized = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serialized, "version1"); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + Scanner scanner = handler.handle(scan); + + // Assert + assertThat(scanner).isNotNull(); + assertThat(scanner.all()).hasSize(1); + } + + @Test + public void handle_ScanAllOperationGiven_ShouldReturnScanner() throws Exception { + // Arrange + Scan scanAll = prepareScanAll(); + when(wrapper.getKeys(anyString())) + .thenReturn( + new HashSet<>( + Arrays.asList( + ObjectStorageUtils.getObjectKey(ANY_NAMESPACE_NAME, ANY_TABLE_NAME, ANY_TEXT_1), + ObjectStorageUtils.getObjectKey( + ANY_NAMESPACE_NAME, ANY_TABLE_NAME, ANY_TEXT_2)))); + + Map partition1 = createPartitionWithRecord(); + String serialized1 = Serializer.serialize(partition1); + ObjectStorageWrapperResponse response1 = + new ObjectStorageWrapperResponse(serialized1, "version1"); + + Map partition2 = new HashMap<>(); + Map partitionKey2 = Collections.singletonMap(ANY_NAME_1, ANY_TEXT_2); + Map clusteringKey2 = Collections.singletonMap(ANY_NAME_2, ANY_TEXT_3); + addRecordToPartition(partition2, partitionKey2, clusteringKey2, Collections.emptyMap()); + String serialized2 = Serializer.serialize(partition2); + ObjectStorageWrapperResponse response2 = + new ObjectStorageWrapperResponse(serialized2, "version2"); + + when(wrapper.get( + ObjectStorageUtils.getObjectKey(ANY_NAMESPACE_NAME, ANY_TABLE_NAME, ANY_TEXT_1))) + .thenReturn(Optional.of(response1)); + when(wrapper.get( + ObjectStorageUtils.getObjectKey(ANY_NAMESPACE_NAME, ANY_TABLE_NAME, ANY_TEXT_2))) + .thenReturn(Optional.of(response2)); + + // Act + Scanner scanner = handler.handle(scanAll); + + // Assert + assertThat(scanner).isNotNull(); + assertThat(scanner.all()).hasSize(2); + } + + @Test + public void handle_ScanAllOperationWithLimit_ShouldReturnLimitedResults() throws Exception { + // Arrange + Scan scanAll = Scan.newBuilder(prepareScanAll()).limit(1).build(); + String objectKey1 = + ObjectStorageUtils.getObjectKey(ANY_NAMESPACE_NAME, ANY_TABLE_NAME, ANY_TEXT_1); + String objectKey2 = + ObjectStorageUtils.getObjectKey(ANY_NAMESPACE_NAME, ANY_TABLE_NAME, ANY_TEXT_2); + when(wrapper.getKeys(anyString())) + .thenReturn(new HashSet<>(Arrays.asList(objectKey1, objectKey2))); + + Map partition1 = createPartitionWithRecord(); + String serialized1 = Serializer.serialize(partition1); + ObjectStorageWrapperResponse response1 = + new ObjectStorageWrapperResponse(serialized1, "version1"); + + Map partition2 = new HashMap<>(); + Map partitionKey2 = Collections.singletonMap(ANY_NAME_1, ANY_TEXT_2); + Map clusteringKey2 = Collections.singletonMap(ANY_NAME_2, ANY_TEXT_3); + addRecordToPartition(partition2, partitionKey2, clusteringKey2, Collections.emptyMap()); + String serialized2 = Serializer.serialize(partition2); + ObjectStorageWrapperResponse response2 = + new ObjectStorageWrapperResponse(serialized2, "version2"); + + when(wrapper.get(objectKey1)).thenReturn(Optional.of(response1)); + when(wrapper.get(objectKey2)).thenReturn(Optional.of(response2)); + + // Act + Scanner scanner = handler.handle(scanAll); + + // Assert + assertThat(scanner).isNotNull(); + assertThat(scanner.all()).hasSize(1); + } + + @Test + public void handle_ScanAllOperationWhenExceptionThrown_ShouldThrowExecutionException() + 
throws Exception { + // Arrange + Scan scanAll = prepareScanAll(); + when(wrapper.getKeys(anyString())).thenThrow(new ObjectStorageWrapperException("error")); + + // Act Assert + assertThatThrownBy(() -> handler.handle(scanAll)).isInstanceOf(ExecutionException.class); + } + + @Test + public void handle_ScanOperationWithStartClusteringKey_ShouldFilterResults() throws Exception { + // Arrange + Scan scan = + Scan.newBuilder(prepareScan()).start(Key.ofText(ANY_NAME_2, ANY_TEXT_2 + "2")).build(); + Map partition = new HashMap<>(); + + // Create multiple records with different clustering keys + for (int i = 0; i < 5; i++) { + Map partitionKey = Collections.singletonMap(ANY_NAME_1, ANY_TEXT_1); + Map clusteringKey = Collections.singletonMap(ANY_NAME_2, ANY_TEXT_2 + i); + addRecordToPartition(partition, partitionKey, clusteringKey, Collections.emptyMap()); + } + + String serialized = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serialized, "version1"); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + Scanner scanner = handler.handle(scan); + + // Assert + assertThat(scanner).isNotNull(); + // Should filter records with clustering key >= "text22" + assertThat(scanner.all()).hasSizeGreaterThanOrEqualTo(1); + } + + @Test + public void handle_ScanOperationWithEndClusteringKey_ShouldFilterResults() throws Exception { + // Arrange + Scan scan = + Scan.newBuilder(prepareScan()).end(Key.ofText(ANY_NAME_2, ANY_TEXT_2 + "2")).build(); + Map partition = new HashMap<>(); + + // Create multiple records with different clustering keys + for (int i = 0; i < 5; i++) { + Map partitionKey = Collections.singletonMap(ANY_NAME_1, ANY_TEXT_1); + Map clusteringKey = Collections.singletonMap(ANY_NAME_2, ANY_TEXT_2 + i); + addRecordToPartition(partition, partitionKey, clusteringKey, Collections.emptyMap()); + } + + String serialized = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serialized, "version1"); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + Scanner scanner = handler.handle(scan); + + // Assert + assertThat(scanner).isNotNull(); + // Should filter records with clustering key <= "text22" + assertThat(scanner.all()).hasSizeGreaterThanOrEqualTo(1); + } + + @Test + public void handle_ScanOperationWithDescOrdering_ShouldReverseResults() throws Exception { + // Arrange + when(metadata.getClusteringOrder(ANY_NAME_2)).thenReturn(Scan.Ordering.Order.ASC); + Scan scan = Scan.newBuilder(prepareScan()).ordering(Scan.Ordering.desc(ANY_NAME_2)).build(); + Map partition = new HashMap<>(); + + // Create multiple records + for (int i = 0; i < 3; i++) { + Map partitionKey = Collections.singletonMap(ANY_NAME_1, ANY_TEXT_1); + Map clusteringKey = Collections.singletonMap(ANY_NAME_2, ANY_TEXT_2 + i); + addRecordToPartition(partition, partitionKey, clusteringKey, Collections.emptyMap()); + } + + String serialized = Serializer.serialize(partition); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serialized, "version1"); + when(wrapper.get(anyString())).thenReturn(Optional.of(response)); + + // Act + Scanner scanner = handler.handle(scan); + + // Assert + assertThat(scanner).isNotNull(); + assertThat(scanner.all()).hasSize(3); + } +} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/StatementHandlerTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/StatementHandlerTest.java new file 
mode 100644 index 0000000000..81e6088622 --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/StatementHandlerTest.java @@ -0,0 +1,339 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThatCode; +import static org.assertj.core.api.Assertions.assertThatThrownBy; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.when; + +import com.google.common.collect.ImmutableMap; +import com.scalar.db.api.ConditionBuilder; +import com.scalar.db.api.ConditionalExpression; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.common.TableMetadataManager; +import com.scalar.db.exception.storage.ExecutionException; +import com.scalar.db.io.DataType; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +public class StatementHandlerTest { + + private static final String COLUMN_NAME_1 = "col1"; + private static final String COLUMN_NAME_2 = "col2"; + + @Mock private ObjectStorageWrapper wrapper; + @Mock private TableMetadataManager metadataManager; + @Mock private TableMetadata metadata; + + private StatementHandler handler; + + @BeforeEach + public void setUp() throws Exception { + MockitoAnnotations.openMocks(this).close(); + handler = new StatementHandler(wrapper, metadataManager); + when(metadata.getColumnDataType(anyString())).thenReturn(DataType.INT); + } + + @Test + public void validateConditions_WithEqConditionAndMatchingValue_ShouldNotThrowException() { + // Arrange + ObjectStorageRecord record = createRecord(10); + ConditionalExpression condition = ConditionBuilder.column(COLUMN_NAME_1).isEqualToInt(10); + List expressions = Collections.singletonList(condition); + + // Act Assert + assertThatCode(() -> handler.validateConditions(record, expressions, metadata)) + .doesNotThrowAnyException(); + } + + @Test + public void validateConditions_WithEqConditionAndDifferentValue_ShouldThrowExecutionException() { + // Arrange + ObjectStorageRecord record = createRecord(10); + ConditionalExpression condition = ConditionBuilder.column(COLUMN_NAME_1).isEqualToInt(20); + List expressions = Collections.singletonList(condition); + + // Act Assert + assertThatThrownBy(() -> handler.validateConditions(record, expressions, metadata)) + .isInstanceOf(ExecutionException.class); + } + + @Test + public void validateConditions_WithEqConditionAndNullValue_ShouldThrowExecutionException() { + // Arrange + ObjectStorageRecord record = createRecordWithNull(); + ConditionalExpression condition = ConditionBuilder.column(COLUMN_NAME_1).isEqualToInt(10); + List expressions = Collections.singletonList(condition); + + // Act Assert + assertThatThrownBy(() -> handler.validateConditions(record, expressions, metadata)) + .isInstanceOf(ExecutionException.class); + } + + @Test + public void validateConditions_WithNeConditionAndDifferentValue_ShouldNotThrowException() { + // Arrange + ObjectStorageRecord record = createRecord(10); + ConditionalExpression condition = ConditionBuilder.column(COLUMN_NAME_1).isNotEqualToInt(20); + List expressions = Collections.singletonList(condition); + + // Act Assert + assertThatCode(() -> handler.validateConditions(record, expressions, metadata)) + .doesNotThrowAnyException(); + } + + @Test + public void validateConditions_WithNeConditionAndSameValue_ShouldThrowExecutionException() { + // 
Arrange + ObjectStorageRecord record = createRecord(10); + ConditionalExpression condition = ConditionBuilder.column(COLUMN_NAME_1).isNotEqualToInt(10); + List expressions = Collections.singletonList(condition); + + // Act Assert + assertThatThrownBy(() -> handler.validateConditions(record, expressions, metadata)) + .isInstanceOf(ExecutionException.class); + } + + @Test + public void validateConditions_WithGtConditionAndGreaterValue_ShouldNotThrowException() { + // Arrange + ObjectStorageRecord record = createRecord(20); + ConditionalExpression condition = ConditionBuilder.column(COLUMN_NAME_1).isGreaterThanInt(10); + List expressions = Collections.singletonList(condition); + + // Act Assert + assertThatCode(() -> handler.validateConditions(record, expressions, metadata)) + .doesNotThrowAnyException(); + } + + @Test + public void validateConditions_WithGtConditionAndSameValue_ShouldThrowExecutionException() { + // Arrange + ObjectStorageRecord record = createRecord(10); + ConditionalExpression condition = ConditionBuilder.column(COLUMN_NAME_1).isGreaterThanInt(10); + List expressions = Collections.singletonList(condition); + + // Act Assert + assertThatThrownBy(() -> handler.validateConditions(record, expressions, metadata)) + .isInstanceOf(ExecutionException.class); + } + + @Test + public void validateConditions_WithGteConditionAndGreaterValue_ShouldNotThrowException() { + // Arrange + ObjectStorageRecord record = createRecord(20); + ConditionalExpression condition = + ConditionBuilder.column(COLUMN_NAME_1).isGreaterThanOrEqualToInt(10); + List expressions = Collections.singletonList(condition); + + // Act Assert + assertThatCode(() -> handler.validateConditions(record, expressions, metadata)) + .doesNotThrowAnyException(); + } + + @Test + public void validateConditions_WithGteConditionAndSameValue_ShouldNotThrowException() { + // Arrange + ObjectStorageRecord record = createRecord(10); + ConditionalExpression condition = + ConditionBuilder.column(COLUMN_NAME_1).isGreaterThanOrEqualToInt(10); + List expressions = Collections.singletonList(condition); + + // Act Assert + assertThatCode(() -> handler.validateConditions(record, expressions, metadata)) + .doesNotThrowAnyException(); + } + + @Test + public void validateConditions_WithGteConditionAndSmallerValue_ShouldThrowExecutionException() { + // Arrange + ObjectStorageRecord record = createRecord(5); + ConditionalExpression condition = + ConditionBuilder.column(COLUMN_NAME_1).isGreaterThanOrEqualToInt(10); + List expressions = Collections.singletonList(condition); + + // Act Assert + assertThatThrownBy(() -> handler.validateConditions(record, expressions, metadata)) + .isInstanceOf(ExecutionException.class); + } + + @Test + public void validateConditions_WithLtConditionAndSmallerValue_ShouldNotThrowException() { + // Arrange + ObjectStorageRecord record = createRecord(5); + ConditionalExpression condition = ConditionBuilder.column(COLUMN_NAME_1).isLessThanInt(10); + List expressions = Collections.singletonList(condition); + + // Act Assert + assertThatCode(() -> handler.validateConditions(record, expressions, metadata)) + .doesNotThrowAnyException(); + } + + @Test + public void validateConditions_WithLtConditionAndSameValue_ShouldThrowExecutionException() { + // Arrange + ObjectStorageRecord record = createRecord(10); + ConditionalExpression condition = ConditionBuilder.column(COLUMN_NAME_1).isLessThanInt(10); + List expressions = Collections.singletonList(condition); + + // Act Assert + assertThatThrownBy(() -> 
handler.validateConditions(record, expressions, metadata)) + .isInstanceOf(ExecutionException.class); + } + + @Test + public void validateConditions_WithLteConditionAndSmallerValue_ShouldNotThrowException() { + // Arrange + ObjectStorageRecord record = createRecord(5); + ConditionalExpression condition = + ConditionBuilder.column(COLUMN_NAME_1).isLessThanOrEqualToInt(10); + List expressions = Collections.singletonList(condition); + + // Act Assert + assertThatCode(() -> handler.validateConditions(record, expressions, metadata)) + .doesNotThrowAnyException(); + } + + @Test + public void validateConditions_WithLteConditionAndSameValue_ShouldNotThrowException() { + // Arrange + ObjectStorageRecord record = createRecord(10); + ConditionalExpression condition = + ConditionBuilder.column(COLUMN_NAME_1).isLessThanOrEqualToInt(10); + List expressions = Collections.singletonList(condition); + + // Act Assert + assertThatCode(() -> handler.validateConditions(record, expressions, metadata)) + .doesNotThrowAnyException(); + } + + @Test + public void validateConditions_WithLteConditionAndGreaterValue_ShouldThrowExecutionException() { + // Arrange + ObjectStorageRecord record = createRecord(20); + ConditionalExpression condition = + ConditionBuilder.column(COLUMN_NAME_1).isLessThanOrEqualToInt(10); + List expressions = Collections.singletonList(condition); + + // Act Assert + assertThatThrownBy(() -> handler.validateConditions(record, expressions, metadata)) + .isInstanceOf(ExecutionException.class); + } + + @Test + public void validateConditions_WithIsNullConditionAndNullValue_ShouldNotThrowException() { + // Arrange + ObjectStorageRecord record = createRecordWithNull(); + ConditionalExpression condition = ConditionBuilder.column(COLUMN_NAME_1).isNullInt(); + List expressions = Collections.singletonList(condition); + + // Act Assert + assertThatCode(() -> handler.validateConditions(record, expressions, metadata)) + .doesNotThrowAnyException(); + } + + @Test + public void + validateConditions_WithIsNullConditionAndNonNullValue_ShouldThrowExecutionException() { + // Arrange + ObjectStorageRecord record = createRecord(10); + ConditionalExpression condition = ConditionBuilder.column(COLUMN_NAME_1).isNullInt(); + List expressions = Collections.singletonList(condition); + + // Act Assert + assertThatThrownBy(() -> handler.validateConditions(record, expressions, metadata)) + .isInstanceOf(ExecutionException.class); + } + + @Test + public void validateConditions_WithIsNotNullConditionAndNonNullValue_ShouldNotThrowException() { + // Arrange + ObjectStorageRecord record = createRecord(10); + ConditionalExpression condition = ConditionBuilder.column(COLUMN_NAME_1).isNotNullInt(); + List expressions = Collections.singletonList(condition); + + // Act Assert + assertThatCode(() -> handler.validateConditions(record, expressions, metadata)) + .doesNotThrowAnyException(); + } + + @Test + public void + validateConditions_WithIsNotNullConditionAndNullValue_ShouldThrowExecutionException() { + // Arrange + ObjectStorageRecord record = createRecordWithNull(); + ConditionalExpression condition = ConditionBuilder.column(COLUMN_NAME_1).isNotNullInt(); + List expressions = Collections.singletonList(condition); + + // Act Assert + assertThatThrownBy(() -> handler.validateConditions(record, expressions, metadata)) + .isInstanceOf(ExecutionException.class); + } + + @Test + public void validateConditions_WithMultipleConditionsAllMatching_ShouldNotThrowException() { + // Arrange + ObjectStorageRecord record = 
createRecordWithMultipleColumns(); + when(metadata.getColumnDataType(COLUMN_NAME_1)).thenReturn(DataType.INT); + when(metadata.getColumnDataType(COLUMN_NAME_2)).thenReturn(DataType.TEXT); + + ConditionalExpression condition1 = ConditionBuilder.column(COLUMN_NAME_1).isEqualToInt(10); + ConditionalExpression condition2 = + ConditionBuilder.column(COLUMN_NAME_2).isEqualToText("value"); + List expressions = Arrays.asList(condition1, condition2); + + // Act Assert + assertThatCode(() -> handler.validateConditions(record, expressions, metadata)) + .doesNotThrowAnyException(); + } + + @Test + public void + validateConditions_WithMultipleConditionsOneNotMatching_ShouldThrowExecutionException() { + // Arrange + ObjectStorageRecord record = createRecordWithMultipleColumns(); + when(metadata.getColumnDataType(COLUMN_NAME_1)).thenReturn(DataType.INT); + when(metadata.getColumnDataType(COLUMN_NAME_2)).thenReturn(DataType.TEXT); + + ConditionalExpression condition1 = ConditionBuilder.column(COLUMN_NAME_1).isEqualToInt(10); + ConditionalExpression condition2 = + ConditionBuilder.column(COLUMN_NAME_2).isEqualToText("different"); + List expressions = Arrays.asList(condition1, condition2); + + // Act Assert + assertThatThrownBy(() -> handler.validateConditions(record, expressions, metadata)) + .isInstanceOf(ExecutionException.class); + } + + private ObjectStorageRecord createRecord(int value) { + return ObjectStorageRecord.newBuilder() + .id("id") + .partitionKey(new HashMap<>()) + .clusteringKey(new HashMap<>()) + .values(Collections.singletonMap(COLUMN_NAME_1, value)) + .build(); + } + + private ObjectStorageRecord createRecordWithNull() { + return ObjectStorageRecord.newBuilder() + .id("id") + .partitionKey(new HashMap<>()) + .clusteringKey(new HashMap<>()) + .values(Collections.singletonMap(COLUMN_NAME_1, null)) + .build(); + } + + private ObjectStorageRecord createRecordWithMultipleColumns() { + return ObjectStorageRecord.newBuilder() + .id("id") + .partitionKey(new HashMap<>()) + .clusteringKey(new HashMap<>()) + .values(new HashMap<>(ImmutableMap.of(COLUMN_NAME_1, 10, COLUMN_NAME_2, "value"))) + .build(); + } +} From 4811884c043eeb0b1f999ba4eae78f0fe35bcb46 Mon Sep 17 00:00:00 2001 From: Kodai Doki Date: Fri, 7 Nov 2025 08:24:36 +0900 Subject: [PATCH 2/5] Apply suggestions --- ...ageConditionalMutationIntegrationTest.java | 16 +++++++++--- ...tipleClusteringKeyScanIntegrationTest.java | 22 ---------------- ...geMultiplePartitionKeyIntegrationTest.java | 23 ----------------- ...ingleClusteringKeyScanIntegrationTest.java | 17 ------------- ...rageSinglePartitionKeyIntegrationTest.java | 18 ------------- .../objectstorage/ObjectStorageTestUtils.java | 19 -------------- .../java/com/scalar/db/common/CoreError.java | 2 +- .../storage/objectstorage/ObjectStorage.java | 10 +++++--- .../objectstorage/ObjectStorageMutation.java | 18 ++++++++----- .../objectstorage/ObjectStorageRecord.java | 22 ++++++++-------- .../objectstorage/PartitionIdentifier.java | 2 +- .../objectstorage/ResultInterpreter.java | 3 ++- .../ObjectStorageMutationTest.java | 25 ------------------- 13 files changed, 45 insertions(+), 152 deletions(-) delete mode 100644 core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageTestUtils.java diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageConditionalMutationIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageConditionalMutationIntegrationTest.java index 759dd22507..69979feec3 
100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageConditionalMutationIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageConditionalMutationIntegrationTest.java @@ -2,6 +2,7 @@ import com.scalar.db.api.ConditionalExpression; import com.scalar.db.api.DistributedStorageConditionalMutationIntegrationTestBase; +import com.scalar.db.io.DataType; import java.util.List; import java.util.Properties; import java.util.stream.Collectors; @@ -22,9 +23,18 @@ protected int getThreadNum() { protected List getOperatorAndDataTypeListForTest() { return super.getOperatorAndDataTypeListForTest().stream() .filter( - operatorAndDataType -> - operatorAndDataType.getOperator() == ConditionalExpression.Operator.EQ - || operatorAndDataType.getOperator() == ConditionalExpression.Operator.NE) + operatorAndDataType -> { + // Object Storage only supports EQ, NE, IS_NULL, and IS_NOT_NULL conditions for BLOB + // type + if (operatorAndDataType.getDataType() == DataType.BLOB) { + return operatorAndDataType.getOperator() == ConditionalExpression.Operator.EQ + || operatorAndDataType.getOperator() == ConditionalExpression.Operator.NE + || operatorAndDataType.getOperator() == ConditionalExpression.Operator.IS_NULL + || operatorAndDataType.getOperator() + == ConditionalExpression.Operator.IS_NOT_NULL; + } + return true; + }) .collect(Collectors.toList()); } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultipleClusteringKeyScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultipleClusteringKeyScanIntegrationTest.java index e3a93e8ff6..68b7b74d04 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultipleClusteringKeyScanIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultipleClusteringKeyScanIntegrationTest.java @@ -1,7 +1,6 @@ package com.scalar.db.storage.objectstorage; import com.scalar.db.api.DistributedStorageMultipleClusteringKeyScanIntegrationTestBase; -import com.scalar.db.io.Column; import com.scalar.db.io.DataType; import java.util.List; import java.util.Properties; @@ -27,25 +26,4 @@ protected List getDataTypes() { protected boolean isParallelDdlSupported() { return false; } - - @Override - protected int getThreadNum() { - return 3; - } - - @Override - protected Column getColumnWithMinValue(String columnName, DataType dataType) { - if (dataType == DataType.TEXT) { - return ObjectStorageTestUtils.getMinTextValue(columnName); - } - return super.getColumnWithMinValue(columnName, dataType); - } - - @Override - protected Column getColumnWithMaxValue(String columnName, DataType dataType) { - if (dataType == DataType.TEXT) { - return ObjectStorageTestUtils.getMaxTextValue(columnName); - } - return super.getColumnWithMaxValue(columnName, dataType); - } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultiplePartitionKeyIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultiplePartitionKeyIntegrationTest.java index d3b077df18..fdef8cfdeb 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultiplePartitionKeyIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultiplePartitionKeyIntegrationTest.java @@ -1,8 +1,6 @@ package 
com.scalar.db.storage.objectstorage; import com.scalar.db.api.DistributedStorageMultiplePartitionKeyIntegrationTestBase; -import com.scalar.db.io.Column; -import com.scalar.db.io.DataType; import java.util.Properties; public class ObjectStorageMultiplePartitionKeyIntegrationTest @@ -12,29 +10,8 @@ protected Properties getProperties(String testName) { return ObjectStorageEnv.getProperties(testName); } - @Override - protected int getThreadNum() { - return 3; - } - @Override protected boolean isParallelDdlSupported() { return false; } - - @Override - protected Column getColumnWithMinValue(String columnName, DataType dataType) { - if (dataType == DataType.TEXT) { - return ObjectStorageTestUtils.getMinTextValue(columnName); - } - return super.getColumnWithMinValue(columnName, dataType); - } - - @Override - protected Column getColumnWithMaxValue(String columnName, DataType dataType) { - if (dataType == DataType.TEXT) { - return ObjectStorageTestUtils.getMaxTextValue(columnName); - } - return super.getColumnWithMaxValue(columnName, dataType); - } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSingleClusteringKeyScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSingleClusteringKeyScanIntegrationTest.java index 955b94330b..36727d5649 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSingleClusteringKeyScanIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSingleClusteringKeyScanIntegrationTest.java @@ -1,7 +1,6 @@ package com.scalar.db.storage.objectstorage; import com.scalar.db.api.DistributedStorageSingleClusteringKeyScanIntegrationTestBase; -import com.scalar.db.io.Column; import com.scalar.db.io.DataType; import java.util.ArrayList; import java.util.List; @@ -26,20 +25,4 @@ protected List getClusteringKeyTypes() { } return clusteringKeyTypes; } - - @Override - protected Column getColumnWithMinValue(String columnName, DataType dataType) { - if (dataType == DataType.TEXT) { - return ObjectStorageTestUtils.getMinTextValue(columnName); - } - return super.getColumnWithMinValue(columnName, dataType); - } - - @Override - protected Column getColumnWithMaxValue(String columnName, DataType dataType) { - if (dataType == DataType.TEXT) { - return ObjectStorageTestUtils.getMaxTextValue(columnName); - } - return super.getColumnWithMaxValue(columnName, dataType); - } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSinglePartitionKeyIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSinglePartitionKeyIntegrationTest.java index 215993d078..5ce073100b 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSinglePartitionKeyIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSinglePartitionKeyIntegrationTest.java @@ -1,8 +1,6 @@ package com.scalar.db.storage.objectstorage; import com.scalar.db.api.DistributedStorageSinglePartitionKeyIntegrationTestBase; -import com.scalar.db.io.Column; -import com.scalar.db.io.DataType; import java.util.Properties; public class ObjectStorageSinglePartitionKeyIntegrationTest @@ -11,20 +9,4 @@ public class ObjectStorageSinglePartitionKeyIntegrationTest protected Properties getProperties(String testName) { return ObjectStorageEnv.getProperties(testName); } - - @Override - protected Column 
getColumnWithMinValue(String columnName, DataType dataType) { - if (dataType == DataType.TEXT) { - return ObjectStorageTestUtils.getMinTextValue(columnName); - } - return super.getColumnWithMinValue(columnName, dataType); - } - - @Override - protected Column getColumnWithMaxValue(String columnName, DataType dataType) { - if (dataType == DataType.TEXT) { - return ObjectStorageTestUtils.getMaxTextValue(columnName); - } - return super.getColumnWithMaxValue(columnName, dataType); - } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageTestUtils.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageTestUtils.java deleted file mode 100644 index 0263043fed..0000000000 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageTestUtils.java +++ /dev/null @@ -1,19 +0,0 @@ -package com.scalar.db.storage.objectstorage; - -import com.scalar.db.io.TextColumn; -import com.scalar.db.util.TestUtils; -import java.util.stream.IntStream; - -public class ObjectStorageTestUtils { - public static TextColumn getMinTextValue(String columnName) { - // Since ObjectStorage can't handle an empty string correctly, we use "0" as the min value - return TextColumn.of(columnName, "0"); - } - - public static TextColumn getMaxTextValue(String columnName) { - // Since ObjectStorage can't handle 0xFF character correctly, we use "ZZZ..." as the max value - StringBuilder builder = new StringBuilder(); - IntStream.range(0, TestUtils.MAX_TEXT_COUNT).forEach(i -> builder.append('Z')); - return TextColumn.of(columnName, builder.toString()); - } -} diff --git a/core/src/main/java/com/scalar/db/common/CoreError.java b/core/src/main/java/com/scalar/db/common/CoreError.java index 45ab74504a..99764e8ae5 100644 --- a/core/src/main/java/com/scalar/db/common/CoreError.java +++ b/core/src/main/java/com/scalar/db/common/CoreError.java @@ -898,7 +898,7 @@ public enum CoreError implements ScalarDbError { OBJECT_STORAGE_PRIMARY_KEY_CONTAINS_ILLEGAL_CHARACTER( Category.USER_ERROR, "0257", - "The value of the column %s in the primary key contains an illegal character. 
", + "The value of the column %s in the primary key contains an illegal character.", "", ""), OBJECT_STORAGE_CONDITION_OPERATION_NOT_SUPPORTED_FOR_BLOB_TYPE( diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorage.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorage.java index a3c5a3856b..88170ac942 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorage.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorage.java @@ -79,13 +79,15 @@ public Optional get(Get get) throws ExecutionException { new FilterableScanner( get, selectStatementHandler.handle(copyAndPrepareForDynamicFiltering(get))); } - Optional ret = scanner.one(); - if (!scanner.one().isPresent()) { - return ret; - } else { + Optional result = scanner.one(); + if (!result.isPresent()) { + return Optional.empty(); + } + if (scanner.one().isPresent()) { throw new IllegalArgumentException( CoreError.GET_OPERATION_USED_FOR_NON_EXACT_MATCH_SELECTION.buildMessage(get)); } + return result; } finally { if (scanner != null) { try { diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageMutation.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageMutation.java index 3556189489..fba170044c 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageMutation.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageMutation.java @@ -7,6 +7,7 @@ import com.scalar.db.io.Column; import java.util.Collection; import java.util.Collections; +import java.util.HashMap; import java.util.Map; import javax.annotation.Nonnull; import javax.annotation.concurrent.Immutable; @@ -22,9 +23,9 @@ public ObjectStorageRecord makeRecord() { Mutation mutation = (Mutation) getOperation(); if (mutation instanceof Delete) { - return new ObjectStorageRecord(); + throw new IllegalStateException("Delete mutation should not make a new record."); } - Put put = (Put) mutation; + Put put = (Put) getOperation(); return ObjectStorageRecord.newBuilder() .id(getRecordId()) @@ -40,13 +41,18 @@ public ObjectStorageRecord makeRecord(ObjectStorageRecord existingRecord) { Mutation mutation = (Mutation) getOperation(); if (mutation instanceof Delete) { - return new ObjectStorageRecord(); + throw new IllegalStateException("Delete mutation should not make a new record."); } Put put = (Put) mutation; - ObjectStorageRecord newRecord = new ObjectStorageRecord(existingRecord); - toMapForPut(put).forEach((k, v) -> newRecord.getValues().put(k, v)); - return newRecord; + Map newValues = new HashMap<>(existingRecord.getValues()); + newValues.putAll(toMapForPut(put)); + return ObjectStorageRecord.newBuilder() + .id(existingRecord.getId()) + .partitionKey(existingRecord.getPartitionKey()) + .clusteringKey(existingRecord.getClusteringKey()) + .values(newValues) + .build(); } private Map toMap(Collection> columns) { diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageRecord.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageRecord.java index 5f6ff6a035..f0693918b1 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageRecord.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageRecord.java @@ -1,5 +1,7 @@ package com.scalar.db.storage.objectstorage; +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; import 
edu.umd.cs.findbugs.annotations.SuppressFBWarnings; import java.util.Collections; import java.util.HashMap; @@ -16,16 +18,12 @@ public class ObjectStorageRecord { private final Map clusteringKey; private final Map values; - // The default constructor is required by Jackson to deserialize JSON object - public ObjectStorageRecord() { - this(null, null, null, null); - } - + @JsonCreator public ObjectStorageRecord( - @Nullable String id, - @Nullable Map partitionKey, - @Nullable Map clusteringKey, - @Nullable Map values) { + @JsonProperty("id") @Nullable String id, + @JsonProperty("partitionKey") @Nullable Map partitionKey, + @JsonProperty("clusteringKey") @Nullable Map clusteringKey, + @JsonProperty("values") @Nullable Map values) { this.id = id != null ? id : ""; this.partitionKey = partitionKey != null ? new HashMap<>(partitionKey) : Collections.emptyMap(); this.clusteringKey = @@ -42,15 +40,15 @@ public String getId() { } public Map getPartitionKey() { - return partitionKey; + return Collections.unmodifiableMap(partitionKey); } public Map getClusteringKey() { - return clusteringKey; + return Collections.unmodifiableMap(clusteringKey); } public Map getValues() { - return values; + return Collections.unmodifiableMap(values); } @Override diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/PartitionIdentifier.java b/core/src/main/java/com/scalar/db/storage/objectstorage/PartitionIdentifier.java index 41d65deb90..f0ce28baec 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/PartitionIdentifier.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/PartitionIdentifier.java @@ -30,7 +30,7 @@ public String getPartitionName() { @Override public int hashCode() { - return (namespaceName + tableName + partitionName).hashCode(); + return java.util.Objects.hash(namespaceName, tableName, partitionName); } @Override diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ResultInterpreter.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ResultInterpreter.java index 19246231c0..6eaa4d2066 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ResultInterpreter.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ResultInterpreter.java @@ -5,6 +5,7 @@ import com.scalar.db.common.ResultImpl; import com.scalar.db.io.Column; import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -18,7 +19,7 @@ public class ResultInterpreter { @SuppressFBWarnings("EI_EXPOSE_REP2") public ResultInterpreter(List projections, TableMetadata metadata) { - this.projections = Objects.requireNonNull(projections); + this.projections = Collections.unmodifiableList(Objects.requireNonNull(projections)); this.metadata = Objects.requireNonNull(metadata); } diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationTest.java index 4f8cd0a2c4..3177c64cd1 100644 --- a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationTest.java +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageMutationTest.java @@ -3,7 +3,6 @@ import static org.assertj.core.api.AssertionsForClassTypes.assertThat; import static org.mockito.Mockito.when; -import com.scalar.db.api.Delete; import com.scalar.db.api.Put; import com.scalar.db.api.TableMetadata; import com.scalar.db.io.Key; @@ -50,17 +49,6 @@ private Put 
preparePut() { .build(); } - private Delete prepareDelete() { - Key partitionKey = Key.ofText(ANY_NAME_1, ANY_TEXT_1); - Key clusteringKey = Key.ofText(ANY_NAME_2, ANY_TEXT_2); - return Delete.newBuilder() - .namespace(ANY_NAMESPACE_NAME) - .table(ANY_TABLE_NAME) - .partitionKey(partitionKey) - .clusteringKey(clusteringKey) - .build(); - } - @Test public void makeRecord_PutGiven_ShouldReturnWithValues() { // Arrange @@ -98,17 +86,4 @@ public void makeRecord_PutWithNullValueGiven_ShouldReturnWithValues() { Assertions.assertThat(actual.getValues().get(ANY_NAME_3)).isNull(); Assertions.assertThat(actual.getValues().get(ANY_NAME_4)).isEqualTo(ANY_INT_2); } - - @Test - public void makeRecord_DeleteGiven_ShouldReturnEmpty() { - // Arrange - Delete delete = prepareDelete(); - ObjectStorageMutation objectStorageMutation = new ObjectStorageMutation(delete, metadata); - - // Act - ObjectStorageRecord actual = objectStorageMutation.makeRecord(); - - // Assert - assertThat(actual.getId()).isEqualTo(""); - } } From 0a73daf9c75951cb6477423f62f9398028d6115f Mon Sep 17 00:00:00 2001 From: Kodai Doki Date: Fri, 7 Nov 2025 11:30:27 +0900 Subject: [PATCH 3/5] Fix to remove BLOB type limitations --- ...ommitIntegrationTestWithObjectStorage.java | 5 ---- ...ageConditionalMutationIntegrationTest.java | 28 ------------------- ...rageCrossPartitionScanIntegrationTest.java | 5 ---- ...tipleClusteringKeyScanIntegrationTest.java | 11 -------- ...ingleClusteringKeyScanIntegrationTest.java | 16 ----------- .../java/com/scalar/db/common/CoreError.java | 6 ---- .../ObjectStorageOperationChecker.java | 28 ------------------- 7 files changed, 99 deletions(-) diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitIntegrationTestWithObjectStorage.java index abb84e4a5d..2add0b3c09 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitIntegrationTestWithObjectStorage.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitIntegrationTestWithObjectStorage.java @@ -11,11 +11,6 @@ protected Properties getProps(String testName) { return ConsensusCommitObjectStorageEnv.getProperties(testName); } - @Override - protected boolean isTimestampTypeSupported() { - return false; - } - @Override @Disabled("Object Storage does not support index-related operations") public void get_GetGivenForIndexColumn_ShouldReturnRecords() {} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageConditionalMutationIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageConditionalMutationIntegrationTest.java index 69979feec3..2767c2f074 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageConditionalMutationIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageConditionalMutationIntegrationTest.java @@ -1,11 +1,7 @@ package com.scalar.db.storage.objectstorage; -import com.scalar.db.api.ConditionalExpression; import com.scalar.db.api.DistributedStorageConditionalMutationIntegrationTestBase; -import com.scalar.db.io.DataType; -import java.util.List; import java.util.Properties; -import java.util.stream.Collectors; public class ObjectStorageConditionalMutationIntegrationTest extends DistributedStorageConditionalMutationIntegrationTestBase { @@ 
-13,28 +9,4 @@ public class ObjectStorageConditionalMutationIntegrationTest protected Properties getProperties(String testName) { return ObjectStorageEnv.getProperties(testName); } - - @Override - protected int getThreadNum() { - return 3; - } - - @Override - protected List getOperatorAndDataTypeListForTest() { - return super.getOperatorAndDataTypeListForTest().stream() - .filter( - operatorAndDataType -> { - // Object Storage only supports EQ, NE, IS_NULL, and IS_NOT_NULL conditions for BLOB - // type - if (operatorAndDataType.getDataType() == DataType.BLOB) { - return operatorAndDataType.getOperator() == ConditionalExpression.Operator.EQ - || operatorAndDataType.getOperator() == ConditionalExpression.Operator.NE - || operatorAndDataType.getOperator() == ConditionalExpression.Operator.IS_NULL - || operatorAndDataType.getOperator() - == ConditionalExpression.Operator.IS_NOT_NULL; - } - return true; - }) - .collect(Collectors.toList()); - } } diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCrossPartitionScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCrossPartitionScanIntegrationTest.java index e3761048db..507e5f742a 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCrossPartitionScanIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageCrossPartitionScanIntegrationTest.java @@ -13,11 +13,6 @@ protected Properties getProperties(String testName) { return ObjectStorageEnv.getProperties(testName); } - @Override - protected int getThreadNum() { - return 3; - } - @Override protected boolean isParallelDdlSupported() { return false; diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultipleClusteringKeyScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultipleClusteringKeyScanIntegrationTest.java index 68b7b74d04..bf8693042f 100644 --- a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultipleClusteringKeyScanIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageMultipleClusteringKeyScanIntegrationTest.java @@ -1,10 +1,7 @@ package com.scalar.db.storage.objectstorage; import com.scalar.db.api.DistributedStorageMultipleClusteringKeyScanIntegrationTestBase; -import com.scalar.db.io.DataType; -import java.util.List; import java.util.Properties; -import java.util.stream.Collectors; public class ObjectStorageMultipleClusteringKeyScanIntegrationTest extends DistributedStorageMultipleClusteringKeyScanIntegrationTestBase { @@ -14,14 +11,6 @@ protected Properties getProperties(String testName) { return ObjectStorageEnv.getProperties(testName); } - @Override - protected List getDataTypes() { - // Return types without BLOB because blob is not supported for clustering key for now - return super.getDataTypes().stream() - .filter(type -> type != DataType.BLOB) - .collect(Collectors.toList()); - } - @Override protected boolean isParallelDdlSupported() { return false; diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSingleClusteringKeyScanIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSingleClusteringKeyScanIntegrationTest.java index 36727d5649..4ca86ca7e2 100644 --- 
a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSingleClusteringKeyScanIntegrationTest.java +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageSingleClusteringKeyScanIntegrationTest.java @@ -1,9 +1,6 @@ package com.scalar.db.storage.objectstorage; import com.scalar.db.api.DistributedStorageSingleClusteringKeyScanIntegrationTestBase; -import com.scalar.db.io.DataType; -import java.util.ArrayList; -import java.util.List; import java.util.Properties; public class ObjectStorageSingleClusteringKeyScanIntegrationTest @@ -12,17 +9,4 @@ public class ObjectStorageSingleClusteringKeyScanIntegrationTest protected Properties getProperties(String testName) { return ObjectStorageEnv.getProperties(testName); } - - @Override - protected List getClusteringKeyTypes() { - // Return types without BLOB because blob is not supported for clustering key for now - List clusteringKeyTypes = new ArrayList<>(); - for (DataType dataType : DataType.values()) { - if (dataType == DataType.BLOB) { - continue; - } - clusteringKeyTypes.add(dataType); - } - return clusteringKeyTypes; - } } diff --git a/core/src/main/java/com/scalar/db/common/CoreError.java b/core/src/main/java/com/scalar/db/common/CoreError.java index 99764e8ae5..0816892164 100644 --- a/core/src/main/java/com/scalar/db/common/CoreError.java +++ b/core/src/main/java/com/scalar/db/common/CoreError.java @@ -901,12 +901,6 @@ public enum CoreError implements ScalarDbError { "The value of the column %s in the primary key contains an illegal character.", "", ""), - OBJECT_STORAGE_CONDITION_OPERATION_NOT_SUPPORTED_FOR_BLOB_TYPE( - Category.USER_ERROR, - "0258", - "Object Storage supports only EQ, NE, IS_NULL, and IS_NOT_NULL operations for the BLOB type in conditions. 
Mutation: %s", - "", - ""), // // Errors for the concurrency error category diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationChecker.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationChecker.java index e5700df190..38322b88c4 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationChecker.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationChecker.java @@ -1,13 +1,10 @@ package com.scalar.db.storage.objectstorage; -import com.scalar.db.api.ConditionalExpression; import com.scalar.db.api.Delete; import com.scalar.db.api.Get; -import com.scalar.db.api.Mutation; import com.scalar.db.api.Operation; import com.scalar.db.api.Put; import com.scalar.db.api.Scan; -import com.scalar.db.api.TableMetadata; import com.scalar.db.common.CoreError; import com.scalar.db.common.StorageInfoProvider; import com.scalar.db.common.TableMetadataManager; @@ -18,7 +15,6 @@ import com.scalar.db.io.BlobColumn; import com.scalar.db.io.BooleanColumn; import com.scalar.db.io.ColumnVisitor; -import com.scalar.db.io.DataType; import com.scalar.db.io.DateColumn; import com.scalar.db.io.DoubleColumn; import com.scalar.db.io.FloatColumn; @@ -109,18 +105,12 @@ public void check(Scan scan) throws ExecutionException { public void check(Put put) throws ExecutionException { super.check(put); checkPrimaryKey(put); - - TableMetadata metadata = getTableMetadata(put); - checkCondition(put, metadata); } @Override public void check(Delete delete) throws ExecutionException { super.check(delete); checkPrimaryKey(delete); - - TableMetadata metadata = getTableMetadata(delete); - checkCondition(delete, metadata); } private void checkPrimaryKey(Operation operation) { @@ -133,22 +123,4 @@ private void checkPrimaryKey(Operation operation) { .ifPresent( c -> c.getColumns().forEach(column -> column.accept(PRIMARY_KEY_COLUMN_CHECKER))); } - - private void checkCondition(Mutation mutation, TableMetadata metadata) { - if (!mutation.getCondition().isPresent()) { - return; - } - for (ConditionalExpression expression : mutation.getCondition().get().getExpressions()) { - if (metadata.getColumnDataType(expression.getColumn().getName()) == DataType.BLOB) { - if (expression.getOperator() != ConditionalExpression.Operator.EQ - && expression.getOperator() != ConditionalExpression.Operator.NE - && expression.getOperator() != ConditionalExpression.Operator.IS_NULL - && expression.getOperator() != ConditionalExpression.Operator.IS_NOT_NULL) { - throw new IllegalArgumentException( - CoreError.OBJECT_STORAGE_CONDITION_OPERATION_NOT_SUPPORTED_FOR_BLOB_TYPE.buildMessage( - mutation)); - } - } - } - } } From dcc742ef7eab3a4d1888cf66ea098e9436cf4478 Mon Sep 17 00:00:00 2001 From: Kodai Doki Date: Fri, 7 Nov 2025 13:22:58 +0900 Subject: [PATCH 4/5] Update the OperationChecker unit test to reflect the code change --- .../ObjectStorageOperationCheckerTest.java | 32 +++++++++---------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationCheckerTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationCheckerTest.java index bf80c632f6..b68f4c56be 100644 --- a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationCheckerTest.java +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageOperationCheckerTest.java @@ -429,18 +429,18 @@ public void 
check_ForPutWithCondition_ShouldBehaveProperly() throws ExecutionExc operationChecker.check( buildPutWithCondition(putIf(column(COL4).isNotNullBlob()).build()))) .doesNotThrowAnyException(); - assertThatThrownBy( + assertThatCode( () -> operationChecker.check( buildPutWithCondition( putIf(column(COL4).isGreaterThanBlob(new byte[] {1, 2, 3})).build()))) - .isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy( + .doesNotThrowAnyException(); + assertThatCode( () -> operationChecker.check( buildPutWithCondition( putIf(column(COL4).isLessThanOrEqualToBlob(new byte[] {1, 2, 3})).build()))) - .isInstanceOf(IllegalArgumentException.class); + .doesNotThrowAnyException(); } @Test @@ -523,19 +523,19 @@ public void check_ForDeleteWithCondition_ShouldBehaveProperly() throws Execution operationChecker.check( buildDeleteWithCondition(deleteIf(column(COL4).isNotNullBlob()).build()))) .doesNotThrowAnyException(); - assertThatThrownBy( + assertThatCode( () -> operationChecker.check( buildDeleteWithCondition( deleteIf(column(COL4).isGreaterThanBlob(new byte[] {1, 2, 3})).build()))) - .isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy( + .doesNotThrowAnyException(); + assertThatCode( () -> operationChecker.check( buildDeleteWithCondition( deleteIf(column(COL4).isLessThanOrEqualToBlob(new byte[] {1, 2, 3})) .build()))) - .isInstanceOf(IllegalArgumentException.class); + .doesNotThrowAnyException(); } @Test @@ -652,15 +652,15 @@ public void check_ForMutationsWithPutWithCondition_ShouldBehaveProperly() Arrays.asList( buildPutWithCondition(putIf(column(COL4).isNotNullBlob()).build()), put))) .doesNotThrowAnyException(); - assertThatThrownBy( + assertThatCode( () -> operationChecker.check( Arrays.asList( buildPutWithCondition( putIf(column(COL4).isGreaterThanBlob(new byte[] {1, 2, 3})).build()), put))) - .isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy( + .doesNotThrowAnyException(); + assertThatCode( () -> operationChecker.check( Arrays.asList( @@ -668,7 +668,7 @@ public void check_ForMutationsWithPutWithCondition_ShouldBehaveProperly() putIf(column(COL4).isLessThanOrEqualToBlob(new byte[] {1, 2, 3})) .build()), put))) - .isInstanceOf(IllegalArgumentException.class); + .doesNotThrowAnyException(); } @Test @@ -788,15 +788,15 @@ public void check_ForMutationsWithDeleteWithCondition_ShouldBehaveProperly() buildDeleteWithCondition(deleteIf(column(COL4).isNotNullBlob()).build()), delete))) .doesNotThrowAnyException(); - assertThatThrownBy( + assertThatCode( () -> operationChecker.check( Arrays.asList( buildDeleteWithCondition( deleteIf(column(COL4).isGreaterThanBlob(new byte[] {1, 2, 3})).build()), delete))) - .isInstanceOf(IllegalArgumentException.class); - assertThatThrownBy( + .doesNotThrowAnyException(); + assertThatCode( () -> operationChecker.check( Arrays.asList( @@ -804,7 +804,7 @@ public void check_ForMutationsWithDeleteWithCondition_ShouldBehaveProperly() deleteIf(column(COL4).isLessThanOrEqualToBlob(new byte[] {1, 2, 3})) .build()), delete))) - .isInstanceOf(IllegalArgumentException.class); + .doesNotThrowAnyException(); } private Put buildPutWithCondition(MutationCondition condition) { From 533f3cd24d515bdfffb6d4e264f7ac1eceff23e4 Mon Sep 17 00:00:00 2001 From: Kodai Doki Date: Fri, 7 Nov 2025 16:32:55 +0900 Subject: [PATCH 5/5] Fix based on review --- .../db/storage/objectstorage/ClusteringKeyComparator.java | 8 ++++---- .../db/storage/objectstorage/SelectStatementHandler.java | 6 +++--- .../scalar/db/storage/objectstorage/StatementHandler.java | 2 +- 3 
files changed, 8 insertions(+), 8 deletions(-) diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ClusteringKeyComparator.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ClusteringKeyComparator.java index 7e5188c63a..6031edc2f8 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/ClusteringKeyComparator.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ClusteringKeyComparator.java @@ -4,6 +4,7 @@ import com.scalar.db.api.Scan; import com.scalar.db.api.TableMetadata; import com.scalar.db.io.Column; +import com.scalar.db.io.DataType; import java.util.Comparator; import java.util.Map; @@ -19,12 +20,11 @@ public int compare(Map clusteringKey1, Map clust for (String columnName : metadata.getClusteringKeyNames()) { Scan.Ordering.Order order = metadata.getClusteringOrder(columnName); + DataType dataType = metadata.getColumnDataType(columnName); Column column1 = - ColumnValueMapper.convert( - clusteringKey1.get(columnName), columnName, metadata.getColumnDataType(columnName)); + ColumnValueMapper.convert(clusteringKey1.get(columnName), columnName, dataType); Column column2 = - ColumnValueMapper.convert( - clusteringKey2.get(columnName), columnName, metadata.getColumnDataType(columnName)); + ColumnValueMapper.convert(clusteringKey2.get(columnName), columnName, dataType); int cmp = order == Scan.Ordering.Order.ASC diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/SelectStatementHandler.java b/core/src/main/java/com/scalar/db/storage/objectstorage/SelectStatementHandler.java index abc190b170..e473cbb51c 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/SelectStatementHandler.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/SelectStatementHandler.java @@ -205,12 +205,12 @@ private boolean isReverseOrder(Scan scan, TableMetadata metadata) { throw new IllegalArgumentException( CoreError.OPERATION_CHECK_ERROR_ORDERING_NOT_PROPERLY_SPECIFIED.buildMessage(scan)); } - boolean rightOrder = + boolean isValidOrder = ordering.getOrder() != metadata.getClusteringOrder(ordering.getColumnName()); if (reverse == null) { - reverse = rightOrder; + reverse = isValidOrder; } else { - if (reverse != rightOrder) { + if (reverse != isValidOrder) { throw new IllegalArgumentException( CoreError.OPERATION_CHECK_ERROR_ORDERING_NOT_PROPERLY_SPECIFIED.buildMessage(scan)); } diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/StatementHandler.java b/core/src/main/java/com/scalar/db/storage/objectstorage/StatementHandler.java index e86445e796..16841d575d 100644 --- a/core/src/main/java/com/scalar/db/storage/objectstorage/StatementHandler.java +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/StatementHandler.java @@ -128,7 +128,7 @@ protected void validateConditions( throw new ExecutionException( String.format( "A condition failed. ConditionalExpression: %s, Column: %s", - expectedColumn, actualColumn)); + expression, actualColumn)); } } }
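
The SelectStatementHandlerTest helpers above (createPartitionWithRecord, addRecordToPartition, buildRecordId) imply a simple storage layout: each partition is written as one object whose content maps a record ID (the partition-key value, a delimiter, and the clustering-key value concatenated) to the record itself. Below is a minimal stand-alone sketch of that layout; the delimiter value and the Record class are illustrative stand-ins, not the real ObjectStorageUtils.CONCATENATED_KEY_DELIMITER or ObjectStorageRecord.

import java.util.LinkedHashMap;
import java.util.Map;

public class PartitionLayoutSketch {
  // Assumed delimiter; the real value is defined in ObjectStorageUtils.
  private static final String DELIMITER = "\u0000";

  static final class Record {
    final Map<String, Object> partitionKey;
    final Map<String, Object> clusteringKey;
    final Map<String, Object> values;

    Record(Map<String, Object> partitionKey, Map<String, Object> clusteringKey, Map<String, Object> values) {
      this.partitionKey = partitionKey;
      this.clusteringKey = clusteringKey;
      this.values = values;
    }
  }

  // Record ID = partition-key value + delimiter + clustering-key value.
  static String recordId(String partitionKeyValue, String clusteringKeyValue) {
    return partitionKeyValue + DELIMITER + clusteringKeyValue;
  }

  public static void main(String[] args) {
    // A partition is a map from record ID to record; the whole map is
    // serialized and stored as a single object.
    Map<String, Record> partition = new LinkedHashMap<>();
    for (int i = 0; i < 3; i++) {
      partition.put(
          recordId("text1", "text2" + i),
          new Record(Map.of("p1", "text1"), Map.of("c1", "text2" + i), Map.of("col", i)));
    }
    partition.keySet().forEach(System.out::println);
  }
}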
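
StatementHandlerTest exercises one operator per case (EQ, NE, GT, GTE, LT, LTE, IS_NULL, IS_NOT_NULL) against stored values, including null columns. The sketch below approximates those comparison semantics with plain Comparable values; it is not the code added in StatementHandler.validateConditions, and the Operator enum and matches method are made up for illustration.

import java.util.Map;

public class ConditionCheckSketch {
  enum Operator { EQ, NE, GT, GTE, LT, LTE, IS_NULL, IS_NOT_NULL }

  @SuppressWarnings({"unchecked", "rawtypes"})
  static boolean matches(Object stored, Operator op, Object expected) {
    switch (op) {
      case IS_NULL:
        return stored == null;
      case IS_NOT_NULL:
        return stored != null;
      default:
        if (stored == null) {
          return false; // a null stored value fails every comparison, as the tests expect
        }
        int cmp = ((Comparable) stored).compareTo(expected);
        switch (op) {
          case EQ:
            return cmp == 0;
          case NE:
            return cmp != 0;
          case GT:
            return cmp > 0;
          case GTE:
            return cmp >= 0;
          case LT:
            return cmp < 0;
          default:
            return cmp <= 0; // LTE
        }
    }
  }

  public static void main(String[] args) {
    Map<String, Object> values = Map.of("col1", 10);
    System.out.println(matches(values.get("col1"), Operator.GTE, 10)); // true
    System.out.println(matches(values.get("col2"), Operator.EQ, 10)); // false: col2 is absent (null)
  }
}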
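
PATCH 2/5 rewrites ObjectStorageRecord to drop the Jackson no-arg constructor in favor of a @JsonCreator constructor with @JsonProperty-annotated parameters, which keeps the deserialized object immutable. The following is a minimal stand-alone example of that pattern, assuming jackson-databind is on the classpath; the Record class and its field set are simplified stand-ins, not the real ObjectStorageRecord.

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class JsonCreatorSketch {
  public static final class Record {
    private final String id;
    private final Map<String, Object> values;

    // Jackson binds JSON properties to constructor parameters, so no setters
    // or mutable default constructor are needed.
    @JsonCreator
    public Record(
        @JsonProperty("id") String id,
        @JsonProperty("values") Map<String, Object> values) {
      this.id = id != null ? id : "";
      this.values = values != null ? new HashMap<>(values) : Collections.emptyMap();
    }

    public String getId() { return id; }

    public Map<String, Object> getValues() { return Collections.unmodifiableMap(values); }
  }

  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    Record r = mapper.readValue("{\"id\":\"p1#c1\",\"values\":{\"col\":1}}", Record.class);
    System.out.println(r.getId() + " " + r.getValues()); // p1#c1 {col=1}
  }
}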
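
PATCH 2/5 also replaces (namespaceName + tableName + partitionName).hashCode() with Objects.hash(...) in PartitionIdentifier. The reason is that plain concatenation erases field boundaries, so distinct identifiers can collapse onto the same string and therefore the same hash. The field values below are made up purely to demonstrate the collision.

import java.util.Objects;

public class HashCodeSketch {
  public static void main(String[] args) {
    // "ns" + "1tbl" + "p" and "ns1" + "tbl" + "p" both concatenate to "ns1tblp",
    // so their hash codes are forced to collide.
    System.out.println(("ns" + "1tbl" + "p").hashCode() == ("ns1" + "tbl" + "p").hashCode()); // true
    // Objects.hash keeps the field boundaries, so these triples hash differently.
    System.out.println(Objects.hash("ns", "1tbl", "p") == Objects.hash("ns1", "tbl", "p")); // false for these values
  }
}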
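
PATCH 5/5 tidies ClusteringKeyComparator so the column's DataType is looked up once per column before converting both sides. The sketch below shows the general shape of such a per-column comparison, where clustering-key columns are compared in declaration order and a DESC column negates the result; the Order enum, column names, and String-only values are illustrative, not the real ScalarDB types.

import java.util.Comparator;
import java.util.List;
import java.util.Map;

public class ClusteringOrderSketch {
  enum Order { ASC, DESC }

  @SuppressWarnings({"unchecked", "rawtypes"})
  static Comparator<Map<String, Object>> comparator(
      List<String> clusteringKeyNames, Map<String, Order> orders) {
    return (ck1, ck2) -> {
      for (String name : clusteringKeyNames) {
        int cmp = ((Comparable) ck1.get(name)).compareTo(ck2.get(name));
        if (orders.get(name) == Order.DESC) {
          cmp = -cmp; // a DESC column simply flips the comparison
        }
        if (cmp != 0) {
          return cmp;
        }
      }
      return 0; // all clustering-key columns are equal
    };
  }

  public static void main(String[] args) {
    Comparator<Map<String, Object>> cmp = comparator(List.of("c1"), Map.of("c1", Order.ASC));
    System.out.println(cmp.compare(Map.of("c1", "text20"), Map.of("c1", "text22")) < 0); // true
  }
}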