diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 76b183ede2..404cade9da 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -2122,6 +2122,76 @@ jobs:
           name: tidb_8_5_integration_test_reports_${{ matrix.mode.label }}
           path: core/build/reports/tests/integrationTestJdbc

+  integration-test-for-blob-storage:
+    name: Blob Storage integration test (${{ matrix.mode.label }})
+    runs-on: ubuntu-latest
+
+    strategy:
+      fail-fast: false
+      matrix:
+        mode:
+          - label: default
+            group_commit_enabled: false
+          - label: with_group_commit
+            group_commit_enabled: true
+
+    services:
+      azurite:
+        image: mcr.microsoft.com/azure-storage/azurite
+        env:
+          AZURITE_ACCOUNTS: "test:test"
+        ports:
+          - 10000:10000
+
+    steps:
+      - uses: actions/checkout@v5
+
+      - name: Set up JDK ${{ env.JAVA_VERSION }} (${{ env.JAVA_VENDOR }})
+        uses: actions/setup-java@v5
+        with:
+          java-version: ${{ env.JAVA_VERSION }}
+          distribution: ${{ env.JAVA_VENDOR }}
+
+      - name: Set up JDK ${{ env.INT_TEST_JAVA_RUNTIME_VERSION }} (${{ env.INT_TEST_JAVA_RUNTIME_VENDOR }}) to run integration test
+        uses: actions/setup-java@v5
+        if: ${{ env.SET_UP_INT_TEST_RUNTIME_NON_ORACLE_JDK == 'true'}}
+        with:
+          java-version: ${{ env.INT_TEST_JAVA_RUNTIME_VERSION }}
+          distribution: ${{ env.INT_TEST_JAVA_RUNTIME_VENDOR }}
+
+      - name: Login to Oracle container registry
+        uses: docker/login-action@v3
+        if: ${{ env.INT_TEST_JAVA_RUNTIME_VENDOR == 'oracle' }}
+        with:
+          registry: container-registry.oracle.com
+          username: ${{ secrets.OCR_USERNAME }}
+          password: ${{ secrets.OCR_TOKEN }}
+
+      - name: Set up JDK ${{ env.INT_TEST_JAVA_RUNTIME_VERSION }} (oracle) to run the integration test
+        if: ${{ env.INT_TEST_JAVA_RUNTIME_VENDOR == 'oracle' }}
+        run: |
+          container_id=$(docker create "container-registry.oracle.com/java/jdk:${{ env.INT_TEST_JAVA_RUNTIME_VERSION }}")
+          docker cp -L "$container_id:/usr/java/default" /usr/lib/jvm/oracle-jdk && docker rm "$container_id"
+
+      - name: Setup Gradle
+        uses: gradle/actions/setup-gradle@v5
+
+      - name: Create Blob Storage container
+        run: |
+          az storage container create \
+            --name test-container \
+            --connection-string "DefaultEndpointsProtocol=http;AccountName=test;AccountKey=test;BlobEndpoint=http://localhost:10000/test;"
+
+      - name: Execute Gradle 'integrationTestObjectStorage' task
+        run: ./gradlew integrationTestObjectStorage -Dscalardb.object_storage.endpoint=http://localhost:10000/test/test-container -Dscalardb.object_storage.username=test -Dscalardb.object_storage.password=test ${{ matrix.mode.group_commit_enabled && env.INT_TEST_GRADLE_OPTIONS_FOR_GROUP_COMMIT || '' }}
+
+      - name: Upload Gradle test reports
+        if: always()
+        uses: actions/upload-artifact@v4
+        with:
+          name: blob_storage_integration_test_reports_${{ matrix.mode.label }}
+          path: core/build/reports/tests/integrationTestObjectStorage
+
   integration-test-for-multi-storage:
     name: Multi-storage integration test (${{ matrix.mode.label }})
     runs-on: ubuntu-latest
diff --git a/build.gradle b/build.gradle
index c9df4db940..8207acaa38 100644
--- a/build.gradle
+++ b/build.gradle
@@ -28,6 +28,7 @@ subprojects {
         slf4jVersion = '1.7.36'
         cassandraDriverVersion = '3.11.5'
         azureCosmosVersion = '4.75.0'
+        azureBlobStorageVersion = '12.31.3'
        jooqVersion = '3.14.16'
        awssdkVersion = '2.37.3'
        commonsDbcp2Version = '2.13.0'
diff --git a/core/build.gradle b/core/build.gradle
index 811bd26d19..1b86bff0df 100644
--- a/core/build.gradle
+++ b/core/build.gradle
@@ -64,6 +64,16 @@ sourceSets {
        }
        resources.srcDir file('src/integration-test/resources')
} + integrationTestObjectStorage { + java { + compileClasspath += main.output + test.output + runtimeClasspath += main.output + test.output + srcDir file('src/integration-test/java') + include '**/com/scalar/db/common/*.java' + include '**/com/scalar/db/storage/objectstorage/*.java' + } + resources.srcDir file('src/integration-test/resources') + } integrationTestMultiStorage { java { compileClasspath += main.output + test.output @@ -136,6 +146,9 @@ configurations { integrationTestJdbcImplementation.extendsFrom testImplementation integrationTestJdbcRuntimeOnly.extendsFrom testRuntimeOnly integrationTestJdbcCompileOnly.extendsFrom testCompileOnly + integrationTestObjectStorageImplementation.extendsFrom testImplementation + integrationTestObjectStorageRuntimeOnly.extendsFrom testRuntimeOnly + integrationTestObjectStorageCompileOnly.extendsFrom testCompileOnly integrationTestMultiStorageImplementation.extendsFrom testImplementation integrationTestMultiStorageRuntimeOnly.extendsFrom testRuntimeOnly integrationTestMultiStorageCompileOnly.extendsFrom testCompileOnly @@ -156,6 +169,7 @@ dependencies { implementation "org.slf4j:slf4j-api:${slf4jVersion}" implementation "com.datastax.cassandra:cassandra-driver-core:${cassandraDriverVersion}" implementation "com.azure:azure-cosmos:${azureCosmosVersion}" + implementation "com.azure:azure-storage-blob:${azureBlobStorageVersion}" implementation "org.jooq:jooq:${jooqVersion}" implementation platform("software.amazon.awssdk:bom:${awssdkVersion}") implementation 'software.amazon.awssdk:applicationautoscaling' @@ -254,6 +268,17 @@ task integrationTestJdbc(type: Test) { maxHeapSize = "4g" } +task integrationTestObjectStorage(type: Test) { + description = 'Runs the integration tests for Object Storages.' + group = 'verification' + testClassesDirs = sourceSets.integrationTestObjectStorage.output.classesDirs + classpath = sourceSets.integrationTestObjectStorage.runtimeClasspath + outputs.upToDateWhen { false } // ensures integration tests are run every time when called + options { + systemProperties(System.getProperties().findAll{it.key.toString().startsWith("scalardb")}) + } +} + task integrationTestMultiStorage(type: Test) { description = 'Runs the integration tests for multi-storage.' 
group = 'verification' diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..c8fdd98764 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminIntegrationTestWithObjectStorage.java @@ -0,0 +1,143 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.transaction.consensuscommit.ConsensusCommitAdminIntegrationTestBase; +import com.scalar.db.transaction.consensuscommit.ConsensusCommitConfig; +import com.scalar.db.transaction.consensuscommit.Coordinator; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; + +public class ConsensusCommitAdminIntegrationTestWithObjectStorage + extends ConsensusCommitAdminIntegrationTestBase { + + @Override + protected Properties getProps(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected String getSystemNamespaceName(Properties properties) { + return ObjectStorageUtils.getObjectStorageConfig(new DatabaseConfig(properties)) + .getMetadataNamespace(); + } + + @Override + protected String getCoordinatorNamespaceName(String testName) { + return new ConsensusCommitConfig(new DatabaseConfig(getProperties(testName))) + .getCoordinatorNamespace() + .orElse(Coordinator.NAMESPACE); + } + + @Override + @Disabled("Temporarily disabled because it includes DML operations") + public void truncateTable_ShouldTruncateProperly() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorrectly() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void createIndex_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void createIndex_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void createIndex_ForAlreadyExistingIndex_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void createIndex_IfNotExists_ForAlreadyExistingIndex_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void dropIndex_ForAllDataTypesWithExistingData_ShouldDropIndexCorrectly() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void dropIndex_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void dropIndex_ForNonExistingIndex_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void dropIndex_IfExists_ForNonExistingIndex_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_DropColumnForEachExistingDataType_ShouldDropColumnsCorrectly() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void 
dropColumnFromTable_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForPrimaryKeyColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForIndexedColumn_ShouldDropColumnAndIndexCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ShouldRenameColumnCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForPrimaryKeyColumn_ShouldRenameColumnCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForIndexKeyColumn_ShouldRenameColumnAndIndexCorrectly() {} + + @Override + @Disabled("Object Storage does not support altering column types") + public void + alterColumnType_AlterColumnTypeFromEachExistingDataTypeToText_ShouldAlterColumnTypesCorrectly() {} + + @Override + @Disabled("Object Storage does not support altering column types") + public void alterColumnType_WideningConversion_ShouldAlterColumnTypesCorrectly() {} + + @Override + @Disabled("Object Storage does not support altering column types") + public void alterColumnType_ForPrimaryKeyOrIndexKeyColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_ForExistingTable_ShouldRenameTableCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_IfNewTableNameAlreadyExists_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_ForExistingTableWithIndexes_ShouldRenameTableAndIndexesCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_IfOnlyOneTableExists_ShouldRenameTableCorrectly() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminRepairTableIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminRepairTableIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..05d3d32602 --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ConsensusCommitAdminRepairTableIntegrationTestWithObjectStorage.java @@ -0,0 +1,29 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.transaction.consensuscommit.ConsensusCommitAdminRepairTableIntegrationTestBase; +import com.scalar.db.util.AdminTestUtils; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; + +public class 
ConsensusCommitAdminRepairTableIntegrationTestWithObjectStorage + extends ConsensusCommitAdminRepairTableIntegrationTestBase { + + @Override + protected Properties getProps(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected AdminTestUtils getAdminTestUtils(String testName) { + return new ObjectStorageAdminTestUtils(getProperties(testName)); + } + + @Override + @Disabled("Object Storage recreates missing coordinator tables") + public void + repairTableAndCoordinatorTable_CoordinatorTablesDoNotExist_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage recreates missing coordinator tables") + public void repairTable_ForNonExistingTable_ShouldThrowIllegalArgument() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java new file mode 100644 index 0000000000..41f474c60b --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminCaseSensitivityIntegrationTest.java @@ -0,0 +1,155 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageAdminCaseSensitivityIntegrationTestBase; +import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.util.AdminTestUtils; +import java.util.Map; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; + +public class ObjectStorageAdminCaseSensitivityIntegrationTest + extends DistributedStorageAdminCaseSensitivityIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected String getSystemNamespaceName(Properties properties) { + return ObjectStorageUtils.getObjectStorageConfig(new DatabaseConfig(properties)) + .getMetadataNamespace(); + } + + @Override + protected Map getCreationOptions() { + return ObjectStorageEnv.getCreationOptions(); + } + + @Override + protected AdminTestUtils getAdminTestUtils(String testName) { + return new ObjectStorageAdminTestUtils(getProperties(testName)); + } + + @Override + @Disabled("Temporarily disabled because it includes DML operations") + public void truncateTable_ShouldTruncateProperly() {} + + @Override + @Disabled("Object Storage does not have a concept of namespaces") + public void + dropNamespace_ForNamespaceWithNonScalarDBManagedTables_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorrectly() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void createIndex_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void createIndex_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void createIndex_ForAlreadyExistingIndex_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void createIndex_IfNotExists_ForAlreadyExistingIndex_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + 
public void dropIndex_ForAllDataTypesWithExistingData_ShouldDropIndexCorrectly() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void dropIndex_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void dropIndex_ForNonExistingIndex_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void dropIndex_IfExists_ForNonExistingIndex_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_DropColumnForEachExistingDataType_ShouldDropColumnsCorrectly() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForPrimaryKeyColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForIndexedColumn_ShouldDropColumnAndIndexCorrectly() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_IfExists_ForNonExistingColumn_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ShouldRenameColumnCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForPrimaryKeyColumn_ShouldRenameColumnCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForIndexKeyColumn_ShouldRenameColumnAndIndexCorrectly() {} + + @Override + @Disabled("Object Storage does not support altering column types") + public void + alterColumnType_AlterColumnTypeFromEachExistingDataTypeToText_ShouldAlterColumnTypesCorrectly() {} + + @Override + @Disabled("Object Storage does not support altering column types") + public void alterColumnType_WideningConversion_ShouldAlterColumnTypesCorrectly() {} + + @Override + @Disabled("Object Storage does not support altering column types") + public void alterColumnType_ForPrimaryKeyOrIndexKeyColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_ForExistingTable_ShouldRenameTableCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_IfNewTableNameAlreadyExists_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public 
void renameTable_ForExistingTableWithIndexes_ShouldRenameTableAndIndexesCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_IfOnlyOneTableExists_ShouldRenameTableCorrectly() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java new file mode 100644 index 0000000000..8e5b59fbed --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminIntegrationTest.java @@ -0,0 +1,153 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorageAdminIntegrationTestBase; +import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.util.AdminTestUtils; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; + +public class ObjectStorageAdminIntegrationTest extends DistributedStorageAdminIntegrationTestBase { + + @Override + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected String getSystemNamespaceName(Properties properties) { + return ObjectStorageUtils.getObjectStorageConfig(new DatabaseConfig(properties)) + .getMetadataNamespace(); + } + + @Override + protected boolean isIndexOnBooleanColumnSupported() { + return false; + } + + @Override + protected AdminTestUtils getAdminTestUtils(String testName) { + return new ObjectStorageAdminTestUtils(getProperties(testName)); + } + + @Override + @Disabled("Temporarily disabled because it includes DML operations") + public void truncateTable_ShouldTruncateProperly() {} + + @Override + @Disabled("Object Storage does not have a concept of namespaces") + public void + dropNamespace_ForNamespaceWithNonScalarDBManagedTables_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorrectly() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void createIndex_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void createIndex_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void createIndex_ForAlreadyExistingIndex_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void createIndex_IfNotExists_ForAlreadyExistingIndex_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void dropIndex_ForAllDataTypesWithExistingData_ShouldDropIndexCorrectly() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void dropIndex_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void dropIndex_ForNonExistingIndex_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void dropIndex_IfExists_ForNonExistingIndex_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does 
not support dropping columns") + public void dropColumnFromTable_DropColumnForEachExistingDataType_ShouldDropColumnsCorrectly() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForPrimaryKeyColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForIndexedColumn_ShouldDropColumnAndIndexCorrectly() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_IfExists_ForNonExistingColumn_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ShouldRenameColumnCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForPrimaryKeyColumn_ShouldRenameColumnCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForIndexKeyColumn_ShouldRenameColumnAndIndexCorrectly() {} + + @Override + @Disabled("Object Storage does not support altering column types") + public void + alterColumnType_AlterColumnTypeFromEachExistingDataTypeToText_ShouldAlterColumnTypesCorrectly() {} + + @Override + @Disabled("Object Storage does not support altering column types") + public void alterColumnType_WideningConversion_ShouldAlterColumnTypesCorrectly() {} + + @Override + @Disabled("Object Storage does not support altering column types") + public void alterColumnType_ForPrimaryKeyOrIndexKeyColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_ForExistingTable_ShouldRenameTableCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_IfNewTableNameAlreadyExists_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_ForExistingTableWithIndexes_ShouldRenameTableAndIndexesCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_IfOnlyOneTableExists_ShouldRenameTableCorrectly() {} +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminRepairTableIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminRepairTableIntegrationTest.java new file mode 100644 index 0000000000..923e518764 --- /dev/null +++ 
b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminRepairTableIntegrationTest.java
@@ -0,0 +1,24 @@
+package com.scalar.db.storage.objectstorage;
+
+import com.scalar.db.api.DistributedStorageAdminRepairTableIntegrationTestBase;
+import com.scalar.db.util.AdminTestUtils;
+import java.util.Properties;
+import org.junit.jupiter.api.Disabled;
+
+public class ObjectStorageAdminRepairTableIntegrationTest
+    extends DistributedStorageAdminRepairTableIntegrationTestBase {
+
+  @Override
+  protected Properties getProperties(String testName) {
+    return ObjectStorageEnv.getProperties(testName);
+  }
+
+  @Override
+  protected AdminTestUtils getAdminTestUtils(String testName) {
+    return new ObjectStorageAdminTestUtils(getProperties(testName));
+  }
+
+  @Override
+  @Disabled("Object Storage recreates missing coordinator tables")
+  public void repairTable_ForNonExistingTable_ShouldThrowIllegalArgument() {}
+}
diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTestUtils.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTestUtils.java
new file mode 100644
index 0000000000..4acc53373f
--- /dev/null
+++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTestUtils.java
@@ -0,0 +1,111 @@
+package com.scalar.db.storage.objectstorage;
+
+import com.fasterxml.jackson.core.type.TypeReference;
+import com.scalar.db.config.DatabaseConfig;
+import com.scalar.db.util.AdminTestUtils;
+import java.util.Collections;
+import java.util.LinkedHashSet;
+import java.util.Map;
+import java.util.Optional;
+import java.util.Properties;
+
+public class ObjectStorageAdminTestUtils extends AdminTestUtils {
+  private final ObjectStorageWrapper wrapper;
+  private final String metadataNamespace;
+
+  public ObjectStorageAdminTestUtils(Properties properties) {
+    super(properties);
+    ObjectStorageConfig objectStorageConfig =
+        ObjectStorageUtils.getObjectStorageConfig(new DatabaseConfig(properties));
+    wrapper = ObjectStorageWrapperFactory.create(objectStorageConfig);
+    metadataNamespace = objectStorageConfig.getMetadataNamespace();
+  }
+
+  @Override
+  public void dropMetadataTable() {
+    // Object Storage does not have a concept of table
+  }
+
+  @Override
+  public void truncateMetadataTable() throws Exception {
+    try {
+      wrapper.delete(
+          ObjectStorageUtils.getObjectKey(
+              metadataNamespace, ObjectStorageAdmin.TABLE_METADATA_TABLE));
+    } catch (PreconditionFailedException e) {
+      // The table metadata table object does not exist, so do nothing
+    }
+  }
+
+  @Override
+  public void corruptMetadata(String namespace, String table) throws Exception {
+    String objectKey =
+        ObjectStorageUtils.getObjectKey(metadataNamespace, ObjectStorageAdmin.TABLE_METADATA_TABLE);
+    Optional<ObjectStorageWrapperResponse> response = wrapper.get(objectKey);
+    if (!response.isPresent()) {
+      throw new IllegalArgumentException("The specified table metadata does not exist");
+    }
+    Map<String, ObjectStorageTableMetadata> metadataTable =
+        Serializer.deserialize(
+            response.get().getPayload(),
+            new TypeReference<Map<String, ObjectStorageTableMetadata>>() {});
+
+    String tableMetadataKey =
+        String.join(
+            String.valueOf(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER), namespace, table);
+    metadataTable.put(
+        tableMetadataKey,
+        ObjectStorageTableMetadata.newBuilder()
+            .partitionKeyNames(new LinkedHashSet<>(Collections.singletonList("corrupted")))
+            .build());
+
+    wrapper.update(objectKey, Serializer.serialize(metadataTable), response.get().getVersion());
+  }
+
+  @Override
+  public void deleteMetadata(String namespace, String table) throws Exception {
+    String objectKey =
+        ObjectStorageUtils.getObjectKey(metadataNamespace, ObjectStorageAdmin.TABLE_METADATA_TABLE);
+    Optional<ObjectStorageWrapperResponse> response = wrapper.get(objectKey);
+    if (!response.isPresent()) {
+      throw new IllegalArgumentException("The specified table metadata does not exist");
+    }
+    Map<String, ObjectStorageTableMetadata> metadataTable =
+        Serializer.deserialize(
+            response.get().getPayload(),
+            new TypeReference<Map<String, ObjectStorageTableMetadata>>() {});
+
+    String tableMetadataKey =
+        String.join(
+            String.valueOf(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER), namespace, table);
+    metadataTable.remove(tableMetadataKey);
+
+    if (metadataTable.isEmpty()) {
+      wrapper.delete(objectKey);
+    } else {
+      wrapper.update(objectKey, Serializer.serialize(metadataTable), response.get().getVersion());
+    }
+  }
+
+  @Override
+  public boolean namespaceExists(String namespace) {
+    // Object Storage does not have a concept of namespace
+    return true;
+  }
+
+  @Override
+  public boolean tableExists(String namespace, String table) {
+    // Object Storage does not have a concept of table
+    return true;
+  }
+
+  @Override
+  public void dropTable(String namespace, String table) {
+    // Object Storage does not have a concept of table
+  }
+
+  @Override
+  public void close() {
+    // Do nothing
+  }
+}
diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageEnv.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageEnv.java
new file mode 100644
index 0000000000..4345c32ef7
--- /dev/null
+++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageEnv.java
@@ -0,0 +1,49 @@
+package com.scalar.db.storage.objectstorage;
+
+import com.scalar.db.config.DatabaseConfig;
+import com.scalar.db.storage.objectstorage.blobstorage.BlobStorageConfig;
+import java.util.Collections;
+import java.util.Map;
+import java.util.Properties;
+
+public class ObjectStorageEnv {
+  private static final String PROP_OBJECT_STORAGE_ENDPOINT = "scalardb.object_storage.endpoint";
+  private static final String PROP_OBJECT_STORAGE_USERNAME = "scalardb.object_storage.username";
+  private static final String PROP_OBJECT_STORAGE_PASSWORD = "scalardb.object_storage.password";
+
+  private static final String DEFAULT_OBJECT_STORAGE_ENDPOINT =
+      "http://localhost:10000/test/test-container";
+  private static final String DEFAULT_OBJECT_STORAGE_USERNAME = "test";
+  private static final String DEFAULT_OBJECT_STORAGE_PASSWORD = "test";
+
+  private ObjectStorageEnv() {}
+
+  public static Properties getProperties(String testName) {
+    String accountName =
+        System.getProperty(PROP_OBJECT_STORAGE_USERNAME, DEFAULT_OBJECT_STORAGE_USERNAME);
+    String accountKey =
+        System.getProperty(PROP_OBJECT_STORAGE_PASSWORD, DEFAULT_OBJECT_STORAGE_PASSWORD);
+    String endpoint =
+        System.getProperty(PROP_OBJECT_STORAGE_ENDPOINT, DEFAULT_OBJECT_STORAGE_ENDPOINT);
+
+    Properties properties = new Properties();
+    properties.setProperty(DatabaseConfig.CONTACT_POINTS, endpoint);
+    properties.setProperty(DatabaseConfig.USERNAME, accountName);
+    properties.setProperty(DatabaseConfig.PASSWORD, accountKey);
+    properties.setProperty(DatabaseConfig.STORAGE, BlobStorageConfig.STORAGE_NAME);
+    properties.setProperty(DatabaseConfig.CROSS_PARTITION_SCAN, "true");
+    properties.setProperty(DatabaseConfig.CROSS_PARTITION_SCAN_FILTERING, "true");
+    properties.setProperty(DatabaseConfig.CROSS_PARTITION_SCAN_ORDERING, "false");
+
+    // Add testName as a metadata namespace suffix
+    properties.setProperty(
BlobStorageConfig.TABLE_METADATA_NAMESPACE, + DatabaseConfig.DEFAULT_SYSTEM_NAMESPACE_NAME + "_" + testName); + + return properties; + } + + public static Map getCreationOptions() { + return Collections.emptyMap(); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperIntegrationTest.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperIntegrationTest.java new file mode 100644 index 0000000000..e32ab2ebac --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperIntegrationTest.java @@ -0,0 +1,287 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatCode; + +import com.scalar.db.config.DatabaseConfig; +import java.util.Optional; +import java.util.Properties; +import java.util.Set; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@TestInstance(TestInstance.Lifecycle.PER_CLASS) +public class ObjectStorageWrapperIntegrationTest { + private static final Logger logger = + LoggerFactory.getLogger(ObjectStorageWrapperIntegrationTest.class); + + private static final String TEST_NAME = "object_storage_wrapper_integration_test"; + private static final String TEST_KEY1 = "test-key1"; + private static final String TEST_KEY2 = "test-key2"; + private static final String TEST_KEY3 = "test-key3"; + private static final String TEST_KEY_PREFIX = "test-key"; + private static final String TEST_OBJECT1 = "test-object1"; + private static final String TEST_OBJECT2 = "test-object2"; + private static final String TEST_OBJECT3 = "test-object3"; + + protected ObjectStorageWrapper wrapper; + + @BeforeAll + public void beforeAll() throws ObjectStorageWrapperException { + Properties properties = getProperties(TEST_NAME); + ObjectStorageConfig objectStorageConfig = + ObjectStorageUtils.getObjectStorageConfig(new DatabaseConfig(properties)); + wrapper = ObjectStorageWrapperFactory.create(objectStorageConfig); + createObjects(); + } + + @AfterAll + public void afterAll() { + try { + deleteObjects(); + } catch (Exception e) { + logger.warn("Failed to delete objects", e); + } + + try { + if (wrapper != null) { + wrapper.close(); + } + } catch (Exception e) { + logger.warn("Failed to close wrapper", e); + } + } + + protected Properties getProperties(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + private void createObjects() throws ObjectStorageWrapperException { + wrapper.insert(TEST_KEY1, TEST_OBJECT1); + wrapper.insert(TEST_KEY2, TEST_OBJECT2); + wrapper.insert(TEST_KEY3, TEST_OBJECT3); + } + + protected void deleteObjects() throws ObjectStorageWrapperException { + wrapper.delete(TEST_KEY1); + wrapper.delete(TEST_KEY2); + wrapper.delete(TEST_KEY3); + } + + @Test + public void get_ExistingObjectKeyGiven_ShouldReturnCorrectObject() throws Exception { + // Arrange + + // Act + Optional response = wrapper.get(TEST_KEY1); + + // Assert + assertThat(response.isPresent()).isTrue(); + assertThat(response.get().getPayload()).isEqualTo(TEST_OBJECT1); + } + + @Test + public void get_NonExistingObjectKeyGiven_ShouldReturnEmptyOptional() throws Exception { + // Arrange + + // Act + Optional response = wrapper.get("non-existing-key"); + + // Assert + 
assertThat(response.isPresent()).isFalse(); + } + + @Test + public void insert_NewObjectKeyGiven_ShouldInsertObjectSuccessfully() throws Exception { + // Arrange + String objectKey = "new-object-key"; + String object = "new-object"; + + try { + // Act + wrapper.insert(objectKey, object); + + // Assert + Optional response = wrapper.get(objectKey); + assertThat(response.isPresent()).isTrue(); + assertThat(response.get().getPayload()).isEqualTo(object); + } finally { + wrapper.delete(objectKey); + } + } + + @Test + public void insert_ExistingObjectKeyGiven_ShouldThrowPreconditionFailedException() { + // Arrange + + // Act Assert + assertThatCode(() -> wrapper.insert(TEST_KEY2, "another-object")) + .isInstanceOf(PreconditionFailedException.class); + } + + @Test + public void update_ExistingObjectKeyGiven_ShouldUpdateObjectSuccessfully() throws Exception { + // Arrange + String updatedObject = "updated-object2"; + Optional response1 = wrapper.get(TEST_KEY2); + assertThat(response1.isPresent()).isTrue(); + String version = response1.get().getVersion(); + + try { + // Act + wrapper.update(TEST_KEY2, updatedObject, version); + + // Assert + Optional response2 = wrapper.get(TEST_KEY2); + assertThat(response2.isPresent()).isTrue(); + assertThat(response2.get().getPayload()).isEqualTo(updatedObject); + } finally { + wrapper.delete(TEST_KEY2); + wrapper.insert(TEST_KEY2, TEST_OBJECT2); + } + } + + @Test + public void update_NonExistingObjectKeyGiven_ShouldThrowPreconditionFailedException() { + // Arrange + String objectKey = "non-existing-key"; + + // Act Assert + assertThatCode(() -> wrapper.update(objectKey, "some-object", "some-version")) + .isInstanceOf(PreconditionFailedException.class); + } + + @Test + public void update_WrongVersionGiven_ShouldThrowPreconditionFailedException() throws Exception { + // Arrange + String wrongVersion = "wrong-version"; + + // Act Assert + assertThatCode(() -> wrapper.update(TEST_KEY2, "another-object", wrongVersion)) + .isInstanceOf(PreconditionFailedException.class); + } + + @Test + public void delete_ExistingObjectKeyGiven_ShouldDeleteObjectSuccessfully() throws Exception { + // Arrange + Optional response1 = wrapper.get(TEST_KEY3); + assertThat(response1.isPresent()).isTrue(); + + try { + // Act + wrapper.delete(TEST_KEY3); + + // Assert + Optional response2 = wrapper.get(TEST_KEY3); + assertThat(response2.isPresent()).isFalse(); + } finally { + wrapper.insert(TEST_KEY3, TEST_OBJECT3); + } + } + + @Test + public void delete_NonExistingObjectKeyGiven_ShouldThrowPreconditionFailedException() { + // Arrange + String objectKey = "non-existing-key"; + + // Act Assert + assertThatCode(() -> wrapper.delete(objectKey)).isInstanceOf(PreconditionFailedException.class); + } + + @Test + public void delete_ExistingObjectKeyWithCorrectVersionGiven_ShouldDeleteObjectSuccessfully() + throws Exception { + // Arrange + Optional response1 = wrapper.get(TEST_KEY1); + assertThat(response1.isPresent()).isTrue(); + String version = response1.get().getVersion(); + + try { + // Act + wrapper.delete(TEST_KEY1, version); + + // Assert + Optional response2 = wrapper.get(TEST_KEY1); + assertThat(response2.isPresent()).isFalse(); + } finally { + wrapper.insert(TEST_KEY1, TEST_OBJECT1); + } + } + + @Test + public void delete_ExistingObjectKeyWithWrongVersionGiven_ShouldThrowPreconditionFailedException() + throws Exception { + // Arrange + Optional response1 = wrapper.get(TEST_KEY1); + assertThat(response1.isPresent()).isTrue(); + String wrongVersion = "wrong-version"; + + // Act Assert + 
assertThatCode(() -> wrapper.delete(TEST_KEY1, wrongVersion)) + .isInstanceOf(PreconditionFailedException.class); + } + + @Test + public void getKeys_WithPrefix_ShouldReturnCorrectKeys() throws Exception { + // Arrange + + // Act + Set keys = wrapper.getKeys(TEST_KEY_PREFIX); + + // Assert + assertThat(keys).containsExactlyInAnyOrder(TEST_KEY1, TEST_KEY2, TEST_KEY3); + } + + @Test + public void getKeys_WithNonExistingPrefix_ShouldReturnEmptySet() throws Exception { + // Arrange + String nonExistingPrefix = "non-existing-prefix"; + + // Act + Set keys = wrapper.getKeys(nonExistingPrefix); + + // Assert + assertThat(keys).isEmpty(); + } + + @Test + public void deleteByPrefix_WithExistingPrefix_ShouldDeleteObjectsSuccessfully() throws Exception { + // Arrange + + try { + // Act + wrapper.deleteByPrefix(TEST_KEY_PREFIX); + + // Assert + Set keys = wrapper.getKeys(TEST_KEY_PREFIX); + assertThat(keys).isEmpty(); + } finally { + createObjects(); + } + } + + @Test + public void deleteByPrefix_WithNonExistingPrefix_ShouldDoNothing() throws Exception { + // Arrange + String nonExistingPrefix = "non-existing-prefix"; + + // Act + wrapper.deleteByPrefix(nonExistingPrefix); + + // Assert + Set keys = wrapper.getKeys(TEST_KEY_PREFIX); + assertThat(keys).containsExactlyInAnyOrder(TEST_KEY1, TEST_KEY2, TEST_KEY3); + } + + @Test + public void close_ShouldNotThrowException() { + // Arrange + + // Act Assert + assertThatCode(() -> wrapper.close()).doesNotThrowAnyException(); + } +} diff --git a/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java new file mode 100644 index 0000000000..18da22a7ff --- /dev/null +++ b/core/src/integration-test/java/com/scalar/db/storage/objectstorage/SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage.java @@ -0,0 +1,134 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.transaction.singlecrudoperation.SingleCrudOperationTransactionAdminIntegrationTestBase; +import java.util.Properties; +import org.junit.jupiter.api.Disabled; + +public class SingleCrudOperationTransactionAdminIntegrationTestWithObjectStorage + extends SingleCrudOperationTransactionAdminIntegrationTestBase { + + @Override + protected Properties getProps(String testName) { + return ObjectStorageEnv.getProperties(testName); + } + + @Override + protected String getSystemNamespaceName(Properties properties) { + return ObjectStorageUtils.getObjectStorageConfig(new DatabaseConfig(properties)) + .getMetadataNamespace(); + } + + @Override + @Disabled("Temporarily disabled because it includes DML operations") + public void truncateTable_ShouldTruncateProperly() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void createIndex_ForAllDataTypesWithExistingData_ShouldCreateIndexesCorrectly() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void createIndex_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void createIndex_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void 
createIndex_ForAlreadyExistingIndex_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void createIndex_IfNotExists_ForAlreadyExistingIndex_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void dropIndex_ForAllDataTypesWithExistingData_ShouldDropIndexCorrectly() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void dropIndex_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void dropIndex_ForNonExistingIndex_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support index-related operations") + public void dropIndex_IfExists_ForNonExistingIndex_ShouldNotThrowAnyException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_DropColumnForEachExistingDataType_ShouldDropColumnsCorrectly() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForPrimaryKeyColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support dropping columns") + public void dropColumnFromTable_ForIndexedColumn_ShouldDropColumnAndIndexCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ShouldRenameColumnCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForNonExistingColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForPrimaryKeyColumn_ShouldRenameColumnCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming columns") + public void renameColumn_ForIndexKeyColumn_ShouldRenameColumnAndIndexCorrectly() {} + + @Override + @Disabled("Object Storage does not support altering column types") + public void + alterColumnType_AlterColumnTypeFromEachExistingDataTypeToText_ShouldAlterColumnTypesCorrectly() {} + + @Override + @Disabled("Object Storage does not support altering column types") + public void alterColumnType_WideningConversion_ShouldAlterColumnTypesCorrectly() {} + + @Override + @Disabled("Object Storage does not support altering column types") + public void alterColumnType_ForPrimaryKeyOrIndexKeyColumn_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_ForExistingTable_ShouldRenameTableCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_ForNonExistingTable_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public 
void renameTable_IfNewTableNameAlreadyExists_ShouldThrowIllegalArgumentException() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_ForExistingTableWithIndexes_ShouldRenameTableAndIndexesCorrectly() {} + + @Override + @Disabled("Object Storage does not support renaming tables") + public void renameTable_IfOnlyOneTableExists_ShouldRenameTableCorrectly() {} +} diff --git a/core/src/main/java/com/scalar/db/common/CoreError.java b/core/src/main/java/com/scalar/db/common/CoreError.java index 25857f0759..fa097bad68 100644 --- a/core/src/main/java/com/scalar/db/common/CoreError.java +++ b/core/src/main/java/com/scalar/db/common/CoreError.java @@ -687,19 +687,27 @@ public enum CoreError implements ScalarDbError { COSMOS_DROP_COLUMN_NOT_SUPPORTED( Category.USER_ERROR, "0217", - "Cosmos DB does not support the dropping column feature", + "Cosmos DB does not support the feature for dropping columns", "", ""), DYNAMO_DROP_COLUMN_NOT_SUPPORTED( - Category.USER_ERROR, "0218", "DynamoDB does not support the dropping column feature", "", ""), + Category.USER_ERROR, + "0218", + "DynamoDB does not support the feature for dropping columns", + "", + ""), COSMOS_RENAME_COLUMN_NOT_SUPPORTED( Category.USER_ERROR, "0219", - "Cosmos DB does not support the renaming column feature", + "Cosmos DB does not support the feature for renaming columns", "", ""), DYNAMO_RENAME_COLUMN_NOT_SUPPORTED( - Category.USER_ERROR, "0220", "DynamoDB does not support the renaming column feature", "", ""), + Category.USER_ERROR, + "0220", + "DynamoDB does not support the feature for renaming columns", + "", + ""), CASSANDRA_RENAME_NON_PRIMARY_KEY_COLUMN_NOT_SUPPORTED( Category.USER_ERROR, "0221", @@ -775,25 +783,25 @@ public enum CoreError implements ScalarDbError { CASSANDRA_ALTER_COLUMN_TYPE_NOT_SUPPORTED( Category.USER_ERROR, "0235", - "Cassandra does not support the altering column type feature", + "Cassandra does not support the feature for altering column types", "", ""), COSMOS_ALTER_COLUMN_TYPE_NOT_SUPPORTED( Category.USER_ERROR, "0236", - "Cosmos DB does not support the altering column type feature", + "Cosmos DB does not support the feature for altering column types", "", ""), DYNAMO_ALTER_COLUMN_TYPE_NOT_SUPPORTED( Category.USER_ERROR, "0237", - "DynamoDB does not support the altering column type feature", + "DynamoDB does not support the feature for altering column types", "", ""), JDBC_SQLITE_ALTER_COLUMN_TYPE_NOT_SUPPORTED( Category.USER_ERROR, "0238", - "SQLite does not support the altering column type feature", + "SQLite does not support the feature for altering column types", "", ""), JDBC_ORACLE_UNSUPPORTED_COLUMN_TYPE_CONVERSION( @@ -849,6 +857,38 @@ public enum CoreError implements ScalarDbError { "The namespace has non-ScalarDB tables and cannot be dropped. 
Namespace: %s; Tables in the namespace: %s", "", ""), + OBJECT_STORAGE_IMPORT_NOT_SUPPORTED( + Category.USER_ERROR, + "0250", + "Import-related functionality is not supported in Object Storage", + "", + ""), + OBJECT_STORAGE_INDEX_NOT_SUPPORTED( + Category.USER_ERROR, + "0251", + "Index-related functionality is not supported in Object Storage", + "", + ""), + OBJECT_STORAGE_DROP_COLUMN_NOT_SUPPORTED( + Category.USER_ERROR, + "0252", + "Object Storage does not support the feature for dropping columns", + "", + ""), + OBJECT_STORAGE_RENAME_COLUMN_NOT_SUPPORTED( + Category.USER_ERROR, + "0253", + "Object Storage does not support the feature for renaming columns", + "", + ""), + OBJECT_STORAGE_RENAME_TABLE_NOT_SUPPORTED( + Category.USER_ERROR, "0254", "Renaming tables is not supported in Object Storage", "", ""), + OBJECT_STORAGE_ALTER_COLUMN_TYPE_NOT_SUPPORTED( + Category.USER_ERROR, + "0255", + "Object Storage does not support the feature for altering column types", + "", + ""), // // Errors for the concurrency error category diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ConcatenationVisitor.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ConcatenationVisitor.java new file mode 100644 index 0000000000..3141ed3ff7 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ConcatenationVisitor.java @@ -0,0 +1,136 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.io.BigIntColumn; +import com.scalar.db.io.BlobColumn; +import com.scalar.db.io.BooleanColumn; +import com.scalar.db.io.ColumnVisitor; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.DoubleColumn; +import com.scalar.db.io.FloatColumn; +import com.scalar.db.io.IntColumn; +import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; +import java.util.ArrayList; +import java.util.Base64; +import java.util.List; +import javax.annotation.concurrent.NotThreadSafe; + +/** A visitor class to make a concatenated key string for the partition key. 
*/ +@NotThreadSafe +public class ConcatenationVisitor implements ColumnVisitor { + private final List columns; + + public ConcatenationVisitor() { + columns = new ArrayList<>(); + } + + public String build() { + return String.join(String.valueOf(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER), columns); + } + + /** + * Sets the specified {@code BooleanColumn} to the key string + * + * @param column a {@code BooleanColumn} to be set + */ + @Override + public void visit(BooleanColumn column) { + assert !column.hasNullValue(); + columns.add(String.valueOf(column.getBooleanValue())); + } + + /** + * Sets the specified {@code IntColumn} to the key string + * + * @param column a {@code IntColumn} to be set + */ + @Override + public void visit(IntColumn column) { + assert !column.hasNullValue(); + columns.add(String.valueOf(column.getIntValue())); + } + + /** + * Sets the specified {@code BigIntColumn} to the key string + * + * @param column a {@code BigIntColumn} to be set + */ + @Override + public void visit(BigIntColumn column) { + assert !column.hasNullValue(); + columns.add(String.valueOf(column.getBigIntValue())); + } + + /** + * Sets the specified {@code FloatColumn} to the key string + * + * @param column a {@code FloatColumn} to be set + */ + @Override + public void visit(FloatColumn column) { + assert !column.hasNullValue(); + columns.add(String.valueOf(column.getFloatValue())); + } + + /** + * Sets the specified {@code DoubleColumn} to the key string + * + * @param column a {@code DoubleColumn} to be set + */ + @Override + public void visit(DoubleColumn column) { + assert !column.hasNullValue(); + columns.add(String.valueOf(column.getDoubleValue())); + } + + /** + * Sets the specified {@code TextColumn} to the key string + * + * @param column a {@code TextColumn} to be set + */ + @Override + public void visit(TextColumn column) { + assert !column.hasNullValue(); + column.getValue().ifPresent(columns::add); + } + + /** + * Sets the specified {@code BlobColumn} to the key string + * + * @param column a {@code BlobColumn} to be set + */ + @Override + public void visit(BlobColumn column) { + assert !column.hasNullValue(); + // Use Base64 encoding + columns.add( + Base64.getUrlEncoder().withoutPadding().encodeToString(column.getBlobValueAsBytes())); + } + + @Override + public void visit(DateColumn column) { + assert !column.hasNullValue(); + columns.add(String.valueOf(TimeRelatedColumnEncodingUtils.encode(column))); + } + + @Override + public void visit(TimeColumn column) { + assert !column.hasNullValue(); + columns.add(String.valueOf(TimeRelatedColumnEncodingUtils.encode(column))); + } + + @Override + public void visit(TimestampColumn column) { + assert !column.hasNullValue(); + columns.add(String.valueOf(TimeRelatedColumnEncodingUtils.encode(column))); + } + + @Override + public void visit(TimestampTZColumn column) { + assert !column.hasNullValue(); + columns.add(String.valueOf(TimeRelatedColumnEncodingUtils.encode(column))); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java new file mode 100644 index 0000000000..937e9702f7 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageAdmin.java @@ -0,0 +1,448 @@ +package com.scalar.db.storage.objectstorage; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.google.common.base.Splitter; +import com.google.inject.Inject; +import 
com.scalar.db.api.DistributedStorageAdmin; +import com.scalar.db.api.StorageInfo; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.common.CoreError; +import com.scalar.db.common.StorageInfoImpl; +import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.exception.storage.ExecutionException; +import com.scalar.db.io.DataType; +import com.scalar.db.util.ScalarDbUtils; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; +import javax.annotation.Nullable; + +public class ObjectStorageAdmin implements DistributedStorageAdmin { + public static final String NAMESPACE_METADATA_TABLE = "namespaces"; + public static final String TABLE_METADATA_TABLE = "metadata"; + + private static final StorageInfo STORAGE_INFO = + new StorageInfoImpl( + "object_storage", + StorageInfo.MutationAtomicityUnit.PARTITION, + // No limit on the number of mutations + Integer.MAX_VALUE); + + private final ObjectStorageWrapper wrapper; + private final String metadataNamespace; + + @Inject + public ObjectStorageAdmin(DatabaseConfig databaseConfig) { + ObjectStorageConfig objectStorageConfig = + ObjectStorageUtils.getObjectStorageConfig(databaseConfig); + wrapper = ObjectStorageWrapperFactory.create(objectStorageConfig); + metadataNamespace = objectStorageConfig.getMetadataNamespace(); + } + + ObjectStorageAdmin(ObjectStorageWrapper wrapper, ObjectStorageConfig objectStorageConfig) { + this.wrapper = wrapper; + metadataNamespace = objectStorageConfig.getMetadataNamespace(); + } + + @Override + public StorageInfo getStorageInfo(String namespace) throws ExecutionException { + return STORAGE_INFO; + } + + @Override + public void close() { + wrapper.close(); + } + + @Override + public void createNamespace(String namespace, Map options) + throws ExecutionException { + try { + // Insert the namespace metadata + Map readVersionMap = new HashMap<>(); + Map metadataTable = + getNamespaceMetadataTable(readVersionMap); + assert !metadataTable.containsKey(namespace); + if (metadataTable.isEmpty()) { + Map newMetadataTable = + Collections.singletonMap(namespace, new ObjectStorageNamespaceMetadata(namespace)); + insertMetadataTable(NAMESPACE_METADATA_TABLE, newMetadataTable); + } else { + metadataTable.put(namespace, new ObjectStorageNamespaceMetadata(namespace)); + updateMetadataTable( + NAMESPACE_METADATA_TABLE, metadataTable, readVersionMap.get(NAMESPACE_METADATA_TABLE)); + } + } catch (Exception e) { + throw new ExecutionException( + String.format("Failed to create the namespace %s", namespace), e); + } + } + + @Override + public void createTable( + String namespace, String table, TableMetadata metadata, Map options) + throws ExecutionException { + try { + // Insert the table metadata + String tableMetadataKey = getTableMetadataKey(namespace, table); + Map readVersionMap = new HashMap<>(); + Map metadataTable = getTableMetadataTable(readVersionMap); + assert !metadataTable.containsKey(tableMetadataKey); + if (metadataTable.isEmpty()) { + Map newMetadataTable = + Collections.singletonMap(tableMetadataKey, new ObjectStorageTableMetadata(metadata)); + insertMetadataTable(TABLE_METADATA_TABLE, newMetadataTable); + } else { + metadataTable.put(tableMetadataKey, new ObjectStorageTableMetadata(metadata)); + updateMetadataTable( + TABLE_METADATA_TABLE, metadataTable, readVersionMap.get(TABLE_METADATA_TABLE)); + } + } catch (Exception e) { + throw new 
ExecutionException( + String.format( + "Failed to create the table %s", ScalarDbUtils.getFullTableName(namespace, table)), + e); + } + } + + @Override + public void dropTable(String namespace, String table) throws ExecutionException { + try { + deleteTableData(namespace, table); + // Delete the table metadata + String tableMetadataKey = getTableMetadataKey(namespace, table); + Map readVersionMap = new HashMap<>(); + Map metadataTable = getTableMetadataTable(readVersionMap); + assert metadataTable.containsKey(tableMetadataKey); + metadataTable.remove(tableMetadataKey); + String readVersion = readVersionMap.get(TABLE_METADATA_TABLE); + if (metadataTable.isEmpty()) { + deleteMetadataTable(TABLE_METADATA_TABLE, readVersion); + } else { + updateMetadataTable(TABLE_METADATA_TABLE, metadataTable, readVersion); + } + } catch (Exception e) { + throw new ExecutionException( + String.format( + "Failed to drop the table %s", ScalarDbUtils.getFullTableName(namespace, table)), + e); + } + } + + @Override + public void createIndex( + String namespace, String table, String columnName, Map options) + throws ExecutionException { + throw new UnsupportedOperationException( + CoreError.OBJECT_STORAGE_INDEX_NOT_SUPPORTED.buildMessage()); + } + + @Override + public void dropIndex(String namespace, String table, String columnName) + throws ExecutionException { + throw new UnsupportedOperationException( + CoreError.OBJECT_STORAGE_INDEX_NOT_SUPPORTED.buildMessage()); + } + + @Override + public void dropNamespace(String namespace) throws ExecutionException { + try { + // Delete the namespace metadata + Map readVersionMap = new HashMap<>(); + Map metadataTable = + getNamespaceMetadataTable(readVersionMap); + assert metadataTable.containsKey(namespace); + metadataTable.remove(namespace); + String readVersion = readVersionMap.get(NAMESPACE_METADATA_TABLE); + if (metadataTable.isEmpty()) { + deleteMetadataTable(NAMESPACE_METADATA_TABLE, readVersion); + } else { + updateMetadataTable(NAMESPACE_METADATA_TABLE, metadataTable, readVersion); + } + } catch (Exception e) { + throw new ExecutionException(String.format("Failed to drop the namespace %s", namespace), e); + } + } + + @Override + public void truncateTable(String namespace, String table) throws ExecutionException { + try { + deleteTableData(namespace, table); + } catch (Exception e) { + throw new ExecutionException( + String.format( + "Failed to truncate the table %s", ScalarDbUtils.getFullTableName(namespace, table)), + e); + } + } + + @Nullable + @Override + public TableMetadata getTableMetadata(String namespace, String table) throws ExecutionException { + try { + String tableMetadataKey = getTableMetadataKey(namespace, table); + Map metadataTable = getTableMetadataTable(); + ObjectStorageTableMetadata tableMetadata = metadataTable.get(tableMetadataKey); + return tableMetadata != null ? 
tableMetadata.toTableMetadata() : null; + } catch (Exception e) { + throw new ExecutionException( + String.format( + "Failed to get the table metadata of %s", + ScalarDbUtils.getFullTableName(namespace, table)), + e); + } + } + + @Override + public Set getNamespaceTableNames(String namespace) throws ExecutionException { + try { + if (!namespaceExists(namespace)) { + return Collections.emptySet(); + } + Map metadataTable = getTableMetadataTable(); + return metadataTable.keySet().stream() + .filter( + tableMetadataKey -> + getNamespaceNameFromTableMetadataKey(tableMetadataKey).equals(namespace)) + .map(ObjectStorageAdmin::getTableNameFromTableMetadataKey) + .collect(Collectors.toSet()); + } catch (Exception e) { + throw new ExecutionException( + String.format("Failed to get the tables in the namespace %s", namespace), e); + } + } + + @Override + public boolean namespaceExists(String namespace) throws ExecutionException { + if (metadataNamespace.equals(namespace)) { + return true; + } + try { + return getNamespaceMetadataTable().containsKey(namespace); + } catch (Exception e) { + throw new ExecutionException( + String.format("Failed to check the existence of the namespace %s", namespace), e); + } + } + + @Override + public void importTable( + String namespace, + String table, + Map options, + Map overrideColumnsType) + throws ExecutionException { + throw new UnsupportedOperationException( + CoreError.OBJECT_STORAGE_IMPORT_NOT_SUPPORTED.buildMessage()); + } + + @Override + public void repairTable( + String namespace, String table, TableMetadata metadata, Map options) + throws ExecutionException { + try { + // Upsert the table metadata + String tableMetadataKey = getTableMetadataKey(namespace, table); + Map readVersionMap = new HashMap<>(); + Map metadataTable = getTableMetadataTable(readVersionMap); + if (metadataTable.isEmpty()) { + insertMetadataTable( + TABLE_METADATA_TABLE, + Collections.singletonMap(tableMetadataKey, new ObjectStorageTableMetadata(metadata))); + } else { + metadataTable.put(tableMetadataKey, new ObjectStorageTableMetadata(metadata)); + updateMetadataTable( + TABLE_METADATA_TABLE, metadataTable, readVersionMap.get(TABLE_METADATA_TABLE)); + } + } catch (Exception e) { + throw new ExecutionException( + String.format( + "Failed to repair the table %s", ScalarDbUtils.getFullTableName(namespace, table)), + e); + } + } + + @Override + public void addNewColumnToTable( + String namespace, String table, String columnName, DataType columnType) + throws ExecutionException { + try { + // Update the table metadata + String tableMetadataKey = getTableMetadataKey(namespace, table); + Map readVersionMap = new HashMap<>(); + Map metadataTable = getTableMetadataTable(readVersionMap); + TableMetadata currentTableMetadata = metadataTable.get(tableMetadataKey).toTableMetadata(); + TableMetadata updatedTableMetadata = + TableMetadata.newBuilder(currentTableMetadata).addColumn(columnName, columnType).build(); + metadataTable.put(tableMetadataKey, new ObjectStorageTableMetadata(updatedTableMetadata)); + updateMetadataTable( + TABLE_METADATA_TABLE, metadataTable, readVersionMap.get(TABLE_METADATA_TABLE)); + } catch (Exception e) { + throw new ExecutionException( + String.format( + "Failed to add a new column to the table %s", + ScalarDbUtils.getFullTableName(namespace, table)), + e); + } + } + + @Override + public void dropColumnFromTable(String namespace, String table, String columnName) + throws ExecutionException { + throw new UnsupportedOperationException( + 
CoreError.OBJECT_STORAGE_DROP_COLUMN_NOT_SUPPORTED.buildMessage()); + } + + @Override + public void renameColumn( + String namespace, String table, String oldColumnName, String newColumnName) + throws ExecutionException { + throw new UnsupportedOperationException( + CoreError.OBJECT_STORAGE_RENAME_COLUMN_NOT_SUPPORTED.buildMessage()); + } + + @Override + public void alterColumnType( + String namespace, String table, String columnName, DataType newColumnType) + throws ExecutionException { + throw new UnsupportedOperationException( + CoreError.OBJECT_STORAGE_ALTER_COLUMN_TYPE_NOT_SUPPORTED.buildMessage()); + } + + @Override + public void renameTable(String namespace, String oldTableName, String newTableName) + throws ExecutionException { + throw new UnsupportedOperationException( + CoreError.OBJECT_STORAGE_RENAME_TABLE_NOT_SUPPORTED.buildMessage()); + } + + @Override + public Set getNamespaceNames() throws ExecutionException { + try { + Set namespaceNames = new HashSet<>(getNamespaceMetadataTable().keySet()); + namespaceNames.add(metadataNamespace); + return namespaceNames; + } catch (Exception e) { + throw new ExecutionException("Failed to get the namespace names", e); + } + } + + private Map getNamespaceMetadataTable() + throws ExecutionException { + return getNamespaceMetadataTable(null); + } + + private Map getNamespaceMetadataTable( + @Nullable Map readVersionMap) throws ExecutionException { + try { + Optional response = + wrapper.get(ObjectStorageUtils.getObjectKey(metadataNamespace, NAMESPACE_METADATA_TABLE)); + if (!response.isPresent()) { + return Collections.emptyMap(); + } + if (readVersionMap != null) { + readVersionMap.put(NAMESPACE_METADATA_TABLE, response.get().getVersion()); + } + return Serializer.deserialize( + response.get().getPayload(), + new TypeReference>() {}); + } catch (ObjectStorageWrapperException e) { + throw new ExecutionException("Failed to get the metadata table.", e); + } + } + + private Map getTableMetadataTable() + throws ExecutionException { + return getTableMetadataTable(null); + } + + private Map getTableMetadataTable( + @Nullable Map readVersionMap) throws ExecutionException { + try { + Optional response = + wrapper.get(ObjectStorageUtils.getObjectKey(metadataNamespace, TABLE_METADATA_TABLE)); + if (!response.isPresent()) { + return Collections.emptyMap(); + } + if (readVersionMap != null) { + readVersionMap.put(TABLE_METADATA_TABLE, response.get().getVersion()); + } + return Serializer.deserialize( + response.get().getPayload(), + new TypeReference>() {}); + } catch (ObjectStorageWrapperException e) { + throw new ExecutionException("Failed to get the metadata table.", e); + } + } + + private void insertMetadataTable(String table, Map metadataTable) + throws ExecutionException { + try { + wrapper.insert( + ObjectStorageUtils.getObjectKey(metadataNamespace, table), + Serializer.serialize(metadataTable)); + } catch (ObjectStorageWrapperException e) { + throw new ExecutionException("Failed to insert the metadata table.", e); + } + } + + private void updateMetadataTable( + String table, Map metadataTable, String readVersion) throws ExecutionException { + try { + wrapper.update( + ObjectStorageUtils.getObjectKey(metadataNamespace, table), + Serializer.serialize(metadataTable), + readVersion); + } catch (Exception e) { + throw new ExecutionException("Failed to update the metadata table.", e); + } + } + + private void deleteMetadataTable(String table, String readVersion) throws ExecutionException { + try { + 
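The metadata tables above are maintained with optimistic concurrency: a get returns the object's version (an ETag on Blob Storage), and update/delete pass that version back as a precondition, so concurrent writers fail with PreconditionFailedException instead of silently overwriting each other. A minimal sketch of that read-modify-write flow against ObjectStorageWrapper; the helper name, key, and payload are placeholders for illustration:

package com.scalar.db.storage.objectstorage;

import java.util.Optional;

public class MetadataUpsertSketch {
  // Hypothetical helper: create the object if absent, otherwise update it
  // conditionally on the version that was read beforehand.
  static void upsert(ObjectStorageWrapper wrapper, String key, String json)
      throws ObjectStorageWrapperException {
    Optional<ObjectStorageWrapperResponse> current = wrapper.get(key);
    if (!current.isPresent()) {
      // Fails with PreconditionFailedException if another writer created it first.
      wrapper.insert(key, json);
    } else {
      // Fails with PreconditionFailedException if the version (ETag) no longer matches.
      wrapper.update(key, json, current.get().getVersion());
    }
  }
}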
wrapper.delete(ObjectStorageUtils.getObjectKey(metadataNamespace, table), readVersion); + } catch (Exception e) { + throw new ExecutionException("Failed to delete the metadata table.", e); + } + } + + private void deleteTableData(String namespace, String table) throws ExecutionException { + try { + wrapper.deleteByPrefix(ObjectStorageUtils.getObjectKey(namespace, table, "")); + } catch (Exception e) { + throw new ExecutionException( + String.format( + "Failed to delete the table data of %s", + ScalarDbUtils.getFullTableName(namespace, table)), + e); + } + } + + private static String getTableMetadataKey(String namespace, String table) { + return String.join( + String.valueOf(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER), namespace, table); + } + + private static String getNamespaceNameFromTableMetadataKey(String tableMetadataKey) { + List parts = + Splitter.on(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER).splitToList(tableMetadataKey); + if (parts.size() != 2 || parts.get(0).isEmpty()) { + throw new IllegalArgumentException("Invalid table metadata key: " + tableMetadataKey); + } + return parts.get(0); + } + + private static String getTableNameFromTableMetadataKey(String tableMetadataKey) { + List parts = + Splitter.on(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER).splitToList(tableMetadataKey); + if (parts.size() != 2 || parts.get(1).isEmpty()) { + throw new IllegalArgumentException("Invalid table metadata key: " + tableMetadataKey); + } + return parts.get(1); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageConfig.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageConfig.java new file mode 100644 index 0000000000..adfd517208 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageConfig.java @@ -0,0 +1,46 @@ +package com.scalar.db.storage.objectstorage; + +public interface ObjectStorageConfig { + + /** + * Returns the storage name. + * + * @return the storage name + */ + String getStorageName(); + + /** + * Returns the endpoint for the object storage service. + * + * @return the endpoint + */ + String getEndpoint(); + + /** + * Returns the username for authentication. + * + * @return the username + */ + String getUsername(); + + /** + * Returns the password for authentication. + * + * @return the password + */ + String getPassword(); + + /** + * Returns the bucket name. + * + * @return the bucket name + */ + String getBucket(); + + /** + * Returns the metadata namespace. + * + * @return the metadata namespace + */ + String getMetadataNamespace(); +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageNamespaceMetadata.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageNamespaceMetadata.java new file mode 100644 index 0000000000..3c4e0588db --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageNamespaceMetadata.java @@ -0,0 +1,39 @@ +package com.scalar.db.storage.objectstorage; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; +import javax.annotation.Nullable; +import javax.annotation.concurrent.Immutable; + +@Immutable +public class ObjectStorageNamespaceMetadata { + private final String name; + + @JsonCreator + public ObjectStorageNamespaceMetadata(@JsonProperty("name") @Nullable String name) { + this.name = name != null ? 
name : ""; + } + + public String getName() { + return name; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ObjectStorageNamespaceMetadata)) { + return false; + } + ObjectStorageNamespaceMetadata that = (ObjectStorageNamespaceMetadata) o; + + return name.equals(that.name); + } + + @Override + public int hashCode() { + return Objects.hash(name); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageProvider.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageProvider.java new file mode 100644 index 0000000000..0d7c0f35a8 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageProvider.java @@ -0,0 +1,20 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.api.DistributedStorage; +import com.scalar.db.api.DistributedStorageAdmin; +import com.scalar.db.api.DistributedStorageProvider; +import com.scalar.db.common.CommonDistributedStorageAdmin; +import com.scalar.db.config.DatabaseConfig; + +public interface ObjectStorageProvider extends DistributedStorageProvider { + + @Override + default DistributedStorage createDistributedStorage(DatabaseConfig config) { + return null; + } + + @Override + default DistributedStorageAdmin createDistributedStorageAdmin(DatabaseConfig config) { + return new CommonDistributedStorageAdmin(new ObjectStorageAdmin(config)); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java new file mode 100644 index 0000000000..38cbfcfad0 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageTableMetadata.java @@ -0,0 +1,202 @@ +package com.scalar.db.storage.objectstorage; + +import com.fasterxml.jackson.annotation.JsonCreator; +import com.fasterxml.jackson.annotation.JsonProperty; +import com.scalar.db.api.Scan; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.DataType; +import edu.umd.cs.findbugs.annotations.SuppressFBWarnings; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.LinkedHashSet; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Collectors; +import javax.annotation.Nullable; +import javax.annotation.concurrent.Immutable; + +@SuppressFBWarnings({"EI_EXPOSE_REP", "EI_EXPOSE_REP2"}) +@Immutable +public class ObjectStorageTableMetadata { + private final LinkedHashSet partitionKeyNames; + private final LinkedHashSet clusteringKeyNames; + private final Map clusteringOrders; + private final Set secondaryIndexNames; + private final Map columns; + + @JsonCreator + public ObjectStorageTableMetadata( + @JsonProperty("partitionKeyNames") @Nullable LinkedHashSet partitionKeyNames, + @JsonProperty("clusteringKeyNames") @Nullable LinkedHashSet clusteringKeyNames, + @JsonProperty("clusteringOrders") @Nullable Map clusteringOrders, + @JsonProperty("secondaryIndexNames") @Nullable Set secondaryIndexNames, + @JsonProperty("columns") @Nullable Map columns) { + this.partitionKeyNames = + partitionKeyNames != null ? new LinkedHashSet<>(partitionKeyNames) : new LinkedHashSet<>(); + this.clusteringKeyNames = + clusteringKeyNames != null + ? new LinkedHashSet<>(clusteringKeyNames) + : new LinkedHashSet<>(); + this.clusteringOrders = + clusteringOrders != null ? 
new HashMap<>(clusteringOrders) : Collections.emptyMap(); + this.secondaryIndexNames = + secondaryIndexNames != null ? new HashSet<>(secondaryIndexNames) : Collections.emptySet(); + this.columns = columns != null ? new HashMap<>(columns) : Collections.emptyMap(); + } + + public ObjectStorageTableMetadata(TableMetadata tableMetadata) { + Map clusteringOrders = + tableMetadata.getClusteringKeyNames().stream() + .collect( + Collectors.toMap( + Function.identity(), c -> tableMetadata.getClusteringOrder(c).name())); + Map columnTypeByName = new HashMap<>(); + tableMetadata + .getColumnNames() + .forEach( + columnName -> + columnTypeByName.put( + columnName, tableMetadata.getColumnDataType(columnName).name().toLowerCase())); + this.partitionKeyNames = tableMetadata.getPartitionKeyNames(); + this.clusteringKeyNames = tableMetadata.getClusteringKeyNames(); + this.clusteringOrders = clusteringOrders; + this.secondaryIndexNames = tableMetadata.getSecondaryIndexNames(); + this.columns = columnTypeByName; + } + + private ObjectStorageTableMetadata(Builder builder) { + this( + builder.partitionKeyNames, + builder.clusteringKeyNames, + builder.clusteringOrders, + builder.secondaryIndexNames, + builder.columns); + } + + public LinkedHashSet getPartitionKeyNames() { + return partitionKeyNames; + } + + public LinkedHashSet getClusteringKeyNames() { + return clusteringKeyNames; + } + + public Map getClusteringOrders() { + return clusteringOrders; + } + + public Set getSecondaryIndexNames() { + return secondaryIndexNames; + } + + public Map getColumns() { + return columns; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof ObjectStorageTableMetadata)) { + return false; + } + ObjectStorageTableMetadata that = (ObjectStorageTableMetadata) o; + return Objects.equals(partitionKeyNames, that.partitionKeyNames) + && Objects.equals(clusteringKeyNames, that.clusteringKeyNames) + && Objects.equals(clusteringOrders, that.clusteringOrders) + && Objects.equals(secondaryIndexNames, that.secondaryIndexNames) + && Objects.equals(columns, that.columns); + } + + @Override + public int hashCode() { + return Objects.hash( + partitionKeyNames, clusteringKeyNames, clusteringOrders, secondaryIndexNames, columns); + } + + public TableMetadata toTableMetadata() { + TableMetadata.Builder builder = TableMetadata.newBuilder(); + partitionKeyNames.forEach(builder::addPartitionKey); + clusteringKeyNames.forEach( + n -> builder.addClusteringKey(n, Scan.Ordering.Order.valueOf(clusteringOrders.get(n)))); + secondaryIndexNames.forEach(builder::addSecondaryIndex); + columns.forEach((key, value) -> builder.addColumn(key, convertDataType(value))); + return builder.build(); + } + + private DataType convertDataType(String columnType) { + switch (columnType) { + case "int": + return DataType.INT; + case "bigint": + return DataType.BIGINT; + case "float": + return DataType.FLOAT; + case "double": + return DataType.DOUBLE; + case "text": + return DataType.TEXT; + case "boolean": + return DataType.BOOLEAN; + case "blob": + return DataType.BLOB; + case "date": + return DataType.DATE; + case "time": + return DataType.TIME; + case "timestamp": + return DataType.TIMESTAMP; + case "timestamptz": + return DataType.TIMESTAMPTZ; + default: + throw new AssertionError("Unknown column type: " + columnType); + } + } + + public static ObjectStorageTableMetadata.Builder newBuilder() { + return new ObjectStorageTableMetadata.Builder(); + } + + public static final class Builder { + private LinkedHashSet 
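For illustration, table metadata round-trips through this class and Serializer as JSON; the table shape below is hypothetical, and the assertion relies on TableMetadata's equality as exercised by the tests later in this patch:

package com.scalar.db.storage.objectstorage;

import com.fasterxml.jackson.core.type.TypeReference;
import com.scalar.db.api.TableMetadata;
import com.scalar.db.io.DataType;

public class TableMetadataRoundTripSketch {
  public static void main(String[] args) {
    TableMetadata metadata =
        TableMetadata.newBuilder()
            .addPartitionKey("id")
            .addColumn("id", DataType.INT)
            .addColumn("name", DataType.TEXT)
            .build();
    // Serialize the storage-side representation to JSON and read it back.
    String json = Serializer.serialize(new ObjectStorageTableMetadata(metadata));
    ObjectStorageTableMetadata restored =
        Serializer.deserialize(json, new TypeReference<ObjectStorageTableMetadata>() {});
    // Column types are stored as lowercase names and mapped back to DataType.
    System.out.println(restored.toTableMetadata().equals(metadata)); // true
  }
}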
partitionKeyNames; + private LinkedHashSet clusteringKeyNames; + private Map clusteringOrders; + private Set secondaryIndexNames; + private Map columns; + + private Builder() {} + + public ObjectStorageTableMetadata.Builder partitionKeyNames(LinkedHashSet val) { + partitionKeyNames = val; + return this; + } + + public ObjectStorageTableMetadata.Builder clusteringKeyNames(LinkedHashSet val) { + clusteringKeyNames = val; + return this; + } + + public ObjectStorageTableMetadata.Builder clusteringOrders(Map val) { + clusteringOrders = val; + return this; + } + + public ObjectStorageTableMetadata.Builder secondaryIndexNames(Set val) { + secondaryIndexNames = val; + return this; + } + + public ObjectStorageTableMetadata.Builder columns(Map val) { + columns = val; + return this; + } + + public ObjectStorageTableMetadata build() { + return new ObjectStorageTableMetadata(this); + } + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageUtils.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageUtils.java new file mode 100644 index 0000000000..ac04b7df21 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageUtils.java @@ -0,0 +1,27 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.config.DatabaseConfig; +import com.scalar.db.storage.objectstorage.blobstorage.BlobStorageConfig; +import java.util.Objects; + +public class ObjectStorageUtils { + public static final char OBJECT_KEY_DELIMITER = '/'; + public static final char CONCATENATED_KEY_DELIMITER = '!'; + + public static String getObjectKey(String namespace, String table, String partition) { + return String.join(String.valueOf(OBJECT_KEY_DELIMITER), namespace, table, partition); + } + + public static String getObjectKey(String namespace, String table) { + return String.join(String.valueOf(OBJECT_KEY_DELIMITER), namespace, table); + } + + public static ObjectStorageConfig getObjectStorageConfig(DatabaseConfig databaseConfig) { + if (Objects.equals(databaseConfig.getStorage(), BlobStorageConfig.STORAGE_NAME)) { + return new BlobStorageConfig(databaseConfig); + } else { + throw new IllegalArgumentException( + "Unsupported Object Storage: " + databaseConfig.getStorage()); + } + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapper.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapper.java new file mode 100644 index 0000000000..cf28f16574 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapper.java @@ -0,0 +1,76 @@ +package com.scalar.db.storage.objectstorage; + +import java.util.Optional; +import java.util.Set; + +public interface ObjectStorageWrapper { + + /** + * Get the object from the storage. + * + * @param key the key of the object + * @throws ObjectStorageWrapperException if an error occurs + * @return the object and its version wrapped in an Optional if found, otherwise an empty Optional + */ + Optional get(String key) throws ObjectStorageWrapperException; + + /** + * Get object keys with the specified prefix. + * + * @param prefix the prefix of the keys + * @throws ObjectStorageWrapperException if an error occurs + * @return the set of keys with the specified prefix + */ + Set getKeys(String prefix) throws ObjectStorageWrapperException; + + /** + * Insert the object into the storage. 
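As an aside on the key layout: getObjectKey joins its parts with OBJECT_KEY_DELIMITER ('/'), so metadata objects live under namespace/table and data objects under namespace/table/partition. A small sketch with hypothetical names:

package com.scalar.db.storage.objectstorage;

public class ObjectKeyLayoutSketch {
  public static void main(String[] args) {
    // Metadata table object in the "scalardb" metadata namespace.
    System.out.println(ObjectStorageUtils.getObjectKey("scalardb", "metadata"));
    // -> scalardb/metadata

    // Data object for one partition of ns.orders; the partition name itself is
    // a concatenated key built with CONCATENATED_KEY_DELIMITER ('!').
    System.out.println(ObjectStorageUtils.getObjectKey("ns", "orders", "c-42!7"));
    // -> ns/orders/c-42!7
  }
}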
+ * + * @param key the key of the object + * @param object the object to insert + * @throws PreconditionFailedException if the object already exists + * @throws ObjectStorageWrapperException if an error occurs + */ + void insert(String key, String object) throws ObjectStorageWrapperException; + + /** + * Update the object in the storage if the version matches. + * + * @param key the key of the object + * @param object the updated object + * @param version the expected version of the object + * @throws PreconditionFailedException if the version does not match or the object does not exist + * @throws ObjectStorageWrapperException if an error occurs + */ + void update(String key, String object, String version) throws ObjectStorageWrapperException; + + /** + * Delete the object from the storage. + * + * @param key the key of the object + * @throws PreconditionFailedException if the object does not exist + * @throws ObjectStorageWrapperException if an error occurs + */ + void delete(String key) throws ObjectStorageWrapperException; + + /** + * Delete the object from the storage if the version matches. + * + * @param key the key of the object + * @param version the expected version of the object + * @throws PreconditionFailedException if the version does not match or the object does not exist + * @throws ObjectStorageWrapperException if an error occurs + */ + void delete(String key, String version) throws ObjectStorageWrapperException; + + /** + * Delete objects with the specified prefix from the storage. + * + * @param prefix the prefix of the objects to delete + * @throws ObjectStorageWrapperException if an error occurs + */ + void deleteByPrefix(String prefix) throws ObjectStorageWrapperException; + + /** Close the storage wrapper. */ + void close(); +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperException.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperException.java new file mode 100644 index 0000000000..2dc9546264 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperException.java @@ -0,0 +1,12 @@ +package com.scalar.db.storage.objectstorage; + +public class ObjectStorageWrapperException extends Exception { + + public ObjectStorageWrapperException(String message) { + super(message); + } + + public ObjectStorageWrapperException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperFactory.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperFactory.java new file mode 100644 index 0000000000..2fb1eda076 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperFactory.java @@ -0,0 +1,18 @@ +package com.scalar.db.storage.objectstorage; + +import com.scalar.db.storage.objectstorage.blobstorage.BlobStorageConfig; +import com.scalar.db.storage.objectstorage.blobstorage.BlobStorageWrapper; +import java.util.Objects; + +public class ObjectStorageWrapperFactory { + + public static ObjectStorageWrapper create(ObjectStorageConfig objectStorageConfig) { + if (Objects.equals(objectStorageConfig.getStorageName(), BlobStorageConfig.STORAGE_NAME)) { + assert objectStorageConfig instanceof BlobStorageConfig; + return new BlobStorageWrapper((BlobStorageConfig) objectStorageConfig); + } else { + throw new IllegalArgumentException( + "Unsupported Object Storage: " + objectStorageConfig.getStorageName()); + } + } +} diff --git 
a/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperResponse.java b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperResponse.java new file mode 100644 index 0000000000..02aa6f6454 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/ObjectStorageWrapperResponse.java @@ -0,0 +1,19 @@ +package com.scalar.db.storage.objectstorage; + +public class ObjectStorageWrapperResponse { + private final String payload; + private final String version; + + public ObjectStorageWrapperResponse(String payload, String version) { + this.payload = payload; + this.version = version; + } + + public String getPayload() { + return payload; + } + + public String getVersion() { + return version; + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/PreconditionFailedException.java b/core/src/main/java/com/scalar/db/storage/objectstorage/PreconditionFailedException.java new file mode 100644 index 0000000000..1b13c5d722 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/PreconditionFailedException.java @@ -0,0 +1,12 @@ +package com.scalar.db.storage.objectstorage; + +public class PreconditionFailedException extends ObjectStorageWrapperException { + + public PreconditionFailedException(String message) { + super(message); + } + + public PreconditionFailedException(String message, Throwable cause) { + super(message, cause); + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/Serializer.java b/core/src/main/java/com/scalar/db/storage/objectstorage/Serializer.java new file mode 100644 index 0000000000..93c41822d5 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/Serializer.java @@ -0,0 +1,33 @@ +package com.scalar.db.storage.objectstorage; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.fasterxml.jackson.databind.DeserializationFeature; +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.SerializationFeature; +import com.fasterxml.jackson.datatype.jsr310.JavaTimeModule; + +public class Serializer { + private static final ObjectMapper mapper = new ObjectMapper(); + + static { + mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); + mapper.configure(SerializationFeature.WRAP_ROOT_VALUE, false); + mapper.registerModule(new JavaTimeModule()); + } + + public static T deserialize(String json, TypeReference typeReference) { + try { + return mapper.readValue(json, typeReference); + } catch (Exception e) { + throw new RuntimeException("Failed to deserialize the object.", e); + } + } + + public static String serialize(T object) { + try { + return mapper.writeValueAsString(object); + } catch (Exception e) { + throw new RuntimeException("Failed to serialize the object.", e); + } + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageConfig.java b/core/src/main/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageConfig.java new file mode 100644 index 0000000000..419c1964a5 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageConfig.java @@ -0,0 +1,139 @@ +package com.scalar.db.storage.objectstorage.blobstorage; + +import static com.scalar.db.config.ConfigUtils.getInt; +import static com.scalar.db.config.ConfigUtils.getLong; +import static com.scalar.db.config.ConfigUtils.getString; + +import com.scalar.db.common.CoreError; +import com.scalar.db.config.DatabaseConfig; +import 
com.scalar.db.storage.objectstorage.ObjectStorageConfig; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class BlobStorageConfig implements ObjectStorageConfig { + public static final String STORAGE_NAME = "blob-storage"; + public static final String PREFIX = DatabaseConfig.PREFIX + STORAGE_NAME + "."; + public static final String TABLE_METADATA_NAMESPACE = PREFIX + "table_metadata.namespace"; + + public static final String PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES = + PREFIX + "parallel_upload_block_size_in_bytes"; + public static final String PARALLEL_UPLOAD_MAX_PARALLELISM = + PREFIX + "parallel_upload_max_parallelism"; + public static final String PARALLEL_UPLOAD_THRESHOLD_IN_BYTES = + PREFIX + "parallel_upload_threshold_in_bytes"; + public static final String REQUEST_TIMEOUT_IN_SECONDS = PREFIX + "request_timeout_in_seconds"; + + public static final long DEFAULT_PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES = 4 * 1024 * 1024; // 4MB + public static final int DEFAULT_PARALLEL_UPLOAD_MAX_PARALLELISM = 4; + public static final long DEFAULT_PARALLEL_UPLOAD_THRESHOLD_IN_BYTES = 4 * 1024 * 1024; // 4MB + public static final int DEFAULT_REQUEST_TIMEOUT_IN_SECONDS = 15; + + private static final Logger logger = LoggerFactory.getLogger(BlobStorageConfig.class); + private final String endpoint; + private final String username; + private final String password; + private final String bucket; + private final String metadataNamespace; + + private final long parallelUploadBlockSizeInBytes; + private final int parallelUploadMaxParallelism; + private final long parallelUploadThresholdInBytes; + private final int requestTimeoutInSeconds; + + public BlobStorageConfig(DatabaseConfig databaseConfig) { + String storage = databaseConfig.getStorage(); + if (!storage.equals(STORAGE_NAME)) { + throw new IllegalArgumentException( + DatabaseConfig.STORAGE + " should be '" + STORAGE_NAME + "'"); + } + if (databaseConfig.getContactPoints().isEmpty()) { + throw new IllegalArgumentException(CoreError.INVALID_CONTACT_POINTS.buildMessage()); + } + String fullEndpoint = databaseConfig.getContactPoints().get(0); + int lastSlashIndex = fullEndpoint.lastIndexOf('/'); + if (lastSlashIndex != -1 && lastSlashIndex < fullEndpoint.length() - 1) { + endpoint = fullEndpoint.substring(0, lastSlashIndex); + bucket = fullEndpoint.substring(lastSlashIndex + 1); + } else { + throw new IllegalArgumentException( + "Invalid contact points format. 
Expected: BLOB_URI/BUCKET_NAME"); + } + username = databaseConfig.getUsername().orElse(null); + password = databaseConfig.getPassword().orElse(null); + metadataNamespace = getString(databaseConfig.getProperties(), TABLE_METADATA_NAMESPACE, null); + + if (databaseConfig.getScanFetchSize() != DatabaseConfig.DEFAULT_SCAN_FETCH_SIZE) { + logger.warn( + "The configuration property \"" + + DatabaseConfig.SCAN_FETCH_SIZE + + "\" is not applicable to Blob Storage and will be ignored."); + } + + parallelUploadBlockSizeInBytes = + getLong( + databaseConfig.getProperties(), + PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES, + DEFAULT_PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES); + parallelUploadMaxParallelism = + getInt( + databaseConfig.getProperties(), + PARALLEL_UPLOAD_MAX_PARALLELISM, + DEFAULT_PARALLEL_UPLOAD_MAX_PARALLELISM); + parallelUploadThresholdInBytes = + getLong( + databaseConfig.getProperties(), + PARALLEL_UPLOAD_THRESHOLD_IN_BYTES, + DEFAULT_PARALLEL_UPLOAD_THRESHOLD_IN_BYTES); + requestTimeoutInSeconds = + getInt( + databaseConfig.getProperties(), + REQUEST_TIMEOUT_IN_SECONDS, + DEFAULT_REQUEST_TIMEOUT_IN_SECONDS); + } + + @Override + public String getStorageName() { + return STORAGE_NAME; + } + + @Override + public String getEndpoint() { + return endpoint; + } + + @Override + public String getUsername() { + return username; + } + + @Override + public String getPassword() { + return password; + } + + @Override + public String getBucket() { + return bucket; + } + + @Override + public String getMetadataNamespace() { + return metadataNamespace; + } + + public long getParallelUploadBlockSizeInBytes() { + return parallelUploadBlockSizeInBytes; + } + + public int getParallelUploadMaxParallelism() { + return parallelUploadMaxParallelism; + } + + public long getParallelUploadThresholdInBytes() { + return parallelUploadThresholdInBytes; + } + + public int getRequestTimeoutInSeconds() { + return requestTimeoutInSeconds; + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageProvider.java b/core/src/main/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageProvider.java new file mode 100644 index 0000000000..dcf1242d3d --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageProvider.java @@ -0,0 +1,11 @@ +package com.scalar.db.storage.objectstorage.blobstorage; + +import com.scalar.db.storage.objectstorage.ObjectStorageProvider; + +public class BlobStorageProvider implements ObjectStorageProvider { + + @Override + public String getName() { + return BlobStorageConfig.STORAGE_NAME; + } +} diff --git a/core/src/main/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageWrapper.java b/core/src/main/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageWrapper.java new file mode 100644 index 0000000000..b69f11cec1 --- /dev/null +++ b/core/src/main/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageWrapper.java @@ -0,0 +1,198 @@ +package com.scalar.db.storage.objectstorage.blobstorage; + +import com.azure.core.http.HttpHeaderName; +import com.azure.core.util.BinaryData; +import com.azure.storage.blob.BlobClient; +import com.azure.storage.blob.BlobContainerClient; +import com.azure.storage.blob.BlobServiceClientBuilder; +import com.azure.storage.blob.models.BlobDownloadContentResponse; +import com.azure.storage.blob.models.BlobErrorCode; +import com.azure.storage.blob.models.BlobItem; +import com.azure.storage.blob.models.BlobRequestConditions; +import 
com.azure.storage.blob.models.BlobStorageException; +import com.azure.storage.blob.models.ListBlobsOptions; +import com.azure.storage.blob.models.ParallelTransferOptions; +import com.azure.storage.blob.options.BlobParallelUploadOptions; +import com.azure.storage.common.StorageSharedKeyCredential; +import com.scalar.db.storage.objectstorage.ObjectStorageWrapper; +import com.scalar.db.storage.objectstorage.ObjectStorageWrapperException; +import com.scalar.db.storage.objectstorage.ObjectStorageWrapperResponse; +import com.scalar.db.storage.objectstorage.PreconditionFailedException; +import java.time.Duration; +import java.util.Optional; +import java.util.Set; +import java.util.stream.Collectors; + +public class BlobStorageWrapper implements ObjectStorageWrapper { + private final BlobContainerClient client; + private final Duration requestTimeoutInSeconds; + private final ParallelTransferOptions parallelTransferOptions; + + public BlobStorageWrapper(BlobStorageConfig config) { + this.client = + new BlobServiceClientBuilder() + .endpoint(config.getEndpoint()) + .credential(new StorageSharedKeyCredential(config.getUsername(), config.getPassword())) + .buildClient() + .getBlobContainerClient(config.getBucket()); + this.requestTimeoutInSeconds = Duration.ofSeconds(config.getRequestTimeoutInSeconds()); + this.parallelTransferOptions = + new ParallelTransferOptions() + .setBlockSizeLong(config.getParallelUploadBlockSizeInBytes()) + .setMaxConcurrency(config.getParallelUploadMaxParallelism()) + .setMaxSingleUploadSizeLong(config.getParallelUploadThresholdInBytes()); + } + + @Override + public Optional get(String key) + throws ObjectStorageWrapperException { + try { + BlobClient blobClient = client.getBlobClient(key); + BlobDownloadContentResponse response = + blobClient.downloadContentWithResponse(null, null, requestTimeoutInSeconds, null); + String data = response.getValue().toString(); + String eTag = response.getHeaders().getValue(HttpHeaderName.ETAG); + return Optional.of(new ObjectStorageWrapperResponse(data, eTag)); + } catch (BlobStorageException e) { + if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { + return Optional.empty(); + } + throw new ObjectStorageWrapperException( + String.format("Failed to get the object with key '%s'", key), e); + } catch (Exception e) { + throw new ObjectStorageWrapperException( + String.format("Failed to get the object with key '%s'", key), e); + } + } + + @Override + public Set getKeys(String prefix) throws ObjectStorageWrapperException { + try { + return client.listBlobs(new ListBlobsOptions().setPrefix(prefix), requestTimeoutInSeconds) + .stream() + .map(BlobItem::getName) + .collect(Collectors.toSet()); + } catch (Exception e) { + throw new ObjectStorageWrapperException( + String.format("Failed to get the object keys with prefix '%s'", prefix), e); + } + } + + @Override + public void insert(String key, String object) throws ObjectStorageWrapperException { + try { + BlobClient blobClient = client.getBlobClient(key); + BlobParallelUploadOptions options = + new BlobParallelUploadOptions(BinaryData.fromString(object)) + .setRequestConditions(new BlobRequestConditions().setIfNoneMatch("*")) + .setParallelTransferOptions(parallelTransferOptions); + blobClient.uploadWithResponse(options, requestTimeoutInSeconds, null); + } catch (BlobStorageException e) { + if (e.getErrorCode().equals(BlobErrorCode.BLOB_ALREADY_EXISTS)) { + throw new PreconditionFailedException( + String.format( + "Failed to insert the object with key '%s' due to precondition failure", 
key), + e); + } + throw new ObjectStorageWrapperException( + String.format("Failed to insert the object with key '%s'", key), e); + } catch (Exception e) { + throw new ObjectStorageWrapperException( + String.format("Failed to insert the object with key '%s'", key), e); + } + } + + @Override + public void update(String key, String object, String version) + throws ObjectStorageWrapperException { + try { + BlobClient blobClient = client.getBlobClient(key); + BlobParallelUploadOptions options = + new BlobParallelUploadOptions(BinaryData.fromString(object)) + .setRequestConditions(new BlobRequestConditions().setIfMatch(version)) + .setParallelTransferOptions(parallelTransferOptions); + blobClient.uploadWithResponse(options, requestTimeoutInSeconds, null); + } catch (BlobStorageException e) { + if (e.getErrorCode().equals(BlobErrorCode.CONDITION_NOT_MET) + || e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { + throw new PreconditionFailedException( + String.format( + "Failed to update the object with key '%s' due to precondition failure", key), + e); + } + throw new ObjectStorageWrapperException( + String.format("Failed to update the object with key '%s'", key), e); + } catch (Exception e) { + throw new ObjectStorageWrapperException( + String.format("Failed to update the object with key '%s'", key), e); + } + } + + @Override + public void delete(String key) throws ObjectStorageWrapperException { + try { + BlobClient blobClient = client.getBlobClient(key); + blobClient.delete(); + } catch (BlobStorageException e) { + if (e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { + throw new PreconditionFailedException( + String.format( + "Failed to delete the object with key '%s' due to precondition failure", key), + e); + } + throw new ObjectStorageWrapperException( + String.format("Failed to delete the object with key '%s'", key), e); + } catch (Exception e) { + throw new ObjectStorageWrapperException( + String.format("Failed to delete the object with key '%s'", key), e); + } + } + + @Override + public void delete(String key, String version) throws ObjectStorageWrapperException { + try { + BlobClient blobClient = client.getBlobClient(key); + blobClient.deleteWithResponse( + null, new BlobRequestConditions().setIfMatch(version), requestTimeoutInSeconds, null); + } catch (BlobStorageException e) { + if (e.getErrorCode().equals(BlobErrorCode.CONDITION_NOT_MET) + || e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { + throw new PreconditionFailedException( + String.format( + "Failed to delete the object with key '%s' due to precondition failure", key), + e); + } + throw new ObjectStorageWrapperException( + String.format("Failed to delete the object with key '%s'", key), e); + } catch (Exception e) { + throw new ObjectStorageWrapperException( + String.format("Failed to delete the object with key '%s'", key), e); + } + } + + @Override + public void deleteByPrefix(String prefix) throws ObjectStorageWrapperException { + try { + client + .listBlobs(new ListBlobsOptions().setPrefix(prefix), requestTimeoutInSeconds) + .forEach( + blobItem -> { + try { + client.getBlobClient(blobItem.getName()).delete(); + } catch (BlobStorageException e) { + if (!e.getErrorCode().equals(BlobErrorCode.BLOB_NOT_FOUND)) { + throw e; + } + } + }); + } catch (Exception e) { + throw new ObjectStorageWrapperException( + String.format("Failed to delete the objects with prefix '%s'", prefix), e); + } + } + + @Override + public void close() { + // BlobContainerClient does not have a close method + } +} diff --git 
a/core/src/main/resources/META-INF/services/com.scalar.db.api.DistributedStorageProvider b/core/src/main/resources/META-INF/services/com.scalar.db.api.DistributedStorageProvider index bcee21a06b..e9ffeef06e 100644 --- a/core/src/main/resources/META-INF/services/com.scalar.db.api.DistributedStorageProvider +++ b/core/src/main/resources/META-INF/services/com.scalar.db.api.DistributedStorageProvider @@ -2,4 +2,5 @@ com.scalar.db.storage.cassandra.CassandraProvider com.scalar.db.storage.cosmos.CosmosProvider com.scalar.db.storage.dynamo.DynamoProvider com.scalar.db.storage.jdbc.JdbcProvider +com.scalar.db.storage.objectstorage.blobstorage.BlobStorageProvider com.scalar.db.storage.multistorage.MultiStorageProvider diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ConcatenationVisitorTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ConcatenationVisitorTest.java new file mode 100644 index 0000000000..d8416a84ef --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ConcatenationVisitorTest.java @@ -0,0 +1,192 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; + +import com.scalar.db.io.BigIntColumn; +import com.scalar.db.io.BlobColumn; +import com.scalar.db.io.BooleanColumn; +import com.scalar.db.io.DateColumn; +import com.scalar.db.io.DoubleColumn; +import com.scalar.db.io.FloatColumn; +import com.scalar.db.io.IntColumn; +import com.scalar.db.io.TextColumn; +import com.scalar.db.io.TimeColumn; +import com.scalar.db.io.TimestampColumn; +import com.scalar.db.io.TimestampTZColumn; +import com.scalar.db.util.TimeRelatedColumnEncodingUtils; +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +public class ConcatenationVisitorTest { + private static final boolean ANY_BOOLEAN = false; + private static final BooleanColumn ANY_BOOLEAN_COLUMN = + BooleanColumn.of("any_boolean", ANY_BOOLEAN); + private static final int ANY_INT = Integer.MIN_VALUE; + private static final IntColumn ANY_INT_COLUMN = IntColumn.of("any_int", ANY_INT); + private static final long ANY_BIGINT = BigIntColumn.MAX_VALUE; + private static final BigIntColumn ANY_BIGINT_COLUMN = BigIntColumn.of("any_bigint", ANY_BIGINT); + private static final float ANY_FLOAT = Float.MIN_NORMAL; + private static final FloatColumn ANY_FLOAT_COLUMN = FloatColumn.of("any_float", ANY_FLOAT); + private static final double ANY_DOUBLE = Double.MIN_NORMAL; + private static final DoubleColumn ANY_DOUBLE_COLUMN = DoubleColumn.of("any_double", ANY_DOUBLE); + private static final String ANY_TEXT = "test"; + private static final TextColumn ANY_TEXT_COLUMN = TextColumn.of("any_text", ANY_TEXT); + private static final byte[] ANY_BLOB = "scalar".getBytes(StandardCharsets.UTF_8); + private static final BlobColumn ANY_BLOB_COLUMN = BlobColumn.of("any_blob", ANY_BLOB); + private static final DateColumn ANY_DATE_COLUMN = DateColumn.of("any_date", DateColumn.MAX_VALUE); + private static final TimeColumn ANY_TIME_COLUMN = TimeColumn.of("any_time", TimeColumn.MAX_VALUE); + private static final TimestampColumn ANY_TIMESTAMP_COLUMN = + TimestampColumn.of("any_timestamp", TimestampColumn.MAX_VALUE); + private static final TimestampTZColumn ANY_TIMESTAMPTZ_COLUMN = + TimestampTZColumn.of("any_timestamp_tz", TimestampTZColumn.MAX_VALUE); + private ConcatenationVisitor visitor; + + @BeforeEach + public void setUp() { + visitor = new ConcatenationVisitor(); + } + + @Test + 
public void build_AllTypesGiven_ShouldBuildString() { + // Act + visitor.visit(ANY_BOOLEAN_COLUMN); + visitor.visit(ANY_INT_COLUMN); + visitor.visit(ANY_BIGINT_COLUMN); + visitor.visit(ANY_FLOAT_COLUMN); + visitor.visit(ANY_DOUBLE_COLUMN); + visitor.visit(ANY_TEXT_COLUMN); + visitor.visit(ANY_BLOB_COLUMN); + visitor.visit(ANY_DATE_COLUMN); + visitor.visit(ANY_TIME_COLUMN); + visitor.visit(ANY_TIMESTAMP_COLUMN); + visitor.visit(ANY_TIMESTAMPTZ_COLUMN); + String actual = visitor.build(); + + // Assert + String[] values = + actual.split(String.valueOf(ObjectStorageUtils.CONCATENATED_KEY_DELIMITER), -1); + assertThat(values.length).isEqualTo(11); + assertThat(values[0]).isEqualTo(String.valueOf(ANY_BOOLEAN)); + assertThat(values[1]).isEqualTo(String.valueOf(ANY_INT)); + assertThat(values[2]).isEqualTo(String.valueOf(ANY_BIGINT)); + assertThat(values[3]).isEqualTo(String.valueOf(ANY_FLOAT)); + assertThat(values[4]).isEqualTo(String.valueOf(ANY_DOUBLE)); + assertThat(values[5]).isEqualTo(ANY_TEXT); + assertThat(values[6]) + .isEqualTo(Base64.getUrlEncoder().withoutPadding().encodeToString(ANY_BLOB)); + assertThat(values[7]) + .isEqualTo(String.valueOf(TimeRelatedColumnEncodingUtils.encode(ANY_DATE_COLUMN))); + assertThat(values[8]) + .isEqualTo(String.valueOf(TimeRelatedColumnEncodingUtils.encode(ANY_TIME_COLUMN))); + assertThat(values[9]) + .isEqualTo(String.valueOf(TimeRelatedColumnEncodingUtils.encode(ANY_TIMESTAMP_COLUMN))); + assertThat(values[10]) + .isEqualTo(String.valueOf(TimeRelatedColumnEncodingUtils.encode(ANY_TIMESTAMPTZ_COLUMN))); + } + + @Test + public void visit_BooleanColumnAcceptCalled_ShouldBuildBooleanAsString() { + // Act + ANY_BOOLEAN_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.build()).isEqualTo(String.valueOf(ANY_BOOLEAN)); + } + + @Test + public void visit_IntColumnAcceptCalled_ShouldBuildIntAsString() { + // Act + ANY_INT_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.build()).isEqualTo(String.valueOf(ANY_INT)); + } + + @Test + public void visit_BigIntColumnAcceptCalled_ShouldBuildBigIntAsString() { + // Act + ANY_BIGINT_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.build()).isEqualTo(String.valueOf(ANY_BIGINT)); + } + + @Test + public void visit_FloatColumnAcceptCalled_ShouldBuildFloatAsString() { + // Act + ANY_FLOAT_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.build()).isEqualTo(String.valueOf(ANY_FLOAT)); + } + + @Test + public void visit_DoubleColumnAcceptCalled_ShouldBuildDoubleAsString() { + // Act + ANY_DOUBLE_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.build()).isEqualTo(String.valueOf(ANY_DOUBLE)); + } + + @Test + public void visit_TextColumnAcceptCalled_ShouldBuildText() { + // Act + ANY_TEXT_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.build()).isEqualTo(ANY_TEXT); + } + + @Test + public void visit_BlobColumnAcceptCalled_ShouldBuildBlobAsString() { + // Act + ANY_BLOB_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.build()) + .isEqualTo(Base64.getUrlEncoder().withoutPadding().encodeToString(ANY_BLOB)); + } + + @Test + public void visit_DateColumnAcceptCalled_ShouldBuildDateAsString() { + // Act + ANY_DATE_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.build()) + .isEqualTo(String.valueOf(TimeRelatedColumnEncodingUtils.encode(ANY_DATE_COLUMN))); + } + + @Test + public void visit_TimeColumnAcceptCalled_ShouldBuildTimeAsString() { + // Act + ANY_TIME_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.build()) + 
.isEqualTo(String.valueOf(TimeRelatedColumnEncodingUtils.encode(ANY_TIME_COLUMN))); + } + + @Test + public void visit_TimestampColumnAcceptCalled_ShouldBuildTimestampAsString() { + // Act + ANY_TIMESTAMP_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.build()) + .isEqualTo(String.valueOf(TimeRelatedColumnEncodingUtils.encode(ANY_TIMESTAMP_COLUMN))); + } + + @Test + public void visit_TimestampTZColumnAcceptCalled_ShouldBuildTimestampTZAsString() { + // Act + ANY_TIMESTAMPTZ_COLUMN.accept(visitor); + + // Assert + assertThat(visitor.build()) + .isEqualTo(String.valueOf(TimeRelatedColumnEncodingUtils.encode(ANY_TIMESTAMPTZ_COLUMN))); + } +} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTest.java new file mode 100644 index 0000000000..219af287da --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageAdminTest.java @@ -0,0 +1,573 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatCode; +import static org.assertj.core.api.Assertions.catchThrowable; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +import com.fasterxml.jackson.core.type.TypeReference; +import com.google.common.collect.ImmutableMap; +import com.scalar.db.api.Scan.Ordering.Order; +import com.scalar.db.api.TableMetadata; +import com.scalar.db.io.DataType; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashSet; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import org.assertj.core.util.Sets; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Captor; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +public class ObjectStorageAdminTest { + private static final String METADATA_NAMESPACE = "scalardb"; + + @Mock private ObjectStorageWrapper wrapper; + @Mock private ObjectStorageConfig config; + @Captor private ArgumentCaptor payloadCaptor; + private ObjectStorageAdmin admin; + + @BeforeEach + public void setUp() throws Exception { + MockitoAnnotations.openMocks(this).close(); + + when(config.getMetadataNamespace()).thenReturn(METADATA_NAMESPACE); + admin = new ObjectStorageAdmin(wrapper, config); + } + + @Test + public void getTableMetadata_ShouldReturnCorrectTableMetadata() throws Exception { + // Arrange + String namespace = "ns"; + String table = "table"; + String tableMetadataKey = namespace + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + table; + String objectKey = + ObjectStorageUtils.getObjectKey( + METADATA_NAMESPACE, ObjectStorageAdmin.TABLE_METADATA_TABLE); + + Map columnsMap = + new ImmutableMap.Builder() + .put("c1", "int") + .put("c2", "text") + .put("c3", "bigint") + .put("c4", "boolean") + .put("c5", "blob") + .put("c6", "float") + .put("c7", "double") + .put("c8", "date") + .put("c9", "time") + .put("c10", "timestamp") + .put("c11", "timestamptz") + .build(); + + LinkedHashSet partitionKeyNames = Sets.newLinkedHashSet("c1"); + LinkedHashSet clusteringKeyNames = Sets.newLinkedHashSet("c2", "c3"); + Map clusteringOrders = ImmutableMap.of("c2", "ASC", "c3", "DESC"); + + ObjectStorageTableMetadata objectStorageTableMetadata = + 
ObjectStorageTableMetadata.newBuilder() + .partitionKeyNames(partitionKeyNames) + .clusteringKeyNames(clusteringKeyNames) + .clusteringOrders(clusteringOrders) + .secondaryIndexNames(Collections.emptySet()) + .columns(columnsMap) + .build(); + + Map metadataTable = new HashMap<>(); + metadataTable.put(tableMetadataKey, objectStorageTableMetadata); + String serializedMetadata = Serializer.serialize(metadataTable); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedMetadata, "version1"); + + when(wrapper.get(objectKey)).thenReturn(Optional.of(response)); + + // Act + TableMetadata actual = admin.getTableMetadata(namespace, table); + + // Assert + assertThat(actual) + .isEqualTo( + TableMetadata.newBuilder() + .addPartitionKey("c1") + .addClusteringKey("c2", Order.ASC) + .addClusteringKey("c3", Order.DESC) + .addColumn("c1", DataType.INT) + .addColumn("c2", DataType.TEXT) + .addColumn("c3", DataType.BIGINT) + .addColumn("c4", DataType.BOOLEAN) + .addColumn("c5", DataType.BLOB) + .addColumn("c6", DataType.FLOAT) + .addColumn("c7", DataType.DOUBLE) + .addColumn("c8", DataType.DATE) + .addColumn("c9", DataType.TIME) + .addColumn("c10", DataType.TIMESTAMP) + .addColumn("c11", DataType.TIMESTAMPTZ) + .build()); + } + + @Test + public void unsupportedOperations_ShouldThrowUnsupportedException() { + // Arrange + String namespace = "sample_ns"; + String table = "tbl"; + String column = "col"; + + // Act + Throwable thrown1 = + catchThrowable(() -> admin.createIndex(namespace, table, column, Collections.emptyMap())); + Throwable thrown2 = catchThrowable(() -> admin.dropIndex(namespace, table, column)); + Throwable thrown3 = + catchThrowable( + () -> + admin.importTable( + namespace, table, Collections.emptyMap(), Collections.emptyMap())); + Throwable thrown4 = catchThrowable(() -> admin.dropColumnFromTable(namespace, table, column)); + Throwable thrown5 = + catchThrowable(() -> admin.renameColumn(namespace, table, column, "newCol")); + Throwable thrown6 = catchThrowable(() -> admin.renameTable(namespace, table, "newTable")); + Throwable thrown7 = + catchThrowable(() -> admin.alterColumnType(namespace, table, column, DataType.INT)); + + // Assert + assertThat(thrown1).isInstanceOf(UnsupportedOperationException.class); + assertThat(thrown2).isInstanceOf(UnsupportedOperationException.class); + assertThat(thrown3).isInstanceOf(UnsupportedOperationException.class); + assertThat(thrown4).isInstanceOf(UnsupportedOperationException.class); + assertThat(thrown5).isInstanceOf(UnsupportedOperationException.class); + assertThat(thrown6).isInstanceOf(UnsupportedOperationException.class); + assertThat(thrown7).isInstanceOf(UnsupportedOperationException.class); + } + + @Test + public void getNamespaceNames_ShouldWorkProperly() throws Exception { + // Arrange + Map namespaceMetadataTable = new HashMap<>(); + namespaceMetadataTable.put("ns1", new ObjectStorageNamespaceMetadata("ns1")); + namespaceMetadataTable.put("ns2", new ObjectStorageNamespaceMetadata("ns2")); + String serializedMetadata = Serializer.serialize(namespaceMetadataTable); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedMetadata, "version1"); + + when(wrapper.get( + ObjectStorageUtils.getObjectKey( + METADATA_NAMESPACE, ObjectStorageAdmin.NAMESPACE_METADATA_TABLE))) + .thenReturn(Optional.of(response)); + + // Act + Set actualNamespaces = admin.getNamespaceNames(); + + // Assert + assertThat(actualNamespaces).containsExactlyInAnyOrder("ns1", "ns2", METADATA_NAMESPACE); + } + + 
+  @Test
+  public void
+      getNamespaceNames_NamespaceMetadataTableDoesNotExist_ShouldOnlyReturnMetadataNamespace()
+          throws Exception {
+    // Arrange
+    when(wrapper.get(
+            ObjectStorageUtils.getObjectKey(
+                METADATA_NAMESPACE, ObjectStorageAdmin.NAMESPACE_METADATA_TABLE)))
+        .thenReturn(Optional.empty());
+
+    // Act
+    Set<String> actualNamespaces = admin.getNamespaceNames();
+
+    // Assert
+    assertThat(actualNamespaces).containsExactly(METADATA_NAMESPACE);
+  }
+
+  @Test
+  public void namespaceExists_WithExistingNamespace_ShouldReturnTrue() throws Exception {
+    // Arrange
+    String namespace = "ns";
+    Map<String, ObjectStorageNamespaceMetadata> metadataTable = new HashMap<>();
+    metadataTable.put(namespace, new ObjectStorageNamespaceMetadata(namespace));
+    String serializedMetadata = Serializer.serialize(metadataTable);
+    ObjectStorageWrapperResponse response =
+        new ObjectStorageWrapperResponse(serializedMetadata, "version1");
+
+    when(wrapper.get(
+            ObjectStorageUtils.getObjectKey(
+                METADATA_NAMESPACE, ObjectStorageAdmin.NAMESPACE_METADATA_TABLE)))
+        .thenReturn(Optional.of(response));
+
+    // Act & Assert
+    assertThat(admin.namespaceExists(namespace)).isTrue();
+  }
+
+  @Test
+  public void namespaceExists_WithNonExistingNamespace_ShouldReturnFalse() throws Exception {
+    // Arrange
+    when(wrapper.get(
+            ObjectStorageUtils.getObjectKey(
+                METADATA_NAMESPACE, ObjectStorageAdmin.NAMESPACE_METADATA_TABLE)))
+        .thenReturn(Optional.empty());
+
+    // Act & Assert
+    assertThat(admin.namespaceExists("ns")).isFalse();
+  }
+
+  @Test
+  public void namespaceExists_WithMetadataNamespace_ShouldReturnTrue() throws Exception {
+    // Act & Assert
+    assertThat(admin.namespaceExists(METADATA_NAMESPACE)).isTrue();
+  }
+
+  @Test
+  public void createNamespace_ShouldInsertNamespaceMetadata() throws Exception {
+    // Arrange
+    String namespace = "ns";
+    String expectedObjectKey =
+        ObjectStorageUtils.getObjectKey(
+            METADATA_NAMESPACE, ObjectStorageAdmin.NAMESPACE_METADATA_TABLE);
+
+    when(wrapper.get(expectedObjectKey)).thenReturn(Optional.empty());
+
+    // Act
+    admin.createNamespace(namespace, Collections.emptyMap());
+
+    // Assert
+    verify(wrapper).insert(eq(expectedObjectKey), payloadCaptor.capture());
+
+    Map<String, ObjectStorageNamespaceMetadata> insertedMetadata =
+        Serializer.deserialize(
+            payloadCaptor.getValue(),
+            new TypeReference<Map<String, ObjectStorageNamespaceMetadata>>() {});
+    assertThat(insertedMetadata).containsKey(namespace);
+    assertThat(insertedMetadata.get(namespace).getName()).isEqualTo(namespace);
+  }
+
+  @Test
+  public void createTable_ShouldInsertTableMetadata() throws Exception {
+    // Arrange
+    String namespace = "ns";
+    String table = "sample_table";
+    TableMetadata metadata =
+        TableMetadata.newBuilder()
+            .addPartitionKey("c3")
+            .addClusteringKey("c1", Order.DESC)
+            .addClusteringKey("c2", Order.ASC)
+            .addColumn("c1", DataType.TEXT)
+            .addColumn("c2", DataType.BIGINT)
+            .addColumn("c3", DataType.BOOLEAN)
+            .build();
+    String expectedObjectKey =
+        ObjectStorageUtils.getObjectKey(
+            METADATA_NAMESPACE, ObjectStorageAdmin.TABLE_METADATA_TABLE);
+
+    when(wrapper.get(expectedObjectKey)).thenReturn(Optional.empty());
+
+    // Act
+    admin.createTable(namespace, table, metadata, Collections.emptyMap());
+
+    // Assert
+    verify(wrapper).insert(eq(expectedObjectKey), payloadCaptor.capture());
+
+    Map<String, ObjectStorageTableMetadata> insertedMetadata =
+        Serializer.deserialize(
+            payloadCaptor.getValue(),
+            new TypeReference<Map<String, ObjectStorageTableMetadata>>() {});
+    String tableMetadataKey = namespace + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + table;
+    assertThat(insertedMetadata).containsKey(tableMetadataKey);
+    ObjectStorageTableMetadata tableMetadata =
insertedMetadata.get(tableMetadataKey); + assertThat(tableMetadata.getPartitionKeyNames()).containsExactly("c3"); + assertThat(tableMetadata.getClusteringKeyNames()).containsExactly("c1", "c2"); + assertThat(tableMetadata.getClusteringOrders()) + .containsEntry("c1", "DESC") + .containsEntry("c2", "ASC"); + assertThat(tableMetadata.getColumns()) + .containsEntry("c1", "text") + .containsEntry("c2", "bigint") + .containsEntry("c3", "boolean"); + } + + @Test + public void dropNamespace_ShouldDeleteNamespaceMetadataAndDeleteMetadataTableIfEmpty() + throws Exception { + // Arrange + String namespace = "ns"; + Map metadataTable = new HashMap<>(); + metadataTable.put(namespace, new ObjectStorageNamespaceMetadata(namespace)); + String serializedMetadata = Serializer.serialize(metadataTable); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedMetadata, "version1"); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + METADATA_NAMESPACE, ObjectStorageAdmin.NAMESPACE_METADATA_TABLE); + + when(wrapper.get(expectedObjectKey)).thenReturn(Optional.of(response)); + + // Act + admin.dropNamespace(namespace); + + // Assert + verify(wrapper).delete(eq(expectedObjectKey), eq("version1")); + } + + @Test + public void dropNamespace_ShouldDeleteNamespaceMetadataAndUpdateMetadataTableIfNotEmpty() + throws Exception { + // Arrange + String namespace = "ns"; + String anotherNamespace = "other_ns"; + Map metadataTable = new HashMap<>(); + metadataTable.put(namespace, new ObjectStorageNamespaceMetadata(namespace)); + metadataTable.put(anotherNamespace, new ObjectStorageNamespaceMetadata(anotherNamespace)); + String serializedMetadata = Serializer.serialize(metadataTable); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedMetadata, "version1"); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + METADATA_NAMESPACE, ObjectStorageAdmin.NAMESPACE_METADATA_TABLE); + + when(wrapper.get(expectedObjectKey)).thenReturn(Optional.of(response)); + + // Act + admin.dropNamespace(namespace); + + // Assert + verify(wrapper).update(eq(expectedObjectKey), payloadCaptor.capture(), eq("version1")); + Map updatedMetadata = + Serializer.deserialize( + payloadCaptor.getValue(), + new TypeReference>() {}); + assertThat(updatedMetadata).doesNotContainKey(namespace); + assertThat(updatedMetadata).containsKey(anotherNamespace); + } + + @Test + public void dropTable_ShouldDeleteTableMetadataAndDropMetadataTableIfEmpty() throws Exception { + // Arrange + String namespace = "ns"; + String table = "table"; + String tableMetadataKey = namespace + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + table; + + Map metadataTable = new HashMap<>(); + metadataTable.put(tableMetadataKey, ObjectStorageTableMetadata.newBuilder().build()); + String serializedMetadata = Serializer.serialize(metadataTable); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedMetadata, "version1"); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + METADATA_NAMESPACE, ObjectStorageAdmin.TABLE_METADATA_TABLE); + + when(wrapper.get(expectedObjectKey)).thenReturn(Optional.of(response)); + + // Act + admin.dropTable(namespace, table); + + // Assert + verify(wrapper).delete(eq(expectedObjectKey), eq("version1")); + } + + @Test + public void dropTable_ShouldDeleteTableMetadataAndUpdateMetadataTableIfNotEmpty() + throws Exception { + // Arrange + String namespace = "ns"; + String table = "table"; + String anotherTable = "tbl2"; + String 
tableMetadataKey = namespace + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + table; + String anotherTableMetadataKey = + namespace + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + anotherTable; + + Map metadataTable = new HashMap<>(); + metadataTable.put(tableMetadataKey, ObjectStorageTableMetadata.newBuilder().build()); + metadataTable.put(anotherTableMetadataKey, ObjectStorageTableMetadata.newBuilder().build()); + String serializedMetadata = Serializer.serialize(metadataTable); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedMetadata, "version1"); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + METADATA_NAMESPACE, ObjectStorageAdmin.TABLE_METADATA_TABLE); + + when(wrapper.get(expectedObjectKey)).thenReturn(Optional.of(response)); + + // Act + admin.dropTable(namespace, table); + + // Assert + verify(wrapper).update(eq(expectedObjectKey), payloadCaptor.capture(), eq("version1")); + Map updatedMetadata = + Serializer.deserialize( + payloadCaptor.getValue(), + new TypeReference>() {}); + assertThat(updatedMetadata).doesNotContainKey(tableMetadataKey); + assertThat(updatedMetadata).containsKey(anotherTableMetadataKey); + } + + @Test + public void truncateTable_ShouldDeleteTableData() throws Exception { + // Arrange + String namespace = "ns"; + String table = "table"; + String tableDataPrefix = ObjectStorageUtils.getObjectKey(namespace, table, ""); + + // Act + admin.truncateTable(namespace, table); + + // Assert + verify(wrapper).deleteByPrefix(tableDataPrefix); + verify(wrapper, never()) + .delete( + ObjectStorageUtils.getObjectKey( + METADATA_NAMESPACE, ObjectStorageAdmin.TABLE_METADATA_TABLE)); + } + + @Test + public void truncateTable_WithMetadataOnlyTable_ShouldNotThrowException() throws Exception { + // Arrange + String namespace = "ns"; + String table = "table"; + String tableDataPrefix = ObjectStorageUtils.getObjectKey(namespace, table, ""); + + // Act Assert + assertThatCode(() -> admin.truncateTable(namespace, table)).doesNotThrowAnyException(); + verify(wrapper).deleteByPrefix(tableDataPrefix); + verify(wrapper, never()) + .delete( + ObjectStorageUtils.getObjectKey( + METADATA_NAMESPACE, ObjectStorageAdmin.TABLE_METADATA_TABLE)); + } + + @Test + public void getNamespaceTableNames_ShouldReturnTableNamesProperly() throws Exception { + // Arrange + String namespace = "ns"; + String tableMetadataKey1 = namespace + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + "t1"; + String tableMetadataKey2 = namespace + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + "t2"; + String tableMetadataKey3 = "other_ns" + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + "t3"; + + Map metadataTable = new HashMap<>(); + metadataTable.put(tableMetadataKey1, ObjectStorageTableMetadata.newBuilder().build()); + metadataTable.put(tableMetadataKey2, ObjectStorageTableMetadata.newBuilder().build()); + metadataTable.put(tableMetadataKey3, ObjectStorageTableMetadata.newBuilder().build()); + + String serializedTableMetadata = Serializer.serialize(metadataTable); + ObjectStorageWrapperResponse tableMetadataResponse = + new ObjectStorageWrapperResponse(serializedTableMetadata, "version1"); + + Map namespaceMetadata = new HashMap<>(); + namespaceMetadata.put(namespace, new ObjectStorageNamespaceMetadata(namespace)); + String serializedNamespaceMetadata = Serializer.serialize(namespaceMetadata); + ObjectStorageWrapperResponse namespaceResponse = + new ObjectStorageWrapperResponse(serializedNamespaceMetadata, "version1"); + + String tableMetadataObjectKey = + 
ObjectStorageUtils.getObjectKey( + METADATA_NAMESPACE, ObjectStorageAdmin.TABLE_METADATA_TABLE); + String namespaceMetadataObjectKey = + ObjectStorageUtils.getObjectKey( + METADATA_NAMESPACE, ObjectStorageAdmin.NAMESPACE_METADATA_TABLE); + + when(wrapper.get(tableMetadataObjectKey)).thenReturn(Optional.of(tableMetadataResponse)); + when(wrapper.get(namespaceMetadataObjectKey)).thenReturn(Optional.of(namespaceResponse)); + + // Act + Set actualTableNames = admin.getNamespaceTableNames(namespace); + + // Assert + assertThat(actualTableNames).containsExactlyInAnyOrder("t1", "t2"); + } + + @Test + public void addNewColumnToTable_ShouldWorkProperly() throws Exception { + // Arrange + String namespace = "ns"; + String table = "table"; + String currentColumn = "c1"; + String newColumn = "c2"; + String tableMetadataKey = namespace + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + table; + + LinkedHashSet partitionKeyNames = Sets.newLinkedHashSet(currentColumn); + Map columns = ImmutableMap.of(currentColumn, "text"); + ObjectStorageTableMetadata existingTableMetadata = + ObjectStorageTableMetadata.newBuilder() + .partitionKeyNames(partitionKeyNames) + .secondaryIndexNames(Collections.emptySet()) + .columns(columns) + .build(); + + Map metadataTable = new HashMap<>(); + metadataTable.put(tableMetadataKey, existingTableMetadata); + String serializedMetadata = Serializer.serialize(metadataTable); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedMetadata, "version1"); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + METADATA_NAMESPACE, ObjectStorageAdmin.TABLE_METADATA_TABLE); + + when(wrapper.get(expectedObjectKey)).thenReturn(Optional.of(response)); + + // Act + admin.addNewColumnToTable(namespace, table, newColumn, DataType.INT); + + // Assert + verify(wrapper).update(eq(expectedObjectKey), payloadCaptor.capture(), eq("version1")); + + Map updatedMetadata = + Serializer.deserialize( + payloadCaptor.getValue(), + new TypeReference>() {}); + + ObjectStorageTableMetadata updatedTableMetadata = updatedMetadata.get(tableMetadataKey); + assertThat(updatedTableMetadata.getPartitionKeyNames()).containsExactly(currentColumn); + assertThat(updatedTableMetadata.getColumns()) + .containsEntry(currentColumn, "text") + .containsEntry(newColumn, "int"); + } + + @Test + public void repairTable_ShouldUpsertTableMetadata() throws Exception { + // Arrange + String namespace = "ns"; + String table = "tbl"; + TableMetadata tableMetadata = + TableMetadata.newBuilder() + .addColumn("c1", DataType.INT) + .addColumn("c2", DataType.TEXT) + .addColumn("c3", DataType.BIGINT) + .addPartitionKey("c1") + .build(); + + Map metadataTable = new HashMap<>(); + String serializedMetadata = Serializer.serialize(metadataTable); + ObjectStorageWrapperResponse response = + new ObjectStorageWrapperResponse(serializedMetadata, "version1"); + String expectedObjectKey = + ObjectStorageUtils.getObjectKey( + METADATA_NAMESPACE, ObjectStorageAdmin.TABLE_METADATA_TABLE); + + when(wrapper.get(expectedObjectKey)).thenReturn(Optional.of(response)); + + // Act + admin.repairTable(namespace, table, tableMetadata, Collections.emptyMap()); + + // Assert + verify(wrapper).insert(eq(expectedObjectKey), payloadCaptor.capture()); + + Map insertedMetadata = + Serializer.deserialize( + payloadCaptor.getValue(), + new TypeReference>() {}); + + String tableMetadataKey = namespace + ObjectStorageUtils.CONCATENATED_KEY_DELIMITER + table; + assertThat(insertedMetadata).containsKey(tableMetadataKey); + + 
ObjectStorageTableMetadata insertedTableMetadata = insertedMetadata.get(tableMetadataKey); + assertThat(insertedTableMetadata.getPartitionKeyNames()).containsExactly("c1"); + assertThat(insertedTableMetadata.getColumns()) + .containsEntry("c1", "int") + .containsEntry("c2", "text") + .containsEntry("c3", "bigint"); + } +} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageUtilsTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageUtilsTest.java new file mode 100644 index 0000000000..b6ae057928 --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/ObjectStorageUtilsTest.java @@ -0,0 +1,49 @@ +package com.scalar.db.storage.objectstorage; + +import static org.assertj.core.api.Assertions.assertThat; + +import org.junit.jupiter.api.Test; + +public class ObjectStorageUtilsTest { + + @Test + public void getObjectKey_GivenAllNames_ShouldReturnExpectedObjectKey() { + // Arrange + String namespaceName = "namespace"; + String tableName = "table"; + String partitionName = "partition"; + + // Act + String actual = ObjectStorageUtils.getObjectKey(namespaceName, tableName, partitionName); + + // Assert + assertThat(actual).isEqualTo("namespace/table/partition"); + } + + @Test + public void getObjectKey_GivenNamespaceAndTableNames_ShouldReturnExpectedObjectKeyPrefix() { + // Arrange + String namespaceName = "namespace"; + String tableName = "table"; + String partitionName = ""; + + // Act + String actual = ObjectStorageUtils.getObjectKey(namespaceName, tableName, partitionName); + + // Assert + assertThat(actual).isEqualTo("namespace/table/"); + } + + @Test + public void getObjectKey_GivenNamespaceAndTableNames_ShouldReturnExpectedObjectKey() { + // Arrange + String namespaceName = "namespace"; + String tableName = "table"; + + // Act + String actual = ObjectStorageUtils.getObjectKey(namespaceName, tableName); + + // Assert + assertThat(actual).isEqualTo("namespace/table"); + } +} diff --git a/core/src/test/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageConfigTest.java b/core/src/test/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageConfigTest.java new file mode 100644 index 0000000000..1add5ff3a7 --- /dev/null +++ b/core/src/test/java/com/scalar/db/storage/objectstorage/blobstorage/BlobStorageConfigTest.java @@ -0,0 +1,113 @@ +package com.scalar.db.storage.objectstorage.blobstorage; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatThrownBy; + +import com.scalar.db.config.DatabaseConfig; +import java.util.Properties; +import org.junit.jupiter.api.Test; + +public class BlobStorageConfigTest { + private static final String ANY_USERNAME = "any_user"; + private static final String ANY_PASSWORD = "any_password"; + private static final String ANY_BUCKET = "bucket"; + private static final String ANY_ENDPOINT = "http://localhost:10000/" + ANY_USERNAME; + private static final String ANY_CONTACT_POINT = ANY_ENDPOINT + "/" + ANY_BUCKET; + private static final String BLOB_STORAGE = "blob-storage"; + private static final String ANY_TABLE_METADATA_NAMESPACE = "any_namespace"; + private static final String ANY_PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES = "5242880"; // 5MB + private static final String ANY_PARALLEL_UPLOAD_MAX_PARALLELISM = "4"; + private static final String ANY_PARALLEL_UPLOAD_THRESHOLD_IN_BYTES = "10485760"; // 10MB + private static final String ANY_REQUEST_TIMEOUT_IN_SECONDS = "30"; + + @Test + public void 
constructor_AllPropertiesGiven_ShouldLoadProperly() { + // Arrange + Properties props = new Properties(); + props.setProperty(DatabaseConfig.CONTACT_POINTS, ANY_CONTACT_POINT); + props.setProperty(DatabaseConfig.USERNAME, ANY_USERNAME); + props.setProperty(DatabaseConfig.PASSWORD, ANY_PASSWORD); + props.setProperty(DatabaseConfig.STORAGE, BLOB_STORAGE); + props.setProperty(BlobStorageConfig.TABLE_METADATA_NAMESPACE, ANY_TABLE_METADATA_NAMESPACE); + props.setProperty( + BlobStorageConfig.PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES, + ANY_PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES); + props.setProperty( + BlobStorageConfig.PARALLEL_UPLOAD_MAX_PARALLELISM, ANY_PARALLEL_UPLOAD_MAX_PARALLELISM); + props.setProperty( + BlobStorageConfig.PARALLEL_UPLOAD_THRESHOLD_IN_BYTES, + ANY_PARALLEL_UPLOAD_THRESHOLD_IN_BYTES); + props.setProperty(BlobStorageConfig.REQUEST_TIMEOUT_IN_SECONDS, ANY_REQUEST_TIMEOUT_IN_SECONDS); + + // Act + BlobStorageConfig config = new BlobStorageConfig(new DatabaseConfig(props)); + + // Assert + assertThat(config.getEndpoint()).isEqualTo(ANY_ENDPOINT); + assertThat(config.getUsername()).isEqualTo(ANY_USERNAME); + assertThat(config.getPassword()).isEqualTo(ANY_PASSWORD); + assertThat(config.getBucket()).isEqualTo(ANY_BUCKET); + assertThat(config.getMetadataNamespace()).isEqualTo(ANY_TABLE_METADATA_NAMESPACE); + assertThat(config.getParallelUploadBlockSizeInBytes()) + .isEqualTo(Long.parseLong(ANY_PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES)); + assertThat(config.getParallelUploadMaxParallelism()) + .isEqualTo(Integer.parseInt(ANY_PARALLEL_UPLOAD_MAX_PARALLELISM)); + assertThat(config.getParallelUploadThresholdInBytes()) + .isEqualTo(Long.parseLong(ANY_PARALLEL_UPLOAD_THRESHOLD_IN_BYTES)); + assertThat(config.getRequestTimeoutInSeconds()) + .isEqualTo(Integer.parseInt(ANY_REQUEST_TIMEOUT_IN_SECONDS)); + } + + @Test + public void constructor_PropertiesWithoutNonMandatoryOptionsGiven_ShouldLoadProperly() { + // Arrange + Properties props = new Properties(); + props.setProperty(DatabaseConfig.CONTACT_POINTS, ANY_CONTACT_POINT); + props.setProperty(DatabaseConfig.USERNAME, ANY_USERNAME); + props.setProperty(DatabaseConfig.PASSWORD, ANY_PASSWORD); + props.setProperty(DatabaseConfig.STORAGE, BLOB_STORAGE); + + // Act + BlobStorageConfig config = new BlobStorageConfig(new DatabaseConfig(props)); + + // Assert + assertThat(config.getEndpoint()).isEqualTo(ANY_ENDPOINT); + assertThat(config.getUsername()).isEqualTo(ANY_USERNAME); + assertThat(config.getPassword()).isEqualTo(ANY_PASSWORD); + assertThat(config.getBucket()).isEqualTo(ANY_BUCKET); + assertThat(config.getParallelUploadBlockSizeInBytes()) + .isEqualTo(BlobStorageConfig.DEFAULT_PARALLEL_UPLOAD_BLOCK_SIZE_IN_BYTES); + assertThat(config.getParallelUploadMaxParallelism()) + .isEqualTo(BlobStorageConfig.DEFAULT_PARALLEL_UPLOAD_MAX_PARALLELISM); + assertThat(config.getParallelUploadThresholdInBytes()) + .isEqualTo(BlobStorageConfig.DEFAULT_PARALLEL_UPLOAD_THRESHOLD_IN_BYTES); + assertThat(config.getRequestTimeoutInSeconds()) + .isEqualTo(BlobStorageConfig.DEFAULT_REQUEST_TIMEOUT_IN_SECONDS); + } + + @Test + public void constructor_WithoutStorage_ShouldThrowIllegalArgumentException() { + // Arrange + Properties props = new Properties(); + props.setProperty(DatabaseConfig.CONTACT_POINTS, ANY_CONTACT_POINT); + props.setProperty(DatabaseConfig.USERNAME, ANY_USERNAME); + props.setProperty(DatabaseConfig.PASSWORD, ANY_PASSWORD); + + // Act Assert + assertThatThrownBy(() -> new BlobStorageConfig(new DatabaseConfig(props))) + 
.isInstanceOf(IllegalArgumentException.class); + } + + @Test + public void + constructor_PropertiesWithEmptyContactPointsGiven_ShouldThrowIllegalArgumentException() { + // Arrange + Properties props = new Properties(); + props.setProperty(DatabaseConfig.PASSWORD, ANY_PASSWORD); + props.setProperty(DatabaseConfig.STORAGE, BLOB_STORAGE); + + // Act + assertThatThrownBy(() -> new BlobStorageConfig(new DatabaseConfig(props))) + .isInstanceOf(IllegalArgumentException.class); + } +} diff --git a/integration-test/src/main/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitSpecificIntegrationTestBase.java b/integration-test/src/main/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitSpecificIntegrationTestBase.java index 8d622a82c1..95610f9a87 100644 --- a/integration-test/src/main/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitSpecificIntegrationTestBase.java +++ b/integration-test/src/main/java/com/scalar/db/transaction/consensuscommit/ConsensusCommitSpecificIntegrationTestBase.java @@ -4469,8 +4469,9 @@ void scan_OverlappingPutWithConjunctionsGivenBefore_ShouldThrowIllegalArgumentEx @ParameterizedTest @EnumSource(Isolation.class) - void scanWithIndex_PutWithOverlappedIndexKeyAndNonOverlappedConjunctionsGivenBefore_ShouldScan( - Isolation isolation) throws TransactionException { + public void + scanWithIndex_PutWithOverlappedIndexKeyAndNonOverlappedConjunctionsGivenBefore_ShouldScan( + Isolation isolation) throws TransactionException { // Arrange ConsensusCommitManager manager = createConsensusCommitManager(isolation); DistributedTransaction transaction = manager.begin(); @@ -4494,7 +4495,7 @@ void scanWithIndex_PutWithOverlappedIndexKeyAndNonOverlappedConjunctionsGivenBef @ParameterizedTest @EnumSource(Isolation.class) - void + public void scanWithIndex_OverlappingPutWithNonIndexedColumnGivenBefore_ShouldThrowIllegalArgumentException( Isolation isolation) throws TransactionException { // Arrange @@ -4517,7 +4518,7 @@ void scanWithIndex_PutWithOverlappedIndexKeyAndNonOverlappedConjunctionsGivenBef @ParameterizedTest @EnumSource(Isolation.class) - void + public void scanWithIndex_NonOverlappingPutWithIndexedColumnGivenBefore_ShouldThrowIllegalArgumentException( Isolation isolation) throws TransactionException { // Arrange @@ -4538,8 +4539,9 @@ void scanWithIndex_PutWithOverlappedIndexKeyAndNonOverlappedConjunctionsGivenBef @ParameterizedTest @EnumSource(Isolation.class) - void scanWithIndex_OverlappingPutWithIndexedColumnGivenBefore_ShouldThrowIllegalArgumentException( - Isolation isolation) throws TransactionException { + public void + scanWithIndex_OverlappingPutWithIndexedColumnGivenBefore_ShouldThrowIllegalArgumentException( + Isolation isolation) throws TransactionException { // Arrange ConsensusCommitManager manager = createConsensusCommitManager(isolation); populateRecord(manager, namespace1, TABLE_1); @@ -4558,7 +4560,7 @@ void scanWithIndex_OverlappingPutWithIndexedColumnGivenBefore_ShouldThrowIllegal @ParameterizedTest @EnumSource(Isolation.class) - void + public void scanWithIndex_OverlappingPutWithIndexedColumnAndConjunctionsGivenBefore_ShouldThrowIllegalArgumentException( Isolation isolation) throws TransactionException { // Arrange @@ -5695,7 +5697,7 @@ void scan_ScanAllWithLimitGiven_WithSerializable_ShouldNotThrowAnyException() } @Test - void scan_ScanWithIndexGiven_WithSerializable_ShouldNotThrowAnyException() + public void scan_ScanWithIndexGiven_WithSerializable_ShouldNotThrowAnyException() throws TransactionException { // Arrange 
ConsensusCommitManager manager = createConsensusCommitManager(Isolation.SERIALIZABLE); @@ -5761,7 +5763,7 @@ void scan_ScanWithIndexGiven_WithSerializable_ShouldNotThrowAnyException() } @Test - void + public void scan_ScanWithIndexGiven_RecordUpdatedByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() throws TransactionException { // Arrange @@ -5838,8 +5840,9 @@ void scan_ScanWithIndexGiven_WithSerializable_ShouldNotThrowAnyException() } @Test - void scan_ScanWithIndexGiven_RecordUpdatedByMyself_WithSerializable_ShouldNotThrowAnyException() - throws TransactionException { + public void + scan_ScanWithIndexGiven_RecordUpdatedByMyself_WithSerializable_ShouldNotThrowAnyException() + throws TransactionException { // Arrange ConsensusCommitManager manager = createConsensusCommitManager(Isolation.SERIALIZABLE); manager.mutate( @@ -5913,7 +5916,7 @@ void scan_ScanWithIndexGiven_RecordUpdatedByMyself_WithSerializable_ShouldNotThr } @Test - void + public void scan_ScanWithIndexGiven_RecordDeletedByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() throws TransactionException { // Arrange @@ -5989,8 +5992,9 @@ void scan_ScanWithIndexGiven_RecordUpdatedByMyself_WithSerializable_ShouldNotThr } @Test - void scan_ScanWithIndexGiven_RecordDeletedByMyself_WithSerializable_ShouldNotThrowAnyException() - throws TransactionException { + public void + scan_ScanWithIndexGiven_RecordDeletedByMyself_WithSerializable_ShouldNotThrowAnyException() + throws TransactionException { // Arrange ConsensusCommitManager manager = createConsensusCommitManager(Isolation.SERIALIZABLE); manager.mutate( @@ -6063,7 +6067,7 @@ void scan_ScanWithIndexGiven_RecordDeletedByMyself_WithSerializable_ShouldNotThr } @Test - void scan_ScanWithIndexWithLimitGiven_WithSerializable_ShouldNotThrowAnyException() + public void scan_ScanWithIndexWithLimitGiven_WithSerializable_ShouldNotThrowAnyException() throws TransactionException { // Arrange ConsensusCommitManager manager = createConsensusCommitManager(Isolation.SERIALIZABLE); @@ -6130,7 +6134,7 @@ void scan_ScanWithIndexWithLimitGiven_WithSerializable_ShouldNotThrowAnyExceptio } @Test - void get_GetWithIndexGiven_WithSerializable_ShouldNotThrowAnyException() + public void get_GetWithIndexGiven_WithSerializable_ShouldNotThrowAnyException() throws TransactionException { // Arrange ConsensusCommitManager manager = createConsensusCommitManager(Isolation.SERIALIZABLE); @@ -6162,7 +6166,7 @@ void get_GetWithIndexGiven_WithSerializable_ShouldNotThrowAnyException() } @Test - void + public void get_GetWithIndexGiven_RecordUpdatedByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() throws TransactionException { // Arrange @@ -6205,8 +6209,9 @@ void get_GetWithIndexGiven_WithSerializable_ShouldNotThrowAnyException() } @Test - void get_GetWithIndexGiven_RecordUpdatedByMyself_WithSerializable_ShouldNotThrowAnyException() - throws TransactionException { + public void + get_GetWithIndexGiven_RecordUpdatedByMyself_WithSerializable_ShouldNotThrowAnyException() + throws TransactionException { // Arrange ConsensusCommitManager manager = createConsensusCommitManager(Isolation.SERIALIZABLE); manager.insert( @@ -6246,7 +6251,7 @@ void get_GetWithIndexGiven_RecordUpdatedByMyself_WithSerializable_ShouldNotThrow } @Test - void + public void get_GetWithIndexGiven_RecordDeletedByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() throws TransactionException { // Arrange @@ -6288,8 +6293,9 @@ void 
get_GetWithIndexGiven_RecordUpdatedByMyself_WithSerializable_ShouldNotThrow } @Test - void get_GetWithIndexGiven_RecordDeletedByMyself_WithSerializable_ShouldNotThrowAnyException() - throws TransactionException { + public void + get_GetWithIndexGiven_RecordDeletedByMyself_WithSerializable_ShouldNotThrowAnyException() + throws TransactionException { // Arrange ConsensusCommitManager manager = createConsensusCommitManager(Isolation.SERIALIZABLE); manager.insert( @@ -6545,8 +6551,9 @@ void getScanner_RecordInsertedByAnotherTransaction_WithSerializable_ShouldNotThr } @Test - void get_GetWithIndexGiven_NoRecordsInIndexRange_WithSerializable_ShouldNotThrowAnyException() - throws TransactionException { + public void + get_GetWithIndexGiven_NoRecordsInIndexRange_WithSerializable_ShouldNotThrowAnyException() + throws TransactionException { // Arrange ConsensusCommitManager manager = createConsensusCommitManager(Isolation.SERIALIZABLE); @@ -6566,7 +6573,7 @@ void get_GetWithIndexGiven_NoRecordsInIndexRange_WithSerializable_ShouldNotThrow } @Test - void + public void get_GetWithIndexGiven_RecordInsertedIntoIndexRangeByMyself_WithSerializable_ShouldNotThrowAnyException() throws TransactionException { // Arrange @@ -6608,7 +6615,7 @@ void get_GetWithIndexGiven_NoRecordsInIndexRange_WithSerializable_ShouldNotThrow } @Test - void + public void get_GetWithIndexGiven_RecordInsertedIntoIndexRangeByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() throws TransactionException { // Arrange @@ -6652,7 +6659,7 @@ void get_GetWithIndexGiven_NoRecordsInIndexRange_WithSerializable_ShouldNotThrow } @Test - void + public void get_GetWithIndexGiven_NoRecordsInIndexRange_RecordInsertedIntoIndexRangeByMyself_WithSerializable_ShouldNotThrowAnyException() throws TransactionException { // Arrange @@ -6683,7 +6690,7 @@ void get_GetWithIndexGiven_NoRecordsInIndexRange_WithSerializable_ShouldNotThrow } @Test - void + public void get_GetWithIndexGiven_NoRecordsInIndexRange_RecordInsertedIntoIndexRangeByAnotherTransaction_WithSerializable_ShouldThrowCommitConflictException() throws TransactionException { // Arrange @@ -6717,7 +6724,7 @@ void get_GetWithIndexGiven_NoRecordsInIndexRange_WithSerializable_ShouldNotThrow @ParameterizedTest @EnumSource(Isolation.class) - void getAndUpdate_GetWithIndexGiven_ShouldUpdate(Isolation isolation) + public void getAndUpdate_GetWithIndexGiven_ShouldUpdate(Isolation isolation) throws TransactionException { // Arrange ConsensusCommitManager manager = createConsensusCommitManager(isolation); @@ -6772,7 +6779,7 @@ void getAndUpdate_GetWithIndexGiven_ShouldUpdate(Isolation isolation) @ParameterizedTest @EnumSource(Isolation.class) - void scanAndUpdate_ScanWithIndexGiven_ShouldUpdate(Isolation isolation) + public void scanAndUpdate_ScanWithIndexGiven_ShouldUpdate(Isolation isolation) throws TransactionException { // Arrange ConsensusCommitManager manager = createConsensusCommitManager(isolation);