From 94734b93692c7531f307cd56840f0f2c630587c0 Mon Sep 17 00:00:00 2001
From: Piotr Korkus
Date: Mon, 29 Dec 2025 19:08:06 +0100
Subject: [PATCH 1/2] fit: refactor

---
 .github/workflows/build_and_test_on_every_pr.yml | 2 +-
 MODULE.bazel | 2 +-
 autosd/MODULE.bazel | 2 +-
 feature_integration_tests/README.md | 14 ++++----
 .../python_test_cases/pytest.ini | 9 ------
 .../{python_test_cases => test_cases}/BUILD | 10 +++---
 .../conftest.py | 19 +----------
 .../test_cases/internal/dir_utils.py | 32 +++++++++++++++++++
 .../internal}/fit_scenario.py | 29 -----------------
 .../test_cases/internal/test_properties.py | 10 ++++++
 .../test_cases/pytest.ini | 7 ++++
 .../requirements.txt | 0
 .../requirements.txt.lock | 4 +--
 .../test_orchestartion_with_persistency.py | 17 ++--------
 .../rust}/BUILD | 4 +--
 .../rust}/src/internals/mod.rs | 0
 .../rust}/src/internals/runtime_helper.rs | 0
 .../rust}/src/main.rs | 0
 .../rust}/src/scenarios/basic/mod.rs | 0
 .../basic/orchestration_with_persistency.rs | 0
 .../rust}/src/scenarios/mod.rs | 0
 scripts/generate_rust_analyzer_support.sh | 2 +-
 22 files changed, 73 insertions(+), 90 deletions(-)
 delete mode 100644 feature_integration_tests/python_test_cases/pytest.ini
 rename feature_integration_tests/{python_test_cases => test_cases}/BUILD (75%)
 rename feature_integration_tests/{python_test_cases => test_cases}/conftest.py (76%)
 create mode 100644 feature_integration_tests/test_cases/internal/dir_utils.py
 rename feature_integration_tests/{python_test_cases => test_cases/internal}/fit_scenario.py (77%)
 create mode 100644 feature_integration_tests/test_cases/internal/test_properties.py
 create mode 100644 feature_integration_tests/test_cases/pytest.ini
 rename feature_integration_tests/{python_test_cases => test_cases}/requirements.txt (100%)
 rename feature_integration_tests/{python_test_cases => test_cases}/requirements.txt.lock (98%)
 rename feature_integration_tests/{python_test_cases => test_cases}/tests/basic/test_orchestartion_with_persistency.py (83%)
 rename feature_integration_tests/{rust_test_scenarios => test_scenarios/rust}/BUILD (91%)
 rename feature_integration_tests/{rust_test_scenarios => test_scenarios/rust}/src/internals/mod.rs (100%)
 rename feature_integration_tests/{rust_test_scenarios => test_scenarios/rust}/src/internals/runtime_helper.rs (100%)
 rename feature_integration_tests/{rust_test_scenarios => test_scenarios/rust}/src/main.rs (100%)
 rename feature_integration_tests/{rust_test_scenarios => test_scenarios/rust}/src/scenarios/basic/mod.rs (100%)
 rename feature_integration_tests/{rust_test_scenarios => test_scenarios/rust}/src/scenarios/basic/orchestration_with_persistency.rs (100%)
 rename feature_integration_tests/{rust_test_scenarios => test_scenarios/rust}/src/scenarios/mod.rs (100%)

diff --git a/.github/workflows/build_and_test_on_every_pr.yml b/.github/workflows/build_and_test_on_every_pr.yml
index 05e861166e..b0fafafe6e 100644
--- a/.github/workflows/build_and_test_on_every_pr.yml
+++ b/.github/workflows/build_and_test_on_every_pr.yml
@@ -53,4 +53,4 @@ jobs:
           done < ci/showcase_targets_run.txt
       - name: Feature Integration Tests
         run: |
-          bazel test --config bl-x86_64-linux //feature_integration_tests/python_test_cases:fit
+          bazel test --config bl-x86_64-linux //feature_integration_tests/test_cases:fit
diff --git a/MODULE.bazel b/MODULE.bazel
index 3db57550c8..2a6fdba698 100644
--- a/MODULE.bazel
+++ b/MODULE.bazel
@@ -37,7 +37,7 @@ pip = use_extension("@rules_python//python/extensions:pip.bzl", "pip", dev_depen
 pip.parse(
"pip_score_venv_test", python_version = PYTHON_VERSION, - requirements_lock = "//feature_integration_tests/python_test_cases:requirements.txt.lock", + requirements_lock = "//feature_integration_tests/test_cases:requirements.txt.lock", ) use_repo(pip, "pip_score_venv_test") diff --git a/autosd/MODULE.bazel b/autosd/MODULE.bazel index 1ea1e1e142..11c1e5ec3e 100644 --- a/autosd/MODULE.bazel +++ b/autosd/MODULE.bazel @@ -35,7 +35,7 @@ pip = use_extension("@rules_python//python/extensions:pip.bzl", "pip", dev_depen pip.parse( hub_name = "pip_score_venv_test", python_version = PYTHON_VERSION, - requirements_lock = "//feature_integration_tests/python_test_cases:requirements.txt.lock", + requirements_lock = "//feature_integration_tests/test_cases:requirements.txt.lock", ) use_repo(pip, "pip_score_venv_test") diff --git a/feature_integration_tests/README.md b/feature_integration_tests/README.md index 902e6d11fb..7ca394b931 100644 --- a/feature_integration_tests/README.md +++ b/feature_integration_tests/README.md @@ -4,13 +4,13 @@ This directory contains Feature Integration Tests for the S-CORE project. It inc ## Structure -- `python_test_cases/` — Python-based integration test cases +- `test_cases/` — Python-based integration test cases - `conftest.py` — Pytest configuration and fixtures - `fit_scenario.py` — Base scenario class - `requirements.txt` — Python dependencies - `BUILD` — Bazel build and test definitions - `tests/` — Test cases (e.g., orchestration with persistency) -- `rust_test_scenarios/` — Rust-based integration test scenarios +- `test_scenarios/` — Rust-based integration test scenarios - `src/` — Rust source code for test scenarios - `BUILD` — Bazel build definitions @@ -21,19 +21,19 @@ This directory contains Feature Integration Tests for the S-CORE project. It inc Python tests are managed with Bazel and Pytest. To run the main test target: ```sh -bazel test //feature_integration_tests/python_test_cases:fit +bazel test //feature_integration_tests/test_cases:fit ``` ### Rust Test Scenarios -Rust test scenarios are defined in `rust_test_scenarios/src/scenarios`. Build and run them using Bazel: +Rust test scenarios are defined in `test_scenarios/src/scenarios`. 
 
 ```sh
-bazel build //feature_integration_tests/rust_test_scenarios
+bazel build //feature_integration_tests/test_scenarios/rust:test_scenarios
 ```
 
 ```sh
-bazel run //feature_integration_tests/rust_test_scenarios -- --list-scenarios
+bazel run //feature_integration_tests/test_scenarios/rust:test_scenarios -- --list-scenarios
 ```
 
 ## Updating Python Requirements
 
 To update Python dependencies:
 
 ```sh
-bazel run //feature_integration_tests/python_test_cases:requirements.update
+bazel run //feature_integration_tests/test_cases:requirements.update
 ```
diff --git a/feature_integration_tests/python_test_cases/pytest.ini b/feature_integration_tests/python_test_cases/pytest.ini
deleted file mode 100644
index 79d506db70..0000000000
--- a/feature_integration_tests/python_test_cases/pytest.ini
+++ /dev/null
@@ -1,9 +0,0 @@
-[pytest]
-addopts = -v
-testpaths = tests
-markers =
-    PartiallyVerifies
-    FullyVerifies
-    Description
-    TestType
-    DerivationTechnique
diff --git a/feature_integration_tests/python_test_cases/BUILD b/feature_integration_tests/test_cases/BUILD
similarity index 75%
rename from feature_integration_tests/python_test_cases/BUILD
rename to feature_integration_tests/test_cases/BUILD
index 3aa8031e9b..fc37e07539 100644
--- a/feature_integration_tests/python_test_cases/BUILD
+++ b/feature_integration_tests/test_cases/BUILD
@@ -3,10 +3,10 @@ load("@rules_python//python:pip.bzl", "compile_pip_requirements")
 load("@score_tooling//python_basics:defs.bzl", "score_py_pytest", "score_virtualenv")
 
 # In order to update the requirements, change the `requirements.txt` file and run:
-# `bazel run //feature_integration_tests/python_test_cases:requirements.update`.
+# `bazel run //feature_integration_tests/test_cases:requirements.update`.
 # This will update the `requirements.txt.lock` file.
 # To upgrade all dependencies to their latest versions, run:
-# `bazel run //feature_integration_tests/python_test_cases:requirements.update -- --upgrade`.
+# `bazel run //feature_integration_tests/test_cases:requirements.update -- --upgrade`.
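+# The generated lock file is consumed by pip.parse() in MODULE.bazel
+# (hub "pip_score_venv_test").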
 
 compile_pip_requirements(
     name = "requirements",
     srcs = [
@@ -28,14 +28,14 @@ score_virtualenv(
 
 # Tests targets
 score_py_pytest(
     name = "fit",
-    srcs = glob(["tests/**/*.py"]) + ["conftest.py", "fit_scenario.py"],
+    srcs = glob(["tests/**/*.py"]) + glob(["internal/**/*.py"]) + ["conftest.py"],
     args = [
         "--traces=all",
-        "--rust-target-path=$(rootpath //feature_integration_tests/rust_test_scenarios)",
+        "--rust-target-path=$(rootpath //feature_integration_tests/test_scenarios/rust:test_scenarios)",
     ],
     data = [
         ":python_tc_venv",
-        "//feature_integration_tests/rust_test_scenarios",
+        "//feature_integration_tests/test_scenarios/rust:test_scenarios",
     ],
     env = {
         "RUST_BACKTRACE": "1",
diff --git a/feature_integration_tests/python_test_cases/conftest.py b/feature_integration_tests/test_cases/conftest.py
similarity index 76%
rename from feature_integration_tests/python_test_cases/conftest.py
rename to feature_integration_tests/test_cases/conftest.py
index b07758d82f..b06b60f28a 100644
--- a/feature_integration_tests/python_test_cases/conftest.py
+++ b/feature_integration_tests/test_cases/conftest.py
@@ -21,7 +21,7 @@ def pytest_addoption(parser):
     parser.addoption(
         "--rust-target-name",
         type=str,
-        default="//feature_integration_tests/rust_test_scenarios:rust_test_scenarios",
+        default="//feature_integration_tests/test_scenarios/rust:test_scenarios",
         help="Rust test scenario executable target.",
     )
     parser.addoption(
@@ -64,20 +64,3 @@ def pytest_sessionstart(session):
 
     except Exception as e:
         pytest.exit(str(e), returncode=1)
-
-
-def pytest_collection_modifyitems(items: list[pytest.Function]):
-    markers_to_process = (
-        "PartiallyVerifies",
-        "FullyVerifies",
-        "Description",
-        "TestType",
-        "DerivationTechnique",
-    )
-    for item in items:
-        # Add custom markers info to XML report
-        for marker in item.iter_markers():
-            if marker.name not in markers_to_process:
-                continue
-
-            item.user_properties.append((marker.name, marker.args[0]))
diff --git a/feature_integration_tests/test_cases/internal/dir_utils.py b/feature_integration_tests/test_cases/internal/dir_utils.py
new file mode 100644
index 0000000000..eafb5eb64e
--- /dev/null
+++ b/feature_integration_tests/test_cases/internal/dir_utils.py
@@ -0,0 +1,32 @@
+import shutil
+from pathlib import Path
+from typing import Generator
+
+import pytest
+
+
+def temp_dir_common(
+    tmp_path_factory: pytest.TempPathFactory, base_name: str, *args: str
+) -> Generator[Path, None, None]:
+    """
+    Create a temporary directory and remove it after the test.
+    Common implementation to be reused by fixtures.
+
+    Returns a generator providing a numbered path to the temporary directory,
+    e.g. '<tmp_root>/<base_name>-<args><number>/'.
+
+    Parameters
+    ----------
+    tmp_path_factory : pytest.TempPathFactory
+        Factory for temporary directories.
+    base_name : str
+        Base directory name.
+        Using 'self.__class__.__name__' is recommended.
+    *args : str
+        Other parameters to be included in the directory name.
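+
+    Examples
+    --------
+    Typical use from a class-scoped fixture (the pattern used by the tests
+    in this patch):
+
+    >>> @pytest.fixture(scope="class")
+    ... def temp_dir(self, tmp_path_factory):
+    ...     yield from temp_dir_common(tmp_path_factory, self.__class__.__name__)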
+ """ + parts = [base_name, *args] + dir_name = "-".join(parts) + dir_path = tmp_path_factory.mktemp(dir_name, numbered=True) + yield dir_path + shutil.rmtree(dir_path) diff --git a/feature_integration_tests/python_test_cases/fit_scenario.py b/feature_integration_tests/test_cases/internal/fit_scenario.py similarity index 77% rename from feature_integration_tests/python_test_cases/fit_scenario.py rename to feature_integration_tests/test_cases/internal/fit_scenario.py index f76406d614..c94c39662c 100644 --- a/feature_integration_tests/python_test_cases/fit_scenario.py +++ b/feature_integration_tests/test_cases/internal/fit_scenario.py @@ -1,6 +1,4 @@ -import shutil from pathlib import Path -from typing import Generator import pytest from testing_utils import ( @@ -12,33 +10,6 @@ ) -def temp_dir_common( - tmp_path_factory: pytest.TempPathFactory, base_name: str, *args: str -) -> Generator[Path, None, None]: - """ - Create temporary directory and remove it after test. - Common implementation to be reused by fixtures. - - Returns generator providing numbered path to temporary directory. - E.g., '/--/'. - - Parameters - ---------- - tmp_path_factory : pytest.TempPathFactory - Factory for temporary directories. - base_name : str - Base directory name. - 'self.__class__.__name__' use is recommended. - *args : Any - Other parameters to be included in directory name. - """ - parts = [base_name, *args] - dir_name = "-".join(parts) - dir_path = tmp_path_factory.mktemp(dir_name, numbered=True) - yield dir_path - shutil.rmtree(dir_path) - - class FitScenario(Scenario): """ CIT test scenario definition. diff --git a/feature_integration_tests/test_cases/internal/test_properties.py b/feature_integration_tests/test_cases/internal/test_properties.py new file mode 100644 index 0000000000..2f30b7b7a1 --- /dev/null +++ b/feature_integration_tests/test_cases/internal/test_properties.py @@ -0,0 +1,10 @@ +try: + from attribute_plugin import add_test_properties # type: ignore[import-untyped] +except ImportError: + # Define no-op decorator if attribute_plugin is not available (outside bazel) + # Keeps IDE debugging functionality + def add_test_properties(*args, **kwargs): + def decorator(func): + return func # No-op decorator + + return decorator diff --git a/feature_integration_tests/test_cases/pytest.ini b/feature_integration_tests/test_cases/pytest.ini new file mode 100644 index 0000000000..f3ad4f45dd --- /dev/null +++ b/feature_integration_tests/test_cases/pytest.ini @@ -0,0 +1,7 @@ +[pytest] +addopts = -v +testpaths = tests +pythonpath = + feature_integration_tests/test_cases + feature_integration_tests/test_cases/internal + feature_integration_tests/test_cases/tests diff --git a/feature_integration_tests/python_test_cases/requirements.txt b/feature_integration_tests/test_cases/requirements.txt similarity index 100% rename from feature_integration_tests/python_test_cases/requirements.txt rename to feature_integration_tests/test_cases/requirements.txt diff --git a/feature_integration_tests/python_test_cases/requirements.txt.lock b/feature_integration_tests/test_cases/requirements.txt.lock similarity index 98% rename from feature_integration_tests/python_test_cases/requirements.txt.lock rename to feature_integration_tests/test_cases/requirements.txt.lock index 3eb053f6e2..c6d9e18ebd 100644 --- a/feature_integration_tests/python_test_cases/requirements.txt.lock +++ b/feature_integration_tests/test_cases/requirements.txt.lock @@ -2,7 +2,7 @@ # This file is autogenerated by pip-compile with Python 3.12 # by the 
 # by the following command:
 #
-#    bazel run //feature_integration_tests/python_test_cases:requirements.update
+#    bazel run //feature_integration_tests/test_cases:requirements.update
 #
 basedpyright==1.29.2 \
     --hash=sha256:12c49186003b9f69a028615da883ef97035ea2119a9e3f93a00091b3a27088a6 \
@@ -130,4 +130,4 @@ pytest-repeat==0.9.4 \
 # WARNING: pip install will require the following package to be hashed.
 # Consider using a hashable URL like https://github.com/jazzband/pip-tools/archive/SOMECOMMIT.zip
 testing-utils @ git+https://github.com/eclipse-score/testing_tools.git@v0.3.0
-    # via -r feature_integration_tests/python_test_cases/requirements.txt
+    # via -r feature_integration_tests/test_cases/requirements.txt
diff --git a/feature_integration_tests/python_test_cases/tests/basic/test_orchestartion_with_persistency.py b/feature_integration_tests/test_cases/tests/basic/test_orchestartion_with_persistency.py
similarity index 83%
rename from feature_integration_tests/python_test_cases/tests/basic/test_orchestartion_with_persistency.py
rename to feature_integration_tests/test_cases/tests/basic/test_orchestartion_with_persistency.py
index 182f9e3eec..e9a5b0d9dc 100644
--- a/feature_integration_tests/python_test_cases/tests/basic/test_orchestartion_with_persistency.py
+++ b/feature_integration_tests/test_cases/tests/basic/test_orchestartion_with_persistency.py
@@ -3,20 +3,9 @@
 from typing import Any, Generator
 
 import pytest
-
-try:
-    from attribute_plugin import add_test_properties  # type: ignore[import-untyped]
-except ImportError:
-    # Define no-op decorator if attribute_plugin is not available (outside bazel)
-    # Keeps IDE debugging functionality
-    def add_test_properties(*args, **kwargs):
-        def decorator(func):
-            return func  # No-op decorator
-
-        return decorator
-
-
-from fit_scenario import FitScenario, temp_dir_common
+from internal.dir_utils import temp_dir_common
+from internal.fit_scenario import FitScenario
+from internal.test_properties import add_test_properties
 from testing_utils import LogContainer
diff --git a/feature_integration_tests/rust_test_scenarios/BUILD b/feature_integration_tests/test_scenarios/rust/BUILD
similarity index 91%
rename from feature_integration_tests/rust_test_scenarios/BUILD
rename to feature_integration_tests/test_scenarios/rust/BUILD
index 41f582aed7..e6b82e0a69 100644
--- a/feature_integration_tests/rust_test_scenarios/BUILD
+++ b/feature_integration_tests/test_scenarios/rust/BUILD
@@ -14,9 +14,9 @@ load("@rules_rust//rust:defs.bzl", "rust_binary")
 
 rust_binary(
-    name = "rust_test_scenarios",
+    name = "test_scenarios",
     srcs = glob(["src/**/*.rs"]),
-    visibility = ["//feature_integration_tests/python_test_cases:__pkg__"],
+    visibility = ["//feature_integration_tests/test_cases:__pkg__"],
     tags = [
         "manual",
     ],
diff --git a/feature_integration_tests/rust_test_scenarios/src/internals/mod.rs b/feature_integration_tests/test_scenarios/rust/src/internals/mod.rs
similarity index 100%
rename from feature_integration_tests/rust_test_scenarios/src/internals/mod.rs
rename to feature_integration_tests/test_scenarios/rust/src/internals/mod.rs
diff --git a/feature_integration_tests/rust_test_scenarios/src/internals/runtime_helper.rs b/feature_integration_tests/test_scenarios/rust/src/internals/runtime_helper.rs
similarity index 100%
rename from feature_integration_tests/rust_test_scenarios/src/internals/runtime_helper.rs
rename to feature_integration_tests/test_scenarios/rust/src/internals/runtime_helper.rs
diff --git a/feature_integration_tests/rust_test_scenarios/src/main.rs b/feature_integration_tests/test_scenarios/rust/src/main.rs
similarity index 100%
rename from feature_integration_tests/rust_test_scenarios/src/main.rs
rename to feature_integration_tests/test_scenarios/rust/src/main.rs
diff --git a/feature_integration_tests/rust_test_scenarios/src/scenarios/basic/mod.rs b/feature_integration_tests/test_scenarios/rust/src/scenarios/basic/mod.rs
similarity index 100%
rename from feature_integration_tests/rust_test_scenarios/src/scenarios/basic/mod.rs
rename to feature_integration_tests/test_scenarios/rust/src/scenarios/basic/mod.rs
diff --git a/feature_integration_tests/rust_test_scenarios/src/scenarios/basic/orchestration_with_persistency.rs b/feature_integration_tests/test_scenarios/rust/src/scenarios/basic/orchestration_with_persistency.rs
similarity index 100%
rename from feature_integration_tests/rust_test_scenarios/src/scenarios/basic/orchestration_with_persistency.rs
rename to feature_integration_tests/test_scenarios/rust/src/scenarios/basic/orchestration_with_persistency.rs
diff --git a/feature_integration_tests/rust_test_scenarios/src/scenarios/mod.rs b/feature_integration_tests/test_scenarios/rust/src/scenarios/mod.rs
similarity index 100%
rename from feature_integration_tests/rust_test_scenarios/src/scenarios/mod.rs
rename to feature_integration_tests/test_scenarios/rust/src/scenarios/mod.rs
diff --git a/scripts/generate_rust_analyzer_support.sh b/scripts/generate_rust_analyzer_support.sh
index 23998bb4ce..a9840c2e45 100755
--- a/scripts/generate_rust_analyzer_support.sh
+++ b/scripts/generate_rust_analyzer_support.sh
@@ -3,4 +3,4 @@
 set -e
 
 # Manual targets are not taken into account, must be set explicitly
-bazel run @rules_rust//tools/rust_analyzer:gen_rust_project -- "@//feature_showcase/..." "@//feature_integration_tests/rust_test_scenarios:rust_test_scenarios"
\ No newline at end of file
+bazel run @rules_rust//tools/rust_analyzer:gen_rust_project -- "@//feature_showcase/..." "@//feature_integration_tests/test_scenarios/rust:test_scenarios"
\ No newline at end of file

From 41fde1e2d39a930147b1446ae5465ee9381a2095 Mon Sep 17 00:00:00 2001
From: Piotr Korkus
Date: Wed, 31 Dec 2025 10:22:46 +0100
Subject: [PATCH 2/2] wip

---
 .../test_orchestartion_with_persistency.py | 208 +++++++++++++++
 .../rust/src/scenarios/basic/mod.rs | 8 +-
 .../basic/orchestration_with_persistency.rs | 246 ++++++++++++++----
 3 files changed, 409 insertions(+), 53 deletions(-)

diff --git a/feature_integration_tests/test_cases/tests/basic/test_orchestartion_with_persistency.py b/feature_integration_tests/test_cases/tests/basic/test_orchestartion_with_persistency.py
index e9a5b0d9dc..4c9759e6cc 100644
--- a/feature_integration_tests/test_cases/tests/basic/test_orchestartion_with_persistency.py
+++ b/feature_integration_tests/test_cases/tests/basic/test_orchestartion_with_persistency.py
@@ -57,3 +57,211 @@
         kvs_file = temp_dir / "kvs_1_0.json"
         data = json.loads(kvs_file.read_text())
         assert data["v"]["run_cycle_number"]["v"] == run_count
+
+
+class TestConcurrentKVS(FitScenario):
+    """
+    Tests the orchestration-with-persistency scenario using multiple KVS files.
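+    Three KVS instances are written concurrently, one per branch of the program.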
+ """ + + @pytest.fixture(scope="class") + def scenario_name(self) -> str: + return "basic.concurrent_kvs" + + @pytest.fixture(scope="class", params=[1, 5]) + def run_count(self, request) -> int: + return request.param + + @pytest.fixture(scope="class") + def temp_dir( + self, + tmp_path_factory: pytest.TempPathFactory, + run_count: int, # run_count is required to ensure proper order of fixture calls + ) -> Generator[Path, None, None]: + yield from temp_dir_common(tmp_path_factory, self.__class__.__name__) + + @pytest.fixture(scope="class") + def test_config(self, run_count: int, temp_dir: Path) -> dict[str, Any]: + return { + "runtime": {"task_queue_size": 2, "workers": 4}, + "test": {"run_count": run_count, "kvs_path": str(temp_dir)}, + } + + def test_kvs_logged_execution(self, run_count: int, logs_info_level: LogContainer): + """Verify that all runs have been logged.""" + logs = logs_info_level.get_logs(field="run_cycle_number") + for log_group in logs.group_by("kvs_instance_id").values(): + logged_cycles = [log.run_cycle_number for log in log_group] + expected_cycles = list(range(1, run_count + 1)) + assert logged_cycles == expected_cycles + + def test_kvs_write_results(self, temp_dir: Path, run_count: int): + """Verify that each KVS file contains correct final run count.""" + # Verify KVS Instance(1) + kvs1_file = temp_dir / "kvs_1_0.json" + data1 = json.loads(kvs1_file.read_text()) + assert data1["v"]["run_cycle_number"]["v"] == run_count + + # Verify KVS Instance(2) + kvs2_file = temp_dir / "kvs_2_0.json" + data2 = json.loads(kvs2_file.read_text()) + assert data2["v"]["run_cycle_number"]["v"] == run_count + + # Verify KVS Instance(3) + kvs3_file = temp_dir / "kvs_3_0.json" + data3 = json.loads(kvs3_file.read_text()) + assert data3["v"]["run_cycle_number"]["v"] == run_count + + +class TestMultipleKVS(FitScenario): + """ + Tests orchestration with persistency scenario using multiple KVS files. 
+ """ + + @pytest.fixture(scope="class") + def scenario_name(self) -> str: + return "basic.multiple_kvs" + + @pytest.fixture(scope="class") + def run_count(self) -> int: + return 1 + + @pytest.fixture(scope="class") + def temp_dir( + self, + tmp_path_factory: pytest.TempPathFactory, + run_count: int, # run_count is required to ensure proper order of fixture calls + ) -> Generator[Path, None, None]: + yield from temp_dir_common(tmp_path_factory, self.__class__.__name__) + + @pytest.fixture(scope="class") + def test_config(self, run_count: int, temp_dir: Path) -> dict[str, Any]: + return { + "runtime": {"task_queue_size": 256, "workers": 4}, + "test": {"run_count": run_count, "kvs_path": str(temp_dir)}, + } + + def test_kvs_cycle_write_results(self, temp_dir: Path, run_count: int): + """Verify that each KVS file contains correct final run count.""" + # Verify KVS Instance(1) + kvs1_file = temp_dir / "kvs_1_0.json" + data1 = json.loads(kvs1_file.read_text()) + assert data1["v"]["run_cycle_number"]["v"] == run_count + + def test_kvs_write_i32_max(self, temp_dir: Path): + """Verify that each KVS file contains correct final run count.""" + # Verify KVS Instance(2) + kvs2_file = temp_dir / "kvs_2_0.json" + data2 = json.loads(kvs2_file.read_text()) + assert data2["v"]["key_i32_max"]["v"] == 2147483647 + assert data2["v"]["key_i32_max"]["t"] == "i32" + + def test_kvs_write_i32_min(self, temp_dir: Path): + """Verify that each KVS file contains correct final run count.""" + # Verify KVS Instance(2) + kvs2_file = temp_dir / "kvs_2_0.json" + data2 = json.loads(kvs2_file.read_text()) + assert data2["v"]["key_i32_min"]["v"] == -2147483648 + assert data2["v"]["key_i32_min"]["t"] == "i32" + + def test_kvs_write_u32_max(self, temp_dir: Path): + """Verify that each KVS file contains correct final run count.""" + # Verify KVS Instance(2) + kvs2_file = temp_dir / "kvs_2_0.json" + data2 = json.loads(kvs2_file.read_text()) + assert data2["v"]["key_u32_max"]["v"] == 4294967295 + assert data2["v"]["key_u32_max"]["t"] == "u32" + + def test_kvs_write_u32_min(self, temp_dir: Path): + """Verify that each KVS file contains correct final run count.""" + # Verify KVS Instance(2) + kvs2_file = temp_dir / "kvs_2_0.json" + data2 = json.loads(kvs2_file.read_text()) + assert data2["v"]["key_u32_min"]["v"] == 0 + assert data2["v"]["key_u32_min"]["t"] == "u32" + + def test_kvs_write_i64_max(self, temp_dir: Path): + """Verify that each KVS file contains correct final run count.""" + # Verify KVS Instance(2) + kvs2_file = temp_dir / "kvs_2_0.json" + data2 = json.loads(kvs2_file.read_text()) + assert data2["v"]["key_i64_max"]["v"] == 9223372036854775807 + assert data2["v"]["key_i64_max"]["t"] == "i64" + + def test_kvs_write_i64_min(self, temp_dir: Path): + """Verify that each KVS file contains correct final run count.""" + # Verify KVS Instance(2) + kvs2_file = temp_dir / "kvs_2_0.json" + data2 = json.loads(kvs2_file.read_text()) + assert data2["v"]["key_i64_min"]["v"] == -9223372036854775808 + assert data2["v"]["key_i64_min"]["t"] == "i64" + + def test_kvs_write_u64_max(self, temp_dir: Path): + """Verify that each KVS file contains correct final run count.""" + # Verify KVS Instance(2) + kvs2_file = temp_dir / "kvs_2_0.json" + data2 = json.loads(kvs2_file.read_text()) + assert data2["v"]["key_u64_max"]["v"] == 18446744073709551615 + assert data2["v"]["key_u64_max"]["t"] == "u64" + + def test_kvs_write_u64_min(self, temp_dir: Path): + """Verify that each KVS file contains correct final run count.""" + # Verify KVS Instance(2) + 
kvs2_file = temp_dir / "kvs_2_0.json" + data2 = json.loads(kvs2_file.read_text()) + assert data2["v"]["key_u64_min"]["v"] == 0 + assert data2["v"]["key_u64_min"]["t"] == "u64" + + def test_kvs_write_f64(self, temp_dir: Path): + """Verify that each KVS file contains correct final run count.""" + # Verify KVS Instance(2) + kvs2_file = temp_dir / "kvs_2_0.json" + data2 = json.loads(kvs2_file.read_text()) + assert data2["v"]["key_f64"]["v"] == 1.2345 + assert data2["v"]["key_f64"]["t"] == "f64" + + def test_kvs_write_bool(self, temp_dir: Path): + """Verify that each KVS file contains correct final run count.""" + # Verify KVS Instance(2) + kvs2_file = temp_dir / "kvs_2_0.json" + data2 = json.loads(kvs2_file.read_text()) + assert data2["v"]["key_bool"]["v"] == True # noqa: E712 + assert data2["v"]["key_bool"]["t"] == "bool" + + def test_kvs_write_string(self, temp_dir: Path): + """Verify that each KVS file contains correct final run count.""" + # Verify KVS Instance(2) + kvs2_file = temp_dir / "kvs_2_0.json" + data2 = json.loads(kvs2_file.read_text()) + assert data2["v"]["key_String"]["v"] == "TestString" + assert data2["v"]["key_String"]["t"] == "str" + + def test_kvs_write_null(self, temp_dir: Path): + """Verify that each KVS file contains correct final run count.""" + # Verify KVS Instance(2) + kvs2_file = temp_dir / "kvs_2_0.json" + data2 = json.loads(kvs2_file.read_text()) + assert data2["v"]["key_Null"]["v"] is None + assert data2["v"]["key_Null"]["t"] == "null" + + def test_kvs_write_array(self, temp_dir: Path): + """Verify that each KVS file contains correct final run count.""" + # Verify KVS Instance(2) + kvs2_file = temp_dir / "kvs_2_0.json" + data2 = json.loads(kvs2_file.read_text()) + assert data2["v"]["key_Array"]["v"] == [ + {"t": "i32", "v": 1}, + {"t": "i32", "v": 2}, + {"t": "i32", "v": 3}, + ] + assert data2["v"]["key_Array"]["t"] == "arr" + + def test_kvs_write_map(self, temp_dir: Path): + """Verify that each KVS file contains correct final run count.""" + # Verify KVS Instance(2) + kvs2_file = temp_dir / "kvs_2_0.json" + data2 = json.loads(kvs2_file.read_text()) + assert data2["v"]["key_Map"]["v"] == { + "inner_key": {"t": "i32", "v": 1}, + } + assert data2["v"]["key_Map"]["t"] == "obj" diff --git a/feature_integration_tests/test_scenarios/rust/src/scenarios/basic/mod.rs b/feature_integration_tests/test_scenarios/rust/src/scenarios/basic/mod.rs index 62667b93a6..84ee538c14 100644 --- a/feature_integration_tests/test_scenarios/rust/src/scenarios/basic/mod.rs +++ b/feature_integration_tests/test_scenarios/rust/src/scenarios/basic/mod.rs @@ -16,9 +16,11 @@ mod orchestration_with_persistency; pub fn basic_scenario_group() -> Box { Box::new(ScenarioGroupImpl::new( "basic", - vec![Box::new( - orchestration_with_persistency::OrchestrationWithPersistency, - )], + vec![ + Box::new(orchestration_with_persistency::OrchestrationWithPersistency), + Box::new(orchestration_with_persistency::ConcurrentKvsOrchestrationWithPersistency), + Box::new(orchestration_with_persistency::MultipleKvsOrchestrationWithPersistency), + ], vec![], )) } diff --git a/feature_integration_tests/test_scenarios/rust/src/scenarios/basic/orchestration_with_persistency.rs b/feature_integration_tests/test_scenarios/rust/src/scenarios/basic/orchestration_with_persistency.rs index a9a7641a77..a8e696d4ca 100644 --- a/feature_integration_tests/test_scenarios/rust/src/scenarios/basic/orchestration_with_persistency.rs +++ b/feature_integration_tests/test_scenarios/rust/src/scenarios/basic/orchestration_with_persistency.rs 
@@ -18,14 +18,12 @@ use orchestration::{
     api::{design::Design, Orchestration},
     common::DesignConfig,
 };
-
-use rust_kvs::kvs_api::KvsApi;
-use rust_kvs::Kvs;
-use rust_kvs::KvsBuilder;
+use rust_kvs::prelude::{InstanceId, KvsDefaults, KvsLoad, KvsMap, KvsValue};
+use rust_kvs::{kvs_api::KvsApi, Kvs, KvsBuilder};
 use serde_json::Value;
-
+use std::{sync::Arc, vec};
 use test_scenarios_rust::scenario::Scenario;
-use tracing::info;
+use tracing::{field, info};
 
 use serde::Deserialize;
 
@@ -42,50 +40,25 @@ impl TestInput {
     }
 }
 
-use rust_kvs::prelude::{InstanceId, KvsDefaults, KvsLoad};
-use std::path::PathBuf;
-pub struct KvsParameters {
-    pub instance_id: InstanceId,
-    pub defaults: Option<KvsDefaults>,
-    pub kvs_load: Option<KvsLoad>,
-    pub dir: Option<PathBuf>,
-    pub snapshot_max_count: Option<usize>,
-}
+fn create_kvs(instance_id: usize, path: String) -> Arc<Kvs> {
+    let builder = KvsBuilder::new(InstanceId(instance_id))
+        .defaults(KvsDefaults::Optional)
+        .dir(path.clone())
+        .kvs_load(KvsLoad::Optional)
+        .snapshot_max_count(3);
 
-macro_rules! persistency_task {
-    ($path:expr) => {
-        move || kvs_save_cycle_number($path.clone())
-    };
+    let kvs = builder.build().expect("Failed to build KVS instance");
+    Arc::new(kvs)
 }
 
-async fn kvs_save_cycle_number(path: String) -> Result<(), UserErrValue> {
-    let params = KvsParameters {
-        instance_id: InstanceId(1),
-        defaults: Some(KvsDefaults::Optional),
-        kvs_load: Some(KvsLoad::Optional),
-        dir: Some(PathBuf::from(path)),
-        snapshot_max_count: Some(3),
-    };
-
-    // Set builder parameters.
-    let mut builder = KvsBuilder::new(params.instance_id);
-    if let Some(flag) = params.defaults {
-        builder = builder.defaults(flag);
-    }
-    if let Some(flag) = params.kvs_load {
-        builder = builder.kvs_load(flag);
-    }
-    if let Some(dir) = params.dir {
-        builder = builder.dir(dir.to_string_lossy().to_string());
-    }
-    if let Some(max_count) = params.snapshot_max_count {
-        builder = builder.snapshot_max_count(max_count);
-    }
-
-    // Create KVS.
-    let kvs: Kvs = builder.build().expect("Failed to build KVS instance");
+macro_rules! persistency_cycle_task {
+    ($kvs:expr) => {{
+        let kvs = Arc::clone(&$kvs);
+        move || kvs_save_cycle_number(Arc::clone(&kvs))
+    }};
+}
 
-    // Simple set/get.
+async fn kvs_save_cycle_number(kvs: Arc<Kvs>) -> Result<(), UserErrValue> {
     let key = "run_cycle_number";
     let last_cycle_number: u32 = kvs.get_value_as::<u32>(key).unwrap_or_else(|_| 0_u32);
 
@@ -95,24 +68,86 @@ async fn kvs_save_cycle_number(path: String) -> Result<(), UserErrValue> {
 
     kvs.flush().expect("Failed to flush KVS");
 
-    info!(run_cycle_number = value_read);
+    info!(
+        kvs_instance_id = field::debug(kvs.parameters().instance_id),
+        run_cycle_number = value_read
+    );
+
+    Ok(())
+}
+
persistency_save_task { + ($kvs:expr) => {{ + let kvs = Arc::clone(&$kvs); + move || kvs_save_data(Arc::clone(&kvs)) + }}; +} + +async fn kvs_save_data(kvs: Arc) -> Result<(), UserErrValue> { + // i32 + kvs.set_value("key_i32_min", i32::MIN) + .expect("Failed to set value"); + kvs.set_value("key_i32_max", i32::MAX) + .expect("Failed to set value"); + // u32 + kvs.set_value("key_u32_min", u32::MIN) + .expect("Failed to set value"); + kvs.set_value("key_u32_max", u32::MAX) + .expect("Failed to set value"); + // i64 + kvs.set_value("key_i64_min", i64::MIN) + .expect("Failed to set value"); + kvs.set_value("key_i64_max", i64::MAX) + .expect("Failed to set value"); + // u64 + kvs.set_value("key_u64_min", u64::MIN) + .expect("Failed to set value"); + kvs.set_value("key_u64_max", u64::MAX) + .expect("Failed to set value"); + // f64 + kvs.set_value("key_f64", 1.2345_f64) + .expect("Failed to set value"); + // bool + kvs.set_value("key_bool", true) + .expect("Failed to set value"); + // String + kvs.set_value("key_String", "TestString".to_string()) + .expect("Failed to set value"); + // Null + kvs.set_value("key_Null", KvsValue::Null) + .expect("Failed to set value"); + // Array + kvs.set_value( + "key_Array", + vec![ + KvsValue::from(1i32), + KvsValue::from(2i32), + KvsValue::from(3i32), + ], + ) + .expect("Failed to set value"); + // Map + let mut map = KvsMap::new(); + map.insert("inner_key".to_string(), KvsValue::from(1i32)); + kvs.set_value("key_Map", map).expect("Failed to set value"); + + kvs.flush().expect("Failed to flush KVS"); Ok(()) } fn single_sequence_design(kvs_path: String) -> Result { let mut design = Design::new("SingleSequence".into(), DesignConfig::default()); + let kvs = create_kvs(1, kvs_path); let kvs_cycle_tag = - design.register_invoke_async("KVS save cycle".into(), persistency_task!(kvs_path))?; + design.register_invoke_async("KVS save cycle".into(), persistency_cycle_task!(kvs))?; - // Create a program with actions design.add_program(file!(), move |_design_instance, builder| { builder.with_run_action( SequenceBuilder::new() .with_step(Invoke::from_tag(&kvs_cycle_tag, _design_instance.config())) .build(), ); - Ok(()) }); @@ -147,3 +182,114 @@ impl Scenario for OrchestrationWithPersistency { Ok(()) } } + +fn concurrent_kvs_design(kvs_path: String) -> Result { + let mut design = Design::new("ConcurrentKvs".into(), DesignConfig::default()); + + let kvs1 = create_kvs(1, kvs_path.clone()); + let kvs1_tag = + design.register_invoke_async("KVS1 save cycle".into(), persistency_cycle_task!(kvs1))?; + + let kvs2 = create_kvs(2, kvs_path.clone()); + let kvs2_tag = + design.register_invoke_async("KVS2 save cycle".into(), persistency_cycle_task!(kvs2))?; + + let kvs3 = create_kvs(3, kvs_path.clone()); + let kvs3_tag = + design.register_invoke_async("KVS3 save cycle".into(), persistency_cycle_task!(kvs3))?; + + design.add_program(file!(), move |_design_instance, builder| { + builder.with_run_action( + ConcurrencyBuilder::new() + .with_branch(Invoke::from_tag(&kvs1_tag, _design_instance.config())) + .with_branch(Invoke::from_tag(&kvs2_tag, _design_instance.config())) + .with_branch(Invoke::from_tag(&kvs3_tag, _design_instance.config())) + .build(_design_instance), + ); + Ok(()) + }); + + Ok(design) +} + +pub struct ConcurrentKvsOrchestrationWithPersistency; + +impl Scenario for ConcurrentKvsOrchestrationWithPersistency { + fn name(&self) -> &str { + "concurrent_kvs" + } + + fn run(&self, input: &str) -> Result<(), String> { + let logic = TestInput::new(input); + let mut rt = 
+        let mut rt = Runtime::from_json(input)?.build();
+
+        let orch = Orchestration::new()
+            .add_design(concurrent_kvs_design(logic.kvs_path).expect("Failed to create design"))
+            .design_done();
+
+        let mut program_manager = orch
+            .into_program_manager()
+            .expect("Failed to create programs");
+        let mut programs = program_manager.get_programs();
+
+        rt.block_on(async move {
+            let mut program = programs.pop().expect("Failed to pop program");
+            let _ = program.run_n(logic.run_count).await;
+        });
+
+        Ok(())
+    }
+}
+
+fn multiple_kvs_design(kvs_path: String) -> Result {
+    let mut design = Design::new("MultipleKvs".into(), DesignConfig::default());
+
+    let kvs1 = create_kvs(1, kvs_path.clone());
+    let kvs1_tag =
+        design.register_invoke_async("KVS1 save cycle".into(), persistency_cycle_task!(kvs1))?;
+
+    let kvs2 = create_kvs(2, kvs_path.clone());
+    let kvs2_tag =
+        design.register_invoke_async("KVS2 save data".into(), persistency_save_task!(kvs2))?;
+
+    design.add_program(file!(), move |_design_instance, builder| {
+        builder.with_run_action(
+            ConcurrencyBuilder::new()
+                .with_branch(Invoke::from_tag(&kvs1_tag, _design_instance.config()))
+                .with_branch(Invoke::from_tag(&kvs2_tag, _design_instance.config()))
+                .build(_design_instance),
+        );
+        Ok(())
+    });
+
+    Ok(design)
+}
+
+pub struct MultipleKvsOrchestrationWithPersistency;
+
+impl Scenario for MultipleKvsOrchestrationWithPersistency {
+    fn name(&self) -> &str {
+        "multiple_kvs"
+    }
+
+    fn run(&self, input: &str) -> Result<(), String> {
+        let logic = TestInput::new(input);
+        let mut rt = Runtime::from_json(input)?.build();
+
+        let orch = Orchestration::new()
+            .add_design(multiple_kvs_design(logic.kvs_path).expect("Failed to create design"))
+            .design_done();
+
+        let mut program_manager = orch
+            .into_program_manager()
+            .expect("Failed to create programs");
+        let mut programs = program_manager.get_programs();
+
+        rt.block_on(async move {
+            let mut program = programs.pop().expect("Failed to pop program");
+            let _ = program.run_n(logic.run_count).await;
+        });
+
+        Ok(())
+    }
+}
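
Note for reviewers: every Python test above decodes the same on-disk snapshot layout. A minimal self-contained sketch of that check, assuming only the `kvs_<instance>_<snapshot>.json` naming and the `{"t": ..., "v": ...}` entry layout asserted in the tests (`read_kvs_entry` and the example path are hypothetical, not part of this patch):

```python
import json
from pathlib import Path


def read_kvs_entry(kvs_dir: Path, instance_id: int, key: str):
    """Return (type_tag, value) for `key` in snapshot 0 of `instance_id`.

    Assumes the layout asserted in the tests: the file's top-level "v" map
    holds entries of the form {"t": <type tag>, "v": <value>}.
    """
    data = json.loads((kvs_dir / f"kvs_{instance_id}_0.json").read_text())
    entry = data["v"][key]
    return entry["t"], entry["v"]


# Example (hypothetical path): after one run of basic.multiple_kvs,
# instance 1 should hold run_cycle_number == 1.
# tag, value = read_kvs_entry(Path("/tmp/TestMultipleKVS0"), 1, "run_cycle_number")
# assert value == 1
```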