diff --git a/partitioned-burgers-1d/.solver-nutils-dgalerkin/compare_burgers.py b/partitioned-burgers-1d/.solver-nutils-dgalerkin/compare_burgers.py
new file mode 100644
index 000000000..6fa0eeb9e
--- /dev/null
+++ b/partitioned-burgers-1d/.solver-nutils-dgalerkin/compare_burgers.py
@@ -0,0 +1,54 @@
+import os
+import numpy as np
+import matplotlib.pyplot as plt
+
+DATA_DIR = os.path.dirname(os.path.abspath(__file__))
+
+data_dir_dgalerkin = os.path.join(DATA_DIR, "solver-nutils-dgalerkin", "data-training")
+data_dir_diffuse = os.path.join(DATA_DIR, "solver-scipy-fvolumes", "data-training")
+
+# file names are the same, folders are different
+files_ = os.listdir(data_dir_dgalerkin)
+files_ = sorted(f for f in files_ if f.endswith(".npz"))
+
+
+file_num = 0
+timestep = 0
+
+file_dgalerkin = os.path.join(data_dir_dgalerkin, files_[file_num])
+file_diffuse = os.path.join(data_dir_diffuse, files_[file_num])
+
+data_dgalerkin = np.load(file_dgalerkin)["Solver-Mesh-1D-Internal"]
+data_diffuse = np.load(file_diffuse)["Solver-Mesh-1D-Internal"]
+
+print(f"DG data shape: {data_dgalerkin.shape}")
+print(f"FV data shape: {data_diffuse.shape}")
+
+plt.figure(figsize=(12, 5))
+plt.plot(data_dgalerkin[timestep, :], label='DG Solver', color='blue')
+plt.plot(data_diffuse[timestep, :], label='FV Solver', color='orange', linestyle='--')
+plt.title(f'Comparison at Timestep {timestep}')
+plt.xlabel('Spatial Position')
+plt.ylabel('Solution Value')
+plt.legend()
+plt.savefig(os.path.join(DATA_DIR, f'comparison_timestep_{timestep}.png'))
+
+# plot the imshow with unified colormap
+vmin = min(data_dgalerkin.min(), data_diffuse.min())
+vmax = max(data_dgalerkin.max(), data_diffuse.max())
+
+plt.figure(figsize=(12, 5))
+plt.subplot(1, 2, 1)
+plt.imshow(data_dgalerkin.T, aspect='auto', cmap='viridis', vmin=vmin, vmax=vmax)
+plt.title('DG Solver Evolution')
+plt.ylabel('Spatial Position')
+plt.xlabel('Timestep')
+plt.colorbar(label='Solution Value')
+plt.subplot(1, 2, 2)
+plt.imshow(data_diffuse.T, aspect='auto', cmap='viridis', vmin=vmin, vmax=vmax)
+plt.title('FV Solver Evolution')
+plt.ylabel('Spatial Position')
+plt.xlabel('Timestep')
+plt.colorbar(label='Solution Value')
+plt.tight_layout()
+plt.show()
diff --git a/partitioned-burgers-1d/.solver-nutils-dgalerkin/comparison_DG_FV.png b/partitioned-burgers-1d/.solver-nutils-dgalerkin/comparison_DG_FV.png
new file mode 100644
index 000000000..f60abf4a8
Binary files /dev/null and b/partitioned-burgers-1d/.solver-nutils-dgalerkin/comparison_DG_FV.png differ
diff --git a/partitioned-burgers-1d/.solver-nutils-dgalerkin/solver.py b/partitioned-burgers-1d/.solver-nutils-dgalerkin/solver.py
new file mode 100755
index 000000000..28f1cfd20
--- /dev/null
+++ b/partitioned-burgers-1d/.solver-nutils-dgalerkin/solver.py
@@ -0,0 +1,148 @@
+# Original source: https://github.com/evalf/nutils/blob/d73749ff7d64c9ccafdbb88cd442f80b9448c118/examples/burgers.py
+
+from nutils import mesh, function, export, testing
+from nutils.solver import System
+from nutils.expression_v2 import Namespace
+import treelog as log
+import numpy as np
+import itertools
+import precice
+import json
+import os
+import argparse
+
+def _generate_initial_condition(x_coords, ic_config, epoch):
+ np.random.seed(epoch)
+ ic_values = np.zeros(len(x_coords))
+ if ic_config["type"] == "sinusoidal":
+ num_modes = ic_config.get("num_modes", 1)
+ superpositions = np.random.randint(2, num_modes + 1)
+ for _ in range(superpositions):
+ amp = np.random.uniform(0.1, 2)
+ k = np.random.randint(ic_config["wavenumber_range"][0], ic_config["wavenumber_range"][1] + 1)
+ phase_shift = np.random.uniform(0, 2 * np.pi)
+ ic_values += amp * np.sin(2 * np.pi * k * x_coords + phase_shift)
+ return ic_values
+
+def project_initial_condition(domain_min, domain_max, nelems, ic_config, epoch):
+ # 1. Generate a high-resolution "truth" on a fine grid
+ fine_res = nelems * 10
+ fine_x = np.linspace(domain_min[0], domain_max[0], fine_res, endpoint=False)
+ fine_u = _generate_initial_condition(fine_x, ic_config, epoch)
+
+ # 2. Average the high-resolution truth over each coarse cell
+ u_projected = np.zeros(nelems)
+ for i in range(nelems):
+ cell_start = i * 10
+ cell_end = (i + 1) * 10
+ u_projected[i] = np.mean(fine_u[cell_start:cell_end])
+
+ return u_projected
+
+def main(dim: int,
+ epoch: int = 0,
+ btype: str = 'discont',
+ degree: int = 1,
+ newtontol: float = 1e-5,
+ config_file: str = "precice-config.xml"):
+
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+ with open(os.path.join(script_dir, "..", "python_participant", "config.json"), 'r') as f:
+ config = json.load(f)["solver"]
+ with open(os.path.join(script_dir, "ic_params.json"), 'r') as f:
+ ic_config = json.load(f)["initial_conditions"]
+
+ config_path = os.path.join(script_dir, "..", f"{dim}d", config_file)
+ participant = precice.Participant("Solver", config_path, 0, 1)
+
+ mesh_internal_name = f"Solver-Mesh-{dim}D-Internal"
+ mesh_boundaries_name = f"Solver-Mesh-{dim}D-Boundaries"
+ data_name = f"Data_{dim}D"
+ res = config[f"{dim}d_resolution"]
+ domain_min = config[f"{dim}d_domain_min"]
+ domain_max = config[f"{dim}d_domain_max"]
+ nelems = res[0]
+
+ domain, geom = mesh.line(np.linspace(domain_min[0], domain_max[0], nelems + 1), periodic=True)
+
+ # Define all nelems +1 nodes for evaluation
+ eval_coords_x = np.linspace(domain_min[0], domain_max[0], nelems + 1)
+
+ # Define the nelems vertices for saving (all but the last)
+ trunc_coords_x = eval_coords_x[:-1]
+ internal_coords = np.array([trunc_coords_x, np.full(len(trunc_coords_x), domain_min[1])]).T
+ boundary_coords = np.array([[domain_min[0], domain_min[1]], [domain_max[0], domain_max[1]]])
+
+ internal_vertex_ids = participant.set_mesh_vertices(mesh_internal_name, internal_coords)
+ boundary_vertex_ids = participant.set_mesh_vertices(mesh_boundaries_name, boundary_coords)
+
+ sample = domain.locate(geom, eval_coords_x, tol=1e-5)
+
+ ns = Namespace()
+ ns.x = geom
+ ns.define_for('x', gradient='∇', normal='n', jacobians=('dV', 'dS'))
+ ns.u = domain.field('u', btype=btype, degree=degree)
+ ns.du = ns.u - function.replace_arguments(ns.u, 'u:u0')
+ ns.v = domain.field('v', btype=btype, degree=degree)
+ ns.t = function.field('t')
+ ns.dt = ns.t - function.field('t0')
+ ns.f = '.5 u^2'
+ ns.C = 1
+
+ res_pde = domain.integral('(v du / dt - ∇(v) f) dV' @ ns, degree=degree*2)
+ res_pde -= domain.interfaces.integral('[v] n ({f} - .5 C [u] n) dS' @ ns, degree=degree*2)
+ system = System(res_pde, trial='u', test='v')
+
+ # Project the initial condition
+ u_averaged = project_initial_condition(domain_min, domain_max, nelems, ic_config, epoch)
+ ns.uic = domain.basis('discont', degree=0).dot(u_averaged)
+
+ sqr = domain.integral('(u - uic)^2 dV' @ ns, degree=max(degree*2, 5))
+ args = System(sqr, trial='u').solve()
+
+ if participant.requires_initial_data():
+ # Evaluate at all nodes
+ all_data = sample.eval(ns.u, arguments=args)
+ # Truncate last element
+ trunc_data = all_data[:-1]
+ boundary_data_values = np.array([trunc_data[0], trunc_data[-1]])
+
+ participant.write_data(mesh_internal_name, data_name, internal_vertex_ids, trunc_data)
+ participant.write_data(mesh_boundaries_name, data_name, boundary_vertex_ids, boundary_data_values)
+
+ participant.initialize()
+
+ args['t'] = 0.
+
+ with log.iter.plain('timestep', itertools.count()) as steps:
+ for _ in steps:
+ if not participant.is_coupling_ongoing():
+ break
+
+ timestep = participant.get_max_time_step_size()
+
+ args = system.step(timestep=timestep, arguments=args, timearg='t', suffix='0', tol=newtontol)
+
+ all_data = sample.eval(ns.u, arguments=args)
+ trunc_data = all_data[:-1]
+ boundary_data_values = np.array([trunc_data[0], trunc_data[-1]])
+
+ participant.write_data(mesh_internal_name, data_name, internal_vertex_ids, trunc_data)
+ participant.write_data(mesh_boundaries_name, data_name, boundary_vertex_ids, boundary_data_values)
+
+ participant.advance(timestep)
+
+ participant.finalize()
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument("dim", type=int, choices=[1, 2], help="Dimension of the simulation")
+ parser.add_argument('--config_file', type=str, default="precice-config.xml")
+ parser.add_argument('--btype', type=str, default='discont')
+ parser.add_argument('--degree', type=int, default=1)
+ parser.add_argument('--newtontol', type=float, default=1e-5)
+ parser.add_argument("--epoch", type=int, default=0, help="Current epoch number")
+
+ args_cli = parser.parse_args()
+
+ main(dim=args_cli.dim, epoch=args_cli.epoch, btype=args_cli.btype, degree=args_cli.degree, newtontol=args_cli.newtontol, config_file=args_cli.config_file)
diff --git a/partitioned-burgers-1d/README.md b/partitioned-burgers-1d/README.md
new file mode 100644
index 000000000..8327f144c
--- /dev/null
+++ b/partitioned-burgers-1d/README.md
@@ -0,0 +1,142 @@
+---
+title: Partitioned 1D Burgers' Equation
+permalink: tutorials-partitioned-burgers-1d.html
+keywords: Python, Neural Network, Surrogate, Burgers Equation, Finite Volume, CFD
+summary: This tutorial demonstrates the partitioned solution of the 1D Burgers' equation using preCICE and a neural network surrogate solver.
+---
+
+{% note %}
+Get the [case files of this tutorial](https://github.com/precice/tutorials/tree/master/partitioned-burgers-1d). Read how in the [tutorials introduction](https://precice.org/tutorials.html).
+{% endnote %}
+
+## Setup
+
+We solve the 1D viscous Burgers' equation on the domain $[0,2]$:
+
+
+$$
+\frac{\partial u}{\partial t} = \nu \frac{\partial^2 u}{\partial x^2} - u \frac{\partial u}{\partial x},
+$$
+
+where $u(x,t)$ is the scalar velocity field and $\nu$ is the viscosity. In this tutorial, $\nu$ is very small by default ($10^{-12}$), but it can be changed in the solver.
+
+
+The domain is partitioned into two participants at $x=1$:
+
+- **Dirichlet**: Solves the left half $[0,1]$ and receives Dirichlet boundary conditions at the interface.
+- **Neumann**: Solves the right half $[1,2]$ and receives Neumann boundary conditions at the interface.
+
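+In terms of coupling data, the Dirichlet participant receives the interface value $u(1,t)$ (data `Velocity`) and sends the interface gradient (data `Gradient`), which the Neumann participant applies as a flux boundary condition:
+
+$$
+u_{\mathrm{D}}(1,t) = u_{\mathrm{N}}(1,t), \qquad \left.\frac{\partial u_{\mathrm{N}}}{\partial x}\right|_{x=1} = \left.\frac{\partial u_{\mathrm{D}}}{\partial x}\right|_{x=1} .
+$$
+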
+Both outer boundaries use zero-gradient conditions $\frac{\partial u}{\partial x} = 0$. The problem is solved for different initial conditions of superimposed sine waves, which can be generated with the provided `generate_ic.py` script.
+
+
+![Initial condition used for the simulation (seed 0, see generate_ic.py)](images/tutorials-partitioned-burgers-1d-initial-condition.png)
+
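+The randomized initial condition generated by `generate_ic.py` is a superposition of sine modes,
+
+$$
+u_0(x) = \sum_{j} A_j \sin(2 \pi k_j x + \phi_j), \qquad A_j \sim \mathcal{U}(0.1, 2), \quad k_j \in \{1, \dots, 7\}, \quad \phi_j \sim \mathcal{U}(0, 2\pi),
+$$
+
+with two to four modes per sample (see `ic_params.json`), averaged over the finite volume cells.
+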
+The solver `solver-scipy` implements the conservative formulation of the Burgers' equation as a first-order finite volume scheme with Lax-Friedrichs fluxes and implicit Euler time stepping.
+
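+The numerical flux at a cell interface and the implicit Euler update implemented there are
+
+$$
+F_{i+1/2} = \frac{1}{2}\left(\frac{u_i^2}{2} + \frac{u_{i+1}^2}{2}\right) - \frac{1}{2}\left(u_{i+1} - u_i\right), \qquad u^{n+1} = u^n + \Delta t \, R\left(u^{n+1}\right),
+$$
+
+where $R(u)$ contains the flux divergence and the viscous term; the resulting nonlinear system is solved with `scipy.optimize.root`.
+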
+This tutorial includes two versions for the Neumann participant:
+- A standard finite volume solver (`neumann-scipy`).
+- A pre-trained neural network surrogate that approximates the solver (`neumann-surrogate`).
+
+
+## Configuration
+
+preCICE configuration (image generated using the [precice-config-visualizer](https://precice.org/tooling-config-visualization.html)):
+
+![preCICE configuration visualization](images/tutorials-partitioned-burgers-1d-precice-config.png)
+
+
+## Running the tutorial
+
+### Initial condition
+
+Before running the participants, you must generate the initial condition file. From the root of the tutorial folder, run the following command, replacing `<seed>` with any integer:
+
+```bash
+python3 generate_ic.py --epoch <seed>
+```
+
+This script requires the Python libraries `numpy` and `matplotlib`. It generates the initial condition file `initial_condition.npz`, which is used by both participants. If you do not have the dependencies installed, you can run either of the participant run scripts to create a Python virtual environment with the required packages, and then try again.
+
+Alternatively, simply run the helper scripts (see below), which generate the initial condition before starting the participants.
+
+---
+
+#### Helper scripts
+
+Helper scripts automate running and visualizing both participants, including the monolithic reference run. They also accept an integer seed argument to select the initial condition.
+
+```bash
+./run-partitioned-scipy.sh
+```
+
+and
+
+```bash
+./run-partitioned-surrogate.sh
+```
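+
+For example, to run the fully partitioned scipy case with the initial condition generated from seed 3 (the argument defaults to 0 when omitted):
+
+```bash
+./run-partitioned-scipy.sh 3
+```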
+
+### Running the participants
+
+To run the partitioned simulation, open two separate terminals and start each participant individually. The corresponding `run.sh` script is located in the folder of the participant you want to use:
+
+```bash
+cd dirichlet-scipy
+./run.sh
+```
+
+and
+
+```bash
+cd neumann-scipy
+./run.sh
+```
+
+or, to use the pretrained neural network surrogate participant:
+
+```bash
+cd neumann-surrogate
+./run.sh
+```
+
+**Note:** The surrogate participant requires PyTorch and related dependencies, which require several gigabytes of disk space.
+
+### Monolithic solution (reference)
+
+You can run the whole domain using the monolithic solver for comparison:
+
+```bash
+cd solver-scipy
+./run.sh
+```
+
+
+
+## Visualization
+
+After both participants (and, optionally, the monolithic simulation) have finished, you can run the visualization script.
+`visualize_partitioned_domain.py` generates plots comparing the partitioned and monolithic solutions. You can select which Neumann result to use (`--neumann`, default `neumann-scipy/neumann.npz`) and which timestep to plot:
+
+```bash
+python3 visualize_partitioned_domain.py --neumann neumann-surrogate/surrogate.npz [timestep]
+```
+
+The script will produce the following output files in the `images/` directory:
+- `full_domain_timestep_slice.png`: Solution $u$ at a selected timestep
+- `gradient_timestep_slice.png`: Gradient $du/dx$ at a selected timestep
+- `full_domain_evolution.png`: Time evolution of the solution
+
+![Solution at the selected timestep](images/tutorials-partitioned-burgers-1d-full-domain-timestep-slice.png)
+
+![Time evolution of the partitioned solution](images/tutorials-partitioned-burgers-1d-full-domain-evolution.png)
+
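+If you want to post-process the results yourself, each solver writes an `.npz` file with the cell-center coordinates and the full solution history. A minimal sketch of reading it (array names follow `solver-scipy/solver.py`):
+
+```python
+import numpy as np
+
+data = np.load("solver-scipy/full_domain.npz")
+coords = data["internal_coordinates"]      # (nelems, 2): cell-center x and a dummy second coordinate
+u_hist = data["Solver-Mesh-1D-Internal"]   # (timesteps + 1, nelems): one row per time window
+print(coords.shape, u_hist.shape)
+```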
diff --git a/partitioned-burgers-1d/clean-tutorial.sh b/partitioned-burgers-1d/clean-tutorial.sh
new file mode 120000
index 000000000..4713f5092
--- /dev/null
+++ b/partitioned-burgers-1d/clean-tutorial.sh
@@ -0,0 +1 @@
+../tools/clean-tutorial-base.sh
\ No newline at end of file
diff --git a/partitioned-burgers-1d/clean.sh b/partitioned-burgers-1d/clean.sh
new file mode 100755
index 000000000..17bb6d4b7
--- /dev/null
+++ b/partitioned-burgers-1d/clean.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env sh
+set -e -u
+
+rm -f solver-scipy/full_domain.npz
+rm -f images/full_domain_evolution.png images/full_domain_timestep_slice.png images/gradient_timestep_slice.png images/initial_condition.png
+rm -f initial_condition.npz
\ No newline at end of file
diff --git a/partitioned-burgers-1d/dirichlet-scipy/clean.sh b/partitioned-burgers-1d/dirichlet-scipy/clean.sh
new file mode 100755
index 000000000..cd40261cc
--- /dev/null
+++ b/partitioned-burgers-1d/dirichlet-scipy/clean.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env sh
+set -e -u
+
+rm -rf precice-profiling
+rm -f dirichlet-scipy.log precice-Dirichlet-convergence.log precice-Dirichlet-iterations.log
+rm -f dirichlet.npz
diff --git a/partitioned-burgers-1d/dirichlet-scipy/run.sh b/partitioned-burgers-1d/dirichlet-scipy/run.sh
new file mode 100755
index 000000000..e6132ceae
--- /dev/null
+++ b/partitioned-burgers-1d/dirichlet-scipy/run.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+set -e -u
+
+solver_path="../solver-scipy"
+
+if [ -d "$solver_path/.venv" ]; then
+ echo "Using existing virtual environment"
+ . "$solver_path/.venv/bin/activate"
+else
+ echo "Creating new virtual environment"
+ python3 -m venv "$solver_path/.venv"
+ . "$solver_path/.venv/bin/activate"
+ pip install -r $solver_path/requirements.txt && pip freeze > $solver_path/pip-installed-packages.log
+fi
+
+if [ -f "../initial_condition.npz" ]; then
+ :
+else
+ echo "Error, missing initial condition file."
+ echo "Run 'python3 ../generate_ic.py --epoch ' to create one."
+ exit 1
+fi
+
+. ../../tools/log.sh
+exec > >(tee --append "$LOGFILE") 2>&1
+
+echo "[preCICE] Waiting for Neumann participant..."
+python3 "$solver_path/solver.py" Dirichlet
+
+close_log
diff --git a/partitioned-burgers-1d/generate-training-data.sh b/partitioned-burgers-1d/generate-training-data.sh
new file mode 100755
index 000000000..b0983a7e8
--- /dev/null
+++ b/partitioned-burgers-1d/generate-training-data.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+set -e -u
+
+mkdir -p solver-scipy/data-training
+
+# Number of training runs to generate
+NUM_RUNS=200
+
+echo "Generating ${NUM_RUNS} training data samples..."
+
+for i in $(seq 0 $((${NUM_RUNS}-1)))
+do
+ echo "--- Generating epoch ${i} ---"
+
+ # Generate IC
+ python3 generate_ic.py --epoch ${i}
+
+ SAVE_PATH="data-training/burgers_data_epoch_${i}.npz"
+
+ # Run the monolithic solver and save to save_path
+ # The 'None' argument tells the solver to run the full (monolithic) domain without preCICE
+ cd solver-scipy
+ python3 solver.py None --savefile "${SAVE_PATH}"
+ cd ..
+done
+
+echo "---"
+echo "Training data generation complete."
+echo "Files are saved in solver-scipy/data-training/"
diff --git a/partitioned-burgers-1d/generate_ic.py b/partitioned-burgers-1d/generate_ic.py
new file mode 100644
index 000000000..74bc3f516
--- /dev/null
+++ b/partitioned-burgers-1d/generate_ic.py
@@ -0,0 +1,69 @@
+import numpy as np
+import json
+import os
+import argparse
+import matplotlib.pyplot as plt
+
+def _generate_initial_condition(x_coords, ic_config, epoch):
+ np.random.seed(epoch)
+ ic_values = np.zeros(len(x_coords))
+ if ic_config["type"] == "sinusoidal":
+ num_modes = ic_config.get("num_modes", 1)
+ superpositions = np.random.randint(2, num_modes + 1)
+ for _ in range(superpositions):
+ amp = np.random.uniform(0.1, 2)
+ k = np.random.randint(ic_config["wavenumber_range"][0], ic_config["wavenumber_range"][1] + 1)
+ phase_shift = np.random.uniform(0, 2 * np.pi)
+ ic_values += amp * np.sin(2 * np.pi * k * x_coords + phase_shift)
+ return ic_values
+
+def project_initial_condition(domain_min, domain_max, nelems, ic_config, epoch):
+ # 1. Generate a high-resolution "truth" on a fine grid
+ fine_res = nelems * 10
+ fine_x = np.linspace(domain_min, domain_max, fine_res, endpoint=False)
+ fine_u = _generate_initial_condition(fine_x, ic_config, epoch)
+
+ # 2. Average the high-resolution truth over each coarse cell
+ u_projected = np.zeros(nelems)
+ for i in range(nelems):
+ cell_start = i * 10
+ cell_end = (i + 1) * 10
+ u_projected[i] = np.mean(fine_u[cell_start:cell_end])
+
+ return u_projected
+
+if __name__ == "__main__":
+ parser = argparse.ArgumentParser(description="Generate initial conditions for the Burgers equation simulation.")
+ parser.add_argument("--epoch", type=int, default=0, help="Seed for the random number generator to ensure reproducibility.")
+ args = parser.parse_args()
+
+ CASE_DIR = os.path.dirname(os.path.abspath(__file__))
+ IMAGES_DIR = os.path.join(CASE_DIR, "images")
+
+ with open(os.path.join(CASE_DIR, "ic_params.json"), 'r') as f:
+ config = json.load(f)
+
+ ic_config = config["initial_conditions"]
+ domain_config = config["domain"]
+
+ # full domain
+ full_domain_min = domain_config["full_domain_min"]
+ full_domain_max = domain_config["full_domain_max"]
+ nelems_total = domain_config["nelems_total"]
+
+ # Generate IC
+ initial_condition = project_initial_condition(full_domain_min, full_domain_max, nelems_total, ic_config, args.epoch)
+
+ output_path = os.path.join(CASE_DIR, "initial_condition.npz")
+ np.savez(output_path, initial_condition=initial_condition)
+
+ x_coords = np.linspace(full_domain_min, full_domain_max, nelems_total, endpoint=False)
+ plt.figure(figsize=(10, 5))
+ plt.plot(x_coords, initial_condition, marker='.', linestyle='-')
+ plt.plot(x_coords, initial_condition, marker='.', linestyle='-')
+ plt.xlabel('Spatial Coordinate (x)')
+ plt.ylabel('Solution Value (u)')
+ plt.grid(True)
+ plt.savefig(os.path.join(IMAGES_DIR, "initial_condition.png"))
+ print(f"Initial condition and plot saved to {output_path}")
+ plt.close()
diff --git a/partitioned-burgers-1d/ic_params.json b/partitioned-burgers-1d/ic_params.json
new file mode 100644
index 000000000..55b3fb0f8
--- /dev/null
+++ b/partitioned-burgers-1d/ic_params.json
@@ -0,0 +1,12 @@
+{
+ "initial_conditions": {
+ "type": "sinusoidal",
+ "wavenumber_range": [1, 7],
+ "num_modes": 4
+ },
+ "domain": {
+ "nelems_total": 256,
+ "full_domain_min": 0.0,
+ "full_domain_max": 2.0
+ }
+}
diff --git a/partitioned-burgers-1d/images/tutorials-partitioned-burgers-1d-full-domain-evolution.png b/partitioned-burgers-1d/images/tutorials-partitioned-burgers-1d-full-domain-evolution.png
new file mode 100644
index 000000000..5b4a6a92c
Binary files /dev/null and b/partitioned-burgers-1d/images/tutorials-partitioned-burgers-1d-full-domain-evolution.png differ
diff --git a/partitioned-burgers-1d/images/tutorials-partitioned-burgers-1d-full-domain-timestep-slice.png b/partitioned-burgers-1d/images/tutorials-partitioned-burgers-1d-full-domain-timestep-slice.png
new file mode 100644
index 000000000..0c9e60d23
Binary files /dev/null and b/partitioned-burgers-1d/images/tutorials-partitioned-burgers-1d-full-domain-timestep-slice.png differ
diff --git a/partitioned-burgers-1d/images/tutorials-partitioned-burgers-1d-initial-condition.png b/partitioned-burgers-1d/images/tutorials-partitioned-burgers-1d-initial-condition.png
new file mode 100644
index 000000000..3379d1306
Binary files /dev/null and b/partitioned-burgers-1d/images/tutorials-partitioned-burgers-1d-initial-condition.png differ
diff --git a/partitioned-burgers-1d/images/tutorials-partitioned-burgers-1d-precice-config.png b/partitioned-burgers-1d/images/tutorials-partitioned-burgers-1d-precice-config.png
new file mode 100644
index 000000000..2ca2c428a
Binary files /dev/null and b/partitioned-burgers-1d/images/tutorials-partitioned-burgers-1d-precice-config.png differ
diff --git a/partitioned-burgers-1d/neumann-scipy/clean.sh b/partitioned-burgers-1d/neumann-scipy/clean.sh
new file mode 100755
index 000000000..729500fe3
--- /dev/null
+++ b/partitioned-burgers-1d/neumann-scipy/clean.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env sh
+set -e -u
+
+rm -rf precice-profiling
+rm -f neumann-scipy.log precice-Neumann-convergence.log precice-Neumann-iterations.log
+rm -f neumann.npz
diff --git a/partitioned-burgers-1d/neumann-scipy/run.sh b/partitioned-burgers-1d/neumann-scipy/run.sh
new file mode 100755
index 000000000..d9399c438
--- /dev/null
+++ b/partitioned-burgers-1d/neumann-scipy/run.sh
@@ -0,0 +1,30 @@
+#!/usr/bin/env bash
+set -e -u
+
+solver_path="../solver-scipy"
+
+if [ -d "$solver_path/.venv" ]; then
+ echo "Using existing virtual environment"
+ . "$solver_path/.venv/bin/activate"
+else
+ echo "Creating new virtual environment"
+ python3 -m venv "$solver_path/.venv"
+ . "$solver_path/.venv/bin/activate"
+ pip install -r $solver_path/requirements.txt && pip freeze > $solver_path/pip-installed-packages.log
+fi
+
+if [ -f "../initial_condition.npz" ]; then
+ :
+else
+ echo "Error, missing initial condition file."
+ echo "Run 'python3 ../generate_ic.py --epoch ' to create one."
+ exit 1
+fi
+
+. ../../tools/log.sh
+exec > >(tee --append "$LOGFILE") 2>&1
+
+echo "[preCICE] Waiting for Dirichlet participant..."
+python3 "$solver_path/solver.py" Neumann
+
+close_log
diff --git a/partitioned-burgers-1d/neumann-surrogate/CNN_RES_UNROLL_1.pth b/partitioned-burgers-1d/neumann-surrogate/CNN_RES_UNROLL_1.pth
new file mode 100644
index 000000000..c54a69e93
Binary files /dev/null and b/partitioned-burgers-1d/neumann-surrogate/CNN_RES_UNROLL_1.pth differ
diff --git a/partitioned-burgers-1d/neumann-surrogate/CNN_RES_UNROLL_7.pth b/partitioned-burgers-1d/neumann-surrogate/CNN_RES_UNROLL_7.pth
new file mode 100644
index 000000000..d9b84f8d5
Binary files /dev/null and b/partitioned-burgers-1d/neumann-surrogate/CNN_RES_UNROLL_7.pth differ
diff --git a/partitioned-burgers-1d/neumann-surrogate/clean.sh b/partitioned-burgers-1d/neumann-surrogate/clean.sh
new file mode 100755
index 000000000..c8b832041
--- /dev/null
+++ b/partitioned-burgers-1d/neumann-surrogate/clean.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env sh
+set -e -u
+
+rm -rf precice-profiling
+rm -f neumann-surrogate.log precice-Neumann-convergence.log precice-Neumann-iterations.log
+rm -f surrogate.npz
\ No newline at end of file
diff --git a/partitioned-burgers-1d/neumann-surrogate/config.py b/partitioned-burgers-1d/neumann-surrogate/config.py
new file mode 100644
index 000000000..d6cf1d62b
--- /dev/null
+++ b/partitioned-burgers-1d/neumann-surrogate/config.py
@@ -0,0 +1,15 @@
+import torch
+
+# Model architecture
+INPUT_SIZE = 128 + 2 # +2 for ghost cells
+HIDDEN_SIZE = 64 # num filters
+OUTPUT_SIZE = 128
+
+assert INPUT_SIZE >= OUTPUT_SIZE, "Input size must be greater than or equal to output size."
+assert (INPUT_SIZE - OUTPUT_SIZE) % 2 == 0, "Input and output sizes must differ by an even number (for ghost cells)."
+
+NUM_RES_BLOCKS = 4
+KERNEL_SIZE = 5
+ACTIVATION = torch.nn.ReLU
+
+MODEL_NAME = "CNN_RES_UNROLL_7.pth"
\ No newline at end of file
diff --git a/partitioned-burgers-1d/neumann-surrogate/model.py b/partitioned-burgers-1d/neumann-surrogate/model.py
new file mode 100644
index 000000000..aeaf9addd
--- /dev/null
+++ b/partitioned-burgers-1d/neumann-surrogate/model.py
@@ -0,0 +1,110 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torch.nn.utils import weight_norm
+
+def pad_with_ghost_cells(input_seq, bc_left, bc_right):
+ return torch.cat([bc_left, input_seq, bc_right], dim=1)
+
+class LinearExtrapolationPadding1D(nn.Module):
+ """Applies 'same' padding using linear extrapolation."""
+ def __init__(self, kernel_size: int, dilation: int = 1):
+ super().__init__()
+ self.pad_total = dilation * (kernel_size - 1)
+ self.pad_beg = self.pad_total // 2
+ self.pad_end = self.pad_total - self.pad_beg
+
+ def forward(self, x):
+ # Don't pad if not necessary
+ if self.pad_total == 0:
+ return x
+
+ ghost_cell_left = x[:, :, :1]
+ ghost_cell_right = x[:, :, -1:]
+
+ # Calculate the gradient at each boundary
+ grad_left = x[:, :, 1:2] - ghost_cell_left
+ grad_right = ghost_cell_right - x[:, :, -2:-1]
+
+ # Higher order finite difference gradient approximation
+ # grad_left = ( -11 * ghost_cell_left + 18 * x[:, :, 1:2] - 9 * x[:, :, 2:3] + 2 * x[:, :, 3:4]) / 6
+ # grad_right = (11 * ghost_cell_right - 18 * x[:, :, -2:-1] + 9 * x[:, :, -3:-2] - 2 * x[:, :, -4:-3]) / 6
+
+ # Extrapolated padding tensors
+ left_ramp = torch.arange(self.pad_beg, 0, -1, device=x.device, dtype=x.dtype).view(1, 1, -1)
+ left_padding = ghost_cell_left - left_ramp * grad_left
+
+ right_ramp = torch.arange(1, self.pad_end + 1, device=x.device, dtype=x.dtype).view(1, 1, -1)
+ right_padding = ghost_cell_right + right_ramp * grad_right
+
+ return torch.cat([left_padding, x, right_padding], dim=2)
+
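+# Worked example of the extrapolation padding (a sketch): for kernel_size=3 the layer adds one
+# cell on each side; with x = [..., a, b] the right pad is b + (b - a), i.e. the local slope is continued.
+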
+class ResidualBlock1D(nn.Module):
+ """A residual block that uses custom 'same' padding with linear extrapolation and weight normalization."""
+ def __init__(self, channels, kernel_size=3, activation=nn.ReLU):
+ super(ResidualBlock1D, self).__init__()
+ self.activation = activation()
+ # Apply weight normalization
+ self.conv1 = weight_norm(nn.Conv1d(channels, channels, kernel_size, padding='valid', bias=True))
+ self.ghost_padding1 = LinearExtrapolationPadding1D(kernel_size)
+ self.conv2 = weight_norm(nn.Conv1d(channels, channels, kernel_size, padding='valid', bias=True))
+ self.ghost_padding2 = LinearExtrapolationPadding1D(kernel_size)
+
+ def forward(self, x):
+ identity = x
+
+ out = self.ghost_padding1(x)
+ out = self.conv1(out)
+ out = self.activation(out)
+
+ out = self.ghost_padding2(out)
+ out = self.conv2(out)
+
+ return self.activation(out) + identity
+
+class CNN_RES(nn.Module):
+ """
+ A CNN with residual blocks for 1D data.
+ Expects a pre-padded input with ghost_cells // 2 ghost cells on each side.
+ Applies a custom linear extrapolation padding for inner layers.
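+
+ A minimal usage sketch (sizes follow config.py; the shapes below are an assumption of this example):
+ model = CNN_RES(hidden_channels=64, num_blocks=4, kernel_size=5, ghost_cells=2)
+ u_padded = torch.randn(8, 130)  # 128 interior cells plus one ghost cell per side
+ u_next = model(u_padded)        # -> shape (8, 128), interior cells only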
+ """
+ def __init__(self, hidden_channels, num_blocks=2, kernel_size=3, activation=nn.ReLU, ghost_cells=2):
+ super(CNN_RES, self).__init__()
+ self.activation = activation()
+ self.hidden_channels = hidden_channels
+ self.num_blocks = num_blocks
+ self.kernel_size = kernel_size
+ assert ghost_cells % 2 == 0, "ghost_cells must be even"
+ self.ghost_cells = ghost_cells
+
+ self.ghost_padding = LinearExtrapolationPadding1D(self.ghost_cells + self.kernel_size)
+
+ # Apply weight normalization to the input convolution
+ self.conv_in = weight_norm(nn.Conv1d(1, hidden_channels, kernel_size=1, bias=True))
+
+ layers = [ResidualBlock1D(hidden_channels, kernel_size, activation=activation) for _ in range(num_blocks)]
+ self.res_blocks = nn.Sequential(*layers)
+
+ self.conv_out = nn.Conv1d(hidden_channels, 1, kernel_size=1)
+
+ def forward(self, x):
+
+ if x.dim() == 2:
+ x = x.unsqueeze(1) # Add channel dim: (B, 1, L)
+
+ if not self.ghost_cells == 0:
+ x_padded = self.ghost_padding(x)
+
+ else:
+ x_padded = x
+
+ total_pad_each_side = self.ghost_padding.pad_beg + self.ghost_cells // 2
+
+ out = self.activation(self.conv_in(x_padded)) # no extra padding here
+ out = self.res_blocks(out)
+ out = self.conv_out(out) # no extra padding here
+
+ if not self.ghost_cells == 0:
+ out = out[:, :, total_pad_each_side:-total_pad_each_side] # remove ghost cells, return only internal domain
+
+ return out.squeeze(1)
\ No newline at end of file
diff --git a/partitioned-burgers-1d/neumann-surrogate/requirements.txt b/partitioned-burgers-1d/neumann-surrogate/requirements.txt
new file mode 100644
index 000000000..8184da500
--- /dev/null
+++ b/partitioned-burgers-1d/neumann-surrogate/requirements.txt
@@ -0,0 +1,5 @@
+numpy
+pyprecice
+scipy
+torch
+matplotlib
\ No newline at end of file
diff --git a/partitioned-burgers-1d/neumann-surrogate/run.sh b/partitioned-burgers-1d/neumann-surrogate/run.sh
new file mode 100755
index 000000000..7e8f987b5
--- /dev/null
+++ b/partitioned-burgers-1d/neumann-surrogate/run.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+set -e -u
+
+if [ -d ".venv" ]; then
+ echo "Using existing virtual environment"
+ . .venv/bin/activate
+else
+ echo "Creating new virtual environment"
+ python3 -m venv .venv
+ . .venv/bin/activate
+ pip install -r requirements.txt && pip freeze > pip-installed-packages.log
+fi
+
+if [ -f "../initial_condition.npz" ]; then
+ :
+else
+ echo "Error, missing initial condition file."
+ echo "Run 'python3 ../generate_ic.py --epoch ' to create one."
+ exit 1
+fi
+
+. ../../tools/log.sh
+exec > >(tee --append "$LOGFILE") 2>&1
+
+echo "[preCICE] Waiting for Dirichlet participant..."
+python3 solver.py
+
+close_log
\ No newline at end of file
diff --git a/partitioned-burgers-1d/neumann-surrogate/solver.py b/partitioned-burgers-1d/neumann-surrogate/solver.py
new file mode 100644
index 000000000..1e1ba8129
--- /dev/null
+++ b/partitioned-burgers-1d/neumann-surrogate/solver.py
@@ -0,0 +1,136 @@
+import torch
+import numpy as np
+import precice
+import os
+import sys
+import json
+
+from model import CNN_RES
+from config import INPUT_SIZE, OUTPUT_SIZE, HIDDEN_SIZE, NUM_RES_BLOCKS, KERNEL_SIZE, MODEL_NAME, ACTIVATION
+
+GHOST_CELLS = INPUT_SIZE - OUTPUT_SIZE
+
+def main():
+ participant_name = "Neumann"
+
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+ case_dir = os.path.abspath(os.path.join(script_dir, '..'))
+ config_path = os.path.join(case_dir, "precice-config.xml")
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ model = CNN_RES(
+ hidden_channels=HIDDEN_SIZE,
+ num_blocks=NUM_RES_BLOCKS,
+ kernel_size=KERNEL_SIZE,
+ activation=ACTIVATION,
+ ghost_cells=GHOST_CELLS
+ )
+
+ if not os.path.exists(MODEL_NAME):
+ print(f"Model file not found at {MODEL_NAME}")
+ sys.exit(1)
+
+ model.load_state_dict(torch.load(MODEL_NAME, map_location=device))
+ model.to(device)
+ model.eval()
+ print("Neural surrogate model loaded successfully.")
+
+ # Read initial condition
+ with open(os.path.join(case_dir, "ic_params.json"), 'r') as f:
+ domain_config = json.load(f)["domain"]
+
+ nelems_total = domain_config["nelems_total"]
+ nelems_local = nelems_total // 2
+ full_domain_min = domain_config["full_domain_min"]
+ full_domain_max = domain_config["full_domain_max"]
+ dx = (full_domain_max - full_domain_min) / nelems_total
+
+ ic_data = np.load(os.path.join(case_dir, "initial_condition.npz"))
+ full_ic = ic_data['initial_condition']
+ u = full_ic[nelems_local:]
+
+ # Set domain and preCICE setup
+ participant = precice.Participant(participant_name, config_path, 0, 1)
+
+ mesh_name = "Neumann-Mesh"
+ read_data_name = "Gradient"
+ write_data_name = "Velocity"
+ local_domain_min = full_domain_min + nelems_local * dx
+ coupling_point = [[local_domain_min, 0.0]]
+ vertex_id = participant.set_mesh_vertices(mesh_name, coupling_point)
+
+ if participant.requires_initial_data():
+ participant.write_data(mesh_name, write_data_name, vertex_id, [u[0]])
+
+ participant.initialize()
+
+ dt = participant.get_max_time_step_size()
+ t = 0.0
+ saved_t = 0.0
+ t_index = 0
+ solution_history = {int(0): u.copy()}
+
+ # Main Coupling Loop
+ with torch.no_grad():
+ while participant.is_coupling_ongoing():
+ if participant.requires_writing_checkpoint():
+ saved_u = u.copy()
+ saved_t = t
+ saved_t_index = t_index
+ if participant.requires_reading_checkpoint():
+ u = saved_u.copy()
+ t = saved_t
+ t_index = saved_t_index
+
+ du_dx_recv = participant.read_data(mesh_name, read_data_name, vertex_id, dt)[0]
+
+ # Calculate ghost cell value from received gradient
+ bc_left = u[0] - dx * du_dx_recv
+ bc_right = u[-1] # Zero gradient on right wall
+
+ u_padded = np.empty(len(u) + 2)
+ u_padded[0] = bc_left
+ u_padded[-1] = bc_right
+ u_padded[1:-1] = u
+
+ input_tensor = torch.from_numpy(u_padded).float().unsqueeze(0).unsqueeze(0).to(device)
+
+ output_tensor = model(input_tensor)
+ u = output_tensor.squeeze().cpu().numpy()
+
+ bc_left = u[0] - dx * du_dx_recv
+
+ du_dx = (u[0] - bc_left) / dx
+ u_interface = (bc_left + u[0]) / 2
+
+ participant.write_data(mesh_name, write_data_name, vertex_id, [u[0]])
+
+ print(f"[{participant_name:9s}] t={t:6.4f} | u_coupling={u_interface:8.4f} | du_dx={du_dx:8.4f}")
+
+ t = saved_t + dt
+ t_index += 1
+ solution_history[t_index] = u.copy()
+ participant.advance(dt)
+
+ # Finalize and save data to npz array
+ participant.finalize()
+
+ run_dir = os.getcwd()
+ output_filename = os.path.join(run_dir, "surrogate.npz")
+
+ cell_centers_x = np.linspace(local_domain_min + dx/2, local_domain_min + (nelems_local - 0.5) * dx, nelems_local)
+ internal_coords = np.array([cell_centers_x, np.zeros(nelems_local)]).T
+
+ sorted_times_index = sorted(solution_history.keys())
+ final_solution = np.array([solution_history[t_index] for t_index in sorted_times_index])
+
+ np.savez(
+ output_filename,
+ internal_coordinates=internal_coords,
+ **{"Solver-Mesh-1D-Internal": final_solution}
+ )
+ print(f"[Surrogate] Results saved to {output_filename}")
+
+if __name__ == '__main__':
+ main()
\ No newline at end of file
diff --git a/partitioned-burgers-1d/precice-config.xml b/partitioned-burgers-1d/precice-config.xml
new file mode 100644
index 000000000..71541eff9
--- /dev/null
+++ b/partitioned-burgers-1d/precice-config.xml
@@ -0,0 +1,56 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/partitioned-burgers-1d/run-partitioned-scipy.sh b/partitioned-burgers-1d/run-partitioned-scipy.sh
new file mode 100755
index 000000000..3288ebe4c
--- /dev/null
+++ b/partitioned-burgers-1d/run-partitioned-scipy.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+set -e -u
+
+./clean-tutorial.sh
+
+solver_path="./solver-scipy"
+
+if [ -d "$solver_path/.venv" ]; then
+ echo "Using existing virtual environment"
+ . "$solver_path/.venv/bin/activate"
+else
+ echo "Creating new virtual environment"
+ python3 -m venv "$solver_path/.venv"
+ . "$solver_path/.venv/bin/activate"
+ pip install -r $solver_path/requirements.txt && pip freeze > $solver_path/pip-installed-packages.log
+fi
+
+python3 generate_ic.py --epoch ${1:-0}
+
+# full domain reference solution
+echo "Running monolithic reference solution..."
+cd solver-scipy; ./run.sh; cd ..
+
+cd dirichlet-scipy; pwd; ./run.sh &
+cd ../neumann-scipy; pwd; ./run.sh && cd ..
+
+python3 visualize_partitioned_domain.py
\ No newline at end of file
diff --git a/partitioned-burgers-1d/run-partitioned-surrogate.sh b/partitioned-burgers-1d/run-partitioned-surrogate.sh
new file mode 100755
index 000000000..99ce024ad
--- /dev/null
+++ b/partitioned-burgers-1d/run-partitioned-surrogate.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+set -e -u
+
+./clean-tutorial.sh
+
+solver_path="./solver-scipy"
+
+if [ -d "$solver_path/.venv" ]; then
+ echo "Using existing virtual environment"
+ . "$solver_path/.venv/bin/activate"
+else
+ echo "Creating new virtual environment"
+ python3 -m venv "$solver_path/.venv"
+ . "$solver_path/.venv/bin/activate"
+ pip install -r $solver_path/requirements.txt && pip freeze > $solver_path/pip-installed-packages.log
+fi
+
+python3 generate_ic.py --epoch ${1:-0}
+
+# full domain reference solution
+echo "Running monolithic reference solution..."
+cd solver-scipy; ./run.sh; cd ..
+
+cd dirichlet-scipy; pwd; ./run.sh &
+cd ../neumann-surrogate; pwd; ./run.sh && cd ..
+
+python3 visualize_partitioned_domain.py --neumann neumann-surrogate/surrogate.npz
\ No newline at end of file
diff --git a/partitioned-burgers-1d/solver-scipy/requirements.txt b/partitioned-burgers-1d/solver-scipy/requirements.txt
new file mode 100644
index 000000000..fd4fc55cf
--- /dev/null
+++ b/partitioned-burgers-1d/solver-scipy/requirements.txt
@@ -0,0 +1,4 @@
+numpy
+pyprecice
+scipy
+matplotlib
\ No newline at end of file
diff --git a/partitioned-burgers-1d/solver-scipy/run.sh b/partitioned-burgers-1d/solver-scipy/run.sh
new file mode 100755
index 000000000..0e33b557c
--- /dev/null
+++ b/partitioned-burgers-1d/solver-scipy/run.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+set -e -u
+
+if [ -d .venv ]; then
+ echo "Using existing virtual environment"
+ . .venv/bin/activate
+else
+ echo "Creating new virtual environment"
+ python3 -m venv .venv
+ . .venv/bin/activate
+ pip install -r requirements.txt && pip freeze > pip-installed-packages.log
+fi
+
+if [ -f "../initial_condition.npz" ]; then
+ :
+else
+ echo "Error, missing initial condition file."
+ echo "Run 'python3 ../generate_ic.py --epoch ' to create one."
+ exit 1
+fi
+
+python3 solver.py None # run monolithic reference solution
diff --git a/partitioned-burgers-1d/solver-scipy/solver.py b/partitioned-burgers-1d/solver-scipy/solver.py
new file mode 100644
index 000000000..b5965ac27
--- /dev/null
+++ b/partitioned-burgers-1d/solver-scipy/solver.py
@@ -0,0 +1,345 @@
+"""
+This script solves the 1D viscous Burgers' equation for a partitioned domain problem.
+The two participants:
+- 'Dirichlet': Solves the left half of the domain. Receives Dirichlet BC from 'Neumann'. Provides Neumann BC to 'Neumann'.
+- 'Neumann': Solves the right half of the domain.
+
+# |<---------------------Dirichlet---------------------->|<--------------------Neumann----------------------->|
+# | du_dx=0 | ... | ... | ... | u[-1] <|> bc_right | bc_left <|> u[0] | ... | ... | ... | du_dx=0 |
+# |<----------------------- u -------------------------->|<----------------------- u ------------------------>|
+# | bc_left | | | bc_right |
+"""
+import numpy as np
+from scipy.integrate import solve_ivp
+from scipy.sparse import diags, identity
+from scipy.optimize import root
+import precice
+import json
+import os
+import argparse
+
+def lax_friedrichs_flux(u_left, u_right): # Flux at cell interface between i-1/2 and i+1/2
+ return 0.5 * (0.5 * u_left**2 + 0.5 * u_right**2) - 0.5 * (u_right - u_left)
+
+def flux_function(u_left, u_right):
+ return lax_friedrichs_flux(u_left, u_right)
+
+def burgers_rhs(t, u, dx, C, bc_left, bc_right):
+ # Ghost cells for BCs
+ u_padded = np.empty(len(u) + 2)
+ u_padded[0] = bc_left
+ u_padded[-1] = bc_right
+ u_padded[1:-1] = u
+
+ flux = np.empty(len(u) + 1)
+ for i in range(len(flux)):
+ flux[i] = flux_function(u_padded[i], u_padded[i+1])
+
+ # viscosity
+ viscosity = C * (u_padded[2:] - 2 * u_padded[1:-1] + u_padded[:-2]) / dx**2
+ return -(flux[1:] - flux[:-1]) / dx + viscosity
+
+def burgers_jacobian(t, u, dx, C, bc_left, bc_right):
+ n = len(u)
+
+ u_padded = np.empty(n + 2)
+ u_padded[0] = bc_left
+ u_padded[-1] = bc_right
+ u_padded[1:-1] = u
+
+ # --- viscosity (constant) ---
+ d_visc_di = -2 * C / dx**2
+ d_visc_off = C / dx**2
+
+ # --- flux (Lax-Friedrichs) ---
+ df_du_di = -1 / dx
+
+ # Upper diagonal
+ df_du_upper = -(0.5 * u_padded[2:n+1] - 0.5) / dx
+
+ # Lower diagonal
+ df_du_lower = (0.5 * u_padded[0:n-1] + 0.5) / dx
+
+ main_diag = df_du_di + d_visc_di
+ upper_diag = df_du_upper + d_visc_off
+ lower_diag = df_du_lower + d_visc_off
+
+ jac = diags([main_diag, upper_diag, lower_diag], [0, 1, -1], shape=(n, n), format='csc')
+
+ return jac
+
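+# Implicit Euler residual: R(u_new) = u_new - u_old - dt * rhs(u_new); each time step solves R(u_new) = 0.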
+def burgers_residual(u_new, u_old, dt, dx, C, bc_left, bc_right):
+ return u_new - u_old - dt * burgers_rhs(0, u_new, dx, C, bc_left, bc_right)
+
+def burgers_jacobian_residual(u_new, u_old, dt, dx, C, bc_left, bc_right):
+ n = len(u_new)
+ I = identity(n, format='csc')
+ J_rhs = burgers_jacobian(0, u_new, dx, C, bc_left, bc_right)
+ return (I - dt * J_rhs).toarray()  # scipy.optimize.root with method='hybr' requires a dense Jacobian
+
+class BoundaryWrapper:
+ """
+ Wrap the RHS and Jacobian to dynamically set BCs during the solve iterations with the updated state u.
+ """
+ def __init__(self, dx, C, participant_name, u_from_neumann=None, du_dx_recv=None):
+ self.dx = dx
+ self.C = C
+ self.participant_name = participant_name
+ self.u_from_neumann = u_from_neumann
+ self.du_dx_recv = du_dx_recv
+
+ def bc_left(self, u):
+ if self.participant_name == "Neumann":
+ return u[0] - self.du_dx_recv * self.dx
+ # zero gradient at outer boundary
+ elif self.participant_name == "Dirichlet":
+ return u[0]
+ else:
+ return u[0]
+
+ def bc_right(self, u):
+ if self.participant_name == "Dirichlet":
+ return self.u_from_neumann
+ # zero gradient at outer boundary
+ elif self.participant_name == "Neumann":
+ return u[-1]
+ else:
+ return u[-1]
+
+ def rhs(self, t, u):
+ bc_left = self.bc_left(u)
+ bc_right = self.bc_right(u)
+ return burgers_rhs(t, u, self.dx, self.C, bc_left, bc_right)
+
+ def jac(self, t, u):
+ bc_left = self.bc_left(u)
+ bc_right = self.bc_right(u)
+
+ J_rhs = burgers_jacobian(t, u, self.dx, self.C, bc_left, bc_right)
+ return J_rhs
+
+ def rhs_residual(self, u_new, u_old, dt):
+ bc_left = self.bc_left(u_new)
+ bc_right = self.bc_right(u_new)
+ return burgers_residual(u_new, u_old, dt, self.dx, self.C, bc_left, bc_right)
+
+ def jac_residual(self, u_new, u_old, dt):
+ bc_left = self.bc_left(u_new)
+ bc_right = self.bc_right(u_new)
+ return burgers_jacobian_residual(u_new, u_old, dt, self.dx, self.C, bc_left, bc_right)
+
+def main(participant_name: str, savefile_path: str = None):
+ script_dir = os.path.dirname(os.path.abspath(__file__))
+ case_dir = os.path.abspath(os.path.join(script_dir, '..'))
+ run_dir = os.getcwd()
+
+ config_path = os.path.join(case_dir, "precice-config.xml")
+
+ if participant_name == 'None':
+ # read precice config to get t_final and dt
+ import re
+ print("Participant not specified. Running full domain without preCICE")
+ participant_name = None
+
+ with open(config_path, 'r') as f:
+ precice_config = f.read()
+ max_time_match = re.search(r'max-time value="([^"]+)"', precice_config)
+ t_final = float(max_time_match.group(1))
+ dt_match = re.search(r'time-window-size value="([^"]+)"', precice_config)
+ dt = float(dt_match.group(1))
+ print(f"t_final = {t_final}, dt = {dt}")
+ else:
+ participant = precice.Participant(participant_name, config_path, 0, 1)
+
+ # Read initial condition
+ with open(os.path.join(case_dir, "ic_params.json"), 'r') as f:
+ domain_config = json.load(f)["domain"]
+
+ nelems_total = domain_config["nelems_total"]
+ nelems_local = nelems_total // 2
+ full_domain_min = domain_config["full_domain_min"]
+ full_domain_max = domain_config["full_domain_max"]
+ dx = (full_domain_max - full_domain_min) / nelems_total
+
+ # Set domain and preCICE setup
+ if participant_name == "Dirichlet":
+ mesh_name = "Dirichlet-Mesh"
+ read_data_name = "Velocity"
+ write_data_name = "Gradient"
+ local_domain_min = full_domain_min
+ local_domain_max = full_domain_min + nelems_local * dx
+ coupling_point = [[local_domain_max, 0.0]]
+ elif participant_name == "Neumann":
+ mesh_name = "Neumann-Mesh"
+ read_data_name = "Gradient"
+ write_data_name = "Velocity"
+ local_domain_min = full_domain_min + nelems_local * dx
+ local_domain_max = full_domain_max
+ coupling_point = [[local_domain_min, 0.0]]
+ else: #full domain run
+ local_domain_min = full_domain_min
+ local_domain_max = full_domain_max
+ nelems_local = nelems_total
+
+ ic_data = np.load(os.path.join(case_dir, "initial_condition.npz"))
+ full_ic = ic_data['initial_condition']
+ if participant_name == "Dirichlet":
+ u = full_ic[:nelems_local]
+ elif participant_name == "Neumann":
+ u = full_ic[nelems_local:]
+ else:
+ u = full_ic
+
+ if participant_name is not None:
+ vertex_id = participant.set_mesh_vertices(mesh_name, coupling_point)
+
+ if participant.requires_initial_data():
+ if participant_name == "Dirichlet":
+ du_dx_send = (u[-1] - u[-2]) / dx # take forward difference inside domain for initial send
+ participant.write_data(mesh_name, write_data_name, vertex_id, [du_dx_send])
+ if participant_name == "Neumann":
+ participant.write_data(mesh_name, write_data_name, vertex_id, [u[0]])
+
+ participant.initialize()
+ dt = participant.get_max_time_step_size()
+
+ solution_history = {int(0): u.copy()}
+
+ t = 0.0
+ t_index = 0
+ saved_t = 0.0
+ C_viscosity = 1e-12
+ aborted = False
+
+ # --- Serial Coupling Loop ---
+ if participant_name == "Dirichlet":
+ while participant.is_coupling_ongoing():
+ if participant.requires_writing_checkpoint():
+ # print(f"[Dirichlet] Writing checkpoint at t={t:.4f}")
+ saved_u = u.copy()
+ saved_t = t
+ saved_t_index = t_index
+ if participant.requires_reading_checkpoint():
+ u = saved_u.copy()
+ t = saved_t
+ t_index = saved_t_index
+ # print(f"[Dirichlet] Reading checkpoint at t={t:.4f}")
+
+ u_from_neumann = participant.read_data(mesh_name, read_data_name, vertex_id, dt)[0]
+
+ t_end = t + dt
+ wrapper = BoundaryWrapper(dx, C_viscosity, "Dirichlet", u_from_neumann=u_from_neumann)
+
+ # sol = solve_ivp(wrapper.rhs, (t, t_end), u, method='BDF', t_eval=[t_end], jac=wrapper.jac) # BDF higher order implicit timestepping
+ # u = sol.y[:, -1]
+ # u = u + dt * burgers_rhs(t, u, dx, C_viscosity, wrapper.bc_left(u), wrapper.bc_right(u)) # Explicit Euler
+
+ sol = root(wrapper.rhs_residual, u, args=(u, dt), jac=wrapper.jac_residual, method='hybr')
+ u = sol.x
+
+ bc_right = wrapper.bc_right(u)
+
+ du_dx_send = (bc_right - u[-1]) / dx
+ flux_across_interface = flux_function(u[-1], bc_right)
+ u_interface = (u[-1] + bc_right) / 2
+
+ participant.write_data(mesh_name, write_data_name, vertex_id, [du_dx_send])
+
+ print(f"[{participant_name:9s}] t={t:6.4f} | u_coupling={u_interface:8.4f} | du_dx={du_dx_send:8.4f} | flux_across={flux_across_interface:8.4f}")
+
+ t = saved_t + dt
+ t_index += 1
+ solution_history[t_index] = u.copy()
+ participant.advance(dt)
+
+ elif participant_name == "Neumann":
+ while participant.is_coupling_ongoing():
+ if participant.requires_writing_checkpoint():
+ # print(f"[Neumann] Writing checkpoint at t={t:.4f}")
+ saved_u = u.copy()
+ saved_t = t
+ saved_t_index = t_index
+ if participant.requires_reading_checkpoint():
+ u = saved_u.copy()
+ t = saved_t
+ t_index = saved_t_index
+ # print(f"[Neumann] Reading checkpoint at t={t:.4f}")
+
+ du_dx_recv = participant.read_data(mesh_name, read_data_name, vertex_id, dt)[0]
+
+ t_end = t + dt
+ wrapper = BoundaryWrapper(dx, C_viscosity, "Neumann", du_dx_recv=du_dx_recv)
+ # sol = solve_ivp(wrapper.rhs, (t, t_end), u, method='BDF', t_eval=[t_end], jac=wrapper.jac) # BDF higher order implicit timestepping
+ # u = sol.y[:, -1]
+ # u = u + dt * burgers_rhs(t, u, dx, C_viscosity, wrapper.bc_left(u), wrapper.bc_right(u)) # Explicit Euler
+
+ sol = root(wrapper.rhs_residual, u, args=(u, dt), jac=wrapper.jac_residual, method='hybr')
+ u = sol.x
+
+ bc_left = wrapper.bc_left(u)
+ flux_across_interface = flux_function(bc_left, u[0])
+ du_dx = (u[0] - bc_left) / dx
+ u_interface = (bc_left + u[0]) / 2
+
+ participant.write_data(mesh_name, write_data_name, vertex_id, [u[0]])
+
+ print(f"[{participant_name:9s}] t={t:6.4f} | u_coupling={u_interface:8.4f} | du_dx={du_dx:8.4f} | flux_across={flux_across_interface:8.4f}")
+
+ t = saved_t + dt
+ t_index += 1
+ solution_history[t_index] = u.copy()
+ participant.advance(dt)
+
+ if participant_name is not None:
+ # Finalize and save data to npz array
+ participant.finalize()
+ output_filename = os.path.join(run_dir, f"{participant_name.lower()}.npz")
+ else:
+ if savefile_path:
+ output_filename = savefile_path
+ else:
+ output_filename = os.path.join(script_dir, "full_domain.npz")
+ print("Starting monolithic simulation without preCICE")
+ bc_left, bc_right = 0, 0
+
+ while t < t_final:
+
+ print(f"[Monolithic ] t={t:6.4f}")
+ t_end = t + dt
+ wrapper = BoundaryWrapper(dx, C_viscosity, "None")
+ # sol = solve_ivp(wrapper.rhs, (t, t_end), u, method='BDF', t_eval=[t_end], jac=wrapper.jac) # BDF higher order implicit timestepping
+ # u = sol.y[:, -1]
+ # u = u + dt * burgers_rhs(t, u, dx, C_viscosity, wrapper.bc_left(u), wrapper.bc_right(u)) # Explicit Euler
+
+ sol = root(wrapper.rhs_residual, u, args=(u, dt), jac=wrapper.jac_residual, method='hybr')
+ u = sol.x
+
+ t = t + dt
+ t_index += 1
+ solution_history[t_index] = u.copy()
+
+ if not aborted:
+
+ cell_centers_x = np.linspace(local_domain_min + dx/2, local_domain_max - dx/2, nelems_local)
+ internal_coords = np.array([cell_centers_x, np.zeros(nelems_local)]).T
+
+ sorted_times_index = sorted(solution_history.keys())
+ final_solution = np.array([solution_history[t_index] for t_index in sorted_times_index])
+
+ np.savez(
+ output_filename,
+ internal_coordinates=internal_coords,
+ **{"Solver-Mesh-1D-Internal": final_solution}
+ )
+ print(f"Results saved to {output_filename}")
+ else:
+ raise RuntimeError("Simulation aborted.")
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser()
+ parser.add_argument("participant", help="Name of the participant", choices=['Dirichlet', 'Neumann', 'None'])
+ parser.add_argument("--savefile", help="Path to save the output npz file")
+ args = parser.parse_args()
+
+
+ main(args.participant, savefile_path=args.savefile)
\ No newline at end of file
diff --git a/partitioned-burgers-1d/visualize_partitioned_domain.py b/partitioned-burgers-1d/visualize_partitioned_domain.py
new file mode 100644
index 000000000..32e4c37c8
--- /dev/null
+++ b/partitioned-burgers-1d/visualize_partitioned_domain.py
@@ -0,0 +1,141 @@
+import numpy as np
+import matplotlib.pyplot as plt
+import os
+import argparse
+
+parser = argparse.ArgumentParser()
+parser.add_argument('TIMESTEP_TO_PLOT', nargs='?', type=int, default=10, help="Timestep to plot, default is 10.")
+parser.add_argument("--neumann", default="neumann-scipy/neumann.npz", help="Path to the neumann participant's data file relative to the case directory.")
+args = parser.parse_args()
+TIMESTEP_TO_PLOT = args.TIMESTEP_TO_PLOT
+
+CASE_DIR = os.path.dirname(os.path.abspath(__file__))
+IMAGES_DIR = os.path.join(CASE_DIR, "images")
+DIRICHLET_DATA_PATH = os.path.join(CASE_DIR, "dirichlet-scipy", "dirichlet.npz")
+NEUMANN_DATA_PATH = os.path.join(CASE_DIR, args.neumann)
+
+MONOLITHIC_DATA_PATH = os.path.join(CASE_DIR, "solver-scipy", "full_domain.npz")
+if os.path.exists(MONOLITHIC_DATA_PATH):
+ gt_exists = True
+else:
+ print(f"Monolithic data not found at {MONOLITHIC_DATA_PATH}.\nPlease run python3 solver-scipy/solver.py None.")
+ gt_exists = False
+
+print(f"Loading data from {DIRICHLET_DATA_PATH}")
+data_d = np.load(DIRICHLET_DATA_PATH)
+coords_d = data_d['internal_coordinates']
+solution_d = data_d['Solver-Mesh-1D-Internal']
+
+print(f"Loading data from {NEUMANN_DATA_PATH}")
+data_n = np.load(NEUMANN_DATA_PATH)
+coords_n = data_n['internal_coordinates']
+solution_n = data_n['Solver-Mesh-1D-Internal']
+
+full_coords = np.concatenate((coords_d[:, 0], coords_n[:, 0]))
+full_solution_history = np.concatenate((solution_d, solution_n), axis=1)
+
+print(f"Full domain shape: {full_solution_history.shape}")
+
+if gt_exists:
+ print(f"Loading Monolithic data from {MONOLITHIC_DATA_PATH}")
+ data_gt = np.load(MONOLITHIC_DATA_PATH)
+ coords_gt = data_gt['internal_coordinates']
+ solution_gt = data_gt['Solver-Mesh-1D-Internal']
+
+ print(f"Monolithic shape: {solution_gt.shape}")
+
+# --- plot single timestep ---
+plt.figure(figsize=(10, 5), dpi=300)
+plt.plot(full_coords, full_solution_history[TIMESTEP_TO_PLOT, :], marker='.', markersize=8, linestyle='-', label=f'Partitioned Solution\n{args.neumann.split("/")[0]}')
+
+if gt_exists:
+ plt.plot(coords_gt[:, 0], solution_gt[TIMESTEP_TO_PLOT, :], marker='+', linestyle=':', c="crimson", alpha=1, label='Monolithic Solution')
+plt.title(f'Solution at Timestep {TIMESTEP_TO_PLOT}')
+plt.xlabel('Spatial Coordinate (x)')
+plt.ylabel('Solution Value (u)')
+plt.grid(True)
+
+u_max = np.max(full_solution_history[TIMESTEP_TO_PLOT, :])
+u_min = np.min(full_solution_history[TIMESTEP_TO_PLOT, :])
+u_interface = full_solution_history[TIMESTEP_TO_PLOT, full_coords.searchsorted(1.0)]
+u_offset = (u_max-u_min)*0.125
+plt.vlines(x=1, ymin=u_min-u_offset, ymax=u_interface-u_offset, color='gray', linestyle='--', label='Interface')
+plt.vlines(x=1, ymin=u_interface+u_offset, ymax=u_max+u_offset*2, color='gray', linestyle='--')
+
+plt.legend(loc='upper left')
+plt.savefig(os.path.join(IMAGES_DIR, f'full_domain_timestep_slice.png'))
+print(f"Saved plot to images/full_domain_timestep_slice.png")
+
+if gt_exists:
+ # residual
+ residual = full_solution_history[TIMESTEP_TO_PLOT, :] - solution_gt[TIMESTEP_TO_PLOT, :]
+ mse = np.mean(np.square(residual))
+ mse_gt_vs_zero = np.mean(np.square(solution_gt[TIMESTEP_TO_PLOT, :]))
+ relative_mse = mse / mse_gt_vs_zero if mse_gt_vs_zero > 1e-9 else 0.0
+
+ nelems_total = solution_gt.shape[1]
+ interface_idx = nelems_total // 2 - 1
+ dx = coords_gt[1, 0] - coords_gt[0, 0]
+
+ # t = 0
+ u0_gt = solution_gt[0, :]
+ val_at_interface_t0 = (u0_gt[interface_idx] + u0_gt[interface_idx + 1]) / 2.0
+ grad_at_interface_t0 = (u0_gt[interface_idx + 1] - u0_gt[interface_idx]) / dx
+
+ # t = TIMESTEP_TO_PLOT
+ u_plot_gt = solution_gt[TIMESTEP_TO_PLOT, :]
+ val_at_interface_plot = (u_plot_gt[interface_idx] + u_plot_gt[interface_idx + 1]) / 2.0
+ grad_at_interface_plot = (u_plot_gt[interface_idx + 1] - u_plot_gt[interface_idx]) / dx
+
+ print("---")
+ print("Monolithic solution u at interface:")
+ print(f" t=0: u = {val_at_interface_t0:8.4f}, du/dx = {grad_at_interface_t0:8.4f}")
+ print(f" t={TIMESTEP_TO_PLOT}: u = {val_at_interface_plot:8.4f}, du/dx = {grad_at_interface_plot:8.4f}")
+ print()
+ print(f"Residual at t={TIMESTEP_TO_PLOT}:")
+ print(f" Mean Squared Error (MSE): {mse:10.6e}")
+ print(f" Relative MSE: {relative_mse:10.6e}")
+ print("---")
+
+# --- plot gradient at single timestep ---
+solution_slice = full_solution_history[TIMESTEP_TO_PLOT, :]
+du_dx = np.gradient(solution_slice, full_coords)
+
+plt.figure(figsize=(10, 5), dpi=300)
+plt.plot(full_coords, du_dx, marker='.', markersize=8, linestyle='-', label=f'Partitioned Solution\n{args.neumann.split("/")[0]}')
+
+if gt_exists:
+ solution_gt_slice = solution_gt[TIMESTEP_TO_PLOT, :]
+ du_dx_gt = np.gradient(solution_gt_slice, coords_gt[:, 0])
+ plt.plot(coords_gt[:, 0], du_dx_gt, marker='+', linestyle=':', c="crimson", alpha=1, label='Monolithic Solution')
+
+plt.title(f'Gradient (du/dx) at Timestep {TIMESTEP_TO_PLOT}')
+plt.xlabel('Spatial Coordinate (x)')
+plt.ylabel('Gradient Value (du/dx)')
+plt.grid(True)
+
+u_max = np.max(du_dx)
+u_min = np.min(du_dx)
+u_interface = du_dx[full_coords.searchsorted(1.0)]
+u_offset = (u_max-u_min)*0.125
+plt.vlines(x=1, ymin=u_min-u_offset, ymax=u_interface-u_offset, color='gray', linestyle='--', label='Interface')
+plt.vlines(x=1, ymin=u_interface+u_offset, ymax=u_max+u_offset, color='gray', linestyle='--')
+
+plt.legend(loc='upper left')
+plt.savefig(os.path.join(IMAGES_DIR, f'gradient_timestep_slice.png'))
+print(f"Saved plot to images/gradient_timestep_slice.png")
+plt.close()
+
+# --- plot time evolution ---
+plt.figure(figsize=(10, 6))
+plt.imshow(full_solution_history.T, aspect='auto', cmap='viridis', origin='lower',
+ extent=[0, full_solution_history.shape[0], full_coords.min(), full_coords.max()])
+plt.colorbar(label='Solution Value (u)')
+plt.title('Time Evolution of Partitioned Burgers eq.')
+plt.xlabel('Timestep')
+plt.ylabel('Spatial Coordinate (x)')
+plt.xticks(np.arange(0, full_solution_history.shape[0], step=max(1, full_solution_history.shape[0]//10)))
+plt.tight_layout()
+plt.savefig(os.path.join(IMAGES_DIR, 'full_domain_evolution.png'))
+print("Saved plot to images/full_domain_evolution.png")
+plt.close()
\ No newline at end of file