diff --git a/doc/source/user_guide/tutorials/index.rst b/doc/source/user_guide/tutorials/index.rst
index e600bb8ca6f..91a8a086aa0 100644
--- a/doc/source/user_guide/tutorials/index.rst
+++ b/doc/source/user_guide/tutorials/index.rst
@@ -104,6 +104,13 @@ of basic features and concepts so you can start coding right away.
 
        Learn how to perform mathematical operations on data structures.
 
+    .. grid-item-card:: Parallelization
+       :link: ref_tutorials_parallelization
+       :link-type: ref
+       :text-align: center
+
+       Learn how to accelerate DPF workflows using multithreading and operator configuration options.
+
     .. grid-item-card:: Custom Python operator and plugin
        :link: ref_tutorials_custom_operators_and_plugins
        :link-type: ref
@@ -142,4 +149,24 @@ of basic features and concepts so you can start coding right away.
 
        Learn how to manage licensing in PyDPF-Core.
        +++
-       Coming soon
\ No newline at end of file
+       Coming soon
+
+.. toctree::
+    :maxdepth: 2
+    :hidden:
+
+    parallelization/index.rst
+    collections_tutorial.rst
+    data_structures/index.rst
+    post_processing_basics/index.rst
+    import_data/index.rst
+    export_data/index.rst
+    mesh/index.rst
+    plot/index.rst
+    operators_and_workflows/index.rst
+    mathematics/index.rst
+    distributed_files/index.rst
+    dpf_server/index.rst
+    custom_operators_and_plugins/index.rst
+    licensing/index.rst
+    animate/index.rst
diff --git a/doc/source/user_guide/tutorials/parallelization/index.rst b/doc/source/user_guide/tutorials/parallelization/index.rst
new file mode 100644
index 00000000000..84eab5662d2
--- /dev/null
+++ b/doc/source/user_guide/tutorials/parallelization/index.rst
@@ -0,0 +1,26 @@
+.. _ref_tutorials_parallelization:
+
+======================
+Parallelization in DPF
+======================
+
+This section covers how to leverage parallelization features in PyDPF-Core to accelerate computations. You will learn about multithreading, the ``num_threads`` and ``mutex`` operator configuration options, and best practices for parallel workflows.
+
+.. grid:: 1 1 2 2
+    :gutter: 2
+    :padding: 2
+    :margin: 2
+
+    .. grid-item-card:: Multithreading with Operator Configurations
+       :link: ref_tutorial_multithreading
+       :link-type: ref
+       :text-align: center
+
+       Learn how to use the ``num_threads`` and ``mutex`` configuration options to control operator parallelism.
+
+.. toctree::
+    :maxdepth: 2
+    :hidden:
+
+    multithreading.rst
+
diff --git a/doc/source/user_guide/tutorials/parallelization/multithreading.rst b/doc/source/user_guide/tutorials/parallelization/multithreading.rst
new file mode 100644
index 00000000000..73e1548c880
--- /dev/null
+++ b/doc/source/user_guide/tutorials/parallelization/multithreading.rst
@@ -0,0 +1,125 @@
+.. _ref_tutorial_multithreading:
+
+============================================
+Multithreading with Operator Configurations
+============================================
+
+This tutorial demonstrates how to use the ``num_threads`` and ``mutex`` configuration options of DPF operators to control parallel execution and thread safety.
+
+Overview
+--------
+
+Many DPF operators support parallel execution to speed up computations.
+The ``num_threads`` configuration option allows you to specify the number of threads used by an operator.
+The ``mutex`` option can be used to ensure thread safety when required.
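+
+The short sketch below is an added illustration: it retrieves the default configuration of the ``norm_fc`` operator used throughout this tutorial, which is the object on which ``num_threads`` and ``mutex`` are set (it assumes a running or locally started DPF server).
+
+.. jupyter-execute::
+
+    from ansys.dpf.core import operators as op
+
+    # Retrieve and display the default configuration of the norm_fc operator.
+    config = op.math.norm_fc.default_config()
+    print(config)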
+
+Setting Up
+----------
+
+.. jupyter-execute::
+
+    from ansys.dpf import core as dpf
+    from ansys.dpf.core import operators as op
+    from ansys.dpf.core import examples
+    import numpy as np
+
+    # Load a result file and create a Model
+    model = dpf.Model(examples.find_simple_bar())
+    fields_container = model.results.displacement.on_all_time_freqs.eval()
+
+Using num_threads
+-----------------
+
+You can control the number of threads used by an operator by setting the ``num_threads`` option in its configuration.
+
+Below, we compare the execution time of the norm operator with its default configuration and with ``num_threads`` set to 2.
+
+.. jupyter-execute::
+
+    import time
+
+    # Run with the default configuration
+    norm_op_default = op.math.norm_fc()
+    norm_op_default.inputs.fields_container.connect(fields_container)
+    start = time.time()
+    result_fc_default = norm_op_default.outputs.fields_container()
+    elapsed_default = time.time() - start
+    print(f"Norm (default config): {len(result_fc_default)} fields, time: {elapsed_default:.4f} s")
+
+    # Run with num_threads=2
+    config = op.math.norm_fc.default_config()
+    config.set_config_option("num_threads", 2)
+    norm_op_mt = op.math.norm_fc(config=config)
+    norm_op_mt.inputs.fields_container.connect(fields_container)
+    start = time.time()
+    result_fc_mt = norm_op_mt.outputs.fields_container()
+    elapsed_mt = time.time() - start
+    print(f"Norm (num_threads=2): {len(result_fc_mt)} fields, time: {elapsed_mt:.4f} s")
+
+    print(f"Speedup: {elapsed_default/elapsed_mt:.2f}x (if >1, multithreading is faster)")
+
+Using mutex for Thread Safety
+-----------------------------
+
+The ``mutex`` option can be set to ``True`` to ensure that the operator executes in a thread-safe manner. This is useful if you are running multiple operators in parallel and want to avoid race conditions.
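+
+Enabling the option only takes one call on the operator configuration. The short sketch below is an added illustration that reuses the ``fields_container`` created above (the ``config_safe`` and ``norm_op_safe`` names are arbitrary):
+
+.. jupyter-execute::
+
+    # Build a configuration with mutex enabled and pass it to a new norm operator.
+    config_safe = op.math.norm_fc.default_config()
+    config_safe.set_config_option("mutex", True)
+    norm_op_safe = op.math.norm_fc(config=config_safe)
+    norm_op_safe.inputs.fields_container.connect(fields_container)
+    print(f"Thread-safe norm computed {len(norm_op_safe.outputs.fields_container())} fields")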
+
+Below, we demonstrate a potential race condition by running two norm operators in parallel threads,
+first without mutex (which may cause inconsistent results), and then with mutex enabled (which ensures thread safety).
+
+.. jupyter-execute::
+
+    import threading
+
+    # Run a norm operator with the given configuration and store a copy
+    # of the first field's data in the shared results list.
+    def run_norm_op(fc, config, results, idx):
+        norm_op = op.math.norm_fc(config=config)
+        norm_op.inputs.fields_container.connect(fc)
+        results[idx] = norm_op.outputs.fields_container()[0].data.copy()
+
+    # Prepare results containers
+    results_no_mutex = [None, None]
+    results_mutex = [None, None]
+
+    # Run two norm operators in parallel WITHOUT mutex
+    config_no_mutex = op.math.norm_fc.default_config()
+    config_no_mutex.set_config_option("num_threads", 2)
+    config_no_mutex.set_config_option("mutex", False)
+    threads = [
+        threading.Thread(target=run_norm_op, args=(fields_container, config_no_mutex, results_no_mutex, 0)),
+        threading.Thread(target=run_norm_op, args=(fields_container, config_no_mutex, results_no_mutex, 1)),
+    ]
+    for t in threads:
+        t.start()
+    for t in threads:
+        t.join()
+    print("Results without mutex:")
+    print("Thread 0 result:", results_no_mutex[0])
+    print("Thread 1 result:", results_no_mutex[1])
+    print("Equal results?", np.allclose(results_no_mutex[0], results_no_mutex[1]))
+
+    # Run two norm operators in parallel WITH mutex
+    config_mutex = op.math.norm_fc.default_config()
+    config_mutex.set_config_option("num_threads", 2)
+    config_mutex.set_config_option("mutex", True)
+    threads = [
+        threading.Thread(target=run_norm_op, args=(fields_container, config_mutex, results_mutex, 0)),
+        threading.Thread(target=run_norm_op, args=(fields_container, config_mutex, results_mutex, 1)),
+    ]
+    for t in threads:
+        t.start()
+    for t in threads:
+        t.join()
+    print("\nResults with mutex:")
+    print("Thread 0 result:", results_mutex[0])
+    print("Thread 1 result:", results_mutex[1])
+    print("Equal results?", np.allclose(results_mutex[0], results_mutex[1]))
+
+    # Note: Without mutex, results may differ due to race conditions. With mutex, results should always match.
+
+Summary
+-------
+
+- Use ``num_threads`` to control the number of threads for operator execution.
+- Use ``mutex`` to ensure thread safety when running operators in parallel.
+- These options can help you optimize performance and reliability in parallel DPF workflows; a combined configuration sketch follows below.
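+
+Putting it together, the following sketch recaps the calls used in this tutorial by setting both options on a single configuration before evaluating the operator (the ``combined_config`` and ``norm_op_combined`` names are arbitrary, and the actual benefit depends on your data and machine):
+
+.. jupyter-execute::
+
+    # Configure the norm operator to use two threads and to run in a thread-safe manner.
+    combined_config = op.math.norm_fc.default_config()
+    combined_config.set_config_option("num_threads", 2)
+    combined_config.set_config_option("mutex", True)
+
+    norm_op_combined = op.math.norm_fc(config=combined_config)
+    norm_op_combined.inputs.fields_container.connect(fields_container)
+    print(norm_op_combined.outputs.fields_container())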